Diffstat (limited to 'drivers')
684 files changed, 6477 insertions, 4037 deletions
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index fc6c416f8724..d5999eb41c00 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -180,8 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
 { "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
 { "BRCM900D", APD_ADDR(vulcan_spi_desc) },
 { "CAV900D", APD_ADDR(vulcan_spi_desc) },
-{ "HISI0A21", APD_ADDR(hip07_i2c_desc) },
-{ "HISI0A22", APD_ADDR(hip08_i2c_desc) },
+{ "HISI02A1", APD_ADDR(hip07_i2c_desc) },
+{ "HISI02A2", APD_ADDR(hip08_i2c_desc) },
 #endif
 { }
 };
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index e51a1e98e62f..f88caf5aab76 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -85,6 +85,7 @@ static const struct lpss_device_desc lpss_dma_desc = {
 };
 
 struct lpss_private_data {
+struct acpi_device *adev;
 void __iomem *mmio_base;
 resource_size_t mmio_size;
 unsigned int fixed_clk_rate;
@@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[] = {
 
 static void byt_pwm_setup(struct lpss_private_data *pdata)
 {
+struct acpi_device *adev = pdata->adev;
+
+/* Only call pwm_add_table for the first PWM controller */
+if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+return;
+
 if (!acpi_dev_present("INT33FD", NULL, -1))
 pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
 }
@@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
 
 static void bsw_pwm_setup(struct lpss_private_data *pdata)
 {
+struct acpi_device *adev = pdata->adev;
+
+/* Only call pwm_add_table for the first PWM controller */
+if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+return;
+
 pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
 }
 
@@ -456,6 +469,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 goto err_out;
 }
 
+pdata->adev = adev;
 pdata->dev_desc = dev_desc;
 
 if (dev_desc->setup)
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 8c4e0a18460a..bf22c29d2517 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -86,7 +86,12 @@ void __init acpi_watchdog_init(void)
 
 found = false;
 resource_list_for_each_entry(rentry, &resource_list) {
-if (resource_contains(rentry->res, &res)) {
+if (rentry->res->flags == res.flags &&
+resource_overlaps(rentry->res, &res)) {
+if (res.start < rentry->res->start)
+rentry->res->start = res.start;
+if (res.end > rentry->res->end)
+rentry->res->end = res.end;
 found = true;
 break;
 }
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 538c61677c10..783f4c838aee 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -100,9 +100,13 @@ acpi_evaluate_object_typed(acpi_handle handle,
 free_buffer_on_error = TRUE;
 }
 
-status = acpi_get_handle(handle, pathname, &target_handle);
-if (ACPI_FAILURE(status)) {
-return_ACPI_STATUS(status);
+if (pathname) {
+status = acpi_get_handle(handle, pathname, &target_handle);
+if (ACPI_FAILURE(status)) {
+return_ACPI_STATUS(status);
+}
+} else {
+target_handle = handle;
 }
 
 full_pathname = acpi_ns_get_external_pathname(target_handle);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ddb01e9fa5b2..ae3d6d152633 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -151,6 +151,10 @@ static bool ec_freeze_events __read_mostly = false;
 module_param(ec_freeze_events, bool, 0644);
 MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
 
+static bool ec_no_wakeup __read_mostly;
+module_param(ec_no_wakeup, bool, 0644);
+MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
+
 struct acpi_ec_query_handler {
 struct list_head node;
 acpi_ec_query_func func;
@@ -535,6 +539,14 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
 spin_unlock_irqrestore(&ec->lock, flags);
 __acpi_ec_flush_event(ec);
 }
+
+void acpi_ec_flush_work(void)
+{
+if (first_ec)
+__acpi_ec_flush_event(first_ec);
+
+flush_scheduled_work();
+}
 #endif /* CONFIG_PM_SLEEP */
 
 static bool acpi_ec_guard_event(struct acpi_ec *ec)
@@ -1729,7 +1741,7 @@ error:
 * functioning ECDT EC first in order to handle the events.
 * https://bugzilla.kernel.org/show_bug.cgi?id=115021
 */
-int __init acpi_ec_ecdt_start(void)
+static int __init acpi_ec_ecdt_start(void)
 {
 acpi_handle handle;
 
@@ -1880,6 +1892,32 @@ static int acpi_ec_suspend(struct device *dev)
 return 0;
 }
 
+static int acpi_ec_suspend_noirq(struct device *dev)
+{
+struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+/*
+* The SCI handler doesn't run at this point, so the GPE can be
+* masked at the low level without side effects.
+*/
+if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ec->reference_count >= 1)
+acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
+
+return 0;
+}
+
+static int acpi_ec_resume_noirq(struct device *dev)
+{
+struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ec->reference_count >= 1)
+acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
+
+return 0;
+}
+
 static int acpi_ec_resume(struct device *dev)
 {
 struct acpi_ec *ec =
@@ -1891,6 +1929,7 @@ static int acpi_ec_resume(struct device *dev)
 #endif
 
 static const struct dev_pm_ops acpi_ec_pm = {
+SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
 SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
 };
 
@@ -1964,20 +2003,17 @@ static inline void acpi_ec_query_exit(void)
 int __init acpi_ec_init(void)
 {
 int result;
+int ecdt_fail, dsdt_fail;
 
 /* register workqueue for _Qxx evaluations */
 result = acpi_ec_query_init();
 if (result)
-goto err_exit;
-/* Now register the driver for the EC */
-result = acpi_bus_register_driver(&acpi_ec_driver);
-if (result)
-goto err_exit;
+return result;
 
-err_exit:
-if (result)
-acpi_ec_query_exit();
-return result;
+/* Drivers must be started after acpi_ec_query_init() */
+ecdt_fail = acpi_ec_ecdt_start();
+dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+return ecdt_fail && dsdt_fail ? -ENODEV : 0;
 }
 
 /* EC driver currently not unloadable */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9531d3276f65..3f5af4d7a739 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
 int acpi_ec_dsdt_probe(void);
-int acpi_ec_ecdt_start(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
@@ -193,6 +192,10 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
 void *data);
 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
 
+#ifdef CONFIG_PM_SLEEP
+void acpi_ec_flush_work(void);
+#endif
+
 
 /*--------------------------------------------------------------------------
 Suspend/Resume
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index edb0c79f7c64..917f1cc0fda4 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -443,7 +443,7 @@ int __init acpi_numa_init(void)
 * So go over all cpu entries in SRAT to get apicid to node mapping.
 */
 
-/* SRAT: Static Resource Affinity Table */
+/* SRAT: System Resource Affinity Table */
 if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
 struct acpi_subtable_proc srat_proc[3];
 
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 917c789f953d..476a52c60cf3 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1047,7 +1047,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
 fwnode_for_each_child_node(fwnode, child) {
 u32 nr;
 
-if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
+if (fwnode_property_read_u32(child, prop_name, &nr))
 continue;
 
 if (val == nr)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 33897298f03e..70fd5502c284 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2084,7 +2084,6 @@ int __init acpi_scan_init(void)
 
 acpi_gpe_apply_masked_gpes();
 acpi_update_all_gpes();
-acpi_ec_ecdt_start();
 
 acpi_scan_initialized = true;
 
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index be17664736b2..fa8243c5c062 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -777,11 +777,11 @@ static void acpi_freeze_sync(void)
 /*
 * Process all pending events in case there are any wakeup ones.
 *
-* The EC driver uses the system workqueue, so that one needs to be
-* flushed too.
+* The EC driver uses the system workqueue and an additional special
+* one, so those need to be flushed too.
 */
+acpi_ec_flush_work();
 acpi_os_wait_events_complete();
-flush_scheduled_work();
 s2idle_wakeup = false;
 }
 
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 4ac3e06b41d8..98aa8c808a33 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -17,6 +17,16 @@
 #include <linux/serial_core.h>
 
 /*
+* Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
+* occasionally getting stuck as 1. To avoid the potential for a hang, check
+* TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
+* implementations, so only do so if an affected platform is detected in
+* parse_spcr().
+*/
+bool qdf2400_e44_present;
+EXPORT_SYMBOL(qdf2400_e44_present);
+
+/*
 * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
 * Detect them by examining the OEM fields in the SPCR header, similiar to PCI
 * quirk detection in pci_mcfg.c.
@@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon)
 goto done;
 }
 
-if (qdf2400_erratum_44_present(&table->header))
-uart = "qdf2400_e44";
+/*
+* If the E44 erratum is required, then we need to tell the pl011
+* driver to implement the work-around.
+*
+* The global variable is used by the probe function when it
+* creates the UARTs, whether or not they're used as a console.
+*
+* If the user specifies "traditional" earlycon, the qdf2400_e44
+* console name matches the EARLYCON_DECLARE() statement, and
+* SPCR is not used. Parameter "earlycon" is false.
+*
+* If the user specifies "SPCR" earlycon, then we need to update
+* the console name so that it also says "qdf2400_e44". Parameter
+* "earlycon" is true.
+*
+* For consistency, if we change the console name, then we do it
+* for everyone, not just earlycon.
+*/
+if (qdf2400_erratum_44_present(&table->header)) {
+qdf2400_e44_present = true;
+if (earlycon)
+uart = "qdf2400_e44";
+}
+
 if (xgene_8250_erratum_present(table))
 iotype = "mmio32";
 
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f7665c31feca..831cdd7d197d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 const char *failure_string;
 struct binder_buffer *buffer;
 
-if (proc->tsk != current)
+if (proc->tsk != current->group_leader)
 return -EINVAL;
 
 if ((vma->vm_end - vma->vm_start) > SZ_4M)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 948fc86980a1..363fc5330c21 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -215,7 +215,7 @@ config SATA_FSL
 
 config SATA_GEMINI
 tristate "Gemini SATA bridge support"
-depends on PATA_FTIDE010
+depends on ARCH_GEMINI || COMPILE_TEST
 default ARCH_GEMINI
 help
 This enabled support for the FTIDE010 to SATA bridge
@@ -613,7 +613,7 @@ config PATA_FTIDE010
 tristate "Faraday Technology FTIDE010 PATA support"
 depends on OF
 depends on ARM
-default ARCH_GEMINI
+depends on SATA_GEMINI
 help
 This option enables support for the Faraday FTIDE010
 PATA controller found in the Cortina Gemini SoCs.
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 1a50cd3b4233..9b34dff64536 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -216,12 +216,16 @@ static int ahci_da850_probe(struct platform_device *pdev)
 return rc;
 
 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-if (!res)
+if (!res) {
+rc = -ENODEV;
 goto disable_resources;
+}
 
 pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
-if (!pwrdn_reg)
+if (!pwrdn_reg) {
+rc = -ENOMEM;
 goto disable_resources;
+}
 
 da850_sata_init(dev, pwrdn_reg, hpriv->mmio, mpy);
 
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8453f9a4682f..1945a8ea2099 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2083,7 +2083,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
 retry:
 ata_tf_init(dev, &tf);
 if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
-!(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
+!(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
 tf.command = ATA_CMD_READ_LOG_DMA_EXT;
 tf.protocol = ATA_PROT_DMA;
 dma = true;
@@ -2102,8 +2102,8 @@ retry:
 buf, sectors * ATA_SECT_SIZE, 0);
 
 if (err_mask && dma) {
-dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
-ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
+dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
+ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
 goto retry;
 }
 
@@ -2411,6 +2411,9 @@ static void ata_dev_config_trusted(struct ata_device *dev)
 u64 trusted_cap;
 unsigned int err;
 
+if (!ata_id_has_trusted(dev->id))
+return;
+
 if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
 ata_dev_warn(dev,
 "Security Log not supported\n");
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b70bcf6d2914..3dbd05532c09 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1434,7 +1434,7 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
 
 /**
 * ata_eh_done - EH action complete
-* @ap: target ATA port
+* @link: ATA link for which EH actions are complete
 * @dev: target ATA dev for per-dev action (can be NULL)
 * @action: action just completed
 *
@@ -1576,7 +1576,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
 
 /**
 * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
-* @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
+* @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
 * @cmd: scsi command for which the sense code should be set
 *
 * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
@@ -4175,7 +4175,6 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
 struct ata_link *link;
 struct ata_device *dev;
 unsigned long flags;
-int rc = 0;
 
 /* are we resuming? */
 spin_lock_irqsave(ap->lock, flags);
@@ -4202,7 +4201,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
 ata_acpi_set_state(ap, ap->pm_mesg);
 
 if (ap->ops->port_resume)
-rc = ap->ops->port_resume(ap);
+ap->ops->port_resume(ap);
 
 /* tell ACPI that we're resuming */
 ata_acpi_on_resume(ap);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d462c5a3a7ef..44ba292f2cd7 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3030,10 +3030,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
 {
 if (!sata_pmp_attached(ap)) {
-if (likely(devno < ata_link_max_devices(&ap->link)))
+if (likely(devno >= 0 &&
+devno < ata_link_max_devices(&ap->link)))
 return &ap->link.device[devno];
 } else {
-if (likely(devno < ap->nr_pmp_links))
+if (likely(devno >= 0 &&
+devno < ap->nr_pmp_links))
 return &ap->pmp_link[devno].device[0];
 }
 
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index ee9844758736..537d11869069 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -858,6 +858,14 @@ static const struct of_device_id sata_rcar_match[] = {
 .compatible = "renesas,sata-r8a7795",
 .data = (void *)RCAR_GEN2_SATA
 },
+{
+.compatible = "renesas,rcar-gen2-sata",
+.data = (void *)RCAR_GEN2_SATA
+},
+{
+.compatible = "renesas,rcar-gen3-sata",
+.data = (void *)RCAR_GEN2_SATA
+},
 { },
 };
 MODULE_DEVICE_TABLE(of, sata_rcar_match);
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 2ae24c28e70c..1c152aed6b82 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
 {
 if (dev && dev->dma_mem)
 return dev->dma_mem;
-return dma_coherent_default_memory;
+return NULL;
 }
 
 static inline dma_addr_t dma_get_device_base(struct device *dev,
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 }
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
-/**
-* dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
-*
-* @dev: device from which we allocate memory
-* @size: size of requested memory area
-* @dma_handle: This will be filled with the correct dma handle
-* @ret: This pointer will be filled with the virtual address
-* to allocated area.
-*
-* This function should be only called from per-arch dma_alloc_coherent()
-* to support allocation from per-device coherent memory pools.
-*
-* Returns 0 if dma_alloc_coherent should continue with allocating from
-* generic memory areas, or !0 if dma_alloc_coherent should return @ret.
-*/
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
-dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+ssize_t size, dma_addr_t *dma_handle)
 {
-struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 int order = get_order(size);
 unsigned long flags;
 int pageno;
 int dma_memory_map;
+void *ret;
 
-if (!mem)
-return 0;
-
-*ret = NULL;
 spin_lock_irqsave(&mem->spinlock, flags);
 
 if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 goto err;
 
 /*
-* Memory was found in the per-device area.
+* Memory was found in the coherent area.
 */
-*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
-*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ret = mem->virt_base + (pageno << PAGE_SHIFT);
 dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
 spin_unlock_irqrestore(&mem->spinlock, flags);
 if (dma_memory_map)
-memset(*ret, 0, size);
+memset(ret, 0, size);
 else
-memset_io(*ret, 0, size);
+memset_io(ret, 0, size);
 
-return 1;
+return ret;
 
 err:
 spin_unlock_irqrestore(&mem->spinlock, flags);
+return NULL;
+}
+
+/**
+* dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+* @dev: device from which we allocate memory
+* @size: size of requested memory area
+* @dma_handle: This will be filled with the correct dma handle
+* @ret: This pointer will be filled with the virtual address
+* to allocated area.
+*
+* This function should be only called from per-arch dma_alloc_coherent()
+* to support allocation from per-device coherent memory pools.
+*
+* Returns 0 if dma_alloc_coherent should continue with allocating from
+* generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+*/
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+dma_addr_t *dma_handle, void **ret)
+{
+struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+if (!mem)
+return 0;
+
+*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+if (*ret)
+return 1;
+
 /*
 * In the case where the allocation can not be satisfied from the
 * per-device area, try to fall back to generic memory if the
@@ -225,25 +235,20 @@ err:
 */
 return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
-/**
-* dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
-* @dev: device from which the memory was allocated
-* @order: the order of pages allocated
-* @vaddr: virtual address of allocated pages
-*
-* This checks whether the memory was allocated from the per-device
-* coherent memory pool and if so, releases that memory.
-*
-* Returns 1 if we correctly released the memory, or 0 if
-* dma_release_coherent() should proceed with releasing memory from
-* generic pools.
-*/
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
 {
-struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+if (!dma_coherent_default_memory)
+return NULL;
+
+return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+dma_handle);
+}
 
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+int order, void *vaddr)
+{
 if (mem && vaddr >= mem->virt_base && vaddr <
 (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
 }
 return 0;
 }
-EXPORT_SYMBOL(dma_release_from_coherent);
 
 /**
-* dma_mmap_from_coherent() - try to mmap the memory allocated from
-* per-device coherent memory pool to userspace
+* dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev: device from which the memory was allocated
-* @vma: vm_area for the userspace memory
-* @vaddr: cpu address returned by dma_alloc_from_coherent
-* @size: size of the memory buffer allocated by dma_alloc_from_coherent
-* @ret: result from remap_pfn_range()
+* @order: the order of pages allocated
+* @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
-* coherent memory pool and if so, maps that memory to the provided vma.
+* coherent memory pool and if so, releases that memory.
 *
-* Returns 1 if we correctly mapped the memory, or 0 if the caller should
-* proceed with mapping memory from generic pools.
+* Returns 1 if we correctly released the memory, or 0 if the caller should
+* proceed with releasing memory from generic pools.
 */
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
-void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
 {
 struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 
+return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+if (!dma_coherent_default_memory)
+return 0;
+
+return __dma_release_from_coherent(dma_coherent_default_memory, order,
+vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
 if (mem && vaddr >= mem->virt_base && vaddr + size <=
 (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 unsigned long off = vma->vm_pgoff;
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
 }
 return 0;
 }
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+* dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+* @dev: device from which the memory was allocated
+* @vma: vm_area for the userspace memory
+* @vaddr: cpu address returned by dma_alloc_from_dev_coherent
+* @size: size of the memory buffer allocated
+* @ret: result from remap_pfn_range()
+*
+* This checks whether the memory was allocated from the per-device
+* coherent memory pool and if so, maps that memory to the provided vma.
+*
+* Returns 1 if we correctly mapped the memory, or 0 if the caller should
+* proceed with mapping memory from generic pools.
+*/
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+void *vaddr, size_t size, int *ret)
+{
+struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+size_t size, int *ret)
+{
+if (!dma_coherent_default_memory)
+return 0;
+
+return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+vaddr, size, ret);
+}
 
 /*
 * Support for reserved memory regions defined in device tree
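The dma-coherent rework above splits the old dma_alloc_from_coherent() helper into per-device and global variants. As a rough illustration of the calling convention (not taken from this patch), the sketch below shows how an architecture-level allocator might try the per-device pool first and only then fall back to a plain page allocation; the function name arch_dma_alloc_example() and the fallback path are assumptions for illustration only.

/*
 * Illustrative sketch only: arch_dma_alloc_example() is a hypothetical
 * caller of dma_alloc_from_dev_coherent(); the page-based fallback shown
 * here is a simplified assumption, not code from this patch.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io.h>

static void *arch_dma_alloc_example(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* A non-zero return means the per-device pool handled the request. */
	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* Otherwise fall back to a plain, zeroed page allocation. */
	ret = (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
	if (ret)
		*dma_handle = virt_to_phys(ret);
	return ret;
}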
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 5096755d185e..b555ff9dd8fc 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 
 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 return ret;
 
 if (off < count && user_count <= (count - off)) {
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b9f907eedbf7..bfbe1e154128 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -30,7 +30,6 @@
 #include <linux/syscore_ops.h>
 #include <linux/reboot.h>
 #include <linux/security.h>
-#include <linux/swait.h>
 
 #include <generated/utsrelease.h>
 
@@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void)
 * state of the firmware loading.
 */
 struct fw_state {
-struct swait_queue_head wq;
+struct completion completion;
 enum fw_status status;
 };
 
 static void fw_state_init(struct fw_state *fw_st)
 {
-init_swait_queue_head(&fw_st->wq);
+init_completion(&fw_st->completion);
 fw_st->status = FW_STATUS_UNKNOWN;
 }
 
@@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
 {
 long ret;
 
-ret = swait_event_interruptible_timeout(fw_st->wq,
-__fw_state_is_done(READ_ONCE(fw_st->status)),
-timeout);
+ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
 if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
 return -ENOENT;
 if (!ret)
@@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st,
 WRITE_ONCE(fw_st->status, status);
 
 if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
-swake_up(&fw_st->wq);
+complete_all(&fw_st->completion);
 }
 
 #define fw_state_start(fw_st) \
 __fw_state_set(fw_st, FW_STATUS_LOADING)
 #define fw_state_done(fw_st) \
 __fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_aborted(fw_st) \
+__fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait(fw_st) \
 __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
 
-#ifndef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_is_aborted(fw_st) false
-
-#else /* CONFIG_FW_LOADER_USER_HELPER */
-
 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
 {
 return fw_st->status == status;
 }
 
+#define fw_state_is_aborted(fw_st) \
+__fw_state_check(fw_st, FW_STATUS_ABORTED)
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+
 #define fw_state_aborted(fw_st) \
 __fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_is_done(fw_st) \
 __fw_state_check(fw_st, FW_STATUS_DONE)
 #define fw_state_is_loading(fw_st) \
 __fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_is_aborted(fw_st) \
-__fw_state_check(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait_timeout(fw_st, timeout) \
 __fw_state_wait_common(fw_st, timeout)
 
@@ -1200,6 +1196,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
 return 1; /* need to load */
 }
 
+/*
+* Batched requests need only one wake, we need to do this step last due to the
+* fallback mechanism. The buf is protected with kref_get(), and it won't be
+* released until the last user calls release_firmware().
+*
+* Failed batched requests are possible as well, in such cases we just share
+* the struct firmware_buf and won't release it until all requests are woken
+* and have gone through this same path.
+*/
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+struct firmware_buf *buf;
+
+/* Loaded directly? */
+if (!fw || !fw->priv)
+return;
+
+buf = fw->priv;
+if (!fw_state_is_aborted(&buf->fw_st))
+fw_state_aborted(&buf->fw_st);
+}
+
 /* called from request_firmware() and request_firmware_work_func() */
 static int
 _request_firmware(const struct firmware **firmware_p, const char *name,
@@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 
 out:
 if (ret < 0) {
+fw_abort_batch_reqs(fw);
 release_firmware(fw);
 fw = NULL;
 }
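The firmware_class change above replaces the swait-based wakeup with a plain struct completion. The sketch below is a minimal, hedged illustration of that completion pattern; the names example_state, example_init(), example_finish() and example_wait() are hypothetical and only mirror what fw_state_init(), __fw_state_set() and __fw_state_wait_common() do in the patch.

/*
 * Illustrative sketch only: the names below are hypothetical and show the
 * bare completion pattern that struct fw_state now relies on.
 */
#include <linux/completion.h>

struct example_state {
	struct completion done;
};

static void example_init(struct example_state *st)
{
	init_completion(&st->done);	/* as in fw_state_init() */
}

static void example_finish(struct example_state *st)
{
	complete_all(&st->done);	/* wake every waiter, as in __fw_state_set() */
}

static long example_wait(struct example_state *st, long timeout)
{
	/* >0: completed (jiffies left), 0: timed out, <0: fatal signal. */
	return wait_for_completion_killable_timeout(&st->done, timeout);
}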
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 60303aa28587..e8ca5e2cf1e5 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -209,6 +209,34 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
 smp_mb__after_atomic();
 }
 
+#ifdef CONFIG_DEBUG_FS
+static void genpd_update_accounting(struct generic_pm_domain *genpd)
+{
+ktime_t delta, now;
+
+now = ktime_get();
+delta = ktime_sub(now, genpd->accounting_time);
+
+/*
+* If genpd->status is active, it means we are just
+* out of off and so update the idle time and vice
+* versa.
+*/
+if (genpd->status == GPD_STATE_ACTIVE) {
+int state_idx = genpd->state_idx;
+
+genpd->states[state_idx].idle_time =
+ktime_add(genpd->states[state_idx].idle_time, delta);
+} else {
+genpd->on_time = ktime_add(genpd->on_time, delta);
+}
+
+genpd->accounting_time = now;
+}
+#else
+static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
+#endif
+
 static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
 {
 unsigned int state_idx = genpd->state_idx;
@@ -361,6 +389,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
 }
 
 genpd->status = GPD_STATE_POWER_OFF;
+genpd_update_accounting(genpd);
 
 list_for_each_entry(link, &genpd->slave_links, slave_node) {
 genpd_sd_counter_dec(link->master);
@@ -413,6 +442,8 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
 goto err;
 
 genpd->status = GPD_STATE_ACTIVE;
+genpd_update_accounting(genpd);
+
 return 0;
 
 err:
@@ -1540,6 +1571,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
 genpd->max_off_time_changed = true;
 genpd->provider = NULL;
 genpd->has_provider = false;
+genpd->accounting_time = ktime_get();
 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
 genpd->domain.ops.runtime_resume = genpd_runtime_resume;
 genpd->domain.ops.prepare = pm_genpd_prepare;
@@ -1743,7 +1775,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
 mutex_lock(&of_genpd_mutex);
 list_add(&cp->link, &of_genpd_providers);
 mutex_unlock(&of_genpd_mutex);
-pr_debug("Added domain provider from %s\n", np->full_name);
+pr_debug("Added domain provider from %pOF\n", np);
 
 return 0;
 }
@@ -2149,16 +2181,16 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
 err = of_property_read_u32(state_node, "entry-latency-us",
 &entry_latency);
 if (err) {
-pr_debug(" * %s missing entry-latency-us property\n",
-state_node->full_name);
+pr_debug(" * %pOF missing entry-latency-us property\n",
+state_node);
 return -EINVAL;
 }
 
 err = of_property_read_u32(state_node, "exit-latency-us",
 &exit_latency);
 if (err) {
-pr_debug(" * %s missing exit-latency-us property\n",
-state_node->full_name);
+pr_debug(" * %pOF missing exit-latency-us property\n",
+state_node);
 return -EINVAL;
 }
 
@@ -2212,8 +2244,8 @@ int of_genpd_parse_idle_states(struct device_node *dn,
 ret = genpd_parse_state(&st[i++], np);
 if (ret) {
 pr_err
-("Parsing idle state node %s failed with err %d\n",
-np->full_name, ret);
+("Parsing idle state node %pOF failed with err %d\n",
+np, ret);
 of_node_put(np);
 kfree(st);
 return ret;
@@ -2327,7 +2359,7 @@ exit:
 return 0;
 }
 
-static int pm_genpd_summary_show(struct seq_file *s, void *data)
+static int genpd_summary_show(struct seq_file *s, void *data)
 {
 struct generic_pm_domain *genpd;
 int ret = 0;
@@ -2350,21 +2382,187 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
 return ret;
 }
 
-static int pm_genpd_summary_open(struct inode *inode, struct file *file)
+static int genpd_status_show(struct seq_file *s, void *data)
 {
-return single_open(file, pm_genpd_summary_show, NULL);
+static const char * const status_lookup[] = {
+[GPD_STATE_ACTIVE] = "on",
+[GPD_STATE_POWER_OFF] = "off"
+};
+
+struct generic_pm_domain *genpd = s->private;
+int ret = 0;
+
+ret = genpd_lock_interruptible(genpd);
+if (ret)
+return -ERESTARTSYS;
+
+if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
+goto exit;
+
+if (genpd->status == GPD_STATE_POWER_OFF)
+seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
+genpd->state_idx);
+else
+seq_printf(s, "%s\n", status_lookup[genpd->status]);
+exit:
+genpd_unlock(genpd);
+return ret;
 }
 
-static const struct file_operations pm_genpd_summary_fops = {
-.open = pm_genpd_summary_open,
-.read = seq_read,
-.llseek = seq_lseek,
-.release = single_release,
-};
+static int genpd_sub_domains_show(struct seq_file *s, void *data)
+{
+struct generic_pm_domain *genpd = s->private;
+struct gpd_link *link;
+int ret = 0;
+
+ret = genpd_lock_interruptible(genpd);
+if (ret)
+return -ERESTARTSYS;
+
+list_for_each_entry(link, &genpd->master_links, master_node)
+seq_printf(s, "%s\n", link->slave->name);
+
+genpd_unlock(genpd);
+return ret;
+}
+
+static int genpd_idle_states_show(struct seq_file *s, void *data)
+{
+struct generic_pm_domain *genpd = s->private;
+unsigned int i;
+int ret = 0;
+
+ret = genpd_lock_interruptible(genpd);
+if (ret)
+return -ERESTARTSYS;
+
+seq_puts(s, "State Time Spent(ms)\n");
+
+for (i = 0; i < genpd->state_count; i++) {
+ktime_t delta = 0;
+s64 msecs;
+
+if ((genpd->status == GPD_STATE_POWER_OFF) &&
+(genpd->state_idx == i))
+delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+msecs = ktime_to_ms(
+ktime_add(genpd->states[i].idle_time, delta));
+seq_printf(s, "S%-13i %lld\n", i, msecs);
+}
+
+genpd_unlock(genpd);
+return ret;
+}
+
+static int genpd_active_time_show(struct seq_file *s, void *data)
+{
+struct generic_pm_domain *genpd = s->private;
+ktime_t delta = 0;
+int ret = 0;
+
+ret = genpd_lock_interruptible(genpd);
+if (ret)
+return -ERESTARTSYS;
+
+if (genpd->status == GPD_STATE_ACTIVE)
+delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+seq_printf(s, "%lld ms\n", ktime_to_ms(
+ktime_add(genpd->on_time, delta)));
+
+genpd_unlock(genpd);
+return ret;
+}
+
+static int genpd_total_idle_time_show(struct seq_file *s, void *data)
+{
+struct generic_pm_domain *genpd = s->private;
+ktime_t delta = 0, total = 0;
+unsigned int i;
+int ret = 0;
+
+ret = genpd_lock_interruptible(genpd);
+if (ret)
+return -ERESTARTSYS;
+
+for (i = 0; i < genpd->state_count; i++) {
+
+if ((genpd->status == GPD_STATE_POWER_OFF) &&
+(genpd->state_idx == i))
+delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+total = ktime_add(total, genpd->states[i].idle_time);
+}
+total = ktime_add(total, delta);
+
+seq_printf(s, "%lld ms\n", ktime_to_ms(total));
+
+genpd_unlock(genpd);
+return ret;
+}
+
+
+static int genpd_devices_show(struct seq_file *s, void *data)
+{
+struct generic_pm_domain *genpd = s->private;
+struct pm_domain_data *pm_data;
+const char *kobj_path;
+int ret = 0;
+
+ret = genpd_lock_interruptible(genpd);
+if (ret)
+return -ERESTARTSYS;
+
+list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
+kobj_path = kobject_get_path(&pm_data->dev->kobj,
+genpd_is_irq_safe(genpd) ?
+GFP_ATOMIC : GFP_KERNEL);
+if (kobj_path == NULL)
+continue;
+
+seq_printf(s, "%s\n", kobj_path);
+kfree(kobj_path);
+}
+
+genpd_unlock(genpd);
+return ret;
+}
+
+#define define_genpd_open_function(name) \
+static int genpd_##name##_open(struct inode *inode, struct file *file) \
+{ \
+return single_open(file, genpd_##name##_show, inode->i_private); \
+}
+
+define_genpd_open_function(summary);
+define_genpd_open_function(status);
+define_genpd_open_function(sub_domains);
+define_genpd_open_function(idle_states);
+define_genpd_open_function(active_time);
+define_genpd_open_function(total_idle_time);
+define_genpd_open_function(devices);
+
+#define define_genpd_debugfs_fops(name) \
+static const struct file_operations genpd_##name##_fops = { \
+.open = genpd_##name##_open, \
+.read = seq_read, \
+.llseek = seq_lseek, \
+.release = single_release, \
+}
+
+define_genpd_debugfs_fops(summary);
+define_genpd_debugfs_fops(status);
+define_genpd_debugfs_fops(sub_domains);
+define_genpd_debugfs_fops(idle_states);
+define_genpd_debugfs_fops(active_time);
+define_genpd_debugfs_fops(total_idle_time);
+define_genpd_debugfs_fops(devices);
 
 static int __init pm_genpd_debug_init(void)
 {
 struct dentry *d;
+struct generic_pm_domain *genpd;
 
 pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
 
@@ -2372,10 +2570,29 @@ static int __init pm_genpd_debug_init(void)
 return -ENOMEM;
 
 d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
-pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
+pm_genpd_debugfs_dir, NULL, &genpd_summary_fops);
| 2376 | if (!d) | 2574 | if (!d) |
| 2377 | return -ENOMEM; | 2575 | return -ENOMEM; |
| 2378 | 2576 | ||
| 2577 | list_for_each_entry(genpd, &gpd_list, gpd_list_node) { | ||
| 2578 | d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir); | ||
| 2579 | if (!d) | ||
| 2580 | return -ENOMEM; | ||
| 2581 | |||
| 2582 | debugfs_create_file("current_state", 0444, | ||
| 2583 | d, genpd, &genpd_status_fops); | ||
| 2584 | debugfs_create_file("sub_domains", 0444, | ||
| 2585 | d, genpd, &genpd_sub_domains_fops); | ||
| 2586 | debugfs_create_file("idle_states", 0444, | ||
| 2587 | d, genpd, &genpd_idle_states_fops); | ||
| 2588 | debugfs_create_file("active_time", 0444, | ||
| 2589 | d, genpd, &genpd_active_time_fops); | ||
| 2590 | debugfs_create_file("total_idle_time", 0444, | ||
| 2591 | d, genpd, &genpd_total_idle_time_fops); | ||
| 2592 | debugfs_create_file("devices", 0444, | ||
| 2593 | d, genpd, &genpd_devices_fops); | ||
| 2594 | } | ||
| 2595 | |||
| 2379 | return 0; | 2596 | return 0; |
| 2380 | } | 2597 | } |
| 2381 | late_initcall(pm_genpd_debug_init); | 2598 | late_initcall(pm_genpd_debug_init); |
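For reference, the define_genpd_open_function()/define_genpd_debugfs_fops() pair added above is pure boilerplate reduction; expanded for the "status" attribute it is roughly equivalent to the following sketch, and the other six attributes follow the same pattern:

	static int genpd_status_open(struct inode *inode, struct file *file)
	{
		/* inode->i_private carries the struct generic_pm_domain pointer
		 * that pm_genpd_debug_init() passed to debugfs_create_file(). */
		return single_open(file, genpd_status_show, inode->i_private);
	}

	static const struct file_operations genpd_status_fops = {
		.open		= genpd_status_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};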
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 57eec1ca0569..0b718886479b 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c | |||
| @@ -248,15 +248,22 @@ void dev_pm_opp_of_remove_table(struct device *dev) | |||
| 248 | } | 248 | } |
| 249 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); | 249 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); |
| 250 | 250 | ||
| 251 | /* Returns opp descriptor node for a device, caller must do of_node_put() */ | 251 | /* Returns opp descriptor node for a device node, caller must |
| 252 | struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) | 252 | * do of_node_put() */ |
| 253 | static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np) | ||
| 253 | { | 254 | { |
| 254 | /* | 255 | /* |
| 255 | * There should be only ONE phandle present in "operating-points-v2" | 256 | * There should be only ONE phandle present in "operating-points-v2" |
| 256 | * property. | 257 | * property. |
| 257 | */ | 258 | */ |
| 258 | 259 | ||
| 259 | return of_parse_phandle(dev->of_node, "operating-points-v2", 0); | 260 | return of_parse_phandle(np, "operating-points-v2", 0); |
| 261 | } | ||
| 262 | |||
| 263 | /* Returns opp descriptor node for a device, caller must do of_node_put() */ | ||
| 264 | struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) | ||
| 265 | { | ||
| 266 | return _opp_of_get_opp_desc_node(dev->of_node); | ||
| 260 | } | 267 | } |
| 261 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node); | 268 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node); |
| 262 | 269 | ||
| @@ -539,8 +546,12 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) | |||
| 539 | 546 | ||
| 540 | ret = dev_pm_opp_of_add_table(cpu_dev); | 547 | ret = dev_pm_opp_of_add_table(cpu_dev); |
| 541 | if (ret) { | 548 | if (ret) { |
| 542 | pr_err("%s: couldn't find opp table for cpu:%d, %d\n", | 549 | /* |
| 543 | __func__, cpu, ret); | 550 | * OPP may get registered dynamically, don't print error |
| 551 | * message here. | ||
| 552 | */ | ||
| 553 | pr_debug("%s: couldn't find opp table for cpu:%d, %d\n", | ||
| 554 | __func__, cpu, ret); | ||
| 544 | 555 | ||
| 545 | /* Free all other OPPs */ | 556 | /* Free all other OPPs */ |
| 546 | dev_pm_opp_of_cpumask_remove_table(cpumask); | 557 | dev_pm_opp_of_cpumask_remove_table(cpumask); |
| @@ -572,8 +583,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); | |||
| 572 | int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, | 583 | int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, |
| 573 | struct cpumask *cpumask) | 584 | struct cpumask *cpumask) |
| 574 | { | 585 | { |
| 575 | struct device_node *np, *tmp_np; | 586 | struct device_node *np, *tmp_np, *cpu_np; |
| 576 | struct device *tcpu_dev; | ||
| 577 | int cpu, ret = 0; | 587 | int cpu, ret = 0; |
| 578 | 588 | ||
| 579 | /* Get OPP descriptor node */ | 589 | /* Get OPP descriptor node */ |
| @@ -593,19 +603,18 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, | |||
| 593 | if (cpu == cpu_dev->id) | 603 | if (cpu == cpu_dev->id) |
| 594 | continue; | 604 | continue; |
| 595 | 605 | ||
| 596 | tcpu_dev = get_cpu_device(cpu); | 606 | cpu_np = of_get_cpu_node(cpu, NULL); |
| 597 | if (!tcpu_dev) { | 607 | if (!cpu_np) { |
| 598 | dev_err(cpu_dev, "%s: failed to get cpu%d device\n", | 608 | dev_err(cpu_dev, "%s: failed to get cpu%d node\n", |
| 599 | __func__, cpu); | 609 | __func__, cpu); |
| 600 | ret = -ENODEV; | 610 | ret = -ENOENT; |
| 601 | goto put_cpu_node; | 611 | goto put_cpu_node; |
| 602 | } | 612 | } |
| 603 | 613 | ||
| 604 | /* Get OPP descriptor node */ | 614 | /* Get OPP descriptor node */ |
| 605 | tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev); | 615 | tmp_np = _opp_of_get_opp_desc_node(cpu_np); |
| 606 | if (!tmp_np) { | 616 | if (!tmp_np) { |
| 607 | dev_err(tcpu_dev, "%s: Couldn't find opp node.\n", | 617 | pr_err("%pOF: Couldn't find opp node\n", cpu_np); |
| 608 | __func__); | ||
| 609 | ret = -ENOENT; | 618 | ret = -ENOENT; |
| 610 | goto put_cpu_node; | 619 | goto put_cpu_node; |
| 611 | } | 620 | } |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 144e6d8fafc8..b49efe33099e 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
| @@ -412,15 +412,17 @@ void device_set_wakeup_capable(struct device *dev, bool capable) | |||
| 412 | if (!!dev->power.can_wakeup == !!capable) | 412 | if (!!dev->power.can_wakeup == !!capable) |
| 413 | return; | 413 | return; |
| 414 | 414 | ||
| 415 | dev->power.can_wakeup = capable; | ||
| 415 | if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { | 416 | if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { |
| 416 | if (capable) { | 417 | if (capable) { |
| 417 | if (wakeup_sysfs_add(dev)) | 418 | int ret = wakeup_sysfs_add(dev); |
| 418 | return; | 419 | |
| 420 | if (ret) | ||
| 421 | dev_info(dev, "Wakeup sysfs attributes not added\n"); | ||
| 419 | } else { | 422 | } else { |
| 420 | wakeup_sysfs_remove(dev); | 423 | wakeup_sysfs_remove(dev); |
| 421 | } | 424 | } |
| 422 | } | 425 | } |
| 423 | dev->power.can_wakeup = capable; | ||
| 424 | } | 426 | } |
| 425 | EXPORT_SYMBOL_GPL(device_set_wakeup_capable); | 427 | EXPORT_SYMBOL_GPL(device_set_wakeup_capable); |
| 426 | 428 | ||
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ef8334949b42..f321b96405f5 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
| @@ -221,8 +221,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) | |||
| 221 | } | 221 | } |
| 222 | 222 | ||
| 223 | static int | 223 | static int |
| 224 | figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit, | 224 | figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) |
| 225 | loff_t logical_blocksize) | ||
| 226 | { | 225 | { |
| 227 | loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); | 226 | loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); |
| 228 | sector_t x = (sector_t)size; | 227 | sector_t x = (sector_t)size; |
| @@ -234,12 +233,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit, | |||
| 234 | lo->lo_offset = offset; | 233 | lo->lo_offset = offset; |
| 235 | if (lo->lo_sizelimit != sizelimit) | 234 | if (lo->lo_sizelimit != sizelimit) |
| 236 | lo->lo_sizelimit = sizelimit; | 235 | lo->lo_sizelimit = sizelimit; |
| 237 | if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) { | ||
| 238 | lo->lo_logical_blocksize = logical_blocksize; | ||
| 239 | blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize); | ||
| 240 | blk_queue_logical_block_size(lo->lo_queue, | ||
| 241 | lo->lo_logical_blocksize); | ||
| 242 | } | ||
| 243 | set_capacity(lo->lo_disk, x); | 236 | set_capacity(lo->lo_disk, x); |
| 244 | bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9); | 237 | bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9); |
| 245 | /* let user-space know about the new size */ | 238 | /* let user-space know about the new size */ |
| @@ -820,7 +813,6 @@ static void loop_config_discard(struct loop_device *lo) | |||
| 820 | struct file *file = lo->lo_backing_file; | 813 | struct file *file = lo->lo_backing_file; |
| 821 | struct inode *inode = file->f_mapping->host; | 814 | struct inode *inode = file->f_mapping->host; |
| 822 | struct request_queue *q = lo->lo_queue; | 815 | struct request_queue *q = lo->lo_queue; |
| 823 | int lo_bits = 9; | ||
| 824 | 816 | ||
| 825 | /* | 817 | /* |
| 826 | * We use punch hole to reclaim the free space used by the | 818 | * We use punch hole to reclaim the free space used by the |
| @@ -840,11 +832,9 @@ static void loop_config_discard(struct loop_device *lo) | |||
| 840 | 832 | ||
| 841 | q->limits.discard_granularity = inode->i_sb->s_blocksize; | 833 | q->limits.discard_granularity = inode->i_sb->s_blocksize; |
| 842 | q->limits.discard_alignment = 0; | 834 | q->limits.discard_alignment = 0; |
| 843 | if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) | ||
| 844 | lo_bits = blksize_bits(lo->lo_logical_blocksize); | ||
| 845 | 835 | ||
| 846 | blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits); | 836 | blk_queue_max_discard_sectors(q, UINT_MAX >> 9); |
| 847 | blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits); | 837 | blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); |
| 848 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); | 838 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); |
| 849 | } | 839 | } |
| 850 | 840 | ||
| @@ -938,7 +928,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, | |||
| 938 | 928 | ||
| 939 | lo->use_dio = false; | 929 | lo->use_dio = false; |
| 940 | lo->lo_blocksize = lo_blocksize; | 930 | lo->lo_blocksize = lo_blocksize; |
| 941 | lo->lo_logical_blocksize = 512; | ||
| 942 | lo->lo_device = bdev; | 931 | lo->lo_device = bdev; |
| 943 | lo->lo_flags = lo_flags; | 932 | lo->lo_flags = lo_flags; |
| 944 | lo->lo_backing_file = file; | 933 | lo->lo_backing_file = file; |
| @@ -1104,7 +1093,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) | |||
| 1104 | int err; | 1093 | int err; |
| 1105 | struct loop_func_table *xfer; | 1094 | struct loop_func_table *xfer; |
| 1106 | kuid_t uid = current_uid(); | 1095 | kuid_t uid = current_uid(); |
| 1107 | int lo_flags = lo->lo_flags; | ||
| 1108 | 1096 | ||
| 1109 | if (lo->lo_encrypt_key_size && | 1097 | if (lo->lo_encrypt_key_size && |
| 1110 | !uid_eq(lo->lo_key_owner, uid) && | 1098 | !uid_eq(lo->lo_key_owner, uid) && |
| @@ -1137,26 +1125,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) | |||
| 1137 | if (err) | 1125 | if (err) |
| 1138 | goto exit; | 1126 | goto exit; |
| 1139 | 1127 | ||
| 1140 | if (info->lo_flags & LO_FLAGS_BLOCKSIZE) { | ||
| 1141 | if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE)) | ||
| 1142 | lo->lo_logical_blocksize = 512; | ||
| 1143 | lo->lo_flags |= LO_FLAGS_BLOCKSIZE; | ||
| 1144 | if (LO_INFO_BLOCKSIZE(info) != 512 && | ||
| 1145 | LO_INFO_BLOCKSIZE(info) != 1024 && | ||
| 1146 | LO_INFO_BLOCKSIZE(info) != 2048 && | ||
| 1147 | LO_INFO_BLOCKSIZE(info) != 4096) | ||
| 1148 | return -EINVAL; | ||
| 1149 | if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize) | ||
| 1150 | return -EINVAL; | ||
| 1151 | } | ||
| 1152 | |||
| 1153 | if (lo->lo_offset != info->lo_offset || | 1128 | if (lo->lo_offset != info->lo_offset || |
| 1154 | lo->lo_sizelimit != info->lo_sizelimit || | 1129 | lo->lo_sizelimit != info->lo_sizelimit) { |
| 1155 | lo->lo_flags != lo_flags || | 1130 | if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { |
| 1156 | ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) && | ||
| 1157 | lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) { | ||
| 1158 | if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit, | ||
| 1159 | LO_INFO_BLOCKSIZE(info))) { | ||
| 1160 | err = -EFBIG; | 1131 | err = -EFBIG; |
| 1161 | goto exit; | 1132 | goto exit; |
| 1162 | } | 1133 | } |
| @@ -1348,8 +1319,7 @@ static int loop_set_capacity(struct loop_device *lo) | |||
| 1348 | if (unlikely(lo->lo_state != Lo_bound)) | 1319 | if (unlikely(lo->lo_state != Lo_bound)) |
| 1349 | return -ENXIO; | 1320 | return -ENXIO; |
| 1350 | 1321 | ||
| 1351 | return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit, | 1322 | return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit); |
| 1352 | lo->lo_logical_blocksize); | ||
| 1353 | } | 1323 | } |
| 1354 | 1324 | ||
| 1355 | static int loop_set_dio(struct loop_device *lo, unsigned long arg) | 1325 | static int loop_set_dio(struct loop_device *lo, unsigned long arg) |
diff --git a/drivers/block/loop.h b/drivers/block/loop.h index 2c096b9a17b8..fecd3f97ef8c 100644 --- a/drivers/block/loop.h +++ b/drivers/block/loop.h | |||
| @@ -49,7 +49,6 @@ struct loop_device { | |||
| 49 | struct file * lo_backing_file; | 49 | struct file * lo_backing_file; |
| 50 | struct block_device *lo_device; | 50 | struct block_device *lo_device; |
| 51 | unsigned lo_blocksize; | 51 | unsigned lo_blocksize; |
| 52 | unsigned lo_logical_blocksize; | ||
| 53 | void *key_data; | 52 | void *key_data; |
| 54 | 53 | ||
| 55 | gfp_t old_gfp_mask; | 54 | gfp_t old_gfp_mask; |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 87a0a29f6e7e..5bdf923294a5 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
| @@ -908,7 +908,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) | |||
| 908 | continue; | 908 | continue; |
| 909 | } | 909 | } |
| 910 | sk_set_memalloc(sock->sk); | 910 | sk_set_memalloc(sock->sk); |
| 911 | sock->sk->sk_sndtimeo = nbd->tag_set.timeout; | 911 | if (nbd->tag_set.timeout) |
| 912 | sock->sk->sk_sndtimeo = nbd->tag_set.timeout; | ||
| 912 | atomic_inc(&config->recv_threads); | 913 | atomic_inc(&config->recv_threads); |
| 913 | refcount_inc(&nbd->config_refs); | 914 | refcount_inc(&nbd->config_refs); |
| 914 | old = nsock->sock; | 915 | old = nsock->sock; |
| @@ -922,6 +923,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) | |||
| 922 | mutex_unlock(&nsock->tx_lock); | 923 | mutex_unlock(&nsock->tx_lock); |
| 923 | sockfd_put(old); | 924 | sockfd_put(old); |
| 924 | 925 | ||
| 926 | clear_bit(NBD_DISCONNECTED, &config->runtime_flags); | ||
| 927 | |||
| 925 | /* We take the tx_mutex in an error path in the recv_work, so we | 928 | /* We take the tx_mutex in an error path in the recv_work, so we |
| 926 | * need to queue_work outside of the tx_mutex. | 929 | * need to queue_work outside of the tx_mutex. |
| 927 | */ | 930 | */ |
| @@ -978,11 +981,15 @@ static void send_disconnects(struct nbd_device *nbd) | |||
| 978 | int i, ret; | 981 | int i, ret; |
| 979 | 982 | ||
| 980 | for (i = 0; i < config->num_connections; i++) { | 983 | for (i = 0; i < config->num_connections; i++) { |
| 984 | struct nbd_sock *nsock = config->socks[i]; | ||
| 985 | |||
| 981 | iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); | 986 | iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); |
| 987 | mutex_lock(&nsock->tx_lock); | ||
| 982 | ret = sock_xmit(nbd, i, 1, &from, 0, NULL); | 988 | ret = sock_xmit(nbd, i, 1, &from, 0, NULL); |
| 983 | if (ret <= 0) | 989 | if (ret <= 0) |
| 984 | dev_err(disk_to_dev(nbd->disk), | 990 | dev_err(disk_to_dev(nbd->disk), |
| 985 | "Send disconnect failed %d\n", ret); | 991 | "Send disconnect failed %d\n", ret); |
| 992 | mutex_unlock(&nsock->tx_lock); | ||
| 986 | } | 993 | } |
| 987 | } | 994 | } |
| 988 | 995 | ||
| @@ -991,9 +998,8 @@ static int nbd_disconnect(struct nbd_device *nbd) | |||
| 991 | struct nbd_config *config = nbd->config; | 998 | struct nbd_config *config = nbd->config; |
| 992 | 999 | ||
| 993 | dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); | 1000 | dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); |
| 994 | if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED, | 1001 | set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags); |
| 995 | &config->runtime_flags)) | 1002 | send_disconnects(nbd); |
| 996 | send_disconnects(nbd); | ||
| 997 | return 0; | 1003 | return 0; |
| 998 | } | 1004 | } |
| 999 | 1005 | ||
| @@ -1074,7 +1080,9 @@ static int nbd_start_device(struct nbd_device *nbd) | |||
| 1074 | return -ENOMEM; | 1080 | return -ENOMEM; |
| 1075 | } | 1081 | } |
| 1076 | sk_set_memalloc(config->socks[i]->sock->sk); | 1082 | sk_set_memalloc(config->socks[i]->sock->sk); |
| 1077 | config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout; | 1083 | if (nbd->tag_set.timeout) |
| 1084 | config->socks[i]->sock->sk->sk_sndtimeo = | ||
| 1085 | nbd->tag_set.timeout; | ||
| 1078 | atomic_inc(&config->recv_threads); | 1086 | atomic_inc(&config->recv_threads); |
| 1079 | refcount_inc(&nbd->config_refs); | 1087 | refcount_inc(&nbd->config_refs); |
| 1080 | INIT_WORK(&args->work, recv_work); | 1088 | INIT_WORK(&args->work, recv_work); |
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 6b16ead1da58..ad9749463d4f 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c | |||
| @@ -875,6 +875,56 @@ static void print_version(void) | |||
| 875 | printk(KERN_INFO "%s", version); | 875 | printk(KERN_INFO "%s", version); |
| 876 | } | 876 | } |
| 877 | 877 | ||
| 878 | struct vdc_check_port_data { | ||
| 879 | int dev_no; | ||
| 880 | char *type; | ||
| 881 | }; | ||
| 882 | |||
| 883 | static int vdc_device_probed(struct device *dev, void *arg) | ||
| 884 | { | ||
| 885 | struct vio_dev *vdev = to_vio_dev(dev); | ||
| 886 | struct vdc_check_port_data *port_data; | ||
| 887 | |||
| 888 | port_data = (struct vdc_check_port_data *)arg; | ||
| 889 | |||
| 890 | if ((vdev->dev_no == port_data->dev_no) && | ||
| 891 | (!(strcmp((char *)&vdev->type, port_data->type))) && | ||
| 892 | dev_get_drvdata(dev)) { | ||
| 893 | /* This device has already been configured | ||
| 894 | * by vdc_port_probe() | ||
| 895 | */ | ||
| 896 | return 1; | ||
| 897 | } else { | ||
| 898 | return 0; | ||
| 899 | } | ||
| 900 | } | ||
| 901 | |||
| 902 | /* Determine whether the VIO device is part of an mpgroup | ||
| 903 | * by locating all the virtual-device-port nodes associated | ||
| 904 | * with the parent virtual-device node for the VIO device | ||
| 905 | * and checking whether any of these nodes are vdc-ports | ||
| 906 | * which have already been configured. | ||
| 907 | * | ||
| 908 | * Returns true if this device is part of an mpgroup and has | ||
| 909 | * already been probed. | ||
| 910 | */ | ||
| 911 | static bool vdc_port_mpgroup_check(struct vio_dev *vdev) | ||
| 912 | { | ||
| 913 | struct vdc_check_port_data port_data; | ||
| 914 | struct device *dev; | ||
| 915 | |||
| 916 | port_data.dev_no = vdev->dev_no; | ||
| 917 | port_data.type = (char *)&vdev->type; | ||
| 918 | |||
| 919 | dev = device_find_child(vdev->dev.parent, &port_data, | ||
| 920 | vdc_device_probed); | ||
| 921 | |||
| 922 | if (dev) | ||
| 923 | return true; | ||
| 924 | |||
| 925 | return false; | ||
| 926 | } | ||
| 927 | |||
| 878 | static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) | 928 | static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) |
| 879 | { | 929 | { |
| 880 | struct mdesc_handle *hp; | 930 | struct mdesc_handle *hp; |
| @@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
| 893 | goto err_out_release_mdesc; | 943 | goto err_out_release_mdesc; |
| 894 | } | 944 | } |
| 895 | 945 | ||
| 946 | /* Check if this device is part of an mpgroup */ | ||
| 947 | if (vdc_port_mpgroup_check(vdev)) { | ||
| 948 | printk(KERN_WARNING | ||
| 949 | "VIO: Ignoring extra vdisk port %s", | ||
| 950 | dev_name(&vdev->dev)); | ||
| 951 | goto err_out_release_mdesc; | ||
| 952 | } | ||
| 953 | |||
| 896 | port = kzalloc(sizeof(*port), GFP_KERNEL); | 954 | port = kzalloc(sizeof(*port), GFP_KERNEL); |
| 897 | err = -ENOMEM; | 955 | err = -ENOMEM; |
| 898 | if (!port) { | 956 | if (!port) { |
| @@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
| 943 | if (err) | 1001 | if (err) |
| 944 | goto err_out_free_tx_ring; | 1002 | goto err_out_free_tx_ring; |
| 945 | 1003 | ||
| 1004 | /* Note that the device driver_data is used to determine | ||
| 1005 | * whether the port has been probed. | ||
| 1006 | */ | ||
| 946 | dev_set_drvdata(&vdev->dev, port); | 1007 | dev_set_drvdata(&vdev->dev, port); |
| 947 | 1008 | ||
| 948 | mdesc_release(hp); | 1009 | mdesc_release(hp); |
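The mpgroup check above is an instance of the generic device_find_child() lookup pattern: a match callback returns nonzero for the child of interest, and the walk stops at the first hit. A stripped-down sketch of that pattern (the helpers below are hypothetical and not part of sunvdc.c):

	static int match_configured(struct device *dev, void *data)
	{
		/* Treat a child as "configured" once its driver data is set. */
		return dev_get_drvdata(dev) != NULL;
	}

	static bool any_child_configured(struct device *parent)
	{
		struct device *child;

		child = device_find_child(parent, NULL, match_configured);
		if (!child)
			return false;

		put_device(child);	/* device_find_child() returns a referenced device */
		return true;
	}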
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 4e02aa5fdac0..d3d5523862c2 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
| @@ -381,6 +381,7 @@ static void virtblk_config_changed_work(struct work_struct *work) | |||
| 381 | struct request_queue *q = vblk->disk->queue; | 381 | struct request_queue *q = vblk->disk->queue; |
| 382 | char cap_str_2[10], cap_str_10[10]; | 382 | char cap_str_2[10], cap_str_10[10]; |
| 383 | char *envp[] = { "RESIZE=1", NULL }; | 383 | char *envp[] = { "RESIZE=1", NULL }; |
| 384 | unsigned long long nblocks; | ||
| 384 | u64 capacity; | 385 | u64 capacity; |
| 385 | 386 | ||
| 386 | /* Host must always specify the capacity. */ | 387 | /* Host must always specify the capacity. */ |
| @@ -393,16 +394,19 @@ static void virtblk_config_changed_work(struct work_struct *work) | |||
| 393 | capacity = (sector_t)-1; | 394 | capacity = (sector_t)-1; |
| 394 | } | 395 | } |
| 395 | 396 | ||
| 396 | string_get_size(capacity, queue_logical_block_size(q), | 397 | nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9); |
| 398 | |||
| 399 | string_get_size(nblocks, queue_logical_block_size(q), | ||
| 397 | STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); | 400 | STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); |
| 398 | string_get_size(capacity, queue_logical_block_size(q), | 401 | string_get_size(nblocks, queue_logical_block_size(q), |
| 399 | STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); | 402 | STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); |
| 400 | 403 | ||
| 401 | dev_notice(&vdev->dev, | 404 | dev_notice(&vdev->dev, |
| 402 | "new size: %llu %d-byte logical blocks (%s/%s)\n", | 405 | "new size: %llu %d-byte logical blocks (%s/%s)\n", |
| 403 | (unsigned long long)capacity, | 406 | nblocks, |
| 404 | queue_logical_block_size(q), | 407 | queue_logical_block_size(q), |
| 405 | cap_str_10, cap_str_2); | 408 | cap_str_10, |
| 409 | cap_str_2); | ||
| 406 | 410 | ||
| 407 | set_capacity(vblk->disk, capacity); | 411 | set_capacity(vblk->disk, capacity); |
| 408 | revalidate_disk(vblk->disk); | 412 | revalidate_disk(vblk->disk); |
| @@ -541,12 +545,9 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr, | |||
| 541 | int i; | 545 | int i; |
| 542 | 546 | ||
| 543 | BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); | 547 | BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); |
| 544 | for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; ) | 548 | i = sysfs_match_string(virtblk_cache_types, buf); |
| 545 | if (sysfs_streq(buf, virtblk_cache_types[i])) | ||
| 546 | break; | ||
| 547 | |||
| 548 | if (i < 0) | 549 | if (i < 0) |
| 549 | return -EINVAL; | 550 | return i; |
| 550 | 551 | ||
| 551 | virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i); | 552 | virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i); |
| 552 | virtblk_update_cache_mode(vdev); | 553 | virtblk_update_cache_mode(vdev); |
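The DIV_ROUND_UP_ULL() change above exists because virtio reports capacity in 512-byte sectors while the resize message is meant to count logical blocks. A self-contained sketch of the same conversion, not part of the driver, with a worked number: 8388608 sectors at a 4096-byte logical block size is 1048576 blocks, i.e. 4 GiB.

	static unsigned long long sectors_to_logical_blocks(unsigned long long sectors,
							    unsigned int lblk_size)
	{
		unsigned int sectors_per_block = lblk_size >> 9;	/* 4096 >> 9 == 8 */

		/* Round up, as DIV_ROUND_UP_ULL() does in the patch. */
		return (sectors + sectors_per_block - 1) / sectors_per_block;
	}

	/* sectors_to_logical_blocks(8388608, 4096) == 1048576 */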
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 792da683e70d..2adb8599be93 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c | |||
| @@ -244,6 +244,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) | |||
| 244 | { | 244 | { |
| 245 | struct pending_req *req, *n; | 245 | struct pending_req *req, *n; |
| 246 | unsigned int j, r; | 246 | unsigned int j, r; |
| 247 | bool busy = false; | ||
| 247 | 248 | ||
| 248 | for (r = 0; r < blkif->nr_rings; r++) { | 249 | for (r = 0; r < blkif->nr_rings; r++) { |
| 249 | struct xen_blkif_ring *ring = &blkif->rings[r]; | 250 | struct xen_blkif_ring *ring = &blkif->rings[r]; |
| @@ -261,8 +262,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) | |||
| 261 | * don't have any discard_io or other_io requests. So, checking | 262 | * don't have any discard_io or other_io requests. So, checking |
| 262 | * for inflight IO is enough. | 263 | * for inflight IO is enough. |
| 263 | */ | 264 | */ |
| 264 | if (atomic_read(&ring->inflight) > 0) | 265 | if (atomic_read(&ring->inflight) > 0) { |
| 265 | return -EBUSY; | 266 | busy = true; |
| 267 | continue; | ||
| 268 | } | ||
| 266 | 269 | ||
| 267 | if (ring->irq) { | 270 | if (ring->irq) { |
| 268 | unbind_from_irqhandler(ring->irq, ring); | 271 | unbind_from_irqhandler(ring->irq, ring); |
| @@ -300,6 +303,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) | |||
| 300 | WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); | 303 | WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); |
| 301 | ring->active = false; | 304 | ring->active = false; |
| 302 | } | 305 | } |
| 306 | if (busy) | ||
| 307 | return -EBUSY; | ||
| 308 | |||
| 303 | blkif->nr_ring_pages = 0; | 309 | blkif->nr_ring_pages = 0; |
| 304 | /* | 310 | /* |
| 305 | * blkif->rings was allocated in connect_ring, so we should free it in | 311 | * blkif->rings was allocated in connect_ring, so we should free it in |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index c852ed3c01d5..2468c28d4771 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
| @@ -111,7 +111,7 @@ struct blk_shadow { | |||
| 111 | }; | 111 | }; |
| 112 | 112 | ||
| 113 | struct blkif_req { | 113 | struct blkif_req { |
| 114 | int error; | 114 | blk_status_t error; |
| 115 | }; | 115 | }; |
| 116 | 116 | ||
| 117 | static inline struct blkif_req *blkif_req(struct request *rq) | 117 | static inline struct blkif_req *blkif_req(struct request *rq) |
| @@ -708,6 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri | |||
| 708 | * existing persistent grants, or if we have to get new grants, | 708 | * existing persistent grants, or if we have to get new grants, |
| 709 | * as there are not sufficiently many free. | 709 | * as there are not sufficiently many free. |
| 710 | */ | 710 | */ |
| 711 | bool new_persistent_gnts = false; | ||
| 711 | struct scatterlist *sg; | 712 | struct scatterlist *sg; |
| 712 | int num_sg, max_grefs, num_grant; | 713 | int num_sg, max_grefs, num_grant; |
| 713 | 714 | ||
| @@ -719,19 +720,21 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri | |||
| 719 | */ | 720 | */ |
| 720 | max_grefs += INDIRECT_GREFS(max_grefs); | 721 | max_grefs += INDIRECT_GREFS(max_grefs); |
| 721 | 722 | ||
| 722 | /* | 723 | /* Check if we have enough persistent grants to allocate a requests */ |
| 723 | * We have to reserve 'max_grefs' grants because persistent | 724 | if (rinfo->persistent_gnts_c < max_grefs) { |
| 724 | * grants are shared by all rings. | 725 | new_persistent_gnts = true; |
| 725 | */ | 726 | |
| 726 | if (max_grefs > 0) | 727 | if (gnttab_alloc_grant_references( |
| 727 | if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) { | 728 | max_grefs - rinfo->persistent_gnts_c, |
| 729 | &setup.gref_head) < 0) { | ||
| 728 | gnttab_request_free_callback( | 730 | gnttab_request_free_callback( |
| 729 | &rinfo->callback, | 731 | &rinfo->callback, |
| 730 | blkif_restart_queue_callback, | 732 | blkif_restart_queue_callback, |
| 731 | rinfo, | 733 | rinfo, |
| 732 | max_grefs); | 734 | max_grefs - rinfo->persistent_gnts_c); |
| 733 | return 1; | 735 | return 1; |
| 734 | } | 736 | } |
| 737 | } | ||
| 735 | 738 | ||
| 736 | /* Fill out a communications ring structure. */ | 739 | /* Fill out a communications ring structure. */ |
| 737 | id = blkif_ring_get_request(rinfo, req, &ring_req); | 740 | id = blkif_ring_get_request(rinfo, req, &ring_req); |
| @@ -832,7 +835,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri | |||
| 832 | if (unlikely(require_extra_req)) | 835 | if (unlikely(require_extra_req)) |
| 833 | rinfo->shadow[extra_id].req = *extra_ring_req; | 836 | rinfo->shadow[extra_id].req = *extra_ring_req; |
| 834 | 837 | ||
| 835 | if (max_grefs > 0) | 838 | if (new_persistent_gnts) |
| 836 | gnttab_free_grant_references(setup.gref_head); | 839 | gnttab_free_grant_references(setup.gref_head); |
| 837 | 840 | ||
| 838 | return 0; | 841 | return 0; |
| @@ -906,8 +909,8 @@ out_err: | |||
| 906 | return BLK_STS_IOERR; | 909 | return BLK_STS_IOERR; |
| 907 | 910 | ||
| 908 | out_busy: | 911 | out_busy: |
| 909 | spin_unlock_irqrestore(&rinfo->ring_lock, flags); | ||
| 910 | blk_mq_stop_hw_queue(hctx); | 912 | blk_mq_stop_hw_queue(hctx); |
| 913 | spin_unlock_irqrestore(&rinfo->ring_lock, flags); | ||
| 911 | return BLK_STS_RESOURCE; | 914 | return BLK_STS_RESOURCE; |
| 912 | } | 915 | } |
| 913 | 916 | ||
| @@ -1616,7 +1619,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) | |||
| 1616 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { | 1619 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { |
| 1617 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", | 1620 | printk(KERN_WARNING "blkfront: %s: %s op failed\n", |
| 1618 | info->gd->disk_name, op_name(bret->operation)); | 1621 | info->gd->disk_name, op_name(bret->operation)); |
| 1619 | blkif_req(req)->error = -EOPNOTSUPP; | 1622 | blkif_req(req)->error = BLK_STS_NOTSUPP; |
| 1620 | } | 1623 | } |
| 1621 | if (unlikely(bret->status == BLKIF_RSP_ERROR && | 1624 | if (unlikely(bret->status == BLKIF_RSP_ERROR && |
| 1622 | rinfo->shadow[id].req.u.rw.nr_segments == 0)) { | 1625 | rinfo->shadow[id].req.u.rw.nr_segments == 0)) { |
| @@ -2072,9 +2075,9 @@ static int blkfront_resume(struct xenbus_device *dev) | |||
| 2072 | /* | 2075 | /* |
| 2073 | * Get the bios in the request so we can re-queue them. | 2076 | * Get the bios in the request so we can re-queue them. |
| 2074 | */ | 2077 | */ |
| 2075 | if (req_op(shadow[i].request) == REQ_OP_FLUSH || | 2078 | if (req_op(shadow[j].request) == REQ_OP_FLUSH || |
| 2076 | req_op(shadow[i].request) == REQ_OP_DISCARD || | 2079 | req_op(shadow[j].request) == REQ_OP_DISCARD || |
| 2077 | req_op(shadow[i].request) == REQ_OP_SECURE_ERASE || | 2080 | req_op(shadow[j].request) == REQ_OP_SECURE_ERASE || |
| 2078 | shadow[j].request->cmd_flags & REQ_FUA) { | 2081 | shadow[j].request->cmd_flags & REQ_FUA) { |
| 2079 | /* | 2082 | /* |
| 2080 | * Flush operations don't contain bios, so | 2083 | * Flush operations don't contain bios, so |
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 856d5dc02451..3b1b6340ba13 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
| @@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev, | |||
| 308 | struct device_attribute *attr, const char *buf, size_t len) | 308 | struct device_attribute *attr, const char *buf, size_t len) |
| 309 | { | 309 | { |
| 310 | struct zram *zram = dev_to_zram(dev); | 310 | struct zram *zram = dev_to_zram(dev); |
| 311 | char compressor[CRYPTO_MAX_ALG_NAME]; | 311 | char compressor[ARRAY_SIZE(zram->compressor)]; |
| 312 | size_t sz; | 312 | size_t sz; |
| 313 | 313 | ||
| 314 | strlcpy(compressor, buf, sizeof(compressor)); | 314 | strlcpy(compressor, buf, sizeof(compressor)); |
| @@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev, | |||
| 327 | return -EBUSY; | 327 | return -EBUSY; |
| 328 | } | 328 | } |
| 329 | 329 | ||
| 330 | strlcpy(zram->compressor, compressor, sizeof(compressor)); | 330 | strcpy(zram->compressor, compressor); |
| 331 | up_write(&zram->init_lock); | 331 | up_write(&zram->init_lock); |
| 332 | return len; | 332 | return len; |
| 333 | } | 333 | } |
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c index 1e6e0269edcc..f76be6bd6eb3 100644 --- a/drivers/bus/uniphier-system-bus.c +++ b/drivers/bus/uniphier-system-bus.c | |||
| @@ -256,10 +256,23 @@ static int uniphier_system_bus_probe(struct platform_device *pdev) | |||
| 256 | 256 | ||
| 257 | uniphier_system_bus_set_reg(priv); | 257 | uniphier_system_bus_set_reg(priv); |
| 258 | 258 | ||
| 259 | platform_set_drvdata(pdev, priv); | ||
| 260 | |||
| 259 | /* Now, the bus is configured. Populate platform_devices below it */ | 261 | /* Now, the bus is configured. Populate platform_devices below it */ |
| 260 | return of_platform_default_populate(dev->of_node, NULL, dev); | 262 | return of_platform_default_populate(dev->of_node, NULL, dev); |
| 261 | } | 263 | } |
| 262 | 264 | ||
| 265 | static int __maybe_unused uniphier_system_bus_resume(struct device *dev) | ||
| 266 | { | ||
| 267 | uniphier_system_bus_set_reg(dev_get_drvdata(dev)); | ||
| 268 | |||
| 269 | return 0; | ||
| 270 | } | ||
| 271 | |||
| 272 | static const struct dev_pm_ops uniphier_system_bus_pm_ops = { | ||
| 273 | SET_SYSTEM_SLEEP_PM_OPS(NULL, uniphier_system_bus_resume) | ||
| 274 | }; | ||
| 275 | |||
| 263 | static const struct of_device_id uniphier_system_bus_match[] = { | 276 | static const struct of_device_id uniphier_system_bus_match[] = { |
| 264 | { .compatible = "socionext,uniphier-system-bus" }, | 277 | { .compatible = "socionext,uniphier-system-bus" }, |
| 265 | { /* sentinel */ } | 278 | { /* sentinel */ } |
| @@ -271,6 +284,7 @@ static struct platform_driver uniphier_system_bus_driver = { | |||
| 271 | .driver = { | 284 | .driver = { |
| 272 | .name = "uniphier-system-bus", | 285 | .name = "uniphier-system-bus", |
| 273 | .of_match_table = uniphier_system_bus_match, | 286 | .of_match_table = uniphier_system_bus_match, |
| 287 | .pm = &uniphier_system_bus_pm_ops, | ||
| 274 | }, | 288 | }, |
| 275 | }; | 289 | }; |
| 276 | module_platform_driver(uniphier_system_bus_driver); | 290 | module_platform_driver(uniphier_system_bus_driver); |
diff --git a/drivers/char/random.c b/drivers/char/random.c index afa3ce7d3e72..8ad92707e45f 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
| @@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller, | |||
| 1492 | #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM | 1492 | #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM |
| 1493 | print_once = true; | 1493 | print_once = true; |
| 1494 | #endif | 1494 | #endif |
| 1495 | pr_notice("random: %s called from %pF with crng_init=%d\n", | 1495 | pr_notice("random: %s called from %pS with crng_init=%d\n", |
| 1496 | func_name, caller, crng_init); | 1496 | func_name, caller, crng_init); |
| 1497 | } | 1497 | } |
| 1498 | 1498 | ||
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c index c391a49aaaff..b4cf2f699a21 100644 --- a/drivers/clk/clk-gemini.c +++ b/drivers/clk/clk-gemini.c | |||
| @@ -237,6 +237,18 @@ static int gemini_reset(struct reset_controller_dev *rcdev, | |||
| 237 | BIT(GEMINI_RESET_CPU1) | BIT(id)); | 237 | BIT(GEMINI_RESET_CPU1) | BIT(id)); |
| 238 | } | 238 | } |
| 239 | 239 | ||
| 240 | static int gemini_reset_assert(struct reset_controller_dev *rcdev, | ||
| 241 | unsigned long id) | ||
| 242 | { | ||
| 243 | return 0; | ||
| 244 | } | ||
| 245 | |||
| 246 | static int gemini_reset_deassert(struct reset_controller_dev *rcdev, | ||
| 247 | unsigned long id) | ||
| 248 | { | ||
| 249 | return 0; | ||
| 250 | } | ||
| 251 | |||
| 240 | static int gemini_reset_status(struct reset_controller_dev *rcdev, | 252 | static int gemini_reset_status(struct reset_controller_dev *rcdev, |
| 241 | unsigned long id) | 253 | unsigned long id) |
| 242 | { | 254 | { |
| @@ -253,6 +265,8 @@ static int gemini_reset_status(struct reset_controller_dev *rcdev, | |||
| 253 | 265 | ||
| 254 | static const struct reset_control_ops gemini_reset_ops = { | 266 | static const struct reset_control_ops gemini_reset_ops = { |
| 255 | .reset = gemini_reset, | 267 | .reset = gemini_reset, |
| 268 | .assert = gemini_reset_assert, | ||
| 269 | .deassert = gemini_reset_deassert, | ||
| 256 | .status = gemini_reset_status, | 270 | .status = gemini_reset_status, |
| 257 | }; | 271 | }; |
| 258 | 272 | ||
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c index 43b0f2f08df2..9cdf9d5050ac 100644 --- a/drivers/clk/keystone/sci-clk.c +++ b/drivers/clk/keystone/sci-clk.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
| 23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
| 24 | #include <linux/soc/ti/ti_sci_protocol.h> | 24 | #include <linux/soc/ti/ti_sci_protocol.h> |
| 25 | #include <linux/bsearch.h> | ||
| 25 | 26 | ||
| 26 | #define SCI_CLK_SSC_ENABLE BIT(0) | 27 | #define SCI_CLK_SSC_ENABLE BIT(0) |
| 27 | #define SCI_CLK_ALLOW_FREQ_CHANGE BIT(1) | 28 | #define SCI_CLK_ALLOW_FREQ_CHANGE BIT(1) |
| @@ -44,6 +45,7 @@ struct sci_clk_data { | |||
| 44 | * @dev: Device pointer for the clock provider | 45 | * @dev: Device pointer for the clock provider |
| 45 | * @clk_data: Clock data | 46 | * @clk_data: Clock data |
| 46 | * @clocks: Clocks array for this device | 47 | * @clocks: Clocks array for this device |
| 48 | * @num_clocks: Total number of clocks for this provider | ||
| 47 | */ | 49 | */ |
| 48 | struct sci_clk_provider { | 50 | struct sci_clk_provider { |
| 49 | const struct ti_sci_handle *sci; | 51 | const struct ti_sci_handle *sci; |
| @@ -51,6 +53,7 @@ struct sci_clk_provider { | |||
| 51 | struct device *dev; | 53 | struct device *dev; |
| 52 | const struct sci_clk_data *clk_data; | 54 | const struct sci_clk_data *clk_data; |
| 53 | struct clk_hw **clocks; | 55 | struct clk_hw **clocks; |
| 56 | int num_clocks; | ||
| 54 | }; | 57 | }; |
| 55 | 58 | ||
| 56 | /** | 59 | /** |
| @@ -58,7 +61,6 @@ struct sci_clk_provider { | |||
| 58 | * @hw: Hardware clock cookie for common clock framework | 61 | * @hw: Hardware clock cookie for common clock framework |
| 59 | * @dev_id: Device index | 62 | * @dev_id: Device index |
| 60 | * @clk_id: Clock index | 63 | * @clk_id: Clock index |
| 61 | * @node: Clocks list link | ||
| 62 | * @provider: Master clock provider | 64 | * @provider: Master clock provider |
| 63 | * @flags: Flags for the clock | 65 | * @flags: Flags for the clock |
| 64 | */ | 66 | */ |
| @@ -66,7 +68,6 @@ struct sci_clk { | |||
| 66 | struct clk_hw hw; | 68 | struct clk_hw hw; |
| 67 | u16 dev_id; | 69 | u16 dev_id; |
| 68 | u8 clk_id; | 70 | u8 clk_id; |
| 69 | struct list_head node; | ||
| 70 | struct sci_clk_provider *provider; | 71 | struct sci_clk_provider *provider; |
| 71 | u8 flags; | 72 | u8 flags; |
| 72 | }; | 73 | }; |
| @@ -367,6 +368,19 @@ err: | |||
| 367 | return &sci_clk->hw; | 368 | return &sci_clk->hw; |
| 368 | } | 369 | } |
| 369 | 370 | ||
| 371 | static int _cmp_sci_clk(const void *a, const void *b) | ||
| 372 | { | ||
| 373 | const struct sci_clk *ca = a; | ||
| 374 | const struct sci_clk *cb = *(struct sci_clk **)b; | ||
| 375 | |||
| 376 | if (ca->dev_id == cb->dev_id && ca->clk_id == cb->clk_id) | ||
| 377 | return 0; | ||
| 378 | if (ca->dev_id > cb->dev_id || | ||
| 379 | (ca->dev_id == cb->dev_id && ca->clk_id > cb->clk_id)) | ||
| 380 | return 1; | ||
| 381 | return -1; | ||
| 382 | } | ||
| 383 | |||
| 370 | /** | 384 | /** |
| 371 | * sci_clk_get - Xlate function for getting clock handles | 385 | * sci_clk_get - Xlate function for getting clock handles |
| 372 | * @clkspec: device tree clock specifier | 386 | * @clkspec: device tree clock specifier |
| @@ -380,29 +394,22 @@ err: | |||
| 380 | static struct clk_hw *sci_clk_get(struct of_phandle_args *clkspec, void *data) | 394 | static struct clk_hw *sci_clk_get(struct of_phandle_args *clkspec, void *data) |
| 381 | { | 395 | { |
| 382 | struct sci_clk_provider *provider = data; | 396 | struct sci_clk_provider *provider = data; |
| 383 | u16 dev_id; | 397 | struct sci_clk **clk; |
| 384 | u8 clk_id; | 398 | struct sci_clk key; |
| 385 | const struct sci_clk_data *clks = provider->clk_data; | ||
| 386 | struct clk_hw **clocks = provider->clocks; | ||
| 387 | 399 | ||
| 388 | if (clkspec->args_count != 2) | 400 | if (clkspec->args_count != 2) |
| 389 | return ERR_PTR(-EINVAL); | 401 | return ERR_PTR(-EINVAL); |
| 390 | 402 | ||
| 391 | dev_id = clkspec->args[0]; | 403 | key.dev_id = clkspec->args[0]; |
| 392 | clk_id = clkspec->args[1]; | 404 | key.clk_id = clkspec->args[1]; |
| 393 | 405 | ||
| 394 | while (clks->num_clks) { | 406 | clk = bsearch(&key, provider->clocks, provider->num_clocks, |
| 395 | if (clks->dev == dev_id) { | 407 | sizeof(clk), _cmp_sci_clk); |
| 396 | if (clk_id >= clks->num_clks) | ||
| 397 | return ERR_PTR(-EINVAL); | ||
| 398 | |||
| 399 | return clocks[clk_id]; | ||
| 400 | } | ||
| 401 | 408 | ||
| 402 | clks++; | 409 | if (!clk) |
| 403 | } | 410 | return ERR_PTR(-ENODEV); |
| 404 | 411 | ||
| 405 | return ERR_PTR(-ENODEV); | 412 | return &(*clk)->hw; |
| 406 | } | 413 | } |
| 407 | 414 | ||
| 408 | static int ti_sci_init_clocks(struct sci_clk_provider *p) | 415 | static int ti_sci_init_clocks(struct sci_clk_provider *p) |
| @@ -410,18 +417,29 @@ static int ti_sci_init_clocks(struct sci_clk_provider *p) | |||
| 410 | const struct sci_clk_data *data = p->clk_data; | 417 | const struct sci_clk_data *data = p->clk_data; |
| 411 | struct clk_hw *hw; | 418 | struct clk_hw *hw; |
| 412 | int i; | 419 | int i; |
| 420 | int num_clks = 0; | ||
| 413 | 421 | ||
| 414 | while (data->num_clks) { | 422 | while (data->num_clks) { |
| 415 | p->clocks = devm_kcalloc(p->dev, data->num_clks, | 423 | num_clks += data->num_clks; |
| 416 | sizeof(struct sci_clk), | 424 | data++; |
| 417 | GFP_KERNEL); | 425 | } |
| 418 | if (!p->clocks) | ||
| 419 | return -ENOMEM; | ||
| 420 | 426 | ||
| 427 | p->num_clocks = num_clks; | ||
| 428 | |||
| 429 | p->clocks = devm_kcalloc(p->dev, num_clks, sizeof(struct sci_clk), | ||
| 430 | GFP_KERNEL); | ||
| 431 | if (!p->clocks) | ||
| 432 | return -ENOMEM; | ||
| 433 | |||
| 434 | num_clks = 0; | ||
| 435 | |||
| 436 | data = p->clk_data; | ||
| 437 | |||
| 438 | while (data->num_clks) { | ||
| 421 | for (i = 0; i < data->num_clks; i++) { | 439 | for (i = 0; i < data->num_clks; i++) { |
| 422 | hw = _sci_clk_build(p, data->dev, i); | 440 | hw = _sci_clk_build(p, data->dev, i); |
| 423 | if (!IS_ERR(hw)) { | 441 | if (!IS_ERR(hw)) { |
| 424 | p->clocks[i] = hw; | 442 | p->clocks[num_clks++] = hw; |
| 425 | continue; | 443 | continue; |
| 426 | } | 444 | } |
| 427 | 445 | ||
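The subtle part of the bsearch() conversion above is that provider->clocks is an array of pointers, so the comparator receives the key directly but must dereference the table side once, as _cmp_sci_clk() does. A standalone userspace C illustration of the same shape (the struct and values are made up):

	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		int dev_id;
		int clk_id;
	};

	static int cmp_entry(const void *a, const void *b)
	{
		const struct entry *key = a;			/* &key passed to bsearch() */
		const struct entry *e = *(struct entry * const *)b; /* table element is a pointer */

		if (key->dev_id != e->dev_id)
			return key->dev_id - e->dev_id;
		return key->clk_id - e->clk_id;
	}

	int main(void)
	{
		struct entry e0 = { 1, 0 }, e1 = { 1, 3 }, e2 = { 2, 1 };
		struct entry *table[] = { &e0, &e1, &e2 };	/* sorted by (dev_id, clk_id) */
		struct entry key = { 1, 3 };
		struct entry **found;

		found = bsearch(&key, table, 3, sizeof(table[0]), cmp_entry);
		if (found)
			printf("found dev %d clk %d\n", (*found)->dev_id, (*found)->clk_id);
		else
			printf("not found\n");
		return 0;
	}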
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c index 39eab69fe51a..44a5a535ca63 100644 --- a/drivers/clk/meson/clk-mpll.c +++ b/drivers/clk/meson/clk-mpll.c | |||
| @@ -161,6 +161,13 @@ static int mpll_set_rate(struct clk_hw *hw, | |||
| 161 | reg = PARM_SET(p->width, p->shift, reg, 1); | 161 | reg = PARM_SET(p->width, p->shift, reg, 1); |
| 162 | writel(reg, mpll->base + p->reg_off); | 162 | writel(reg, mpll->base + p->reg_off); |
| 163 | 163 | ||
| 164 | p = &mpll->ssen; | ||
| 165 | if (p->width != 0) { | ||
| 166 | reg = readl(mpll->base + p->reg_off); | ||
| 167 | reg = PARM_SET(p->width, p->shift, reg, 1); | ||
| 168 | writel(reg, mpll->base + p->reg_off); | ||
| 169 | } | ||
| 170 | |||
| 164 | p = &mpll->n2; | 171 | p = &mpll->n2; |
| 165 | reg = readl(mpll->base + p->reg_off); | 172 | reg = readl(mpll->base + p->reg_off); |
| 166 | reg = PARM_SET(p->width, p->shift, reg, n2); | 173 | reg = PARM_SET(p->width, p->shift, reg, n2); |
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h index d6feafe8bd6c..1629da9b4141 100644 --- a/drivers/clk/meson/clkc.h +++ b/drivers/clk/meson/clkc.h | |||
| @@ -118,6 +118,7 @@ struct meson_clk_mpll { | |||
| 118 | struct parm sdm_en; | 118 | struct parm sdm_en; |
| 119 | struct parm n2; | 119 | struct parm n2; |
| 120 | struct parm en; | 120 | struct parm en; |
| 121 | struct parm ssen; | ||
| 121 | spinlock_t *lock; | 122 | spinlock_t *lock; |
| 122 | }; | 123 | }; |
| 123 | 124 | ||
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index a897ea45327c..a7ea5f3da89d 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c | |||
| @@ -528,6 +528,11 @@ static struct meson_clk_mpll gxbb_mpll0 = { | |||
| 528 | .shift = 14, | 528 | .shift = 14, |
| 529 | .width = 1, | 529 | .width = 1, |
| 530 | }, | 530 | }, |
| 531 | .ssen = { | ||
| 532 | .reg_off = HHI_MPLL_CNTL, | ||
| 533 | .shift = 25, | ||
| 534 | .width = 1, | ||
| 535 | }, | ||
| 531 | .lock = &clk_lock, | 536 | .lock = &clk_lock, |
| 532 | .hw.init = &(struct clk_init_data){ | 537 | .hw.init = &(struct clk_init_data){ |
| 533 | .name = "mpll0", | 538 | .name = "mpll0", |
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index bb3f1de876b1..6ec512ad2598 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c | |||
| @@ -267,6 +267,11 @@ static struct meson_clk_mpll meson8b_mpll0 = { | |||
| 267 | .shift = 14, | 267 | .shift = 14, |
| 268 | .width = 1, | 268 | .width = 1, |
| 269 | }, | 269 | }, |
| 270 | .ssen = { | ||
| 271 | .reg_off = HHI_MPLL_CNTL, | ||
| 272 | .shift = 25, | ||
| 273 | .width = 1, | ||
| 274 | }, | ||
| 270 | .lock = &clk_lock, | 275 | .lock = &clk_lock, |
| 271 | .hw.init = &(struct clk_init_data){ | 276 | .hw.init = &(struct clk_init_data){ |
| 272 | .name = "mpll0", | 277 | .name = "mpll0", |
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 0748a0b333c5..9a6476aa7d81 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c | |||
| @@ -1283,16 +1283,16 @@ static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __ini | |||
| 1283 | static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = { | 1283 | static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = { |
| 1284 | PLL_36XX_RATE(600000000U, 100, 2, 1, 0), | 1284 | PLL_36XX_RATE(600000000U, 100, 2, 1, 0), |
| 1285 | PLL_36XX_RATE(400000000U, 200, 3, 2, 0), | 1285 | PLL_36XX_RATE(400000000U, 200, 3, 2, 0), |
| 1286 | PLL_36XX_RATE(393216000U, 197, 3, 2, 25690), | 1286 | PLL_36XX_RATE(393216003U, 197, 3, 2, -25690), |
| 1287 | PLL_36XX_RATE(361267200U, 301, 5, 2, 3671), | 1287 | PLL_36XX_RATE(361267218U, 301, 5, 2, 3671), |
| 1288 | PLL_36XX_RATE(200000000U, 200, 3, 3, 0), | 1288 | PLL_36XX_RATE(200000000U, 200, 3, 3, 0), |
| 1289 | PLL_36XX_RATE(196608000U, 197, 3, 3, -25690), | 1289 | PLL_36XX_RATE(196608001U, 197, 3, 3, -25690), |
| 1290 | PLL_36XX_RATE(180633600U, 301, 5, 3, 3671), | 1290 | PLL_36XX_RATE(180633609U, 301, 5, 3, 3671), |
| 1291 | PLL_36XX_RATE(131072000U, 131, 3, 3, 4719), | 1291 | PLL_36XX_RATE(131072006U, 131, 3, 3, 4719), |
| 1292 | PLL_36XX_RATE(100000000U, 200, 3, 4, 0), | 1292 | PLL_36XX_RATE(100000000U, 200, 3, 4, 0), |
| 1293 | PLL_36XX_RATE(65536000U, 131, 3, 4, 4719), | 1293 | PLL_36XX_RATE( 65536003U, 131, 3, 4, 4719), |
| 1294 | PLL_36XX_RATE(49152000U, 197, 3, 5, 25690), | 1294 | PLL_36XX_RATE( 49152000U, 197, 3, 5, -25690), |
| 1295 | PLL_36XX_RATE(32768000U, 131, 3, 5, 4719), | 1295 | PLL_36XX_RATE( 32768001U, 131, 3, 5, 4719), |
| 1296 | }; | 1296 | }; |
| 1297 | 1297 | ||
| 1298 | static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = { | 1298 | static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = { |
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index 5372bf8be5e6..31d7ffda9aab 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c | |||
| @@ -184,7 +184,7 @@ static struct ccu_mux cpu_clk = { | |||
| 184 | .hw.init = CLK_HW_INIT_PARENTS("cpu", | 184 | .hw.init = CLK_HW_INIT_PARENTS("cpu", |
| 185 | cpu_parents, | 185 | cpu_parents, |
| 186 | &ccu_mux_ops, | 186 | &ccu_mux_ops, |
| 187 | CLK_IS_CRITICAL), | 187 | CLK_SET_RATE_PARENT | CLK_IS_CRITICAL), |
| 188 | } | 188 | } |
| 189 | }; | 189 | }; |
| 190 | 190 | ||
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c index f99abc1106f0..08ef69945ffb 100644 --- a/drivers/clk/x86/clk-pmc-atom.c +++ b/drivers/clk/x86/clk-pmc-atom.c | |||
| @@ -186,6 +186,13 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id, | |||
| 186 | pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; | 186 | pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; |
| 187 | spin_lock_init(&pclk->lock); | 187 | spin_lock_init(&pclk->lock); |
| 188 | 188 | ||
| 189 | /* | ||
| 190 | * If the clock was already enabled by the firmware mark it as critical | ||
| 191 | * to avoid it being gated by the clock framework if no driver owns it. | ||
| 192 | */ | ||
| 193 | if (plt_clk_is_enabled(&pclk->hw)) | ||
| 194 | init.flags |= CLK_IS_CRITICAL; | ||
| 195 | |||
| 189 | ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); | 196 | ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); |
| 190 | if (ret) { | 197 | if (ret) { |
| 191 | pclk = ERR_PTR(ret); | 198 | pclk = ERR_PTR(ret); |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index fcae5ca6ac92..54a67f8a28eb 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
| @@ -262,7 +262,7 @@ config CLKSRC_LPC32XX | |||
| 262 | 262 | ||
| 263 | config CLKSRC_PISTACHIO | 263 | config CLKSRC_PISTACHIO |
| 264 | bool "Clocksource for Pistachio SoC" if COMPILE_TEST | 264 | bool "Clocksource for Pistachio SoC" if COMPILE_TEST |
| 265 | depends on HAS_IOMEM | 265 | depends on GENERIC_CLOCKEVENTS && HAS_IOMEM |
| 266 | select TIMER_OF | 266 | select TIMER_OF |
| 267 | help | 267 | help |
| 268 | Enables the clocksource for the Pistachio SoC. | 268 | Enables the clocksource for the Pistachio SoC. |
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index aae87c4c546e..72bbfccef113 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c | |||
| @@ -1440,7 +1440,7 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count) | |||
| 1440 | * While unlikely, it's theoretically possible that none of the frames | 1440 | * While unlikely, it's theoretically possible that none of the frames |
| 1441 | * in a timer expose the combination of feature we want. | 1441 | * in a timer expose the combination of feature we want. |
| 1442 | */ | 1442 | */ |
| 1443 | for (i = i; i < timer_count; i++) { | 1443 | for (i = 0; i < timer_count; i++) { |
| 1444 | timer = &timers[i]; | 1444 | timer = &timers[i]; |
| 1445 | 1445 | ||
| 1446 | frame = arch_timer_mem_find_best_frame(timer); | 1446 | frame = arch_timer_mem_find_best_frame(timer); |
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c index bc48cbf6a795..269db74a0658 100644 --- a/drivers/clocksource/em_sti.c +++ b/drivers/clocksource/em_sti.c | |||
| @@ -305,7 +305,7 @@ static int em_sti_probe(struct platform_device *pdev) | |||
| 305 | irq = platform_get_irq(pdev, 0); | 305 | irq = platform_get_irq(pdev, 0); |
| 306 | if (irq < 0) { | 306 | if (irq < 0) { |
| 307 | dev_err(&pdev->dev, "failed to get irq\n"); | 307 | dev_err(&pdev->dev, "failed to get irq\n"); |
| 308 | return -EINVAL; | 308 | return irq; |
| 309 | } | 309 | } |
| 310 | 310 | ||
| 311 | /* map memory, let base point to the STI instance */ | 311 | /* map memory, let base point to the STI instance */ |
| @@ -314,11 +314,12 @@ static int em_sti_probe(struct platform_device *pdev) | |||
| 314 | if (IS_ERR(p->base)) | 314 | if (IS_ERR(p->base)) |
| 315 | return PTR_ERR(p->base); | 315 | return PTR_ERR(p->base); |
| 316 | 316 | ||
| 317 | if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt, | 317 | ret = devm_request_irq(&pdev->dev, irq, em_sti_interrupt, |
| 318 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, | 318 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, |
| 319 | dev_name(&pdev->dev), p)) { | 319 | dev_name(&pdev->dev), p); |
| 320 | if (ret) { | ||
| 320 | dev_err(&pdev->dev, "failed to request low IRQ\n"); | 321 | dev_err(&pdev->dev, "failed to request low IRQ\n"); |
| 321 | return -ENOENT; | 322 | return ret; |
| 322 | } | 323 | } |
| 323 | 324 | ||
| 324 | /* get hold of clock */ | 325 | /* get hold of clock */ |
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c index d509b500a7b5..4d7aef9d9c15 100644 --- a/drivers/clocksource/timer-of.c +++ b/drivers/clocksource/timer-of.c | |||
| @@ -128,9 +128,9 @@ static __init int timer_base_init(struct device_node *np, | |||
| 128 | const char *name = of_base->name ? of_base->name : np->full_name; | 128 | const char *name = of_base->name ? of_base->name : np->full_name; |
| 129 | 129 | ||
| 130 | of_base->base = of_io_request_and_map(np, of_base->index, name); | 130 | of_base->base = of_io_request_and_map(np, of_base->index, name); |
| 131 | if (!of_base->base) { | 131 | if (IS_ERR(of_base->base)) { |
| 132 | pr_err("Failed to iomap (%s)\n", name); | 132 | pr_err("Failed to iomap (%s)\n", name); |
| 133 | return -ENXIO; | 133 | return PTR_ERR(of_base->base); |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | return 0; | 136 | return 0; |
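of_io_request_and_map() reports failure through ERR_PTR(), never NULL, so the old "if (!of_base->base)" test could not catch an error. A minimal sketch of the corrected check (variable and resource names are illustrative):

	base = of_io_request_and_map(np, 0, "my-timer");
	if (IS_ERR(base))
		return PTR_ERR(base);	/* the error is encoded in the pointer, not NULL */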
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 2011fec2d6ad..bdce4488ded1 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
| @@ -71,15 +71,6 @@ config ARM_HIGHBANK_CPUFREQ | |||
| 71 | 71 | ||
| 72 | If in doubt, say N. | 72 | If in doubt, say N. |
| 73 | 73 | ||
| 74 | config ARM_DB8500_CPUFREQ | ||
| 75 | tristate "ST-Ericsson DB8500 cpufreq" if COMPILE_TEST && !ARCH_U8500 | ||
| 76 | default ARCH_U8500 | ||
| 77 | depends on HAS_IOMEM | ||
| 78 | depends on !CPU_THERMAL || THERMAL | ||
| 79 | help | ||
| 80 | This adds the CPUFreq driver for ST-Ericsson Ux500 (DB8500) SoC | ||
| 81 | series. | ||
| 82 | |||
| 83 | config ARM_IMX6Q_CPUFREQ | 74 | config ARM_IMX6Q_CPUFREQ |
| 84 | tristate "Freescale i.MX6 cpufreq support" | 75 | tristate "Freescale i.MX6 cpufreq support" |
| 85 | depends on ARCH_MXC | 76 | depends on ARCH_MXC |
| @@ -96,14 +87,13 @@ config ARM_KIRKWOOD_CPUFREQ | |||
| 96 | This adds the CPUFreq driver for Marvell Kirkwood | 87 | This adds the CPUFreq driver for Marvell Kirkwood |
| 97 | SoCs. | 88 | SoCs. |
| 98 | 89 | ||
| 99 | config ARM_MT8173_CPUFREQ | 90 | config ARM_MEDIATEK_CPUFREQ |
| 100 | tristate "Mediatek MT8173 CPUFreq support" | 91 | tristate "CPU Frequency scaling support for MediaTek SoCs" |
| 101 | depends on ARCH_MEDIATEK && REGULATOR | 92 | depends on ARCH_MEDIATEK && REGULATOR |
| 102 | depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST) | ||
| 103 | depends on !CPU_THERMAL || THERMAL | 93 | depends on !CPU_THERMAL || THERMAL |
| 104 | select PM_OPP | 94 | select PM_OPP |
| 105 | help | 95 | help |
| 106 | This adds the CPUFreq driver support for Mediatek MT8173 SoC. | 96 | This adds the CPUFreq driver support for MediaTek SoCs. |
| 107 | 97 | ||
| 108 | config ARM_OMAP2PLUS_CPUFREQ | 98 | config ARM_OMAP2PLUS_CPUFREQ |
| 109 | bool "TI OMAP2+" | 99 | bool "TI OMAP2+" |
| @@ -242,6 +232,11 @@ config ARM_STI_CPUFREQ | |||
| 242 | this config option if you wish to add CPUFreq support for STi based | 232 | this config option if you wish to add CPUFreq support for STi based |
| 243 | SoCs. | 233 | SoCs. |
| 244 | 234 | ||
| 235 | config ARM_TANGO_CPUFREQ | ||
| 236 | bool | ||
| 237 | depends on CPUFREQ_DT && ARCH_TANGO | ||
| 238 | default y | ||
| 239 | |||
| 245 | config ARM_TEGRA20_CPUFREQ | 240 | config ARM_TEGRA20_CPUFREQ |
| 246 | bool "Tegra20 CPUFreq support" | 241 | bool "Tegra20 CPUFreq support" |
| 247 | depends on ARCH_TEGRA | 242 | depends on ARCH_TEGRA |
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index ab3a42cd29ef..c7af9b2a255e 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile | |||
| @@ -53,12 +53,11 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o | |||
| 53 | 53 | ||
| 54 | obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o | 54 | obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o |
| 55 | obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o | 55 | obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o |
| 56 | obj-$(CONFIG_ARM_DB8500_CPUFREQ) += dbx500-cpufreq.o | ||
| 57 | obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o | 56 | obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o |
| 58 | obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o | 57 | obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o |
| 59 | obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o | 58 | obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o |
| 60 | obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o | 59 | obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o |
| 61 | obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o | 60 | obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o |
| 62 | obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o | 61 | obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o |
| 63 | obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o | 62 | obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o |
| 64 | obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o | 63 | obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o |
| @@ -75,6 +74,7 @@ obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o | |||
| 75 | obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o | 74 | obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o |
| 76 | obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o | 75 | obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o |
| 77 | obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o | 76 | obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o |
| 77 | obj-$(CONFIG_ARM_TANGO_CPUFREQ) += tango-cpufreq.o | ||
| 78 | obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o | 78 | obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o |
| 79 | obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o | 79 | obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o |
| 80 | obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o | 80 | obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o |
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index ea6d62547b10..17504129fd77 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c | |||
| @@ -483,11 +483,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) | |||
| 483 | return ret; | 483 | return ret; |
| 484 | } | 484 | } |
| 485 | 485 | ||
| 486 | if (arm_bL_ops->get_transition_latency) | 486 | policy->cpuinfo.transition_latency = |
| 487 | policy->cpuinfo.transition_latency = | 487 | arm_bL_ops->get_transition_latency(cpu_dev); |
| 488 | arm_bL_ops->get_transition_latency(cpu_dev); | ||
| 489 | else | ||
| 490 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
| 491 | 488 | ||
| 492 | if (is_bL_switching_enabled()) | 489 | if (is_bL_switching_enabled()) |
| 493 | per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); | 490 | per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); |
| @@ -622,7 +619,8 @@ int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops) | |||
| 622 | return -EBUSY; | 619 | return -EBUSY; |
| 623 | } | 620 | } |
| 624 | 621 | ||
| 625 | if (!ops || !strlen(ops->name) || !ops->init_opp_table) { | 622 | if (!ops || !strlen(ops->name) || !ops->init_opp_table || |
| 623 | !ops->get_transition_latency) { | ||
| 626 | pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__); | 624 | pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__); |
| 627 | return -ENODEV; | 625 | return -ENODEV; |
| 628 | } | 626 | } |
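With this change the big.LITTLE glue refuses to register ops that do not implement get_transition_latency(), so the CPUFREQ_ETERNAL fallback above disappears. A hedged sketch of what a client driver now supplies (the foo_* names, foo_init_opp_table and the latency value are illustrative):

	static int foo_get_transition_latency(struct device *cpu_dev)
	{
		return 1000000;			/* nanoseconds */
	}

	static struct cpufreq_arm_bL_ops foo_bL_ops = {
		.name			= "foo-bl",
		.init_opp_table		= foo_init_opp_table,
		.get_transition_latency	= foo_get_transition_latency,
	};

	/* bL_cpufreq_register(&foo_bL_ops) now rejects ops with a NULL
	 * .get_transition_latency. */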
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 10be285c9055..a1c3025f9df7 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c | |||
| @@ -172,7 +172,6 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
| 172 | return -EFAULT; | 172 | return -EFAULT; |
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | cpumask_set_cpu(policy->cpu, policy->cpus); | ||
| 176 | cpu->cur_policy = policy; | 175 | cpu->cur_policy = policy; |
| 177 | 176 | ||
| 178 | /* Set policy->cur to max now. The governors will adjust later. */ | 177 | /* Set policy->cur to max now. The governors will adjust later. */ |
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 1c262923fe58..a020da7940d6 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c | |||
| @@ -9,11 +9,16 @@ | |||
| 9 | 9 | ||
| 10 | #include <linux/err.h> | 10 | #include <linux/err.h> |
| 11 | #include <linux/of.h> | 11 | #include <linux/of.h> |
| 12 | #include <linux/of_device.h> | ||
| 12 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
| 13 | 14 | ||
| 14 | #include "cpufreq-dt.h" | 15 | #include "cpufreq-dt.h" |
| 15 | 16 | ||
| 16 | static const struct of_device_id machines[] __initconst = { | 17 | /* |
| 18 | * Machines for which the cpufreq device is *always* created, mostly used for | ||
| 19 | * platforms using "operating-points" (V1) property. | ||
| 20 | */ | ||
| 21 | static const struct of_device_id whitelist[] __initconst = { | ||
| 17 | { .compatible = "allwinner,sun4i-a10", }, | 22 | { .compatible = "allwinner,sun4i-a10", }, |
| 18 | { .compatible = "allwinner,sun5i-a10s", }, | 23 | { .compatible = "allwinner,sun5i-a10s", }, |
| 19 | { .compatible = "allwinner,sun5i-a13", }, | 24 | { .compatible = "allwinner,sun5i-a13", }, |
| @@ -22,7 +27,6 @@ static const struct of_device_id machines[] __initconst = { | |||
| 22 | { .compatible = "allwinner,sun6i-a31s", }, | 27 | { .compatible = "allwinner,sun6i-a31s", }, |
| 23 | { .compatible = "allwinner,sun7i-a20", }, | 28 | { .compatible = "allwinner,sun7i-a20", }, |
| 24 | { .compatible = "allwinner,sun8i-a23", }, | 29 | { .compatible = "allwinner,sun8i-a23", }, |
| 25 | { .compatible = "allwinner,sun8i-a33", }, | ||
| 26 | { .compatible = "allwinner,sun8i-a83t", }, | 30 | { .compatible = "allwinner,sun8i-a83t", }, |
| 27 | { .compatible = "allwinner,sun8i-h3", }, | 31 | { .compatible = "allwinner,sun8i-h3", }, |
| 28 | 32 | ||
| @@ -32,7 +36,6 @@ static const struct of_device_id machines[] __initconst = { | |||
| 32 | { .compatible = "arm,integrator-cp", }, | 36 | { .compatible = "arm,integrator-cp", }, |
| 33 | 37 | ||
| 34 | { .compatible = "hisilicon,hi3660", }, | 38 | { .compatible = "hisilicon,hi3660", }, |
| 35 | { .compatible = "hisilicon,hi6220", }, | ||
| 36 | 39 | ||
| 37 | { .compatible = "fsl,imx27", }, | 40 | { .compatible = "fsl,imx27", }, |
| 38 | { .compatible = "fsl,imx51", }, | 41 | { .compatible = "fsl,imx51", }, |
| @@ -46,11 +49,8 @@ static const struct of_device_id machines[] __initconst = { | |||
| 46 | { .compatible = "samsung,exynos3250", }, | 49 | { .compatible = "samsung,exynos3250", }, |
| 47 | { .compatible = "samsung,exynos4210", }, | 50 | { .compatible = "samsung,exynos4210", }, |
| 48 | { .compatible = "samsung,exynos4212", }, | 51 | { .compatible = "samsung,exynos4212", }, |
| 49 | { .compatible = "samsung,exynos4412", }, | ||
| 50 | { .compatible = "samsung,exynos5250", }, | 52 | { .compatible = "samsung,exynos5250", }, |
| 51 | #ifndef CONFIG_BL_SWITCHER | 53 | #ifndef CONFIG_BL_SWITCHER |
| 52 | { .compatible = "samsung,exynos5420", }, | ||
| 53 | { .compatible = "samsung,exynos5433", }, | ||
| 54 | { .compatible = "samsung,exynos5800", }, | 54 | { .compatible = "samsung,exynos5800", }, |
| 55 | #endif | 55 | #endif |
| 56 | 56 | ||
| @@ -67,6 +67,8 @@ static const struct of_device_id machines[] __initconst = { | |||
| 67 | { .compatible = "renesas,r8a7792", }, | 67 | { .compatible = "renesas,r8a7792", }, |
| 68 | { .compatible = "renesas,r8a7793", }, | 68 | { .compatible = "renesas,r8a7793", }, |
| 69 | { .compatible = "renesas,r8a7794", }, | 69 | { .compatible = "renesas,r8a7794", }, |
| 70 | { .compatible = "renesas,r8a7795", }, | ||
| 71 | { .compatible = "renesas,r8a7796", }, | ||
| 70 | { .compatible = "renesas,sh73a0", }, | 72 | { .compatible = "renesas,sh73a0", }, |
| 71 | 73 | ||
| 72 | { .compatible = "rockchip,rk2928", }, | 74 | { .compatible = "rockchip,rk2928", }, |
| @@ -76,17 +78,17 @@ static const struct of_device_id machines[] __initconst = { | |||
| 76 | { .compatible = "rockchip,rk3188", }, | 78 | { .compatible = "rockchip,rk3188", }, |
| 77 | { .compatible = "rockchip,rk3228", }, | 79 | { .compatible = "rockchip,rk3228", }, |
| 78 | { .compatible = "rockchip,rk3288", }, | 80 | { .compatible = "rockchip,rk3288", }, |
| 81 | { .compatible = "rockchip,rk3328", }, | ||
| 79 | { .compatible = "rockchip,rk3366", }, | 82 | { .compatible = "rockchip,rk3366", }, |
| 80 | { .compatible = "rockchip,rk3368", }, | 83 | { .compatible = "rockchip,rk3368", }, |
| 81 | { .compatible = "rockchip,rk3399", }, | 84 | { .compatible = "rockchip,rk3399", }, |
| 82 | 85 | ||
| 83 | { .compatible = "sigma,tango4" }, | ||
| 84 | |||
| 85 | { .compatible = "socionext,uniphier-pro5", }, | ||
| 86 | { .compatible = "socionext,uniphier-pxs2", }, | ||
| 87 | { .compatible = "socionext,uniphier-ld6b", }, | 86 | { .compatible = "socionext,uniphier-ld6b", }, |
| 88 | { .compatible = "socionext,uniphier-ld11", }, | 87 | |
| 89 | { .compatible = "socionext,uniphier-ld20", }, | 88 | { .compatible = "st-ericsson,u8500", }, |
| 89 | { .compatible = "st-ericsson,u8540", }, | ||
| 90 | { .compatible = "st-ericsson,u9500", }, | ||
| 91 | { .compatible = "st-ericsson,u9540", }, | ||
| 90 | 92 | ||
| 91 | { .compatible = "ti,omap2", }, | 93 | { .compatible = "ti,omap2", }, |
| 92 | { .compatible = "ti,omap3", }, | 94 | { .compatible = "ti,omap3", }, |
| @@ -94,27 +96,56 @@ static const struct of_device_id machines[] __initconst = { | |||
| 94 | { .compatible = "ti,omap5", }, | 96 | { .compatible = "ti,omap5", }, |
| 95 | 97 | ||
| 96 | { .compatible = "xlnx,zynq-7000", }, | 98 | { .compatible = "xlnx,zynq-7000", }, |
| 99 | { .compatible = "xlnx,zynqmp", }, | ||
| 97 | 100 | ||
| 98 | { .compatible = "zte,zx296718", }, | 101 | { } |
| 102 | }; | ||
| 99 | 103 | ||
| 104 | /* | ||
| 105 | * Machines for which the cpufreq device is *not* created, mostly used for | ||
| 106 | * platforms using "operating-points-v2" property. | ||
| 107 | */ | ||
| 108 | static const struct of_device_id blacklist[] __initconst = { | ||
| 100 | { } | 109 | { } |
| 101 | }; | 110 | }; |
| 102 | 111 | ||
| 112 | static bool __init cpu0_node_has_opp_v2_prop(void) | ||
| 113 | { | ||
| 114 | struct device_node *np = of_cpu_device_node_get(0); | ||
| 115 | bool ret = false; | ||
| 116 | |||
| 117 | if (of_get_property(np, "operating-points-v2", NULL)) | ||
| 118 | ret = true; | ||
| 119 | |||
| 120 | of_node_put(np); | ||
| 121 | return ret; | ||
| 122 | } | ||
| 123 | |||
| 103 | static int __init cpufreq_dt_platdev_init(void) | 124 | static int __init cpufreq_dt_platdev_init(void) |
| 104 | { | 125 | { |
| 105 | struct device_node *np = of_find_node_by_path("/"); | 126 | struct device_node *np = of_find_node_by_path("/"); |
| 106 | const struct of_device_id *match; | 127 | const struct of_device_id *match; |
| 128 | const void *data = NULL; | ||
| 107 | 129 | ||
| 108 | if (!np) | 130 | if (!np) |
| 109 | return -ENODEV; | 131 | return -ENODEV; |
| 110 | 132 | ||
| 111 | match = of_match_node(machines, np); | 133 | match = of_match_node(whitelist, np); |
| 134 | if (match) { | ||
| 135 | data = match->data; | ||
| 136 | goto create_pdev; | ||
| 137 | } | ||
| 138 | |||
| 139 | if (cpu0_node_has_opp_v2_prop() && !of_match_node(blacklist, np)) | ||
| 140 | goto create_pdev; | ||
| 141 | |||
| 112 | of_node_put(np); | 142 | of_node_put(np); |
| 113 | if (!match) | 143 | return -ENODEV; |
| 114 | return -ENODEV; | ||
| 115 | 144 | ||
| 145 | create_pdev: | ||
| 146 | of_node_put(np); | ||
| 116 | return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt", | 147 | return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt", |
| 117 | -1, match->data, | 148 | -1, data, |
| 118 | sizeof(struct cpufreq_dt_platform_data))); | 149 | sizeof(struct cpufreq_dt_platform_data))); |
| 119 | } | 150 | } |
| 120 | device_initcall(cpufreq_dt_platdev_init); | 151 | device_initcall(cpufreq_dt_platdev_init); |
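The platdev rework splits the old single machine table: the whitelist keeps platforms that still describe OPPs with the legacy "operating-points" (v1) property, while any machine whose cpu0 node carries "operating-points-v2" gets the cpufreq-dt device created automatically — unless its root compatible is blacklisted. A hedged sketch of how a SoC with its own dedicated cpufreq driver would opt out (the compatible string is hypothetical):

	static const struct of_device_id blacklist[] __initconst = {
		{ .compatible = "vendor,example-soc", },	/* hypothetical entry */
		{ }
	};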
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c index 5503d491b016..dbf82f36d270 100644 --- a/drivers/cpufreq/cpufreq-nforce2.c +++ b/drivers/cpufreq/cpufreq-nforce2.c | |||
| @@ -357,7 +357,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy) | |||
| 357 | /* cpuinfo and default policy values */ | 357 | /* cpuinfo and default policy values */ |
| 358 | policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100; | 358 | policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100; |
| 359 | policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100; | 359 | policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100; |
| 360 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
| 361 | 360 | ||
| 362 | return 0; | 361 | return 0; |
| 363 | } | 362 | } |
| @@ -369,6 +368,7 @@ static int nforce2_cpu_exit(struct cpufreq_policy *policy) | |||
| 369 | 368 | ||
| 370 | static struct cpufreq_driver nforce2_driver = { | 369 | static struct cpufreq_driver nforce2_driver = { |
| 371 | .name = "nforce2", | 370 | .name = "nforce2", |
| 371 | .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, | ||
| 372 | .verify = nforce2_verify, | 372 | .verify = nforce2_verify, |
| 373 | .target = nforce2_target, | 373 | .target = nforce2_target, |
| 374 | .get = nforce2_get, | 374 | .get = nforce2_get, |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 0c728190e444..ea43b147a7fe 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -524,6 +524,32 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, | |||
| 524 | } | 524 | } |
| 525 | EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); | 525 | EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); |
| 526 | 526 | ||
| 527 | unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy) | ||
| 528 | { | ||
| 529 | unsigned int latency; | ||
| 530 | |||
| 531 | if (policy->transition_delay_us) | ||
| 532 | return policy->transition_delay_us; | ||
| 533 | |||
| 534 | latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC; | ||
| 535 | if (latency) { | ||
| 536 | /* | ||
| 537 | * For platforms that can change the frequency very fast (< 10 | ||
| 538 | * us), the above formula gives a decent transition delay. But | ||
| 539 | * for platforms where transition_latency is in milliseconds, it | ||
| 540 | * ends up giving unrealistic values. | ||
| 541 | * | ||
| 542 | * Cap the default transition delay to 10 ms, which seems to be | ||
| 543 | * a reasonable amount of time after which we should reevaluate | ||
| 544 | * the frequency. | ||
| 545 | */ | ||
| 546 | return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000); | ||
| 547 | } | ||
| 548 | |||
| 549 | return LATENCY_MULTIPLIER; | ||
| 550 | } | ||
| 551 | EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us); | ||
| 552 | |||
| 527 | /********************************************************************* | 553 | /********************************************************************* |
| 528 | * SYSFS INTERFACE * | 554 | * SYSFS INTERFACE * |
| 529 | *********************************************************************/ | 555 | *********************************************************************/ |
| @@ -1989,13 +2015,13 @@ static int cpufreq_init_governor(struct cpufreq_policy *policy) | |||
| 1989 | if (!policy->governor) | 2015 | if (!policy->governor) |
| 1990 | return -EINVAL; | 2016 | return -EINVAL; |
| 1991 | 2017 | ||
| 1992 | if (policy->governor->max_transition_latency && | 2018 | /* Platform doesn't want dynamic frequency switching ? */ |
| 1993 | policy->cpuinfo.transition_latency > | 2019 | if (policy->governor->dynamic_switching && |
| 1994 | policy->governor->max_transition_latency) { | 2020 | cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) { |
| 1995 | struct cpufreq_governor *gov = cpufreq_fallback_governor(); | 2021 | struct cpufreq_governor *gov = cpufreq_fallback_governor(); |
| 1996 | 2022 | ||
| 1997 | if (gov) { | 2023 | if (gov) { |
| 1998 | pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n", | 2024 | pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n", |
| 1999 | policy->governor->name, gov->name); | 2025 | policy->governor->name, gov->name); |
| 2000 | policy->governor = gov; | 2026 | policy->governor = gov; |
| 2001 | } else { | 2027 | } else { |
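cpufreq_policy_transition_delay_us() centralizes the sampling-period heuristic: honor a driver-supplied transition_delay_us, otherwise scale the hardware latency and cap the result at 10 ms. A sketch of the same arithmetic, assuming LATENCY_MULTIPLIER is 1000 as defined in include/linux/cpufreq.h:

	/* 5000 ns latency   ->   5 us * 1000 = 5000 us   (5 ms sampling period)
	 * 500000 ns latency -> 500 us * 1000 = 500000 us, capped to 10000 us (10 ms)
	 * 0 (unknown)       -> plain LATENCY_MULTIPLIER = 1000 us (1 ms)
	 */
	latency_us = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	delay_us   = latency_us ? min(latency_us * LATENCY_MULTIPLIER, 10000U)
				: LATENCY_MULTIPLIER;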
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 88220ff3e1c2..f20f20a77d4d 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
| @@ -246,7 +246,6 @@ gov_show_one_common(sampling_rate); | |||
| 246 | gov_show_one_common(sampling_down_factor); | 246 | gov_show_one_common(sampling_down_factor); |
| 247 | gov_show_one_common(up_threshold); | 247 | gov_show_one_common(up_threshold); |
| 248 | gov_show_one_common(ignore_nice_load); | 248 | gov_show_one_common(ignore_nice_load); |
| 249 | gov_show_one_common(min_sampling_rate); | ||
| 250 | gov_show_one(cs, down_threshold); | 249 | gov_show_one(cs, down_threshold); |
| 251 | gov_show_one(cs, freq_step); | 250 | gov_show_one(cs, freq_step); |
| 252 | 251 | ||
| @@ -254,12 +253,10 @@ gov_attr_rw(sampling_rate); | |||
| 254 | gov_attr_rw(sampling_down_factor); | 253 | gov_attr_rw(sampling_down_factor); |
| 255 | gov_attr_rw(up_threshold); | 254 | gov_attr_rw(up_threshold); |
| 256 | gov_attr_rw(ignore_nice_load); | 255 | gov_attr_rw(ignore_nice_load); |
| 257 | gov_attr_ro(min_sampling_rate); | ||
| 258 | gov_attr_rw(down_threshold); | 256 | gov_attr_rw(down_threshold); |
| 259 | gov_attr_rw(freq_step); | 257 | gov_attr_rw(freq_step); |
| 260 | 258 | ||
| 261 | static struct attribute *cs_attributes[] = { | 259 | static struct attribute *cs_attributes[] = { |
| 262 | &min_sampling_rate.attr, | ||
| 263 | &sampling_rate.attr, | 260 | &sampling_rate.attr, |
| 264 | &sampling_down_factor.attr, | 261 | &sampling_down_factor.attr, |
| 265 | &up_threshold.attr, | 262 | &up_threshold.attr, |
| @@ -297,10 +294,7 @@ static int cs_init(struct dbs_data *dbs_data) | |||
| 297 | dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; | 294 | dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; |
| 298 | dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; | 295 | dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; |
| 299 | dbs_data->ignore_nice_load = 0; | 296 | dbs_data->ignore_nice_load = 0; |
| 300 | |||
| 301 | dbs_data->tuners = tuners; | 297 | dbs_data->tuners = tuners; |
| 302 | dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * | ||
| 303 | jiffies_to_usecs(10); | ||
| 304 | 298 | ||
| 305 | return 0; | 299 | return 0; |
| 306 | } | 300 | } |
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index ce5f3ec7ce71..58d4f4e1ad6a 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c | |||
| @@ -47,14 +47,11 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, | |||
| 47 | { | 47 | { |
| 48 | struct dbs_data *dbs_data = to_dbs_data(attr_set); | 48 | struct dbs_data *dbs_data = to_dbs_data(attr_set); |
| 49 | struct policy_dbs_info *policy_dbs; | 49 | struct policy_dbs_info *policy_dbs; |
| 50 | unsigned int rate; | ||
| 51 | int ret; | 50 | int ret; |
| 52 | ret = sscanf(buf, "%u", &rate); | 51 | ret = sscanf(buf, "%u", &dbs_data->sampling_rate); |
| 53 | if (ret != 1) | 52 | if (ret != 1) |
| 54 | return -EINVAL; | 53 | return -EINVAL; |
| 55 | 54 | ||
| 56 | dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate); | ||
| 57 | |||
| 58 | /* | 55 | /* |
| 59 | * We are operating under dbs_data->mutex and so the list and its | 56 | * We are operating under dbs_data->mutex and so the list and its |
| 60 | * entries can't be freed concurrently. | 57 | * entries can't be freed concurrently. |
| @@ -395,7 +392,6 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) | |||
| 395 | struct dbs_governor *gov = dbs_governor_of(policy); | 392 | struct dbs_governor *gov = dbs_governor_of(policy); |
| 396 | struct dbs_data *dbs_data; | 393 | struct dbs_data *dbs_data; |
| 397 | struct policy_dbs_info *policy_dbs; | 394 | struct policy_dbs_info *policy_dbs; |
| 398 | unsigned int latency; | ||
| 399 | int ret = 0; | 395 | int ret = 0; |
| 400 | 396 | ||
| 401 | /* State should be equivalent to EXIT */ | 397 | /* State should be equivalent to EXIT */ |
| @@ -434,16 +430,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy) | |||
| 434 | if (ret) | 430 | if (ret) |
| 435 | goto free_policy_dbs_info; | 431 | goto free_policy_dbs_info; |
| 436 | 432 | ||
| 437 | /* policy latency is in ns. Convert it to us first */ | 433 | dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy); |
| 438 | latency = policy->cpuinfo.transition_latency / 1000; | ||
| 439 | if (latency == 0) | ||
| 440 | latency = 1; | ||
| 441 | |||
| 442 | /* Bring kernel and HW constraints together */ | ||
| 443 | dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, | ||
| 444 | MIN_LATENCY_MULTIPLIER * latency); | ||
| 445 | dbs_data->sampling_rate = max(dbs_data->min_sampling_rate, | ||
| 446 | LATENCY_MULTIPLIER * latency); | ||
| 447 | 434 | ||
| 448 | if (!have_governor_per_policy()) | 435 | if (!have_governor_per_policy()) |
| 449 | gov->gdbs_data = dbs_data; | 436 | gov->gdbs_data = dbs_data; |
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 0236ec2cd654..8463f5def0f5 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h | |||
| @@ -41,7 +41,6 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE}; | |||
| 41 | struct dbs_data { | 41 | struct dbs_data { |
| 42 | struct gov_attr_set attr_set; | 42 | struct gov_attr_set attr_set; |
| 43 | void *tuners; | 43 | void *tuners; |
| 44 | unsigned int min_sampling_rate; | ||
| 45 | unsigned int ignore_nice_load; | 44 | unsigned int ignore_nice_load; |
| 46 | unsigned int sampling_rate; | 45 | unsigned int sampling_rate; |
| 47 | unsigned int sampling_down_factor; | 46 | unsigned int sampling_down_factor; |
| @@ -160,7 +159,7 @@ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy); | |||
| 160 | #define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \ | 159 | #define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_) \ |
| 161 | { \ | 160 | { \ |
| 162 | .name = _name_, \ | 161 | .name = _name_, \ |
| 163 | .max_transition_latency = TRANSITION_LATENCY_LIMIT, \ | 162 | .dynamic_switching = true, \ |
| 164 | .owner = THIS_MODULE, \ | 163 | .owner = THIS_MODULE, \ |
| 165 | .init = cpufreq_dbs_governor_init, \ | 164 | .init = cpufreq_dbs_governor_init, \ |
| 166 | .exit = cpufreq_dbs_governor_exit, \ | 165 | .exit = cpufreq_dbs_governor_exit, \ |
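The dynamic_switching flag replaces the old max_transition_latency comparison: dbs governors simply declare that they switch frequencies on their own, and drivers that cannot tolerate that (nforce2, elanfreq, gx-suspmod, pmac32, sa11x0, sh and speedstep-smi in this series) set CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING so the core falls back to a static governor. A hedged sketch of a driver opting out (other callbacks elided, names illustrative):

	static struct cpufreq_driver foo_driver = {
		.name	= "foo",
		.flags	= CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
		/* .verify, .target_index, .init, ... as usual */
	};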
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 3937acf7e026..6b423eebfd5d 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
| @@ -319,7 +319,6 @@ gov_show_one_common(sampling_rate); | |||
| 319 | gov_show_one_common(up_threshold); | 319 | gov_show_one_common(up_threshold); |
| 320 | gov_show_one_common(sampling_down_factor); | 320 | gov_show_one_common(sampling_down_factor); |
| 321 | gov_show_one_common(ignore_nice_load); | 321 | gov_show_one_common(ignore_nice_load); |
| 322 | gov_show_one_common(min_sampling_rate); | ||
| 323 | gov_show_one_common(io_is_busy); | 322 | gov_show_one_common(io_is_busy); |
| 324 | gov_show_one(od, powersave_bias); | 323 | gov_show_one(od, powersave_bias); |
| 325 | 324 | ||
| @@ -329,10 +328,8 @@ gov_attr_rw(up_threshold); | |||
| 329 | gov_attr_rw(sampling_down_factor); | 328 | gov_attr_rw(sampling_down_factor); |
| 330 | gov_attr_rw(ignore_nice_load); | 329 | gov_attr_rw(ignore_nice_load); |
| 331 | gov_attr_rw(powersave_bias); | 330 | gov_attr_rw(powersave_bias); |
| 332 | gov_attr_ro(min_sampling_rate); | ||
| 333 | 331 | ||
| 334 | static struct attribute *od_attributes[] = { | 332 | static struct attribute *od_attributes[] = { |
| 335 | &min_sampling_rate.attr, | ||
| 336 | &sampling_rate.attr, | 333 | &sampling_rate.attr, |
| 337 | &up_threshold.attr, | 334 | &up_threshold.attr, |
| 338 | &sampling_down_factor.attr, | 335 | &sampling_down_factor.attr, |
| @@ -373,17 +370,8 @@ static int od_init(struct dbs_data *dbs_data) | |||
| 373 | if (idle_time != -1ULL) { | 370 | if (idle_time != -1ULL) { |
| 374 | /* Idle micro accounting is supported. Use finer thresholds */ | 371 | /* Idle micro accounting is supported. Use finer thresholds */ |
| 375 | dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; | 372 | dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; |
| 376 | /* | ||
| 377 | * In nohz/micro accounting case we set the minimum frequency | ||
| 378 | * not depending on HZ, but fixed (very low). | ||
| 379 | */ | ||
| 380 | dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; | ||
| 381 | } else { | 373 | } else { |
| 382 | dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; | 374 | dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; |
| 383 | |||
| 384 | /* For correct statistics, we need 10 ticks for each measure */ | ||
| 385 | dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * | ||
| 386 | jiffies_to_usecs(10); | ||
| 387 | } | 375 | } |
| 388 | 376 | ||
| 389 | dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; | 377 | dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; |
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c deleted file mode 100644 index 4ee0431579c1..000000000000 --- a/drivers/cpufreq/dbx500-cpufreq.c +++ /dev/null | |||
| @@ -1,103 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) STMicroelectronics 2009 | ||
| 3 | * Copyright (C) ST-Ericsson SA 2010-2012 | ||
| 4 | * | ||
| 5 | * License Terms: GNU General Public License v2 | ||
| 6 | * Author: Sundar Iyer <sundar.iyer@stericsson.com> | ||
| 7 | * Author: Martin Persson <martin.persson@stericsson.com> | ||
| 8 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/kernel.h> | ||
| 13 | #include <linux/cpufreq.h> | ||
| 14 | #include <linux/cpu_cooling.h> | ||
| 15 | #include <linux/delay.h> | ||
| 16 | #include <linux/slab.h> | ||
| 17 | #include <linux/platform_device.h> | ||
| 18 | #include <linux/clk.h> | ||
| 19 | |||
| 20 | static struct cpufreq_frequency_table *freq_table; | ||
| 21 | static struct clk *armss_clk; | ||
| 22 | static struct thermal_cooling_device *cdev; | ||
| 23 | |||
| 24 | static int dbx500_cpufreq_target(struct cpufreq_policy *policy, | ||
| 25 | unsigned int index) | ||
| 26 | { | ||
| 27 | /* update armss clk frequency */ | ||
| 28 | return clk_set_rate(armss_clk, freq_table[index].frequency * 1000); | ||
| 29 | } | ||
| 30 | |||
| 31 | static int dbx500_cpufreq_init(struct cpufreq_policy *policy) | ||
| 32 | { | ||
| 33 | policy->clk = armss_clk; | ||
| 34 | return cpufreq_generic_init(policy, freq_table, 20 * 1000); | ||
| 35 | } | ||
| 36 | |||
| 37 | static int dbx500_cpufreq_exit(struct cpufreq_policy *policy) | ||
| 38 | { | ||
| 39 | if (!IS_ERR(cdev)) | ||
| 40 | cpufreq_cooling_unregister(cdev); | ||
| 41 | return 0; | ||
| 42 | } | ||
| 43 | |||
| 44 | static void dbx500_cpufreq_ready(struct cpufreq_policy *policy) | ||
| 45 | { | ||
| 46 | cdev = cpufreq_cooling_register(policy); | ||
| 47 | if (IS_ERR(cdev)) | ||
| 48 | pr_err("Failed to register cooling device %ld\n", PTR_ERR(cdev)); | ||
| 49 | else | ||
| 50 | pr_info("Cooling device registered: %s\n", cdev->type); | ||
| 51 | } | ||
| 52 | |||
| 53 | static struct cpufreq_driver dbx500_cpufreq_driver = { | ||
| 54 | .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | | ||
| 55 | CPUFREQ_NEED_INITIAL_FREQ_CHECK, | ||
| 56 | .verify = cpufreq_generic_frequency_table_verify, | ||
| 57 | .target_index = dbx500_cpufreq_target, | ||
| 58 | .get = cpufreq_generic_get, | ||
| 59 | .init = dbx500_cpufreq_init, | ||
| 60 | .exit = dbx500_cpufreq_exit, | ||
| 61 | .ready = dbx500_cpufreq_ready, | ||
| 62 | .name = "DBX500", | ||
| 63 | .attr = cpufreq_generic_attr, | ||
| 64 | }; | ||
| 65 | |||
| 66 | static int dbx500_cpufreq_probe(struct platform_device *pdev) | ||
| 67 | { | ||
| 68 | struct cpufreq_frequency_table *pos; | ||
| 69 | |||
| 70 | freq_table = dev_get_platdata(&pdev->dev); | ||
| 71 | if (!freq_table) { | ||
| 72 | pr_err("dbx500-cpufreq: Failed to fetch cpufreq table\n"); | ||
| 73 | return -ENODEV; | ||
| 74 | } | ||
| 75 | |||
| 76 | armss_clk = clk_get(&pdev->dev, "armss"); | ||
| 77 | if (IS_ERR(armss_clk)) { | ||
| 78 | pr_err("dbx500-cpufreq: Failed to get armss clk\n"); | ||
| 79 | return PTR_ERR(armss_clk); | ||
| 80 | } | ||
| 81 | |||
| 82 | pr_info("dbx500-cpufreq: Available frequencies:\n"); | ||
| 83 | cpufreq_for_each_entry(pos, freq_table) | ||
| 84 | pr_info(" %d Mhz\n", pos->frequency / 1000); | ||
| 85 | |||
| 86 | return cpufreq_register_driver(&dbx500_cpufreq_driver); | ||
| 87 | } | ||
| 88 | |||
| 89 | static struct platform_driver dbx500_cpufreq_plat_driver = { | ||
| 90 | .driver = { | ||
| 91 | .name = "cpufreq-ux500", | ||
| 92 | }, | ||
| 93 | .probe = dbx500_cpufreq_probe, | ||
| 94 | }; | ||
| 95 | |||
| 96 | static int __init dbx500_cpufreq_register(void) | ||
| 97 | { | ||
| 98 | return platform_driver_register(&dbx500_cpufreq_plat_driver); | ||
| 99 | } | ||
| 100 | device_initcall(dbx500_cpufreq_register); | ||
| 101 | |||
| 102 | MODULE_LICENSE("GPL v2"); | ||
| 103 | MODULE_DESCRIPTION("cpufreq driver for DBX500"); | ||
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c index bfce11cba1df..45e2ca62515e 100644 --- a/drivers/cpufreq/elanfreq.c +++ b/drivers/cpufreq/elanfreq.c | |||
| @@ -165,9 +165,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy) | |||
| 165 | if (pos->frequency > max_freq) | 165 | if (pos->frequency > max_freq) |
| 166 | pos->frequency = CPUFREQ_ENTRY_INVALID; | 166 | pos->frequency = CPUFREQ_ENTRY_INVALID; |
| 167 | 167 | ||
| 168 | /* cpuinfo and default policy values */ | ||
| 169 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
| 170 | |||
| 171 | return cpufreq_table_validate_and_show(policy, elanfreq_table); | 168 | return cpufreq_table_validate_and_show(policy, elanfreq_table); |
| 172 | } | 169 | } |
| 173 | 170 | ||
| @@ -196,6 +193,7 @@ __setup("elanfreq=", elanfreq_setup); | |||
| 196 | 193 | ||
| 197 | static struct cpufreq_driver elanfreq_driver = { | 194 | static struct cpufreq_driver elanfreq_driver = { |
| 198 | .get = elanfreq_get_cpu_frequency, | 195 | .get = elanfreq_get_cpu_frequency, |
| 196 | .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, | ||
| 199 | .verify = cpufreq_generic_frequency_table_verify, | 197 | .verify = cpufreq_generic_frequency_table_verify, |
| 200 | .target_index = elanfreq_target, | 198 | .target_index = elanfreq_target, |
| 201 | .init = elanfreq_cpu_init, | 199 | .init = elanfreq_cpu_init, |
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c index 3488c9c175eb..8f52a06664e3 100644 --- a/drivers/cpufreq/gx-suspmod.c +++ b/drivers/cpufreq/gx-suspmod.c | |||
| @@ -428,7 +428,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) | |||
| 428 | policy->max = maxfreq; | 428 | policy->max = maxfreq; |
| 429 | policy->cpuinfo.min_freq = maxfreq / max_duration; | 429 | policy->cpuinfo.min_freq = maxfreq / max_duration; |
| 430 | policy->cpuinfo.max_freq = maxfreq; | 430 | policy->cpuinfo.max_freq = maxfreq; |
| 431 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
| 432 | 431 | ||
| 433 | return 0; | 432 | return 0; |
| 434 | } | 433 | } |
| @@ -438,6 +437,7 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) | |||
| 438 | * MediaGX/Geode GX initialize cpufreq driver | 437 | * MediaGX/Geode GX initialize cpufreq driver |
| 439 | */ | 438 | */ |
| 440 | static struct cpufreq_driver gx_suspmod_driver = { | 439 | static struct cpufreq_driver gx_suspmod_driver = { |
| 440 | .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, | ||
| 441 | .get = gx_get_cpuspeed, | 441 | .get = gx_get_cpuspeed, |
| 442 | .verify = cpufreq_gx_verify, | 442 | .verify = cpufreq_gx_verify, |
| 443 | .target = cpufreq_gx_target, | 443 | .target = cpufreq_gx_target, |
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index b6edd3ccaa55..14466a9b01c0 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c | |||
| @@ -47,6 +47,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) | |||
| 47 | struct dev_pm_opp *opp; | 47 | struct dev_pm_opp *opp; |
| 48 | unsigned long freq_hz, volt, volt_old; | 48 | unsigned long freq_hz, volt, volt_old; |
| 49 | unsigned int old_freq, new_freq; | 49 | unsigned int old_freq, new_freq; |
| 50 | bool pll1_sys_temp_enabled = false; | ||
| 50 | int ret; | 51 | int ret; |
| 51 | 52 | ||
| 52 | new_freq = freq_table[index].frequency; | 53 | new_freq = freq_table[index].frequency; |
| @@ -124,6 +125,10 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) | |||
| 124 | if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) { | 125 | if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) { |
| 125 | clk_set_rate(pll1_sys_clk, new_freq * 1000); | 126 | clk_set_rate(pll1_sys_clk, new_freq * 1000); |
| 126 | clk_set_parent(pll1_sw_clk, pll1_sys_clk); | 127 | clk_set_parent(pll1_sw_clk, pll1_sys_clk); |
| 128 | } else { | ||
| 129 | /* pll1_sys needs to be enabled for divider rate change to work. */ | ||
| 130 | pll1_sys_temp_enabled = true; | ||
| 131 | clk_prepare_enable(pll1_sys_clk); | ||
| 127 | } | 132 | } |
| 128 | } | 133 | } |
| 129 | 134 | ||
| @@ -135,6 +140,10 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) | |||
| 135 | return ret; | 140 | return ret; |
| 136 | } | 141 | } |
| 137 | 142 | ||
| 143 | /* PLL1 is only needed until after ARM-PODF is set. */ | ||
| 144 | if (pll1_sys_temp_enabled) | ||
| 145 | clk_disable_unprepare(pll1_sys_clk); | ||
| 146 | |||
| 138 | /* scaling down? scale voltage after frequency */ | 147 | /* scaling down? scale voltage after frequency */ |
| 139 | if (new_freq < old_freq) { | 148 | if (new_freq < old_freq) { |
| 140 | ret = regulator_set_voltage_tol(arm_reg, volt, 0); | 149 | ret = regulator_set_voltage_tol(arm_reg, volt, 0); |
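The i.MX6 hunk keeps pll1_sys alive only for the ARM-PODF divider change: enable it before reprogramming the divider, then drop the temporary reference once the new rate is in place. A minimal sketch of the pairing (the trigger condition is illustrative, the clock name is taken from the diff):

	bool pll1_sys_temp_enabled = false;

	if (need_divider_change) {
		pll1_sys_temp_enabled = true;
		clk_prepare_enable(pll1_sys_clk);	/* PLL must run for the re-divide */
	}

	/* ... set the new ARM clock rate here ... */

	if (pll1_sys_temp_enabled)
		clk_disable_unprepare(pll1_sys_clk);	/* release the temporary reference */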
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index d299b86a5a00..0c50637e6bda 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -1613,8 +1613,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) | |||
| 1613 | 1613 | ||
| 1614 | static inline int32_t get_avg_frequency(struct cpudata *cpu) | 1614 | static inline int32_t get_avg_frequency(struct cpudata *cpu) |
| 1615 | { | 1615 | { |
| 1616 | return mul_ext_fp(cpu->sample.core_avg_perf, | 1616 | return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz); |
| 1617 | cpu->pstate.max_pstate_physical * cpu->pstate.scaling); | ||
| 1618 | } | 1617 | } |
| 1619 | 1618 | ||
| 1620 | static inline int32_t get_avg_pstate(struct cpudata *cpu) | 1619 | static inline int32_t get_avg_pstate(struct cpudata *cpu) |
| @@ -1930,13 +1929,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) | |||
| 1930 | return 0; | 1929 | return 0; |
| 1931 | } | 1930 | } |
| 1932 | 1931 | ||
| 1933 | static unsigned int intel_pstate_get(unsigned int cpu_num) | ||
| 1934 | { | ||
| 1935 | struct cpudata *cpu = all_cpu_data[cpu_num]; | ||
| 1936 | |||
| 1937 | return cpu ? get_avg_frequency(cpu) : 0; | ||
| 1938 | } | ||
| 1939 | |||
| 1940 | static void intel_pstate_set_update_util_hook(unsigned int cpu_num) | 1932 | static void intel_pstate_set_update_util_hook(unsigned int cpu_num) |
| 1941 | { | 1933 | { |
| 1942 | struct cpudata *cpu = all_cpu_data[cpu_num]; | 1934 | struct cpudata *cpu = all_cpu_data[cpu_num]; |
| @@ -2148,7 +2140,6 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) | |||
| 2148 | policy->cpuinfo.max_freq *= cpu->pstate.scaling; | 2140 | policy->cpuinfo.max_freq *= cpu->pstate.scaling; |
| 2149 | 2141 | ||
| 2150 | intel_pstate_init_acpi_perf_limits(policy); | 2142 | intel_pstate_init_acpi_perf_limits(policy); |
| 2151 | cpumask_set_cpu(policy->cpu, policy->cpus); | ||
| 2152 | 2143 | ||
| 2153 | policy->fast_switch_possible = true; | 2144 | policy->fast_switch_possible = true; |
| 2154 | 2145 | ||
| @@ -2162,7 +2153,6 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) | |||
| 2162 | if (ret) | 2153 | if (ret) |
| 2163 | return ret; | 2154 | return ret; |
| 2164 | 2155 | ||
| 2165 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
| 2166 | if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE)) | 2156 | if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE)) |
| 2167 | policy->policy = CPUFREQ_POLICY_PERFORMANCE; | 2157 | policy->policy = CPUFREQ_POLICY_PERFORMANCE; |
| 2168 | else | 2158 | else |
| @@ -2177,7 +2167,6 @@ static struct cpufreq_driver intel_pstate = { | |||
| 2177 | .setpolicy = intel_pstate_set_policy, | 2167 | .setpolicy = intel_pstate_set_policy, |
| 2178 | .suspend = intel_pstate_hwp_save_state, | 2168 | .suspend = intel_pstate_hwp_save_state, |
| 2179 | .resume = intel_pstate_resume, | 2169 | .resume = intel_pstate_resume, |
| 2180 | .get = intel_pstate_get, | ||
| 2181 | .init = intel_pstate_cpu_init, | 2170 | .init = intel_pstate_cpu_init, |
| 2182 | .exit = intel_pstate_cpu_exit, | 2171 | .exit = intel_pstate_cpu_exit, |
| 2183 | .stop_cpu = intel_pstate_stop_cpu, | 2172 | .stop_cpu = intel_pstate_stop_cpu, |
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c index 074971b12635..542aa9adba1a 100644 --- a/drivers/cpufreq/longrun.c +++ b/drivers/cpufreq/longrun.c | |||
| @@ -270,7 +270,6 @@ static int longrun_cpu_init(struct cpufreq_policy *policy) | |||
| 270 | /* cpuinfo and default policy values */ | 270 | /* cpuinfo and default policy values */ |
| 271 | policy->cpuinfo.min_freq = longrun_low_freq; | 271 | policy->cpuinfo.min_freq = longrun_low_freq; |
| 272 | policy->cpuinfo.max_freq = longrun_high_freq; | 272 | policy->cpuinfo.max_freq = longrun_high_freq; |
| 273 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
| 274 | longrun_get_policy(policy); | 273 | longrun_get_policy(policy); |
| 275 | 274 | ||
| 276 | return 0; | 275 | return 0; |
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index 9ac27b22476c..da344696beed 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c | |||
| @@ -114,7 +114,7 @@ static struct cpufreq_driver loongson2_cpufreq_driver = { | |||
| 114 | .attr = cpufreq_generic_attr, | 114 | .attr = cpufreq_generic_attr, |
| 115 | }; | 115 | }; |
| 116 | 116 | ||
| 117 | static struct platform_device_id platform_device_ids[] = { | 117 | static const struct platform_device_id platform_device_ids[] = { |
| 118 | { | 118 | { |
| 119 | .name = "loongson2_cpufreq", | 119 | .name = "loongson2_cpufreq", |
| 120 | }, | 120 | }, |
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index f9f00fb4bc3a..18c4bd9a5c65 100644 --- a/drivers/cpufreq/mt8173-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c | |||
| @@ -507,7 +507,7 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy) | |||
| 507 | return 0; | 507 | return 0; |
| 508 | } | 508 | } |
| 509 | 509 | ||
| 510 | static struct cpufreq_driver mt8173_cpufreq_driver = { | 510 | static struct cpufreq_driver mtk_cpufreq_driver = { |
| 511 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | | 511 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | |
| 512 | CPUFREQ_HAVE_GOVERNOR_PER_POLICY, | 512 | CPUFREQ_HAVE_GOVERNOR_PER_POLICY, |
| 513 | .verify = cpufreq_generic_frequency_table_verify, | 513 | .verify = cpufreq_generic_frequency_table_verify, |
| @@ -520,7 +520,7 @@ static struct cpufreq_driver mt8173_cpufreq_driver = { | |||
| 520 | .attr = cpufreq_generic_attr, | 520 | .attr = cpufreq_generic_attr, |
| 521 | }; | 521 | }; |
| 522 | 522 | ||
| 523 | static int mt8173_cpufreq_probe(struct platform_device *pdev) | 523 | static int mtk_cpufreq_probe(struct platform_device *pdev) |
| 524 | { | 524 | { |
| 525 | struct mtk_cpu_dvfs_info *info, *tmp; | 525 | struct mtk_cpu_dvfs_info *info, *tmp; |
| 526 | int cpu, ret; | 526 | int cpu, ret; |
| @@ -547,7 +547,7 @@ static int mt8173_cpufreq_probe(struct platform_device *pdev) | |||
| 547 | list_add(&info->list_head, &dvfs_info_list); | 547 | list_add(&info->list_head, &dvfs_info_list); |
| 548 | } | 548 | } |
| 549 | 549 | ||
| 550 | ret = cpufreq_register_driver(&mt8173_cpufreq_driver); | 550 | ret = cpufreq_register_driver(&mtk_cpufreq_driver); |
| 551 | if (ret) { | 551 | if (ret) { |
| 552 | dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n"); | 552 | dev_err(&pdev->dev, "failed to register mtk cpufreq driver\n"); |
| 553 | goto release_dvfs_info_list; | 553 | goto release_dvfs_info_list; |
| @@ -564,15 +564,18 @@ release_dvfs_info_list: | |||
| 564 | return ret; | 564 | return ret; |
| 565 | } | 565 | } |
| 566 | 566 | ||
| 567 | static struct platform_driver mt8173_cpufreq_platdrv = { | 567 | static struct platform_driver mtk_cpufreq_platdrv = { |
| 568 | .driver = { | 568 | .driver = { |
| 569 | .name = "mt8173-cpufreq", | 569 | .name = "mtk-cpufreq", |
| 570 | }, | 570 | }, |
| 571 | .probe = mt8173_cpufreq_probe, | 571 | .probe = mtk_cpufreq_probe, |
| 572 | }; | 572 | }; |
| 573 | 573 | ||
| 574 | /* List of machines supported by this driver */ | 574 | /* List of machines supported by this driver */ |
| 575 | static const struct of_device_id mt8173_cpufreq_machines[] __initconst = { | 575 | static const struct of_device_id mtk_cpufreq_machines[] __initconst = { |
| 576 | { .compatible = "mediatek,mt2701", }, | ||
| 577 | { .compatible = "mediatek,mt7622", }, | ||
| 578 | { .compatible = "mediatek,mt7623", }, | ||
| 576 | { .compatible = "mediatek,mt817x", }, | 579 | { .compatible = "mediatek,mt817x", }, |
| 577 | { .compatible = "mediatek,mt8173", }, | 580 | { .compatible = "mediatek,mt8173", }, |
| 578 | { .compatible = "mediatek,mt8176", }, | 581 | { .compatible = "mediatek,mt8176", }, |
| @@ -580,7 +583,7 @@ static const struct of_device_id mt8173_cpufreq_machines[] __initconst = { | |||
| 580 | { } | 583 | { } |
| 581 | }; | 584 | }; |
| 582 | 585 | ||
| 583 | static int __init mt8173_cpufreq_driver_init(void) | 586 | static int __init mtk_cpufreq_driver_init(void) |
| 584 | { | 587 | { |
| 585 | struct device_node *np; | 588 | struct device_node *np; |
| 586 | const struct of_device_id *match; | 589 | const struct of_device_id *match; |
| @@ -591,14 +594,14 @@ static int __init mt8173_cpufreq_driver_init(void) | |||
| 591 | if (!np) | 594 | if (!np) |
| 592 | return -ENODEV; | 595 | return -ENODEV; |
| 593 | 596 | ||
| 594 | match = of_match_node(mt8173_cpufreq_machines, np); | 597 | match = of_match_node(mtk_cpufreq_machines, np); |
| 595 | of_node_put(np); | 598 | of_node_put(np); |
| 596 | if (!match) { | 599 | if (!match) { |
| 597 | pr_warn("Machine is not compatible with mt8173-cpufreq\n"); | 600 | pr_warn("Machine is not compatible with mtk-cpufreq\n"); |
| 598 | return -ENODEV; | 601 | return -ENODEV; |
| 599 | } | 602 | } |
| 600 | 603 | ||
| 601 | err = platform_driver_register(&mt8173_cpufreq_platdrv); | 604 | err = platform_driver_register(&mtk_cpufreq_platdrv); |
| 602 | if (err) | 605 | if (err) |
| 603 | return err; | 606 | return err; |
| 604 | 607 | ||
| @@ -608,7 +611,7 @@ static int __init mt8173_cpufreq_driver_init(void) | |||
| 608 | * and the device registration codes are put here to handle defer | 611 | * and the device registration codes are put here to handle defer |
| 609 | * probing. | 612 | * probing. |
| 610 | */ | 613 | */ |
| 611 | pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0); | 614 | pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0); |
| 612 | if (IS_ERR(pdev)) { | 615 | if (IS_ERR(pdev)) { |
| 613 | pr_err("failed to register mtk-cpufreq platform device\n"); | 616 | pr_err("failed to register mtk-cpufreq platform device\n"); |
| 614 | return PTR_ERR(pdev); | 617 | return PTR_ERR(pdev); |
| @@ -616,4 +619,4 @@ static int __init mt8173_cpufreq_driver_init(void) | |||
| 616 | 619 | ||
| 617 | return 0; | 620 | return 0; |
| 618 | } | 621 | } |
| 619 | device_initcall(mt8173_cpufreq_driver_init); | 622 | device_initcall(mtk_cpufreq_driver_init); |
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c index ff44016ea031..61ae06ca008e 100644 --- a/drivers/cpufreq/pmac32-cpufreq.c +++ b/drivers/cpufreq/pmac32-cpufreq.c | |||
| @@ -442,7 +442,8 @@ static struct cpufreq_driver pmac_cpufreq_driver = { | |||
| 442 | .init = pmac_cpufreq_cpu_init, | 442 | .init = pmac_cpufreq_cpu_init, |
| 443 | .suspend = pmac_cpufreq_suspend, | 443 | .suspend = pmac_cpufreq_suspend, |
| 444 | .resume = pmac_cpufreq_resume, | 444 | .resume = pmac_cpufreq_resume, |
| 445 | .flags = CPUFREQ_PM_NO_WARN, | 445 | .flags = CPUFREQ_PM_NO_WARN | |
| 446 | CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, | ||
| 446 | .attr = cpufreq_generic_attr, | 447 | .attr = cpufreq_generic_attr, |
| 447 | .name = "powermac", | 448 | .name = "powermac", |
| 448 | }; | 449 | }; |
| @@ -626,14 +627,16 @@ static int __init pmac_cpufreq_setup(void) | |||
| 626 | if (!value) | 627 | if (!value) |
| 627 | goto out; | 628 | goto out; |
| 628 | cur_freq = (*value) / 1000; | 629 | cur_freq = (*value) / 1000; |
| 629 | transition_latency = CPUFREQ_ETERNAL; | ||
| 630 | 630 | ||
| 631 | /* Check for 7447A based MacRISC3 */ | 631 | /* Check for 7447A based MacRISC3 */ |
| 632 | if (of_machine_is_compatible("MacRISC3") && | 632 | if (of_machine_is_compatible("MacRISC3") && |
| 633 | of_get_property(cpunode, "dynamic-power-step", NULL) && | 633 | of_get_property(cpunode, "dynamic-power-step", NULL) && |
| 634 | PVR_VER(mfspr(SPRN_PVR)) == 0x8003) { | 634 | PVR_VER(mfspr(SPRN_PVR)) == 0x8003) { |
| 635 | pmac_cpufreq_init_7447A(cpunode); | 635 | pmac_cpufreq_init_7447A(cpunode); |
| 636 | |||
| 637 | /* Allow dynamic switching */ | ||
| 636 | transition_latency = 8000000; | 638 | transition_latency = 8000000; |
| 639 | pmac_cpufreq_driver.flags &= ~CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING; | ||
| 637 | /* Check for other MacRISC3 machines */ | 640 | /* Check for other MacRISC3 machines */ |
| 638 | } else if (of_machine_is_compatible("PowerBook3,4") || | 641 | } else if (of_machine_is_compatible("PowerBook3,4") || |
| 639 | of_machine_is_compatible("PowerBook3,5") || | 642 | of_machine_is_compatible("PowerBook3,5") || |
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c index 267e0894c62d..be623dd7b9f2 100644 --- a/drivers/cpufreq/pmac64-cpufreq.c +++ b/drivers/cpufreq/pmac64-cpufreq.c | |||
| @@ -516,7 +516,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode) | |||
| 516 | goto bail; | 516 | goto bail; |
| 517 | } | 517 | } |
| 518 | 518 | ||
| 519 | DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name); | 519 | DBG("cpufreq: i2c clock chip found: %pOF\n", hwclock); |
| 520 | 520 | ||
| 521 | /* Now get all the platform functions */ | 521 | /* Now get all the platform functions */ |
| 522 | pfunc_cpu_getfreq = | 522 | pfunc_cpu_getfreq = |
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index f82074eea779..5d31c2db12a3 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c | |||
| @@ -602,6 +602,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) | |||
| 602 | } | 602 | } |
| 603 | 603 | ||
| 604 | clk_base = of_iomap(np, 0); | 604 | clk_base = of_iomap(np, 0); |
| 605 | of_node_put(np); | ||
| 605 | if (!clk_base) { | 606 | if (!clk_base) { |
| 606 | pr_err("%s: failed to map clock registers\n", __func__); | 607 | pr_err("%s: failed to map clock registers\n", __func__); |
| 607 | return -EFAULT; | 608 | return -EFAULT; |
| @@ -612,6 +613,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) | |||
| 612 | if (id < 0 || id >= ARRAY_SIZE(dmc_base)) { | 613 | if (id < 0 || id >= ARRAY_SIZE(dmc_base)) { |
| 613 | pr_err("%s: failed to get alias of dmc node '%s'\n", | 614 | pr_err("%s: failed to get alias of dmc node '%s'\n", |
| 614 | __func__, np->name); | 615 | __func__, np->name); |
| 616 | of_node_put(np); | ||
| 615 | return id; | 617 | return id; |
| 616 | } | 618 | } |
| 617 | 619 | ||
| @@ -619,6 +621,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) | |||
| 619 | if (!dmc_base[id]) { | 621 | if (!dmc_base[id]) { |
| 620 | pr_err("%s: failed to map dmc%d registers\n", | 622 | pr_err("%s: failed to map dmc%d registers\n", |
| 621 | __func__, id); | 623 | __func__, id); |
| 624 | of_node_put(np); | ||
| 622 | return -EFAULT; | 625 | return -EFAULT; |
| 623 | } | 626 | } |
| 624 | } | 627 | } |
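The s5pv210 fix releases the device_node reference on every exit path, not only after a successful mapping. A hedged sketch of the pattern (the compatible string and error codes are illustrative):

	np = of_find_compatible_node(NULL, NULL, "vendor,example-clock");
	if (!np)
		return -ENODEV;

	base = of_iomap(np, 0);
	of_node_put(np);		/* drop the reference as soon as np is no longer needed */
	if (!base)
		return -EFAULT;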
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c index 728eab77e8e0..e2d8a77c36d5 100644 --- a/drivers/cpufreq/sa1100-cpufreq.c +++ b/drivers/cpufreq/sa1100-cpufreq.c | |||
| @@ -197,11 +197,12 @@ static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr) | |||
| 197 | 197 | ||
| 198 | static int __init sa1100_cpu_init(struct cpufreq_policy *policy) | 198 | static int __init sa1100_cpu_init(struct cpufreq_policy *policy) |
| 199 | { | 199 | { |
| 200 | return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); | 200 | return cpufreq_generic_init(policy, sa11x0_freq_table, 0); |
| 201 | } | 201 | } |
| 202 | 202 | ||
| 203 | static struct cpufreq_driver sa1100_driver __refdata = { | 203 | static struct cpufreq_driver sa1100_driver __refdata = { |
| 204 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, | 204 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | |
| 205 | CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, | ||
| 205 | .verify = cpufreq_generic_frequency_table_verify, | 206 | .verify = cpufreq_generic_frequency_table_verify, |
| 206 | .target_index = sa1100_target, | 207 | .target_index = sa1100_target, |
| 207 | .get = sa11x0_getspeed, | 208 | .get = sa11x0_getspeed, |
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c index 2bac9b6cfeea..66e5fb088ecc 100644 --- a/drivers/cpufreq/sa1110-cpufreq.c +++ b/drivers/cpufreq/sa1110-cpufreq.c | |||
| @@ -306,13 +306,14 @@ static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr) | |||
| 306 | 306 | ||
| 307 | static int __init sa1110_cpu_init(struct cpufreq_policy *policy) | 307 | static int __init sa1110_cpu_init(struct cpufreq_policy *policy) |
| 308 | { | 308 | { |
| 309 | return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); | 309 | return cpufreq_generic_init(policy, sa11x0_freq_table, 0); |
| 310 | } | 310 | } |
| 311 | 311 | ||
| 312 | /* sa1110_driver needs __refdata because it must remain after init registers | 312 | /* sa1110_driver needs __refdata because it must remain after init registers |
| 313 | * it with cpufreq_register_driver() */ | 313 | * it with cpufreq_register_driver() */ |
| 314 | static struct cpufreq_driver sa1110_driver __refdata = { | 314 | static struct cpufreq_driver sa1110_driver __refdata = { |
| 315 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, | 315 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | |
| 316 | CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, | ||
| 316 | .verify = cpufreq_generic_frequency_table_verify, | 317 | .verify = cpufreq_generic_frequency_table_verify, |
| 317 | .target_index = sa1110_target, | 318 | .target_index = sa1110_target, |
| 318 | .get = sa11x0_getspeed, | 319 | .get = sa11x0_getspeed, |
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index 719c3d9f07fb..28893d435cf5 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c | |||
| @@ -137,8 +137,6 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
| 137 | (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; | 137 | (clk_round_rate(cpuclk, ~0UL) + 500) / 1000; |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
| 141 | |||
| 142 | dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, " | 140 | dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, " |
| 143 | "Maximum %u.%03u MHz.\n", | 141 | "Maximum %u.%03u MHz.\n", |
| 144 | policy->min / 1000, policy->min % 1000, | 142 | policy->min / 1000, policy->min % 1000, |
| @@ -159,6 +157,7 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
| 159 | 157 | ||
| 160 | static struct cpufreq_driver sh_cpufreq_driver = { | 158 | static struct cpufreq_driver sh_cpufreq_driver = { |
| 161 | .name = "sh", | 159 | .name = "sh", |
| 160 | .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, | ||
| 162 | .get = sh_cpufreq_get, | 161 | .get = sh_cpufreq_get, |
| 163 | .target = sh_cpufreq_target, | 162 | .target = sh_cpufreq_target, |
| 164 | .verify = sh_cpufreq_verify, | 163 | .verify = sh_cpufreq_verify, |
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index b86953a3ddc4..0412a246a785 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c | |||
| @@ -207,7 +207,7 @@ static unsigned int speedstep_detect_chipset(void) | |||
| 207 | * 8100 which use a pretty old revision of the 82815 | 207 | * 8100 which use a pretty old revision of the 82815 |
| 208 | * host bridge. Abort on these systems. | 208 | * host bridge. Abort on these systems. |
| 209 | */ | 209 | */ |
| 210 | static struct pci_dev *hostbridge; | 210 | struct pci_dev *hostbridge; |
| 211 | 211 | ||
| 212 | hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL, | 212 | hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL, |
| 213 | PCI_DEVICE_ID_INTEL_82815_MC, | 213 | PCI_DEVICE_ID_INTEL_82815_MC, |
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index 1b8062182c81..ccab452a4ef5 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c | |||
| @@ -35,7 +35,7 @@ static int relaxed_check; | |||
| 35 | static unsigned int pentium3_get_frequency(enum speedstep_processor processor) | 35 | static unsigned int pentium3_get_frequency(enum speedstep_processor processor) |
| 36 | { | 36 | { |
| 37 | /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ | 37 | /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ |
| 38 | struct { | 38 | static const struct { |
| 39 | unsigned int ratio; /* Frequency Multiplier (x10) */ | 39 | unsigned int ratio; /* Frequency Multiplier (x10) */ |
| 40 | u8 bitmap; /* power on configuration bits | 40 | u8 bitmap; /* power on configuration bits |
| 41 | [27, 25:22] (in MSR 0x2a) */ | 41 | [27, 25:22] (in MSR 0x2a) */ |
| @@ -58,7 +58,7 @@ static unsigned int pentium3_get_frequency(enum speedstep_processor processor) | |||
| 58 | }; | 58 | }; |
| 59 | 59 | ||
| 60 | /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ | 60 | /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ |
| 61 | struct { | 61 | static const struct { |
| 62 | unsigned int value; /* Front Side Bus speed in MHz */ | 62 | unsigned int value; /* Front Side Bus speed in MHz */ |
| 63 | u8 bitmap; /* power on configuration bits [18: 19] | 63 | u8 bitmap; /* power on configuration bits [18: 19] |
| 64 | (in MSR 0x2a) */ | 64 | (in MSR 0x2a) */ |
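The speedstep-lib change above turns two function-local decode tables into static const objects, so they are emitted once into read-only data instead of being rebuilt on the stack every time pentium3_get_frequency() runs. A small stand-alone illustration of the same idiom, with made-up ratio/bitmap values:

    #include <stdio.h>

    static unsigned int ratio_for_bitmap(unsigned char bitmap)
    {
        /* static const: lives in .rodata, not re-initialized on every call */
        static const struct {
            unsigned int ratio;     /* frequency multiplier (x10), illustrative */
            unsigned char bitmap;   /* power-on configuration bits, illustrative */
        } msr_decode[] = {
            { 30, 0x01 },
            { 35, 0x05 },
            { 40, 0x02 },
        };
        unsigned int i;

        for (i = 0; i < sizeof(msr_decode) / sizeof(msr_decode[0]); i++)
            if (msr_decode[i].bitmap == bitmap)
                return msr_decode[i].ratio;
        return 0;
    }

    int main(void)
    {
        printf("ratio: %u\n", ratio_for_bitmap(0x05));
        return 0;
    }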
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c index 37b30071c220..d23f24ccff38 100644 --- a/drivers/cpufreq/speedstep-smi.c +++ b/drivers/cpufreq/speedstep-smi.c | |||
| @@ -266,7 +266,6 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy) | |||
| 266 | pr_debug("workaround worked.\n"); | 266 | pr_debug("workaround worked.\n"); |
| 267 | } | 267 | } |
| 268 | 268 | ||
| 269 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
| 270 | return cpufreq_table_validate_and_show(policy, speedstep_freqs); | 269 | return cpufreq_table_validate_and_show(policy, speedstep_freqs); |
| 271 | } | 270 | } |
| 272 | 271 | ||
| @@ -290,6 +289,7 @@ static int speedstep_resume(struct cpufreq_policy *policy) | |||
| 290 | 289 | ||
| 291 | static struct cpufreq_driver speedstep_driver = { | 290 | static struct cpufreq_driver speedstep_driver = { |
| 292 | .name = "speedstep-smi", | 291 | .name = "speedstep-smi", |
| 292 | .flags = CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, | ||
| 293 | .verify = cpufreq_generic_frequency_table_verify, | 293 | .verify = cpufreq_generic_frequency_table_verify, |
| 294 | .target_index = speedstep_target, | 294 | .target_index = speedstep_target, |
| 295 | .init = speedstep_cpu_init, | 295 | .init = speedstep_cpu_init, |
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index d2d0430d09d4..47105735df12 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c | |||
| @@ -65,8 +65,8 @@ static int sti_cpufreq_fetch_major(void) { | |||
| 65 | ret = of_property_read_u32_index(np, "st,syscfg", | 65 | ret = of_property_read_u32_index(np, "st,syscfg", |
| 66 | MAJOR_ID_INDEX, &major_offset); | 66 | MAJOR_ID_INDEX, &major_offset); |
| 67 | if (ret) { | 67 | if (ret) { |
| 68 | dev_err(dev, "No major number offset provided in %s [%d]\n", | 68 | dev_err(dev, "No major number offset provided in %pOF [%d]\n", |
| 69 | np->full_name, ret); | 69 | np, ret); |
| 70 | return ret; | 70 | return ret; |
| 71 | } | 71 | } |
| 72 | 72 | ||
| @@ -92,8 +92,8 @@ static int sti_cpufreq_fetch_minor(void) | |||
| 92 | MINOR_ID_INDEX, &minor_offset); | 92 | MINOR_ID_INDEX, &minor_offset); |
| 93 | if (ret) { | 93 | if (ret) { |
| 94 | dev_err(dev, | 94 | dev_err(dev, |
| 95 | "No minor number offset provided %s [%d]\n", | 95 | "No minor number offset provided %pOF [%d]\n", |
| 96 | np->full_name, ret); | 96 | np, ret); |
| 97 | return ret; | 97 | return ret; |
| 98 | } | 98 | } |
| 99 | 99 | ||
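Both sti-cpufreq hunks switch the error messages from printing np->full_name to the %pOF printk specifier, which lets the printk core format the device_node (its full path, by default) without dereferencing the cached name string. A hedged sketch of the idiom; the node path, property name and example_* function are assumptions for illustration, not taken from the driver:

    #include <linux/of.h>
    #include <linux/device.h>

    static int example_report_node(struct device *dev)
    {
        struct device_node *np;
        u32 val;
        int ret;

        np = of_find_node_by_path("/cpus/cpu@0");   /* illustrative path */
        if (!np)
            return -ENOENT;

        ret = of_property_read_u32(np, "reg", &val);
        if (ret)
            /* %pOF prints the node's full path; no np->full_name needed */
            dev_err(dev, "no reg property in %pOF [%d]\n", np, ret);

        of_node_put(np);
        return ret;
    }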
diff --git a/drivers/cpufreq/tango-cpufreq.c b/drivers/cpufreq/tango-cpufreq.c new file mode 100644 index 000000000000..89a7f860bfe8 --- /dev/null +++ b/drivers/cpufreq/tango-cpufreq.c | |||
| @@ -0,0 +1,38 @@ | |||
| 1 | #include <linux/of.h> | ||
| 2 | #include <linux/cpu.h> | ||
| 3 | #include <linux/clk.h> | ||
| 4 | #include <linux/pm_opp.h> | ||
| 5 | #include <linux/platform_device.h> | ||
| 6 | |||
| 7 | static const struct of_device_id machines[] __initconst = { | ||
| 8 | { .compatible = "sigma,tango4" }, | ||
| 9 | { /* sentinel */ } | ||
| 10 | }; | ||
| 11 | |||
| 12 | static int __init tango_cpufreq_init(void) | ||
| 13 | { | ||
| 14 | struct device *cpu_dev = get_cpu_device(0); | ||
| 15 | unsigned long max_freq; | ||
| 16 | struct clk *cpu_clk; | ||
| 17 | void *res; | ||
| 18 | |||
| 19 | if (!of_match_node(machines, of_root)) | ||
| 20 | return -ENODEV; | ||
| 21 | |||
| 22 | cpu_clk = clk_get(cpu_dev, NULL); | ||
| 23 | if (IS_ERR(cpu_clk)) | ||
| 24 | return -ENODEV; | ||
| 25 | |||
| 26 | max_freq = clk_get_rate(cpu_clk); | ||
| 27 | |||
| 28 | dev_pm_opp_add(cpu_dev, max_freq / 1, 0); | ||
| 29 | dev_pm_opp_add(cpu_dev, max_freq / 2, 0); | ||
| 30 | dev_pm_opp_add(cpu_dev, max_freq / 3, 0); | ||
| 31 | dev_pm_opp_add(cpu_dev, max_freq / 5, 0); | ||
| 32 | dev_pm_opp_add(cpu_dev, max_freq / 9, 0); | ||
| 33 | |||
| 34 | res = platform_device_register_data(NULL, "cpufreq-dt", -1, NULL, 0); | ||
| 35 | |||
| 36 | return PTR_ERR_OR_ZERO(res); | ||
| 37 | } | ||
| 38 | device_initcall(tango_cpufreq_init); | ||
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index a7b5658c0460..b29cd3398463 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c | |||
| @@ -245,8 +245,6 @@ static int ti_cpufreq_init(void) | |||
| 245 | if (ret) | 245 | if (ret) |
| 246 | goto fail_put_node; | 246 | goto fail_put_node; |
| 247 | 247 | ||
| 248 | of_node_put(opp_data->opp_node); | ||
| 249 | |||
| 250 | ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev, | 248 | ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev, |
| 251 | version, VERSION_COUNT)); | 249 | version, VERSION_COUNT)); |
| 252 | if (ret) { | 250 | if (ret) { |
| @@ -255,6 +253,8 @@ static int ti_cpufreq_init(void) | |||
| 255 | goto fail_put_node; | 253 | goto fail_put_node; |
| 256 | } | 254 | } |
| 257 | 255 | ||
| 256 | of_node_put(opp_data->opp_node); | ||
| 257 | |||
| 258 | register_cpufreq_dt: | 258 | register_cpufreq_dt: |
| 259 | platform_device_register_simple("cpufreq-dt", -1, NULL, 0); | 259 | platform_device_register_simple("cpufreq-dt", -1, NULL, 0); |
| 260 | 260 | ||
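The ti-cpufreq hunk only moves the of_node_put(): the OPP node reference has to stay held across the dev_pm_opp_set_supported_hw() call (whose error path also jumps to fail_put_node), and may be dropped only after the last user. A hedged sketch of that ordering, with an illustrative example_use_node() standing in for the real consumer:

    #include <linux/of.h>
    #include <linux/errno.h>

    /* stand-in for a consumer such as dev_pm_opp_set_supported_hw() */
    static int example_use_node(struct device_node *np)
    {
        return of_device_is_available(np) ? 0 : -ENODEV;
    }

    static int example_init(void)
    {
        struct device_node *np;
        int ret;

        np = of_find_node_by_path("/opp-table");    /* illustrative path */
        if (!np)
            return -ENOENT;

        ret = example_use_node(np); /* the reference must stay valid here */
        of_node_put(np);            /* drop it only after the last user */
        return ret;
    }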
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c index 6f9dfa80563a..db62d9844751 100644 --- a/drivers/cpufreq/unicore2-cpufreq.c +++ b/drivers/cpufreq/unicore2-cpufreq.c | |||
| @@ -58,13 +58,12 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy) | |||
| 58 | 58 | ||
| 59 | policy->min = policy->cpuinfo.min_freq = 250000; | 59 | policy->min = policy->cpuinfo.min_freq = 250000; |
| 60 | policy->max = policy->cpuinfo.max_freq = 1000000; | 60 | policy->max = policy->cpuinfo.max_freq = 1000000; |
| 61 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
| 62 | policy->clk = clk_get(NULL, "MAIN_CLK"); | 61 | policy->clk = clk_get(NULL, "MAIN_CLK"); |
| 63 | return PTR_ERR_OR_ZERO(policy->clk); | 62 | return PTR_ERR_OR_ZERO(policy->clk); |
| 64 | } | 63 | } |
| 65 | 64 | ||
| 66 | static struct cpufreq_driver ucv2_driver = { | 65 | static struct cpufreq_driver ucv2_driver = { |
| 67 | .flags = CPUFREQ_STICKY, | 66 | .flags = CPUFREQ_STICKY | CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING, |
| 68 | .verify = ucv2_verify_speed, | 67 | .verify = ucv2_verify_speed, |
| 69 | .target = ucv2_target, | 68 | .target = ucv2_target, |
| 70 | .get = cpufreq_generic_get, | 69 | .get = cpufreq_generic_get, |
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index 37b0698b7193..42896a67aeae 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c | |||
| @@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len, | |||
| 235 | return -1; | 235 | return -1; |
| 236 | } | 236 | } |
| 237 | 237 | ||
| 238 | extern u32 pnv_get_supported_cpuidle_states(void); | ||
| 238 | static int powernv_add_idle_states(void) | 239 | static int powernv_add_idle_states(void) |
| 239 | { | 240 | { |
| 240 | struct device_node *power_mgt; | 241 | struct device_node *power_mgt; |
| @@ -248,6 +249,8 @@ static int powernv_add_idle_states(void) | |||
| 248 | const char *names[CPUIDLE_STATE_MAX]; | 249 | const char *names[CPUIDLE_STATE_MAX]; |
| 249 | u32 has_stop_states = 0; | 250 | u32 has_stop_states = 0; |
| 250 | int i, rc; | 251 | int i, rc; |
| 252 | u32 supported_flags = pnv_get_supported_cpuidle_states(); | ||
| 253 | |||
| 251 | 254 | ||
| 252 | /* Currently we have snooze statically defined */ | 255 | /* Currently we have snooze statically defined */ |
| 253 | 256 | ||
| @@ -362,6 +365,13 @@ static int powernv_add_idle_states(void) | |||
| 362 | for (i = 0; i < dt_idle_states; i++) { | 365 | for (i = 0; i < dt_idle_states; i++) { |
| 363 | unsigned int exit_latency, target_residency; | 366 | unsigned int exit_latency, target_residency; |
| 364 | bool stops_timebase = false; | 367 | bool stops_timebase = false; |
| 368 | |||
| 369 | /* | ||
| 370 | * Skip the platform idle state whose flag isn't in | ||
| 371 | * the supported_cpuidle_states flag mask. | ||
| 372 | */ | ||
| 373 | if ((flags[i] & supported_flags) != flags[i]) | ||
| 374 | continue; | ||
| 365 | /* | 375 | /* |
| 366 | * If an idle state has exit latency beyond | 376 | * If an idle state has exit latency beyond |
| 367 | * POWERNV_THRESHOLD_LATENCY_NS then don't use it | 377 | * POWERNV_THRESHOLD_LATENCY_NS then don't use it |
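The cpuidle-powernv hunk skips any device-tree idle state whose flag bits are not a subset of the mask returned by pnv_get_supported_cpuidle_states(). The test "(flags & supported) == flags" is the usual subset check on bitmasks; a small stand-alone illustration with made-up values:

    #include <stdio.h>
    #include <stdint.h>

    /* returns 1 if every bit set in flags is also set in supported */
    static int flags_supported(uint32_t flags, uint32_t supported)
    {
        return (flags & supported) == flags;
    }

    int main(void)
    {
        const uint32_t supported = 0x0000000f;  /* illustrative platform mask */
        const uint32_t states[] = { 0x1, 0x3, 0x13 };
        unsigned int i;

        for (i = 0; i < sizeof(states) / sizeof(states[0]); i++)
            printf("state flags 0x%02x: %s\n", (unsigned int)states[i],
                   flags_supported(states[i], supported) ? "use" : "skip");
        return 0;
    }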
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 193204dfbf3a..4b75084fabad 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
| @@ -655,7 +655,7 @@ source "drivers/crypto/virtio/Kconfig" | |||
| 655 | config CRYPTO_DEV_BCM_SPU | 655 | config CRYPTO_DEV_BCM_SPU |
| 656 | tristate "Broadcom symmetric crypto/hash acceleration support" | 656 | tristate "Broadcom symmetric crypto/hash acceleration support" |
| 657 | depends on ARCH_BCM_IPROC | 657 | depends on ARCH_BCM_IPROC |
| 658 | depends on BCM_PDC_MBOX | 658 | depends on MAILBOX |
| 659 | default m | 659 | default m |
| 660 | select CRYPTO_DES | 660 | select CRYPTO_DES |
| 661 | select CRYPTO_MD5 | 661 | select CRYPTO_MD5 |
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c index ef04c9748317..bf7ac621c591 100644 --- a/drivers/crypto/bcm/spu2.c +++ b/drivers/crypto/bcm/spu2.c | |||
| @@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode, | |||
| 302 | break; | 302 | break; |
| 303 | case HASH_ALG_SHA3_512: | 303 | case HASH_ALG_SHA3_512: |
| 304 | *spu2_type = SPU2_HASH_TYPE_SHA3_512; | 304 | *spu2_type = SPU2_HASH_TYPE_SHA3_512; |
| 305 | break; | ||
| 305 | case HASH_ALG_LAST: | 306 | case HASH_ALG_LAST: |
| 306 | default: | 307 | default: |
| 307 | err = -EINVAL; | 308 | err = -EINVAL; |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index ae44a464cd2d..9ccefb9b7232 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c | |||
| @@ -18,8 +18,9 @@ | |||
| 18 | #define SE_GROUP 0 | 18 | #define SE_GROUP 0 |
| 19 | 19 | ||
| 20 | #define DRIVER_VERSION "1.0" | 20 | #define DRIVER_VERSION "1.0" |
| 21 | #define FW_DIR "cavium/" | ||
| 21 | /* SE microcode */ | 22 | /* SE microcode */ |
| 22 | #define SE_FW "cnn55xx_se.fw" | 23 | #define SE_FW FW_DIR "cnn55xx_se.fw" |
| 23 | 24 | ||
| 24 | static const char nitrox_driver_name[] = "CNN55XX"; | 25 | static const char nitrox_driver_name[] = "CNN55XX"; |
| 25 | 26 | ||
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index e7f87ac12685..1fabd4aee81b 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c | |||
| @@ -773,7 +773,6 @@ static int safexcel_probe(struct platform_device *pdev) | |||
| 773 | struct device *dev = &pdev->dev; | 773 | struct device *dev = &pdev->dev; |
| 774 | struct resource *res; | 774 | struct resource *res; |
| 775 | struct safexcel_crypto_priv *priv; | 775 | struct safexcel_crypto_priv *priv; |
| 776 | u64 dma_mask; | ||
| 777 | int i, ret; | 776 | int i, ret; |
| 778 | 777 | ||
| 779 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | 778 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); |
| @@ -802,9 +801,7 @@ static int safexcel_probe(struct platform_device *pdev) | |||
| 802 | return -EPROBE_DEFER; | 801 | return -EPROBE_DEFER; |
| 803 | } | 802 | } |
| 804 | 803 | ||
| 805 | if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask)) | 804 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); |
| 806 | dma_mask = DMA_BIT_MASK(64); | ||
| 807 | ret = dma_set_mask_and_coherent(dev, dma_mask); | ||
| 808 | if (ret) | 805 | if (ret) |
| 809 | goto err_clk; | 806 | goto err_clk; |
| 810 | 807 | ||
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 8527a5899a2f..3f819399cd95 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c | |||
| @@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
| 883 | if (ret) | 883 | if (ret) |
| 884 | return ret; | 884 | return ret; |
| 885 | 885 | ||
| 886 | memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); | 886 | for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { |
| 887 | memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE); | ||
| 888 | |||
| 889 | for (i = 0; i < ARRAY_SIZE(istate.state); i++) { | ||
| 890 | if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || | 887 | if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) || |
| 891 | ctx->opad[i] != le32_to_cpu(ostate.state[i])) { | 888 | ctx->opad[i] != le32_to_cpu(ostate.state[i])) { |
| 892 | ctx->base.needs_inv = true; | 889 | ctx->base.needs_inv = true; |
| @@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key, | |||
| 894 | } | 891 | } |
| 895 | } | 892 | } |
| 896 | 893 | ||
| 894 | memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE); | ||
| 895 | memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE); | ||
| 896 | |||
| 897 | return 0; | 897 | return 0; |
| 898 | } | 898 | } |
| 899 | 899 | ||
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 427cbe012729..dadc4a808df5 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c | |||
| @@ -1073,7 +1073,7 @@ static int aead_perform(struct aead_request *req, int encrypt, | |||
| 1073 | req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, | 1073 | req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, |
| 1074 | &crypt->icv_rev_aes); | 1074 | &crypt->icv_rev_aes); |
| 1075 | if (unlikely(!req_ctx->hmac_virt)) | 1075 | if (unlikely(!req_ctx->hmac_virt)) |
| 1076 | goto free_buf_src; | 1076 | goto free_buf_dst; |
| 1077 | if (!encrypt) { | 1077 | if (!encrypt) { |
| 1078 | scatterwalk_map_and_copy(req_ctx->hmac_virt, | 1078 | scatterwalk_map_and_copy(req_ctx->hmac_virt, |
| 1079 | req->src, cryptlen, authsize, 0); | 1079 | req->src, cryptlen, authsize, 0); |
| @@ -1088,10 +1088,10 @@ static int aead_perform(struct aead_request *req, int encrypt, | |||
| 1088 | BUG_ON(qmgr_stat_overflow(SEND_QID)); | 1088 | BUG_ON(qmgr_stat_overflow(SEND_QID)); |
| 1089 | return -EINPROGRESS; | 1089 | return -EINPROGRESS; |
| 1090 | 1090 | ||
| 1091 | free_buf_src: | ||
| 1092 | free_buf_chain(dev, req_ctx->src, crypt->src_buf); | ||
| 1093 | free_buf_dst: | 1091 | free_buf_dst: |
| 1094 | free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); | 1092 | free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); |
| 1093 | free_buf_src: | ||
| 1094 | free_buf_chain(dev, req_ctx->src, crypt->src_buf); | ||
| 1095 | crypt->ctl_flags = CTL_FLAG_UNUSED; | 1095 | crypt->ctl_flags = CTL_FLAG_UNUSED; |
| 1096 | return -ENOMEM; | 1096 | return -ENOMEM; |
| 1097 | } | 1097 | } |
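The ixp4xx hunk is a goto-unwind ordering fix: once the destination chain has been allocated, a later allocation failure has to jump to the label that frees the destination first, and the cleanup labels must unwind in reverse order of allocation. A small sketch of the pattern with illustrative resources (plain malloc/free rather than the driver's dma_pool calls):

    #include <stdlib.h>

    struct ctx { void *src; void *dst; void *hmac; };

    static int example_setup(struct ctx *c)
    {
        c->src = malloc(32);
        if (!c->src)
            return -1;

        c->dst = malloc(32);
        if (!c->dst)
            goto free_src;

        c->hmac = malloc(32);
        if (!c->hmac)
            goto free_dst;  /* dst exists by now, so it must be unwound too */

        return 0;

    free_dst:               /* labels unwind in reverse allocation order */
        free(c->dst);
    free_src:
        free(c->src);
        return -1;
    }

    int main(void)
    {
        struct ctx c = { 0 };

        if (example_setup(&c))
            return 1;
        free(c.hmac);
        free(c.dst);
        free(c.src);
        return 0;
    }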
diff --git a/drivers/dax/super.c b/drivers/dax/super.c index ce9e563e6e1d..938eb4868f7f 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c | |||
| @@ -278,6 +278,12 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc) | |||
| 278 | } | 278 | } |
| 279 | EXPORT_SYMBOL_GPL(dax_write_cache); | 279 | EXPORT_SYMBOL_GPL(dax_write_cache); |
| 280 | 280 | ||
| 281 | bool dax_write_cache_enabled(struct dax_device *dax_dev) | ||
| 282 | { | ||
| 283 | return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags); | ||
| 284 | } | ||
| 285 | EXPORT_SYMBOL_GPL(dax_write_cache_enabled); | ||
| 286 | |||
| 281 | bool dax_alive(struct dax_device *dax_dev) | 287 | bool dax_alive(struct dax_device *dax_dev) |
| 282 | { | 288 | { |
| 283 | lockdep_assert_held(&dax_srcu); | 289 | lockdep_assert_held(&dax_srcu); |
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index d7e219d2669d..66fb40d0ebdb 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c | |||
| @@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file) | |||
| 304 | { | 304 | { |
| 305 | struct sync_file *sync_file = file->private_data; | 305 | struct sync_file *sync_file = file->private_data; |
| 306 | 306 | ||
| 307 | if (test_bit(POLL_ENABLED, &sync_file->fence->flags)) | 307 | if (test_bit(POLL_ENABLED, &sync_file->flags)) |
| 308 | dma_fence_remove_callback(sync_file->fence, &sync_file->cb); | 308 | dma_fence_remove_callback(sync_file->fence, &sync_file->cb); |
| 309 | dma_fence_put(sync_file->fence); | 309 | dma_fence_put(sync_file->fence); |
| 310 | kfree(sync_file); | 310 | kfree(sync_file); |
| @@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait) | |||
| 318 | 318 | ||
| 319 | poll_wait(file, &sync_file->wq, wait); | 319 | poll_wait(file, &sync_file->wq, wait); |
| 320 | 320 | ||
| 321 | if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { | 321 | if (list_empty(&sync_file->cb.node) && |
| 322 | !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) { | ||
| 322 | if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, | 323 | if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, |
| 323 | fence_check_cb_func) < 0) | 324 | fence_check_cb_func) < 0) |
| 324 | wake_up_all(&sync_file->wq); | 325 | wake_up_all(&sync_file->wq); |
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index b10cbaa82ff5..b26256f23d67 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c | |||
| @@ -717,8 +717,8 @@ static int tegra_adma_probe(struct platform_device *pdev) | |||
| 717 | tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i); | 717 | tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i); |
| 718 | 718 | ||
| 719 | tdc->irq = of_irq_get(pdev->dev.of_node, i); | 719 | tdc->irq = of_irq_get(pdev->dev.of_node, i); |
| 720 | if (tdc->irq < 0) { | 720 | if (tdc->irq <= 0) { |
| 721 | ret = tdc->irq; | 721 | ret = tdc->irq ?: -ENXIO; |
| 722 | goto irq_dispose; | 722 | goto irq_dispose; |
| 723 | } | 723 | } |
| 724 | 724 | ||
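The tegra210-adma hunk also treats an of_irq_get() return value of 0 (no usable mapping) as an error, folding it into -ENXIO with the GCC "x ?: y" shorthand, which yields x when x is non-zero and y otherwise. A hedged sketch of that error-handling shape; example_get_irq() is an illustrative wrapper, not from the driver:

    #include <linux/of_irq.h>
    #include <linux/errno.h>

    static int example_get_irq(struct device_node *np, int index)
    {
        int irq = of_irq_get(np, index);

        /*
         * of_irq_get() returns a negative errno on failure and may return 0
         * when there is no usable mapping; fold both cases into an error.
         */
        if (irq <= 0)
            return irq ?: -ENXIO;

        return irq;
    }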
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index f235eae04c16..461d6fc3688b 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
| @@ -504,6 +504,7 @@ config GPIO_XGENE_SB | |||
| 504 | depends on ARCH_XGENE && OF_GPIO | 504 | depends on ARCH_XGENE && OF_GPIO |
| 505 | select GPIO_GENERIC | 505 | select GPIO_GENERIC |
| 506 | select GPIOLIB_IRQCHIP | 506 | select GPIOLIB_IRQCHIP |
| 507 | select IRQ_DOMAIN_HIERARCHY | ||
| 507 | help | 508 | help |
| 508 | This driver supports the GPIO block within the APM X-Gene | 509 | This driver supports the GPIO block within the APM X-Gene |
| 509 | Standby Domain. Say yes here to enable the GPIO functionality. | 510 | Standby Domain. Say yes here to enable the GPIO functionality. |
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index fb8d304cfa17..0ecd2369c2ca 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c | |||
| @@ -132,7 +132,7 @@ static int gpio_exar_probe(struct platform_device *pdev) | |||
| 132 | if (!p) | 132 | if (!p) |
| 133 | return -ENOMEM; | 133 | return -ENOMEM; |
| 134 | 134 | ||
| 135 | ret = device_property_read_u32(&pdev->dev, "linux,first-pin", | 135 | ret = device_property_read_u32(&pdev->dev, "exar,first-pin", |
| 136 | &first_pin); | 136 | &first_pin); |
| 137 | if (ret) | 137 | if (ret) |
| 138 | return ret; | 138 | return ret; |
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c index 6313c50bb91b..a121c8f10610 100644 --- a/drivers/gpio/gpio-lp87565.c +++ b/drivers/gpio/gpio-lp87565.c | |||
| @@ -26,6 +26,27 @@ struct lp87565_gpio { | |||
| 26 | struct regmap *map; | 26 | struct regmap *map; |
| 27 | }; | 27 | }; |
| 28 | 28 | ||
| 29 | static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset) | ||
| 30 | { | ||
| 31 | struct lp87565_gpio *gpio = gpiochip_get_data(chip); | ||
| 32 | int ret, val; | ||
| 33 | |||
| 34 | ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val); | ||
| 35 | if (ret < 0) | ||
| 36 | return ret; | ||
| 37 | |||
| 38 | return !!(val & BIT(offset)); | ||
| 39 | } | ||
| 40 | |||
| 41 | static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset, | ||
| 42 | int value) | ||
| 43 | { | ||
| 44 | struct lp87565_gpio *gpio = gpiochip_get_data(chip); | ||
| 45 | |||
| 46 | regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT, | ||
| 47 | BIT(offset), value ? BIT(offset) : 0); | ||
| 48 | } | ||
| 49 | |||
| 29 | static int lp87565_gpio_get_direction(struct gpio_chip *chip, | 50 | static int lp87565_gpio_get_direction(struct gpio_chip *chip, |
| 30 | unsigned int offset) | 51 | unsigned int offset) |
| 31 | { | 52 | { |
| @@ -54,30 +75,11 @@ static int lp87565_gpio_direction_output(struct gpio_chip *chip, | |||
| 54 | { | 75 | { |
| 55 | struct lp87565_gpio *gpio = gpiochip_get_data(chip); | 76 | struct lp87565_gpio *gpio = gpiochip_get_data(chip); |
| 56 | 77 | ||
| 78 | lp87565_gpio_set(chip, offset, value); | ||
| 79 | |||
| 57 | return regmap_update_bits(gpio->map, | 80 | return regmap_update_bits(gpio->map, |
| 58 | LP87565_REG_GPIO_CONFIG, | 81 | LP87565_REG_GPIO_CONFIG, |
| 59 | BIT(offset), !value ? BIT(offset) : 0); | 82 | BIT(offset), BIT(offset)); |
| 60 | } | ||
| 61 | |||
| 62 | static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset) | ||
| 63 | { | ||
| 64 | struct lp87565_gpio *gpio = gpiochip_get_data(chip); | ||
| 65 | int ret, val; | ||
| 66 | |||
| 67 | ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val); | ||
| 68 | if (ret < 0) | ||
| 69 | return ret; | ||
| 70 | |||
| 71 | return !!(val & BIT(offset)); | ||
| 72 | } | ||
| 73 | |||
| 74 | static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset, | ||
| 75 | int value) | ||
| 76 | { | ||
| 77 | struct lp87565_gpio *gpio = gpiochip_get_data(chip); | ||
| 78 | |||
| 79 | regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT, | ||
| 80 | BIT(offset), value ? BIT(offset) : 0); | ||
| 81 | } | 83 | } |
| 82 | 84 | ||
| 83 | static int lp87565_gpio_request(struct gpio_chip *gc, unsigned int offset) | 85 | static int lp87565_gpio_request(struct gpio_chip *gc, unsigned int offset) |
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index e338c3743562..45c65f805fd6 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c | |||
| @@ -557,7 +557,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc) | |||
| 557 | edge_cause = mvebu_gpio_read_edge_cause(mvchip); | 557 | edge_cause = mvebu_gpio_read_edge_cause(mvchip); |
| 558 | edge_mask = mvebu_gpio_read_edge_mask(mvchip); | 558 | edge_mask = mvebu_gpio_read_edge_mask(mvchip); |
| 559 | 559 | ||
| 560 | cause = (data_in ^ level_mask) | (edge_cause & edge_mask); | 560 | cause = (data_in & level_mask) | (edge_cause & edge_mask); |
| 561 | 561 | ||
| 562 | for (i = 0; i < mvchip->chip.ngpio; i++) { | 562 | for (i = 0; i < mvchip->chip.ngpio; i++) { |
| 563 | int irq; | 563 | int irq; |
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index 3abea3f0b307..92692251ade1 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c | |||
| @@ -424,6 +424,9 @@ static int mxc_gpio_probe(struct platform_device *pdev) | |||
| 424 | return PTR_ERR(port->base); | 424 | return PTR_ERR(port->base); |
| 425 | 425 | ||
| 426 | port->irq_high = platform_get_irq(pdev, 1); | 426 | port->irq_high = platform_get_irq(pdev, 1); |
| 427 | if (port->irq_high < 0) | ||
| 428 | port->irq_high = 0; | ||
| 429 | |||
| 427 | port->irq = platform_get_irq(pdev, 0); | 430 | port->irq = platform_get_irq(pdev, 0); |
| 428 | if (port->irq < 0) | 431 | if (port->irq < 0) |
| 429 | return port->irq; | 432 | return port->irq; |
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 88529d3c06c9..506c6a67c5fc 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c | |||
| @@ -360,7 +360,7 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc) | |||
| 360 | { | 360 | { |
| 361 | int port; | 361 | int port; |
| 362 | int pin; | 362 | int pin; |
| 363 | int unmasked = 0; | 363 | bool unmasked = false; |
| 364 | int gpio; | 364 | int gpio; |
| 365 | u32 lvl; | 365 | u32 lvl; |
| 366 | unsigned long sta; | 366 | unsigned long sta; |
| @@ -384,8 +384,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc) | |||
| 384 | * before executing the handler so that we don't | 384 | * before executing the handler so that we don't |
| 385 | * miss edges | 385 | * miss edges |
| 386 | */ | 386 | */ |
| 387 | if (lvl & (0x100 << pin)) { | 387 | if (!unmasked && lvl & (0x100 << pin)) { |
| 388 | unmasked = 1; | 388 | unmasked = true; |
| 389 | chained_irq_exit(chip, desc); | 389 | chained_irq_exit(chip, desc); |
| 390 | } | 390 | } |
| 391 | 391 | ||
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 16fe9742597b..fc80add5fedb 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #include <linux/mutex.h> | 2 | #include <linux/mutex.h> |
| 3 | #include <linux/device.h> | 3 | #include <linux/device.h> |
| 4 | #include <linux/sysfs.h> | 4 | #include <linux/sysfs.h> |
| 5 | #include <linux/gpio.h> | ||
| 5 | #include <linux/gpio/consumer.h> | 6 | #include <linux/gpio/consumer.h> |
| 6 | #include <linux/gpio/driver.h> | 7 | #include <linux/gpio/driver.h> |
| 7 | #include <linux/interrupt.h> | 8 | #include <linux/interrupt.h> |
| @@ -432,6 +433,11 @@ static struct attribute *gpiochip_attrs[] = { | |||
| 432 | }; | 433 | }; |
| 433 | ATTRIBUTE_GROUPS(gpiochip); | 434 | ATTRIBUTE_GROUPS(gpiochip); |
| 434 | 435 | ||
| 436 | static struct gpio_desc *gpio_to_valid_desc(int gpio) | ||
| 437 | { | ||
| 438 | return gpio_is_valid(gpio) ? gpio_to_desc(gpio) : NULL; | ||
| 439 | } | ||
| 440 | |||
| 435 | /* | 441 | /* |
| 436 | * /sys/class/gpio/export ... write-only | 442 | * /sys/class/gpio/export ... write-only |
| 437 | * integer N ... number of GPIO to export (full access) | 443 | * integer N ... number of GPIO to export (full access) |
| @@ -450,7 +456,7 @@ static ssize_t export_store(struct class *class, | |||
| 450 | if (status < 0) | 456 | if (status < 0) |
| 451 | goto done; | 457 | goto done; |
| 452 | 458 | ||
| 453 | desc = gpio_to_desc(gpio); | 459 | desc = gpio_to_valid_desc(gpio); |
| 454 | /* reject invalid GPIOs */ | 460 | /* reject invalid GPIOs */ |
| 455 | if (!desc) { | 461 | if (!desc) { |
| 456 | pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); | 462 | pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); |
| @@ -493,7 +499,7 @@ static ssize_t unexport_store(struct class *class, | |||
| 493 | if (status < 0) | 499 | if (status < 0) |
| 494 | goto done; | 500 | goto done; |
| 495 | 501 | ||
| 496 | desc = gpio_to_desc(gpio); | 502 | desc = gpio_to_valid_desc(gpio); |
| 497 | /* reject bogus commands (gpio_unexport ignores them) */ | 503 | /* reject bogus commands (gpio_unexport ignores them) */ |
| 498 | if (!desc) { | 504 | if (!desc) { |
| 499 | pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); | 505 | pr_warn("%s: invalid GPIO %ld\n", __func__, gpio); |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 9568708a550b..cd003b74512f 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -704,24 +704,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) | |||
| 704 | { | 704 | { |
| 705 | struct lineevent_state *le = p; | 705 | struct lineevent_state *le = p; |
| 706 | struct gpioevent_data ge; | 706 | struct gpioevent_data ge; |
| 707 | int ret; | 707 | int ret, level; |
| 708 | 708 | ||
| 709 | ge.timestamp = ktime_get_real_ns(); | 709 | ge.timestamp = ktime_get_real_ns(); |
| 710 | level = gpiod_get_value_cansleep(le->desc); | ||
| 710 | 711 | ||
| 711 | if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE | 712 | if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE |
| 712 | && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { | 713 | && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { |
| 713 | int level = gpiod_get_value_cansleep(le->desc); | ||
| 714 | |||
| 715 | if (level) | 714 | if (level) |
| 716 | /* Emit low-to-high event */ | 715 | /* Emit low-to-high event */ |
| 717 | ge.id = GPIOEVENT_EVENT_RISING_EDGE; | 716 | ge.id = GPIOEVENT_EVENT_RISING_EDGE; |
| 718 | else | 717 | else |
| 719 | /* Emit high-to-low event */ | 718 | /* Emit high-to-low event */ |
| 720 | ge.id = GPIOEVENT_EVENT_FALLING_EDGE; | 719 | ge.id = GPIOEVENT_EVENT_FALLING_EDGE; |
| 721 | } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) { | 720 | } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) { |
| 722 | /* Emit low-to-high event */ | 721 | /* Emit low-to-high event */ |
| 723 | ge.id = GPIOEVENT_EVENT_RISING_EDGE; | 722 | ge.id = GPIOEVENT_EVENT_RISING_EDGE; |
| 724 | } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { | 723 | } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) { |
| 725 | /* Emit high-to-low event */ | 724 | /* Emit high-to-low event */ |
| 726 | ge.id = GPIOEVENT_EVENT_FALLING_EDGE; | 725 | ge.id = GPIOEVENT_EVENT_FALLING_EDGE; |
| 727 | } else { | 726 | } else { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index f621ee115c98..5e771bc11b00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | |||
| @@ -198,12 +198,16 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id) | |||
| 198 | result = idr_find(&fpriv->bo_list_handles, id); | 198 | result = idr_find(&fpriv->bo_list_handles, id); |
| 199 | 199 | ||
| 200 | if (result) { | 200 | if (result) { |
| 201 | if (kref_get_unless_zero(&result->refcount)) | 201 | if (kref_get_unless_zero(&result->refcount)) { |
| 202 | rcu_read_unlock(); | ||
| 202 | mutex_lock(&result->lock); | 203 | mutex_lock(&result->lock); |
| 203 | else | 204 | } else { |
| 205 | rcu_read_unlock(); | ||
| 204 | result = NULL; | 206 | result = NULL; |
| 207 | } | ||
| 208 | } else { | ||
| 209 | rcu_read_unlock(); | ||
| 205 | } | 210 | } |
| 206 | rcu_read_unlock(); | ||
| 207 | 211 | ||
| 208 | return result; | 212 | return result; |
| 209 | } | 213 | } |
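The amdgpu_bo_list_get() hunk drops the RCU read lock before taking result->lock: a mutex may sleep, and sleeping inside an RCU read-side critical section is not allowed, so the object is instead kept alive across the unlock by the reference taken with kref_get_unless_zero(). A sketch of that lookup pattern under the same assumptions, with an illustrative example_obj type:

    #include <linux/rcupdate.h>
    #include <linux/kref.h>
    #include <linux/mutex.h>
    #include <linux/idr.h>

    struct example_obj {
        struct kref refcount;
        struct mutex lock;
    };

    static struct example_obj *example_lookup(struct idr *idr, int id)
    {
        struct example_obj *obj;

        rcu_read_lock();
        obj = idr_find(idr, id);
        if (obj && !kref_get_unless_zero(&obj->refcount))
            obj = NULL;
        rcu_read_unlock();              /* must not sleep while this is held */

        if (obj)
            mutex_lock(&obj->lock);     /* safe now: we hold our own reference */

        return obj;
    }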
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 38f739fb727b..e1cde6b80027 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | |||
| @@ -147,36 +147,6 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, | |||
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | /** | 149 | /** |
| 150 | * amdgpu_mn_invalidate_page - callback to notify about mm change | ||
| 151 | * | ||
| 152 | * @mn: our notifier | ||
| 153 | * @mn: the mm this callback is about | ||
| 154 | * @address: address of invalidate page | ||
| 155 | * | ||
| 156 | * Invalidation of a single page. Blocks for all BOs mapping it | ||
| 157 | * and unmap them by move them into system domain again. | ||
| 158 | */ | ||
| 159 | static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn, | ||
| 160 | struct mm_struct *mm, | ||
| 161 | unsigned long address) | ||
| 162 | { | ||
| 163 | struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn); | ||
| 164 | struct interval_tree_node *it; | ||
| 165 | |||
| 166 | mutex_lock(&rmn->lock); | ||
| 167 | |||
| 168 | it = interval_tree_iter_first(&rmn->objects, address, address); | ||
| 169 | if (it) { | ||
| 170 | struct amdgpu_mn_node *node; | ||
| 171 | |||
| 172 | node = container_of(it, struct amdgpu_mn_node, it); | ||
| 173 | amdgpu_mn_invalidate_node(node, address, address); | ||
| 174 | } | ||
| 175 | |||
| 176 | mutex_unlock(&rmn->lock); | ||
| 177 | } | ||
| 178 | |||
| 179 | /** | ||
| 180 | * amdgpu_mn_invalidate_range_start - callback to notify about mm change | 150 | * amdgpu_mn_invalidate_range_start - callback to notify about mm change |
| 181 | * | 151 | * |
| 182 | * @mn: our notifier | 152 | * @mn: our notifier |
| @@ -215,7 +185,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, | |||
| 215 | 185 | ||
| 216 | static const struct mmu_notifier_ops amdgpu_mn_ops = { | 186 | static const struct mmu_notifier_ops amdgpu_mn_ops = { |
| 217 | .release = amdgpu_mn_release, | 187 | .release = amdgpu_mn_release, |
| 218 | .invalidate_page = amdgpu_mn_invalidate_page, | ||
| 219 | .invalidate_range_start = amdgpu_mn_invalidate_range_start, | 188 | .invalidate_range_start = amdgpu_mn_invalidate_range_start, |
| 220 | }; | 189 | }; |
| 221 | 190 | ||
| @@ -359,7 +328,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) | |||
| 359 | head = bo->mn_list.next; | 328 | head = bo->mn_list.next; |
| 360 | 329 | ||
| 361 | bo->mn = NULL; | 330 | bo->mn = NULL; |
| 362 | list_del(&bo->mn_list); | 331 | list_del_init(&bo->mn_list); |
| 363 | 332 | ||
| 364 | if (list_empty(head)) { | 333 | if (list_empty(head)) { |
| 365 | struct amdgpu_mn_node *node; | 334 | struct amdgpu_mn_node *node; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index a6899180b265..c586f44312f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | |||
| @@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | |||
| 244 | struct dma_fence *f = e->fence; | 244 | struct dma_fence *f = e->fence; |
| 245 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); | 245 | struct amd_sched_fence *s_fence = to_amd_sched_fence(f); |
| 246 | 246 | ||
| 247 | if (dma_fence_is_signaled(f)) { | ||
| 248 | hash_del(&e->node); | ||
| 249 | dma_fence_put(f); | ||
| 250 | kmem_cache_free(amdgpu_sync_slab, e); | ||
| 251 | continue; | ||
| 252 | } | ||
| 247 | if (ring && s_fence) { | 253 | if (ring && s_fence) { |
| 248 | /* For fences from the same ring it is sufficient | 254 | /* For fences from the same ring it is sufficient |
| 249 | * when they are scheduled. | 255 | * when they are scheduled. |
| @@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | |||
| 256 | } | 262 | } |
| 257 | } | 263 | } |
| 258 | 264 | ||
| 259 | if (dma_fence_is_signaled(f)) { | ||
| 260 | hash_del(&e->node); | ||
| 261 | dma_fence_put(f); | ||
| 262 | kmem_cache_free(amdgpu_sync_slab, e); | ||
| 263 | continue; | ||
| 264 | } | ||
| 265 | |||
| 266 | return f; | 265 | return f; |
| 267 | } | 266 | } |
| 268 | 267 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h index 18fd01f3e4b2..003a131bad47 100644 --- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h +++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h | |||
| @@ -1,24 +1,25 @@ | |||
| 1 | |||
| 2 | /* | 1 | /* |
| 3 | *************************************************************************************************** | 2 | * Copyright 2017 Advanced Micro Devices, Inc. |
| 4 | * | 3 | * |
| 5 | * Trade secret of Advanced Micro Devices, Inc. | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 6 | * Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished) | 5 | * copy of this software and associated documentation files (the "Software"), |
| 7 | * | 6 | * to deal in the Software without restriction, including without limitation |
| 8 | * All rights reserved. This notice is intended as a precaution against inadvertent publication and | 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 9 | * does not imply publication or any waiver of confidentiality. The year included in the foregoing | 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 10 | * notice is the year of creation of the work. | 9 | * Software is furnished to do so, subject to the following conditions: |
| 11 | * | 10 | * |
| 12 | *************************************************************************************************** | 11 | * The above copyright notice and this permission notice shall be included in |
| 13 | */ | 12 | * all copies or substantial portions of the Software. |
| 14 | /** | 13 | * |
| 15 | *************************************************************************************************** | 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 16 | * @brief gfx9 Clearstate Definitions | 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 17 | *************************************************************************************************** | 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 18 | * | 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 19 | * Do not edit! This is a machine-generated file! | 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 20 | * | 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 21 | */ | 20 | * OTHER DEALINGS IN THE SOFTWARE. |
| 21 | * | ||
| 22 | */ | ||
| 22 | 23 | ||
| 23 | static const unsigned int gfx9_SECT_CONTEXT_def_1[] = | 24 | static const unsigned int gfx9_SECT_CONTEXT_def_1[] = |
| 24 | { | 25 | { |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 3a0b69b09ed6..c9b9c88231aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | |||
| @@ -1475,21 +1475,23 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev) | |||
| 1475 | 1475 | ||
| 1476 | static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) | 1476 | static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) |
| 1477 | { | 1477 | { |
| 1478 | u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); | 1478 | u32 data; |
| 1479 | 1479 | ||
| 1480 | if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { | 1480 | if (instance == 0xffffffff) |
| 1481 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); | 1481 | data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); |
| 1482 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); | 1482 | else |
| 1483 | } else if (se_num == 0xffffffff) { | 1483 | data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); |
| 1484 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); | 1484 | |
| 1485 | if (se_num == 0xffffffff) | ||
| 1485 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); | 1486 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); |
| 1486 | } else if (sh_num == 0xffffffff) { | 1487 | else |
| 1487 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); | ||
| 1488 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); | 1488 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); |
| 1489 | } else { | 1489 | |
| 1490 | if (sh_num == 0xffffffff) | ||
| 1491 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); | ||
| 1492 | else | ||
| 1490 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); | 1493 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); |
| 1491 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); | 1494 | |
| 1492 | } | ||
| 1493 | WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); | 1495 | WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); |
| 1494 | } | 1496 | } |
| 1495 | 1497 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index f45fb0f022b3..4267fa417997 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c | |||
| @@ -1385,6 +1385,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev) | |||
| 1385 | amdgpu_program_register_sequence(adev, | 1385 | amdgpu_program_register_sequence(adev, |
| 1386 | pitcairn_mgcg_cgcg_init, | 1386 | pitcairn_mgcg_cgcg_init, |
| 1387 | (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init)); | 1387 | (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init)); |
| 1388 | break; | ||
| 1388 | case CHIP_VERDE: | 1389 | case CHIP_VERDE: |
| 1389 | amdgpu_program_register_sequence(adev, | 1390 | amdgpu_program_register_sequence(adev, |
| 1390 | verde_golden_registers, | 1391 | verde_golden_registers, |
| @@ -1409,6 +1410,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev) | |||
| 1409 | amdgpu_program_register_sequence(adev, | 1410 | amdgpu_program_register_sequence(adev, |
| 1410 | oland_mgcg_cgcg_init, | 1411 | oland_mgcg_cgcg_init, |
| 1411 | (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init)); | 1412 | (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init)); |
| 1413 | break; | ||
| 1412 | case CHIP_HAINAN: | 1414 | case CHIP_HAINAN: |
| 1413 | amdgpu_program_register_sequence(adev, | 1415 | amdgpu_program_register_sequence(adev, |
| 1414 | hainan_golden_registers, | 1416 | hainan_golden_registers, |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index d6f097f44b6c..197174e562d2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | |||
| @@ -2128,15 +2128,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) | |||
| 2128 | pp_table->AvfsGbCksOff.m2_shift = 12; | 2128 | pp_table->AvfsGbCksOff.m2_shift = 12; |
| 2129 | pp_table->AvfsGbCksOff.b_shift = 0; | 2129 | pp_table->AvfsGbCksOff.b_shift = 0; |
| 2130 | 2130 | ||
| 2131 | for (i = 0; i < dep_table->count; i++) { | 2131 | for (i = 0; i < dep_table->count; i++) |
| 2132 | if (dep_table->entries[i].sclk_offset == 0) | 2132 | pp_table->StaticVoltageOffsetVid[i] = |
| 2133 | pp_table->StaticVoltageOffsetVid[i] = 248; | 2133 | convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset)); |
| 2134 | else | ||
| 2135 | pp_table->StaticVoltageOffsetVid[i] = | ||
| 2136 | (uint8_t)(dep_table->entries[i].sclk_offset * | ||
| 2137 | VOLTAGE_VID_OFFSET_SCALE2 / | ||
| 2138 | VOLTAGE_VID_OFFSET_SCALE1); | ||
| 2139 | } | ||
| 2140 | 2134 | ||
| 2141 | if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != | 2135 | if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != |
| 2142 | data->disp_clk_quad_eqn_a) && | 2136 | data->disp_clk_quad_eqn_a) && |
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 2d51a2269fc6..5131bfb94f06 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c | |||
| @@ -597,9 +597,9 @@ static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap) | |||
| 597 | static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx, | 597 | static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx, |
| 598 | struct sii8620_mt_msg *msg) | 598 | struct sii8620_mt_msg *msg) |
| 599 | { | 599 | { |
| 600 | u8 reg = msg->reg[0] & 0x7f; | 600 | u8 reg = msg->reg[1] & 0x7f; |
| 601 | 601 | ||
| 602 | if (msg->reg[0] & 0x80) | 602 | if (msg->reg[1] & 0x80) |
| 603 | ctx->xdevcap[reg] = msg->ret; | 603 | ctx->xdevcap[reg] = msg->ret; |
| 604 | else | 604 | else |
| 605 | ctx->devcap[reg] = msg->ret; | 605 | ctx->devcap[reg] = msg->ret; |
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 5c26488e7a2d..0529e500c534 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c | |||
| @@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
| 1255 | 1255 | ||
| 1256 | /* port@2 is the output port */ | 1256 | /* port@2 is the output port */ |
| 1257 | ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL); | 1257 | ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL); |
| 1258 | if (ret) | 1258 | if (ret && ret != -ENODEV) |
| 1259 | return ret; | 1259 | return ret; |
| 1260 | 1260 | ||
| 1261 | /* Shut down GPIO is optional */ | 1261 | /* Shut down GPIO is optional */ |
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index c0f336d23f9c..aed25c4183bb 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
| @@ -1655,6 +1655,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state) | |||
| 1655 | if (config->funcs->atomic_check) | 1655 | if (config->funcs->atomic_check) |
| 1656 | ret = config->funcs->atomic_check(state->dev, state); | 1656 | ret = config->funcs->atomic_check(state->dev, state); |
| 1657 | 1657 | ||
| 1658 | if (ret) | ||
| 1659 | return ret; | ||
| 1660 | |||
| 1658 | if (!state->allow_modeset) { | 1661 | if (!state->allow_modeset) { |
| 1659 | for_each_new_crtc_in_state(state, crtc, crtc_state, i) { | 1662 | for_each_new_crtc_in_state(state, crtc, crtc_state, i) { |
| 1660 | if (drm_atomic_crtc_needs_modeset(crtc_state)) { | 1663 | if (drm_atomic_crtc_needs_modeset(crtc_state)) { |
| @@ -1665,7 +1668,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state) | |||
| 1665 | } | 1668 | } |
| 1666 | } | 1669 | } |
| 1667 | 1670 | ||
| 1668 | return ret; | 1671 | return 0; |
| 1669 | } | 1672 | } |
| 1670 | EXPORT_SYMBOL(drm_atomic_check_only); | 1673 | EXPORT_SYMBOL(drm_atomic_check_only); |
| 1671 | 1674 | ||
| @@ -2167,10 +2170,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, | |||
| 2167 | struct drm_atomic_state *state; | 2170 | struct drm_atomic_state *state; |
| 2168 | struct drm_modeset_acquire_ctx ctx; | 2171 | struct drm_modeset_acquire_ctx ctx; |
| 2169 | struct drm_plane *plane; | 2172 | struct drm_plane *plane; |
| 2170 | struct drm_out_fence_state *fence_state = NULL; | 2173 | struct drm_out_fence_state *fence_state; |
| 2171 | unsigned plane_mask; | 2174 | unsigned plane_mask; |
| 2172 | int ret = 0; | 2175 | int ret = 0; |
| 2173 | unsigned int i, j, num_fences = 0; | 2176 | unsigned int i, j, num_fences; |
| 2174 | 2177 | ||
| 2175 | /* disallow for drivers not supporting atomic: */ | 2178 | /* disallow for drivers not supporting atomic: */ |
| 2176 | if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) | 2179 | if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) |
| @@ -2211,6 +2214,8 @@ retry: | |||
| 2211 | plane_mask = 0; | 2214 | plane_mask = 0; |
| 2212 | copied_objs = 0; | 2215 | copied_objs = 0; |
| 2213 | copied_props = 0; | 2216 | copied_props = 0; |
| 2217 | fence_state = NULL; | ||
| 2218 | num_fences = 0; | ||
| 2214 | 2219 | ||
| 2215 | for (i = 0; i < arg->count_objs; i++) { | 2220 | for (i = 0; i < arg->count_objs; i++) { |
| 2216 | uint32_t obj_id, count_props; | 2221 | uint32_t obj_id, count_props; |
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 213fb837e1c4..08af8d6b844b 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c | |||
| @@ -544,7 +544,7 @@ void drm_dp_downstream_debug(struct seq_file *m, | |||
| 544 | DP_DETAILED_CAP_INFO_AVAILABLE; | 544 | DP_DETAILED_CAP_INFO_AVAILABLE; |
| 545 | int clk; | 545 | int clk; |
| 546 | int bpc; | 546 | int bpc; |
| 547 | char id[6]; | 547 | char id[7]; |
| 548 | int len; | 548 | int len; |
| 549 | uint8_t rev[2]; | 549 | uint8_t rev[2]; |
| 550 | int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; | 550 | int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; |
| @@ -583,6 +583,7 @@ void drm_dp_downstream_debug(struct seq_file *m, | |||
| 583 | seq_puts(m, "\t\tType: N/A\n"); | 583 | seq_puts(m, "\t\tType: N/A\n"); |
| 584 | } | 584 | } |
| 585 | 585 | ||
| 586 | memset(id, 0, sizeof(id)); | ||
| 586 | drm_dp_downstream_id(aux, id); | 587 | drm_dp_downstream_id(aux, id); |
| 587 | seq_printf(m, "\t\tID: %s\n", id); | 588 | seq_printf(m, "\t\tID: %s\n", id); |
| 588 | 589 | ||
| @@ -591,7 +592,7 @@ void drm_dp_downstream_debug(struct seq_file *m, | |||
| 591 | seq_printf(m, "\t\tHW: %d.%d\n", | 592 | seq_printf(m, "\t\tHW: %d.%d\n", |
| 592 | (rev[0] & 0xf0) >> 4, rev[0] & 0xf); | 593 | (rev[0] & 0xf0) >> 4, rev[0] & 0xf); |
| 593 | 594 | ||
| 594 | len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2); | 595 | len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2); |
| 595 | if (len > 0) | 596 | if (len > 0) |
| 596 | seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); | 597 | seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); |
| 597 | 598 | ||
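The drm_dp_helper hunk sizes the branch-device ID buffer at 7 bytes and zeroes it before the 6-byte DPCD read, so printing it with %s always finds a NUL terminator (the second change also drops a stray '&' so the revision read writes into the array rather than into a pointer to it). The string-termination half of that, as a small stand-alone illustration:

    #include <stdio.h>
    #include <string.h>

    #define ID_LEN 6

    /* stand-in for a 6-byte hardware read that does not NUL-terminate */
    static void read_id(char *buf)
    {
        memcpy(buf, "BRANCH", ID_LEN);
    }

    int main(void)
    {
        char id[ID_LEN + 1];            /* one extra byte for the terminator */

        memset(id, 0, sizeof(id));      /* guarantees a trailing NUL after read_id() */
        read_id(id);
        printf("ID: %s\n", id);
        return 0;
    }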
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 8dc11064253d..cdaac37907b1 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
| @@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) | |||
| 255 | struct drm_gem_object *obj = ptr; | 255 | struct drm_gem_object *obj = ptr; |
| 256 | struct drm_device *dev = obj->dev; | 256 | struct drm_device *dev = obj->dev; |
| 257 | 257 | ||
| 258 | if (dev->driver->gem_close_object) | ||
| 259 | dev->driver->gem_close_object(obj, file_priv); | ||
| 260 | |||
| 258 | if (drm_core_check_feature(dev, DRIVER_PRIME)) | 261 | if (drm_core_check_feature(dev, DRIVER_PRIME)) |
| 259 | drm_gem_remove_prime_handles(obj, file_priv); | 262 | drm_gem_remove_prime_handles(obj, file_priv); |
| 260 | drm_vma_node_revoke(&obj->vma_node, file_priv); | 263 | drm_vma_node_revoke(&obj->vma_node, file_priv); |
| 261 | 264 | ||
| 262 | if (dev->driver->gem_close_object) | ||
| 263 | dev->driver->gem_close_object(obj, file_priv); | ||
| 264 | |||
| 265 | drm_gem_object_handle_put_unlocked(obj); | 265 | drm_gem_object_handle_put_unlocked(obj); |
| 266 | 266 | ||
| 267 | return 0; | 267 | return 0; |
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 5dc8c4350602..e40c12fabbde 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c | |||
| @@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data, | |||
| 601 | 601 | ||
| 602 | crtc = drm_crtc_find(dev, plane_req->crtc_id); | 602 | crtc = drm_crtc_find(dev, plane_req->crtc_id); |
| 603 | if (!crtc) { | 603 | if (!crtc) { |
| 604 | drm_framebuffer_put(fb); | ||
| 604 | DRM_DEBUG_KMS("Unknown crtc ID %d\n", | 605 | DRM_DEBUG_KMS("Unknown crtc ID %d\n", |
| 605 | plane_req->crtc_id); | 606 | plane_req->crtc_id); |
| 606 | return -ENOENT; | 607 | return -ENOENT; |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 5bd93169dac2..6463fc2c736f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | |||
| @@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream, | |||
| 270 | if (ret) | 270 | if (ret) |
| 271 | return ret; | 271 | return ret; |
| 272 | 272 | ||
| 273 | if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) { | 273 | if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) { |
| 274 | DRM_ERROR("relocation %u outside object", i); | 274 | DRM_ERROR("relocation %u outside object\n", i); |
| 275 | return -EINVAL; | 275 | return -EINVAL; |
| 276 | } | 276 | } |
| 277 | 277 | ||
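The etnaviv hunk relaxes the bounds check from ">=" to ">": a relocation whose 4-byte word ends exactly at the end of the buffer object is still inside it, so only offsets strictly greater than size - sizeof(*ptr) are rejected (the error message also gains its missing newline). A small illustration of the boundary condition:

    #include <stdio.h>
    #include <stdint.h>

    /* accepts an offset if a 4-byte word starting there still fits in the object */
    static int reloc_in_bounds(size_t obj_size, size_t offset)
    {
        return offset <= obj_size - sizeof(uint32_t);
    }

    int main(void)
    {
        const size_t size = 64;

        /* offset 60 is the last valid start for a 4-byte word in a 64-byte object */
        printf("offset 60: %s\n", reloc_in_bounds(size, 60) ? "ok" : "rejected");
        printf("offset 61: %s\n", reloc_in_bounds(size, 61) ? "ok" : "rejected");
        return 0;
    }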
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 1d185347c64c..305dc3d4ff77 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig | |||
| @@ -75,6 +75,7 @@ config DRM_EXYNOS_DP | |||
| 75 | config DRM_EXYNOS_HDMI | 75 | config DRM_EXYNOS_HDMI |
| 76 | bool "HDMI" | 76 | bool "HDMI" |
| 77 | depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON | 77 | depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON |
| 78 | select CEC_CORE if CEC_NOTIFIER | ||
| 78 | help | 79 | help |
| 79 | Choose this option if you want to use Exynos HDMI for DRM. | 80 | Choose this option if you want to use Exynos HDMI for DRM. |
| 80 | 81 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 35a8dfc93836..242bd50faa26 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
| @@ -453,7 +453,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev) | |||
| 453 | struct component_match *match; | 453 | struct component_match *match; |
| 454 | 454 | ||
| 455 | pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); | 455 | pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); |
| 456 | exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls); | ||
| 457 | 456 | ||
| 458 | match = exynos_drm_match_add(&pdev->dev); | 457 | match = exynos_drm_match_add(&pdev->dev); |
| 459 | if (IS_ERR(match)) | 458 | if (IS_ERR(match)) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index a11b79596e2f..b6a46d9a016e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c | |||
| @@ -1651,8 +1651,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi) | |||
| 1651 | return ret; | 1651 | return ret; |
| 1652 | 1652 | ||
| 1653 | dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0); | 1653 | dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0); |
| 1654 | if (!dsi->bridge_node) | ||
| 1655 | return -EINVAL; | ||
| 1656 | 1654 | ||
| 1657 | return 0; | 1655 | return 0; |
| 1658 | } | 1656 | } |
| @@ -1687,9 +1685,11 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, | |||
| 1687 | return ret; | 1685 | return ret; |
| 1688 | } | 1686 | } |
| 1689 | 1687 | ||
| 1690 | bridge = of_drm_find_bridge(dsi->bridge_node); | 1688 | if (dsi->bridge_node) { |
| 1691 | if (bridge) | 1689 | bridge = of_drm_find_bridge(dsi->bridge_node); |
| 1692 | drm_bridge_attach(encoder, bridge, NULL); | 1690 | if (bridge) |
| 1691 | drm_bridge_attach(encoder, bridge, NULL); | ||
| 1692 | } | ||
| 1693 | 1693 | ||
| 1694 | return mipi_dsi_host_register(&dsi->dsi_host); | 1694 | return mipi_dsi_host_register(&dsi->dsi_host); |
| 1695 | } | 1695 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index d48fd7c918f8..73217c281c9a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c | |||
| @@ -145,13 +145,19 @@ static struct drm_framebuffer * | |||
| 145 | exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, | 145 | exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, |
| 146 | const struct drm_mode_fb_cmd2 *mode_cmd) | 146 | const struct drm_mode_fb_cmd2 *mode_cmd) |
| 147 | { | 147 | { |
| 148 | const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd); | ||
| 148 | struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; | 149 | struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; |
| 149 | struct drm_gem_object *obj; | 150 | struct drm_gem_object *obj; |
| 150 | struct drm_framebuffer *fb; | 151 | struct drm_framebuffer *fb; |
| 151 | int i; | 152 | int i; |
| 152 | int ret; | 153 | int ret; |
| 153 | 154 | ||
| 154 | for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) { | 155 | for (i = 0; i < info->num_planes; i++) { |
| 156 | unsigned int height = (i == 0) ? mode_cmd->height : | ||
| 157 | DIV_ROUND_UP(mode_cmd->height, info->vsub); | ||
| 158 | unsigned long size = height * mode_cmd->pitches[i] + | ||
| 159 | mode_cmd->offsets[i]; | ||
| 160 | |||
| 155 | obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); | 161 | obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); |
| 156 | if (!obj) { | 162 | if (!obj) { |
| 157 | DRM_ERROR("failed to lookup gem object\n"); | 163 | DRM_ERROR("failed to lookup gem object\n"); |
| @@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, | |||
| 160 | } | 166 | } |
| 161 | 167 | ||
| 162 | exynos_gem[i] = to_exynos_gem(obj); | 168 | exynos_gem[i] = to_exynos_gem(obj); |
| 169 | |||
| 170 | if (size > exynos_gem[i]->size) { | ||
| 171 | i++; | ||
| 172 | ret = -EINVAL; | ||
| 173 | goto err; | ||
| 174 | } | ||
| 163 | } | 175 | } |
| 164 | 176 | ||
| 165 | fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i); | 177 | fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i); |
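The per-plane check added above rejects framebuffers whose GEM buffers are too small for the requested layout: the required size is the plane height (full height for plane 0, vertically subsampled for the others) times the pitch, plus the plane offset. A minimal standalone sketch of that arithmetic; the NV12-like numbers are illustrative, not taken from the patch:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Illustrative two-plane layout with vertical subsampling of 2. */
        unsigned int height = 1080;
        unsigned int pitches[2] = { 1920, 1920 };
        unsigned int offsets[2] = { 0, 0 };
        unsigned int vsub = 2;

        for (int i = 0; i < 2; i++) {
            unsigned int h = (i == 0) ? height : DIV_ROUND_UP(height, vsub);
            unsigned long need = (unsigned long)h * pitches[i] + offsets[i];

            /* The driver fails with -EINVAL if need > exynos_gem[i]->size. */
            printf("plane %d needs at least %lu bytes\n", i, need);
        }
        return 0;
    }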
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index e45720543a45..16bbee897e0d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c | |||
| @@ -340,16 +340,10 @@ static int exynos_mic_bind(struct device *dev, struct device *master, | |||
| 340 | void *data) | 340 | void *data) |
| 341 | { | 341 | { |
| 342 | struct exynos_mic *mic = dev_get_drvdata(dev); | 342 | struct exynos_mic *mic = dev_get_drvdata(dev); |
| 343 | int ret; | ||
| 344 | 343 | ||
| 345 | mic->bridge.funcs = &mic_bridge_funcs; | ||
| 346 | mic->bridge.of_node = dev->of_node; | ||
| 347 | mic->bridge.driver_private = mic; | 344 | mic->bridge.driver_private = mic; |
| 348 | ret = drm_bridge_add(&mic->bridge); | ||
| 349 | if (ret) | ||
| 350 | DRM_ERROR("mic: Failed to add MIC to the global bridge list\n"); | ||
| 351 | 345 | ||
| 352 | return ret; | 346 | return 0; |
| 353 | } | 347 | } |
| 354 | 348 | ||
| 355 | static void exynos_mic_unbind(struct device *dev, struct device *master, | 349 | static void exynos_mic_unbind(struct device *dev, struct device *master, |
| @@ -365,8 +359,6 @@ static void exynos_mic_unbind(struct device *dev, struct device *master, | |||
| 365 | 359 | ||
| 366 | already_disabled: | 360 | already_disabled: |
| 367 | mutex_unlock(&mic_mutex); | 361 | mutex_unlock(&mic_mutex); |
| 368 | |||
| 369 | drm_bridge_remove(&mic->bridge); | ||
| 370 | } | 362 | } |
| 371 | 363 | ||
| 372 | static const struct component_ops exynos_mic_component_ops = { | 364 | static const struct component_ops exynos_mic_component_ops = { |
| @@ -461,6 +453,15 @@ static int exynos_mic_probe(struct platform_device *pdev) | |||
| 461 | 453 | ||
| 462 | platform_set_drvdata(pdev, mic); | 454 | platform_set_drvdata(pdev, mic); |
| 463 | 455 | ||
| 456 | mic->bridge.funcs = &mic_bridge_funcs; | ||
| 457 | mic->bridge.of_node = dev->of_node; | ||
| 458 | |||
| 459 | ret = drm_bridge_add(&mic->bridge); | ||
| 460 | if (ret) { | ||
| 461 | DRM_ERROR("mic: Failed to add MIC to the global bridge list\n"); | ||
| 462 | return ret; | ||
| 463 | } | ||
| 464 | |||
| 464 | pm_runtime_enable(dev); | 465 | pm_runtime_enable(dev); |
| 465 | 466 | ||
| 466 | ret = component_add(dev, &exynos_mic_component_ops); | 467 | ret = component_add(dev, &exynos_mic_component_ops); |
| @@ -479,8 +480,13 @@ err: | |||
| 479 | 480 | ||
| 480 | static int exynos_mic_remove(struct platform_device *pdev) | 481 | static int exynos_mic_remove(struct platform_device *pdev) |
| 481 | { | 482 | { |
| 483 | struct exynos_mic *mic = platform_get_drvdata(pdev); | ||
| 484 | |||
| 482 | component_del(&pdev->dev, &exynos_mic_component_ops); | 485 | component_del(&pdev->dev, &exynos_mic_component_ops); |
| 483 | pm_runtime_disable(&pdev->dev); | 486 | pm_runtime_disable(&pdev->dev); |
| 487 | |||
| 488 | drm_bridge_remove(&mic->bridge); | ||
| 489 | |||
| 484 | return 0; | 490 | return 0; |
| 485 | } | 491 | } |
| 486 | 492 | ||
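A hedged reading of the MIC change above: bridge registration moves out of the component bind/unbind path and into probe/remove, so repeated bind cycles no longer re-add the same bridge to the global list. A toy userspace sketch of that lifetime split; the names are stand-ins, not the DRM API:

    #include <stdbool.h>
    #include <stdio.h>

    static bool bridge_registered;

    static int probe(void)
    {
        bridge_registered = true;      /* drm_bridge_add() happens here now */
        puts("probe: bridge added");
        return 0;
    }

    static void remove_dev(void)
    {
        bridge_registered = false;     /* drm_bridge_remove() moved here */
        puts("remove: bridge removed");
    }

    static void bind(void)   { puts("bind: no bridge registration"); }
    static void unbind(void) { puts("unbind: bridge stays registered"); }

    int main(void)
    {
        probe();
        bind(); unbind();
        bind(); unbind();              /* re-binding is now harmless */
        remove_dev();
        return bridge_registered ? 1 : 0;
    }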
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 06bfbe400cf1..d3b69d66736f 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
| @@ -1501,8 +1501,6 @@ static void hdmi_disable(struct drm_encoder *encoder) | |||
| 1501 | */ | 1501 | */ |
| 1502 | cancel_delayed_work(&hdata->hotplug_work); | 1502 | cancel_delayed_work(&hdata->hotplug_work); |
| 1503 | cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID); | 1503 | cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID); |
| 1504 | |||
| 1505 | hdmiphy_disable(hdata); | ||
| 1506 | } | 1504 | } |
| 1507 | 1505 | ||
| 1508 | static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { | 1506 | static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { |
| @@ -1676,7 +1674,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata) | |||
| 1676 | return hdmi_bridge_init(hdata); | 1674 | return hdmi_bridge_init(hdata); |
| 1677 | } | 1675 | } |
| 1678 | 1676 | ||
| 1679 | static struct of_device_id hdmi_match_types[] = { | 1677 | static const struct of_device_id hdmi_match_types[] = { |
| 1680 | { | 1678 | { |
| 1681 | .compatible = "samsung,exynos4210-hdmi", | 1679 | .compatible = "samsung,exynos4210-hdmi", |
| 1682 | .data = &exynos4210_hdmi_driver_data, | 1680 | .data = &exynos4210_hdmi_driver_data, |
| @@ -1934,8 +1932,7 @@ static int hdmi_remove(struct platform_device *pdev) | |||
| 1934 | return 0; | 1932 | return 0; |
| 1935 | } | 1933 | } |
| 1936 | 1934 | ||
| 1937 | #ifdef CONFIG_PM | 1935 | static int __maybe_unused exynos_hdmi_suspend(struct device *dev) |
| 1938 | static int exynos_hdmi_suspend(struct device *dev) | ||
| 1939 | { | 1936 | { |
| 1940 | struct hdmi_context *hdata = dev_get_drvdata(dev); | 1937 | struct hdmi_context *hdata = dev_get_drvdata(dev); |
| 1941 | 1938 | ||
| @@ -1944,7 +1941,7 @@ static int exynos_hdmi_suspend(struct device *dev) | |||
| 1944 | return 0; | 1941 | return 0; |
| 1945 | } | 1942 | } |
| 1946 | 1943 | ||
| 1947 | static int exynos_hdmi_resume(struct device *dev) | 1944 | static int __maybe_unused exynos_hdmi_resume(struct device *dev) |
| 1948 | { | 1945 | { |
| 1949 | struct hdmi_context *hdata = dev_get_drvdata(dev); | 1946 | struct hdmi_context *hdata = dev_get_drvdata(dev); |
| 1950 | int ret; | 1947 | int ret; |
| @@ -1955,7 +1952,6 @@ static int exynos_hdmi_resume(struct device *dev) | |||
| 1955 | 1952 | ||
| 1956 | return 0; | 1953 | return 0; |
| 1957 | } | 1954 | } |
| 1958 | #endif | ||
| 1959 | 1955 | ||
| 1960 | static const struct dev_pm_ops exynos_hdmi_pm_ops = { | 1956 | static const struct dev_pm_ops exynos_hdmi_pm_ops = { |
| 1961 | SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL) | 1957 | SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL) |
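The suspend/resume hunk above swaps the #ifdef CONFIG_PM guard for __maybe_unused, so the callbacks always compile and are simply discarded when the PM ops table does not reference them. A small standalone illustration of the attribute; the macro below stands in for the kernel's definition:

    #include <stdio.h>

    #define __maybe_unused __attribute__((unused))

    /* Compiles warning-free even when nothing references it, which is
     * exactly what dropping the #ifdef relies on. */
    static int __maybe_unused demo_suspend(void)
    {
        return 0;
    }

    int main(void)
    {
        puts("builds cleanly whether or not demo_suspend() is used");
        return 0;
    }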
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 6bed4f3ffcd6..a998a8dd783c 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
| @@ -1094,28 +1094,28 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = { | |||
| 1094 | .atomic_check = mixer_atomic_check, | 1094 | .atomic_check = mixer_atomic_check, |
| 1095 | }; | 1095 | }; |
| 1096 | 1096 | ||
| 1097 | static struct mixer_drv_data exynos5420_mxr_drv_data = { | 1097 | static const struct mixer_drv_data exynos5420_mxr_drv_data = { |
| 1098 | .version = MXR_VER_128_0_0_184, | 1098 | .version = MXR_VER_128_0_0_184, |
| 1099 | .is_vp_enabled = 0, | 1099 | .is_vp_enabled = 0, |
| 1100 | }; | 1100 | }; |
| 1101 | 1101 | ||
| 1102 | static struct mixer_drv_data exynos5250_mxr_drv_data = { | 1102 | static const struct mixer_drv_data exynos5250_mxr_drv_data = { |
| 1103 | .version = MXR_VER_16_0_33_0, | 1103 | .version = MXR_VER_16_0_33_0, |
| 1104 | .is_vp_enabled = 0, | 1104 | .is_vp_enabled = 0, |
| 1105 | }; | 1105 | }; |
| 1106 | 1106 | ||
| 1107 | static struct mixer_drv_data exynos4212_mxr_drv_data = { | 1107 | static const struct mixer_drv_data exynos4212_mxr_drv_data = { |
| 1108 | .version = MXR_VER_0_0_0_16, | 1108 | .version = MXR_VER_0_0_0_16, |
| 1109 | .is_vp_enabled = 1, | 1109 | .is_vp_enabled = 1, |
| 1110 | }; | 1110 | }; |
| 1111 | 1111 | ||
| 1112 | static struct mixer_drv_data exynos4210_mxr_drv_data = { | 1112 | static const struct mixer_drv_data exynos4210_mxr_drv_data = { |
| 1113 | .version = MXR_VER_0_0_0_16, | 1113 | .version = MXR_VER_0_0_0_16, |
| 1114 | .is_vp_enabled = 1, | 1114 | .is_vp_enabled = 1, |
| 1115 | .has_sclk = 1, | 1115 | .has_sclk = 1, |
| 1116 | }; | 1116 | }; |
| 1117 | 1117 | ||
| 1118 | static struct of_device_id mixer_match_types[] = { | 1118 | static const struct of_device_id mixer_match_types[] = { |
| 1119 | { | 1119 | { |
| 1120 | .compatible = "samsung,exynos4210-mixer", | 1120 | .compatible = "samsung,exynos4210-mixer", |
| 1121 | .data = &exynos4210_mxr_drv_data, | 1121 | .data = &exynos4210_mxr_drv_data, |
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 713848c36349..e556a46cd4c2 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
| @@ -2714,7 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
| 2714 | unmap_src: | 2714 | unmap_src: |
| 2715 | i915_gem_object_unpin_map(obj); | 2715 | i915_gem_object_unpin_map(obj); |
| 2716 | put_obj: | 2716 | put_obj: |
| 2717 | i915_gem_object_put(wa_ctx->indirect_ctx.obj); | 2717 | i915_gem_object_put(obj); |
| 2718 | return ret; | 2718 | return ret; |
| 2719 | } | 2719 | } |
| 2720 | 2720 | ||
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 2deb05f618fb..7cb0818a13de 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c | |||
| @@ -323,27 +323,27 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt) | |||
| 323 | { | 323 | { |
| 324 | struct intel_gvt_irq *irq = &gvt->irq; | 324 | struct intel_gvt_irq *irq = &gvt->irq; |
| 325 | struct intel_vgpu *vgpu; | 325 | struct intel_vgpu *vgpu; |
| 326 | bool have_enabled_pipe = false; | ||
| 327 | int pipe, id; | 326 | int pipe, id; |
| 328 | 327 | ||
| 329 | if (WARN_ON(!mutex_is_locked(&gvt->lock))) | 328 | if (WARN_ON(!mutex_is_locked(&gvt->lock))) |
| 330 | return; | 329 | return; |
| 331 | 330 | ||
| 332 | hrtimer_cancel(&irq->vblank_timer.timer); | ||
| 333 | |||
| 334 | for_each_active_vgpu(gvt, vgpu, id) { | 331 | for_each_active_vgpu(gvt, vgpu, id) { |
| 335 | for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) { | 332 | for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) { |
| 336 | have_enabled_pipe = | 333 | if (pipe_is_enabled(vgpu, pipe)) |
| 337 | pipe_is_enabled(vgpu, pipe); | 334 | goto out; |
| 338 | if (have_enabled_pipe) | ||
| 339 | break; | ||
| 340 | } | 335 | } |
| 341 | } | 336 | } |
| 342 | 337 | ||
| 343 | if (have_enabled_pipe) | 338 | /* all the pipes are disabled */ |
| 344 | hrtimer_start(&irq->vblank_timer.timer, | 339 | hrtimer_cancel(&irq->vblank_timer.timer); |
| 345 | ktime_add_ns(ktime_get(), irq->vblank_timer.period), | 340 | return; |
| 346 | HRTIMER_MODE_ABS); | 341 | |
| 342 | out: | ||
| 343 | hrtimer_start(&irq->vblank_timer.timer, | ||
| 344 | ktime_add_ns(ktime_get(), irq->vblank_timer.period), | ||
| 345 | HRTIMER_MODE_ABS); | ||
| 346 | |||
| 347 | } | 347 | } |
| 348 | 348 | ||
| 349 | static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) | 349 | static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) |
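A sketch of the restructured control flow above: scan every vGPU pipe, jump out and (re)arm the timer as soon as one enabled pipe is found, and cancel the timer only when the scan finishes with nothing enabled. Stand-in names only, not the GVT API:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_VGPU 2
    #define NR_PIPE 3

    static bool pipe_enabled[NR_VGPU][NR_PIPE];

    static void check_vblank_emulation(void)
    {
        int id, pipe;

        for (id = 0; id < NR_VGPU; id++)
            for (pipe = 0; pipe < NR_PIPE; pipe++)
                if (pipe_enabled[id][pipe])
                    goto out;

        puts("hrtimer_cancel()  -- all pipes disabled");
        return;

    out:
        puts("hrtimer_start()   -- at least one pipe active");
    }

    int main(void)
    {
        check_vblank_emulation();      /* no pipes -> cancel */
        pipe_enabled[1][0] = true;
        check_vblank_emulation();      /* one pipe -> start  */
        return 0;
    }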
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 700050556242..1648887d3f55 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c | |||
| @@ -46,6 +46,8 @@ | |||
| 46 | #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ | 46 | #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ |
| 47 | ((a)->lrca == (b)->lrca)) | 47 | ((a)->lrca == (b)->lrca)) |
| 48 | 48 | ||
| 49 | static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask); | ||
| 50 | |||
| 49 | static int context_switch_events[] = { | 51 | static int context_switch_events[] = { |
| 50 | [RCS] = RCS_AS_CONTEXT_SWITCH, | 52 | [RCS] = RCS_AS_CONTEXT_SWITCH, |
| 51 | [BCS] = BCS_AS_CONTEXT_SWITCH, | 53 | [BCS] = BCS_AS_CONTEXT_SWITCH, |
| @@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
| 499 | static int complete_execlist_workload(struct intel_vgpu_workload *workload) | 501 | static int complete_execlist_workload(struct intel_vgpu_workload *workload) |
| 500 | { | 502 | { |
| 501 | struct intel_vgpu *vgpu = workload->vgpu; | 503 | struct intel_vgpu *vgpu = workload->vgpu; |
| 502 | struct intel_vgpu_execlist *execlist = | 504 | int ring_id = workload->ring_id; |
| 503 | &vgpu->execlist[workload->ring_id]; | 505 | struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id]; |
| 504 | struct intel_vgpu_workload *next_workload; | 506 | struct intel_vgpu_workload *next_workload; |
| 505 | struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next; | 507 | struct list_head *next = workload_q_head(vgpu, ring_id)->next; |
| 506 | bool lite_restore = false; | 508 | bool lite_restore = false; |
| 507 | int ret; | 509 | int ret; |
| 508 | 510 | ||
| @@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) | |||
| 512 | release_shadow_batch_buffer(workload); | 514 | release_shadow_batch_buffer(workload); |
| 513 | release_shadow_wa_ctx(&workload->wa_ctx); | 515 | release_shadow_wa_ctx(&workload->wa_ctx); |
| 514 | 516 | ||
| 515 | if (workload->status || vgpu->resetting) | 517 | if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { |
| 518 | /* If workload->status is not successful, it means the HW GPU | ||
| 519 | * hit a hang or something went wrong in i915/GVT, | ||
| 520 | * and GVT won't inject a context switch interrupt to the guest. | ||
| 521 | * To the guest this error is effectively a vGPU hang, | ||
| 522 | * so we should emulate a vGPU hang accordingly. If | ||
| 523 | * there are pending workloads already submitted by the | ||
| 524 | * guest, we should clean them up like the HW GPU would. | ||
| 525 | * | ||
| 526 | * If we are in the middle of an engine reset, the pending | ||
| 527 | * workloads won't be submitted to the HW GPU and will be | ||
| 528 | * cleaned up later during the reset, so cleaning them up | ||
| 529 | * here has no impact. | ||
| 530 | */ | ||
| 531 | clean_workloads(vgpu, ENGINE_MASK(ring_id)); | ||
| 516 | goto out; | 532 | goto out; |
| 533 | } | ||
| 517 | 534 | ||
| 518 | if (!list_empty(workload_q_head(vgpu, workload->ring_id))) { | 535 | if (!list_empty(workload_q_head(vgpu, ring_id))) { |
| 519 | struct execlist_ctx_descriptor_format *this_desc, *next_desc; | 536 | struct execlist_ctx_descriptor_format *this_desc, *next_desc; |
| 520 | 537 | ||
| 521 | next_workload = container_of(next, | 538 | next_workload = container_of(next, |
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 5dad9298b2d5..a26c1705430e 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c | |||
| @@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) | |||
| 72 | struct intel_gvt_device_info *info = &gvt->device_info; | 72 | struct intel_gvt_device_info *info = &gvt->device_info; |
| 73 | struct pci_dev *pdev = gvt->dev_priv->drm.pdev; | 73 | struct pci_dev *pdev = gvt->dev_priv->drm.pdev; |
| 74 | struct intel_gvt_mmio_info *e; | 74 | struct intel_gvt_mmio_info *e; |
| 75 | struct gvt_mmio_block *block = gvt->mmio.mmio_block; | ||
| 76 | int num = gvt->mmio.num_mmio_block; | ||
| 75 | struct gvt_firmware_header *h; | 77 | struct gvt_firmware_header *h; |
| 76 | void *firmware; | 78 | void *firmware; |
| 77 | void *p; | 79 | void *p; |
| 78 | unsigned long size, crc32_start; | 80 | unsigned long size, crc32_start; |
| 79 | int i; | 81 | int i, j; |
| 80 | int ret; | 82 | int ret; |
| 81 | 83 | ||
| 82 | size = sizeof(*h) + info->mmio_size + info->cfg_space_size; | 84 | size = sizeof(*h) + info->mmio_size + info->cfg_space_size; |
| @@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) | |||
| 105 | hash_for_each(gvt->mmio.mmio_info_table, i, e, node) | 107 | hash_for_each(gvt->mmio.mmio_info_table, i, e, node) |
| 106 | *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset)); | 108 | *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset)); |
| 107 | 109 | ||
| 110 | for (i = 0; i < num; i++, block++) { | ||
| 111 | for (j = 0; j < block->size; j += 4) | ||
| 112 | *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) = | ||
| 113 | I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET( | ||
| 114 | block->offset) + j)); | ||
| 115 | } | ||
| 116 | |||
| 108 | memcpy(gvt->firmware.mmio, p, info->mmio_size); | 117 | memcpy(gvt->firmware.mmio, p, info->mmio_size); |
| 109 | 118 | ||
| 110 | crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; | 119 | crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; |
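The firmware.c hunk above additionally snapshots the special MMIO blocks into the exposed firmware image, stepping through each block in 4-byte registers. A toy model of that inner loop, with read_reg() standing in for I915_READ_NOTRACE() and made-up block values:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t read_reg(uint32_t offset)
    {
        return offset ^ 0xdeadbeefu;   /* fake register contents */
    }

    int main(void)
    {
        uint8_t image[0x200] = { 0 };
        uint32_t block_offset = 0x40;  /* illustrative block start */
        uint32_t block_size   = 0x20;  /* bytes, multiple of 4     */

        for (uint32_t j = 0; j < block_size; j += 4) {
            uint32_t val = read_reg(block_offset + j);

            memcpy(image + block_offset + j, &val, sizeof(val));
        }

        printf("captured 0x%x bytes at 0x%x\n", block_size, block_offset);
        return 0;
    }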
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 3a74e79eac2f..2964a4d01a66 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h | |||
| @@ -149,7 +149,7 @@ struct intel_vgpu { | |||
| 149 | bool active; | 149 | bool active; |
| 150 | bool pv_notified; | 150 | bool pv_notified; |
| 151 | bool failsafe; | 151 | bool failsafe; |
| 152 | bool resetting; | 152 | unsigned int resetting_eng; |
| 153 | void *sched_data; | 153 | void *sched_data; |
| 154 | struct vgpu_sched_ctl sched_ctl; | 154 | struct vgpu_sched_ctl sched_ctl; |
| 155 | 155 | ||
| @@ -195,6 +195,15 @@ struct intel_gvt_fence { | |||
| 195 | unsigned long vgpu_allocated_fence_num; | 195 | unsigned long vgpu_allocated_fence_num; |
| 196 | }; | 196 | }; |
| 197 | 197 | ||
| 198 | /* Special MMIO blocks. */ | ||
| 199 | struct gvt_mmio_block { | ||
| 200 | unsigned int device; | ||
| 201 | i915_reg_t offset; | ||
| 202 | unsigned int size; | ||
| 203 | gvt_mmio_func read; | ||
| 204 | gvt_mmio_func write; | ||
| 205 | }; | ||
| 206 | |||
| 198 | #define INTEL_GVT_MMIO_HASH_BITS 11 | 207 | #define INTEL_GVT_MMIO_HASH_BITS 11 |
| 199 | 208 | ||
| 200 | struct intel_gvt_mmio { | 209 | struct intel_gvt_mmio { |
| @@ -214,6 +223,9 @@ struct intel_gvt_mmio { | |||
| 214 | /* This reg could be accessed by unaligned address */ | 223 | /* This reg could be accessed by unaligned address */ |
| 215 | #define F_UNALIGN (1 << 6) | 224 | #define F_UNALIGN (1 << 6) |
| 216 | 225 | ||
| 226 | struct gvt_mmio_block *mmio_block; | ||
| 227 | unsigned int num_mmio_block; | ||
| 228 | |||
| 217 | DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); | 229 | DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); |
| 218 | unsigned int num_tracked_mmio; | 230 | unsigned int num_tracked_mmio; |
| 219 | }; | 231 | }; |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 17febe830ff6..feed9921b3b3 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
| @@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
| 2857 | return 0; | 2857 | return 0; |
| 2858 | } | 2858 | } |
| 2859 | 2859 | ||
| 2860 | /* Special MMIO blocks. */ | ||
| 2861 | static struct gvt_mmio_block { | ||
| 2862 | unsigned int device; | ||
| 2863 | i915_reg_t offset; | ||
| 2864 | unsigned int size; | ||
| 2865 | gvt_mmio_func read; | ||
| 2866 | gvt_mmio_func write; | ||
| 2867 | } gvt_mmio_blocks[] = { | ||
| 2868 | {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL}, | ||
| 2869 | {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, | ||
| 2870 | {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, | ||
| 2871 | pvinfo_mmio_read, pvinfo_mmio_write}, | ||
| 2872 | {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL}, | ||
| 2873 | {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL}, | ||
| 2874 | {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL}, | ||
| 2875 | }; | ||
| 2876 | |||
| 2877 | static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, | 2860 | static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, |
| 2878 | unsigned int offset) | 2861 | unsigned int offset) |
| 2879 | { | 2862 | { |
| 2880 | unsigned long device = intel_gvt_get_device_type(gvt); | 2863 | unsigned long device = intel_gvt_get_device_type(gvt); |
| 2881 | struct gvt_mmio_block *block = gvt_mmio_blocks; | 2864 | struct gvt_mmio_block *block = gvt->mmio.mmio_block; |
| 2865 | int num = gvt->mmio.num_mmio_block; | ||
| 2882 | int i; | 2866 | int i; |
| 2883 | 2867 | ||
| 2884 | for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) { | 2868 | for (i = 0; i < num; i++, block++) { |
| 2885 | if (!(device & block->device)) | 2869 | if (!(device & block->device)) |
| 2886 | continue; | 2870 | continue; |
| 2887 | if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) && | 2871 | if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) && |
| @@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) | |||
| 2912 | gvt->mmio.mmio_attribute = NULL; | 2896 | gvt->mmio.mmio_attribute = NULL; |
| 2913 | } | 2897 | } |
| 2914 | 2898 | ||
| 2899 | /* Special MMIO blocks. */ | ||
| 2900 | static struct gvt_mmio_block mmio_blocks[] = { | ||
| 2901 | {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL}, | ||
| 2902 | {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, | ||
| 2903 | {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, | ||
| 2904 | pvinfo_mmio_read, pvinfo_mmio_write}, | ||
| 2905 | {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL}, | ||
| 2906 | {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL}, | ||
| 2907 | {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL}, | ||
| 2908 | }; | ||
| 2909 | |||
| 2915 | /** | 2910 | /** |
| 2916 | * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device | 2911 | * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device |
| 2917 | * @gvt: GVT device | 2912 | * @gvt: GVT device |
| @@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) | |||
| 2951 | goto err; | 2946 | goto err; |
| 2952 | } | 2947 | } |
| 2953 | 2948 | ||
| 2949 | gvt->mmio.mmio_block = mmio_blocks; | ||
| 2950 | gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks); | ||
| 2951 | |||
| 2954 | gvt_dbg_mmio("traced %u virtual mmio registers\n", | 2952 | gvt_dbg_mmio("traced %u virtual mmio registers\n", |
| 2955 | gvt->mmio.num_tracked_mmio); | 2953 | gvt->mmio.num_tracked_mmio); |
| 2956 | return 0; | 2954 | return 0; |
| @@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 3030 | gvt_mmio_func func; | 3028 | gvt_mmio_func func; |
| 3031 | int ret; | 3029 | int ret; |
| 3032 | 3030 | ||
| 3033 | if (WARN_ON(bytes > 4)) | 3031 | if (WARN_ON(bytes > 8)) |
| 3034 | return -EINVAL; | 3032 | return -EINVAL; |
| 3035 | 3033 | ||
| 3036 | /* | 3034 | /* |
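With the block table now hanging off gvt->mmio, find_mmio_block() is a plain range scan: an offset belongs to a block when it lies in [offset, offset + size). A minimal sketch of that lookup; the block values below are illustrative:

    #include <stddef.h>
    #include <stdio.h>

    struct mmio_block {
        unsigned int offset;
        unsigned int size;
    };

    static const struct mmio_block blocks[] = {
        { 0x80000,  0x3000  },
        { 0x140000, 0x40000 },
    };

    static const struct mmio_block *find_block(unsigned int offset)
    {
        for (size_t i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
            if (offset >= blocks[i].offset &&
                offset < blocks[i].offset + blocks[i].size)
                return &blocks[i];
        return NULL;
    }

    int main(void)
    {
        printf("0x80010:  %s\n", find_block(0x80010)  ? "in a block" : "tracked individually");
        printf("0x100000: %s\n", find_block(0x100000) ? "in a block" : "tracked individually");
        return 0;
    }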
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 4f7057d62d88..22e08eb2d0b7 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
| @@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | |||
| 432 | 432 | ||
| 433 | i915_gem_request_put(fetch_and_zero(&workload->req)); | 433 | i915_gem_request_put(fetch_and_zero(&workload->req)); |
| 434 | 434 | ||
| 435 | if (!workload->status && !vgpu->resetting) { | 435 | if (!workload->status && !(vgpu->resetting_eng & |
| 436 | ENGINE_MASK(ring_id))) { | ||
| 436 | update_guest_context(workload); | 437 | update_guest_context(workload); |
| 437 | 438 | ||
| 438 | for_each_set_bit(event, workload->pending_events, | 439 | for_each_set_bit(event, workload->pending_events, |
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 90c14e6e3ea0..3deadcbd5a24 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c | |||
| @@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, | |||
| 480 | { | 480 | { |
| 481 | struct intel_gvt *gvt = vgpu->gvt; | 481 | struct intel_gvt *gvt = vgpu->gvt; |
| 482 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | 482 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; |
| 483 | unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask; | ||
| 483 | 484 | ||
| 484 | gvt_dbg_core("------------------------------------------\n"); | 485 | gvt_dbg_core("------------------------------------------\n"); |
| 485 | gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", | 486 | gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", |
| 486 | vgpu->id, dmlr, engine_mask); | 487 | vgpu->id, dmlr, engine_mask); |
| 487 | vgpu->resetting = true; | 488 | |
| 489 | vgpu->resetting_eng = resetting_eng; | ||
| 488 | 490 | ||
| 489 | intel_vgpu_stop_schedule(vgpu); | 491 | intel_vgpu_stop_schedule(vgpu); |
| 490 | /* | 492 | /* |
| @@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, | |||
| 497 | mutex_lock(&gvt->lock); | 499 | mutex_lock(&gvt->lock); |
| 498 | } | 500 | } |
| 499 | 501 | ||
| 500 | intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask); | 502 | intel_vgpu_reset_execlist(vgpu, resetting_eng); |
| 501 | 503 | ||
| 502 | /* full GPU reset or device model level reset */ | 504 | /* full GPU reset or device model level reset */ |
| 503 | if (engine_mask == ALL_ENGINES || dmlr) { | 505 | if (engine_mask == ALL_ENGINES || dmlr) { |
| @@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, | |||
| 520 | } | 522 | } |
| 521 | } | 523 | } |
| 522 | 524 | ||
| 523 | vgpu->resetting = false; | 525 | vgpu->resetting_eng = 0; |
| 524 | gvt_dbg_core("reset vgpu%d done\n", vgpu->id); | 526 | gvt_dbg_core("reset vgpu%d done\n", vgpu->id); |
| 525 | gvt_dbg_core("------------------------------------------\n"); | 527 | gvt_dbg_core("------------------------------------------\n"); |
| 526 | } | 528 | } |
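The reset path above replaces the single resetting flag with a per-engine mask, which is what the execlist and scheduler hunks test with ENGINE_MASK(ring_id). A tiny sketch of the mask arithmetic; the engine count and macro values are illustrative:

    #include <stdio.h>

    #define ENGINE_MASK(id) (1u << (id))
    #define ALL_ENGINES     0x1fu          /* pretend 5 engines */

    int main(void)
    {
        int dmlr = 0;                      /* not a device-model-level reset */
        unsigned int engine_mask = ENGINE_MASK(2);
        unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;

        for (int ring = 0; ring < 5; ring++)
            printf("ring %d resetting: %s\n", ring,
                   (resetting_eng & ENGINE_MASK(ring)) ? "yes" : "no");
        return 0;
    }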
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 00d8967c8512..d1bd53b73738 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
| @@ -4580,7 +4580,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, | |||
| 4580 | 4580 | ||
| 4581 | sseu->slice_mask |= BIT(s); | 4581 | sseu->slice_mask |= BIT(s); |
| 4582 | 4582 | ||
| 4583 | if (IS_GEN9_BC(dev_priv)) | 4583 | if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) |
| 4584 | sseu->subslice_mask = | 4584 | sseu->subslice_mask = |
| 4585 | INTEL_INFO(dev_priv)->sseu.subslice_mask; | 4585 | INTEL_INFO(dev_priv)->sseu.subslice_mask; |
| 4586 | 4586 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c index 152f16c11878..348b29a845c9 100644 --- a/drivers/gpu/drm/i915/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/i915_gem_clflush.c | |||
| @@ -114,7 +114,7 @@ i915_clflush_notify(struct i915_sw_fence *fence, | |||
| 114 | return NOTIFY_DONE; | 114 | return NOTIFY_DONE; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj, | 117 | bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, |
| 118 | unsigned int flags) | 118 | unsigned int flags) |
| 119 | { | 119 | { |
| 120 | struct clflush *clflush; | 120 | struct clflush *clflush; |
| @@ -128,7 +128,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, | |||
| 128 | */ | 128 | */ |
| 129 | if (!i915_gem_object_has_struct_page(obj)) { | 129 | if (!i915_gem_object_has_struct_page(obj)) { |
| 130 | obj->cache_dirty = false; | 130 | obj->cache_dirty = false; |
| 131 | return; | 131 | return false; |
| 132 | } | 132 | } |
| 133 | 133 | ||
| 134 | /* If the GPU is snooping the contents of the CPU cache, | 134 | /* If the GPU is snooping the contents of the CPU cache, |
| @@ -140,7 +140,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, | |||
| 140 | * tracking. | 140 | * tracking. |
| 141 | */ | 141 | */ |
| 142 | if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent) | 142 | if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent) |
| 143 | return; | 143 | return false; |
| 144 | 144 | ||
| 145 | trace_i915_gem_object_clflush(obj); | 145 | trace_i915_gem_object_clflush(obj); |
| 146 | 146 | ||
| @@ -179,4 +179,5 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj, | |||
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | obj->cache_dirty = false; | 181 | obj->cache_dirty = false; |
| 182 | return true; | ||
| 182 | } | 183 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h index 2455a7820937..f390247561b3 100644 --- a/drivers/gpu/drm/i915/i915_gem_clflush.h +++ b/drivers/gpu/drm/i915/i915_gem_clflush.h | |||
| @@ -28,7 +28,7 @@ | |||
| 28 | struct drm_i915_private; | 28 | struct drm_i915_private; |
| 29 | struct drm_i915_gem_object; | 29 | struct drm_i915_gem_object; |
| 30 | 30 | ||
| 31 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj, | 31 | bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, |
| 32 | unsigned int flags); | 32 | unsigned int flags); |
| 33 | #define I915_CLFLUSH_FORCE BIT(0) | 33 | #define I915_CLFLUSH_FORCE BIT(0) |
| 34 | #define I915_CLFLUSH_SYNC BIT(1) | 34 | #define I915_CLFLUSH_SYNC BIT(1) |
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 39ed58a21fc1..e1e971ee2ed5 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
| @@ -688,19 +688,19 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt, | |||
| 688 | } | 688 | } |
| 689 | 689 | ||
| 690 | static bool | 690 | static bool |
| 691 | needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, | 691 | needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine) |
| 692 | struct intel_engine_cs *engine, | ||
| 693 | struct i915_gem_context *to) | ||
| 694 | { | 692 | { |
| 693 | struct i915_gem_context *from = engine->legacy_active_context; | ||
| 694 | |||
| 695 | if (!ppgtt) | 695 | if (!ppgtt) |
| 696 | return false; | 696 | return false; |
| 697 | 697 | ||
| 698 | /* Always load the ppgtt on first use */ | 698 | /* Always load the ppgtt on first use */ |
| 699 | if (!engine->legacy_active_context) | 699 | if (!from) |
| 700 | return true; | 700 | return true; |
| 701 | 701 | ||
| 702 | /* Same context without new entries, skip */ | 702 | /* Same context without new entries, skip */ |
| 703 | if (engine->legacy_active_context == to && | 703 | if ((!from->ppgtt || from->ppgtt == ppgtt) && |
| 704 | !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) | 704 | !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) |
| 705 | return false; | 705 | return false; |
| 706 | 706 | ||
| @@ -744,7 +744,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) | |||
| 744 | if (skip_rcs_switch(ppgtt, engine, to)) | 744 | if (skip_rcs_switch(ppgtt, engine, to)) |
| 745 | return 0; | 745 | return 0; |
| 746 | 746 | ||
| 747 | if (needs_pd_load_pre(ppgtt, engine, to)) { | 747 | if (needs_pd_load_pre(ppgtt, engine)) { |
| 748 | /* Older GENs and non render rings still want the load first, | 748 | /* Older GENs and non render rings still want the load first, |
| 749 | * "PP_DCLV followed by PP_DIR_BASE register through Load | 749 | * "PP_DCLV followed by PP_DIR_BASE register through Load |
| 750 | * Register Immediate commands in Ring Buffer before submitting | 750 | * Register Immediate commands in Ring Buffer before submitting |
| @@ -841,7 +841,7 @@ int i915_switch_context(struct drm_i915_gem_request *req) | |||
| 841 | struct i915_hw_ppgtt *ppgtt = | 841 | struct i915_hw_ppgtt *ppgtt = |
| 842 | to->ppgtt ?: req->i915->mm.aliasing_ppgtt; | 842 | to->ppgtt ?: req->i915->mm.aliasing_ppgtt; |
| 843 | 843 | ||
| 844 | if (needs_pd_load_pre(ppgtt, engine, to)) { | 844 | if (needs_pd_load_pre(ppgtt, engine)) { |
| 845 | int ret; | 845 | int ret; |
| 846 | 846 | ||
| 847 | trace_switch_mm(engine, to); | 847 | trace_switch_mm(engine, to); |
| @@ -852,6 +852,7 @@ int i915_switch_context(struct drm_i915_gem_request *req) | |||
| 852 | ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); | 852 | ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); |
| 853 | } | 853 | } |
| 854 | 854 | ||
| 855 | engine->legacy_active_context = to; | ||
| 855 | return 0; | 856 | return 0; |
| 856 | } | 857 | } |
| 857 | 858 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 054b2e54cdaf..e9503f6d1100 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -560,9 +560,6 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, | |||
| 560 | eb->args->flags |= __EXEC_HAS_RELOC; | 560 | eb->args->flags |= __EXEC_HAS_RELOC; |
| 561 | } | 561 | } |
| 562 | 562 | ||
| 563 | entry->flags |= __EXEC_OBJECT_HAS_PIN; | ||
| 564 | GEM_BUG_ON(eb_vma_misplaced(entry, vma)); | ||
| 565 | |||
| 566 | if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) { | 563 | if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) { |
| 567 | err = i915_vma_get_fence(vma); | 564 | err = i915_vma_get_fence(vma); |
| 568 | if (unlikely(err)) { | 565 | if (unlikely(err)) { |
| @@ -574,6 +571,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, | |||
| 574 | entry->flags |= __EXEC_OBJECT_HAS_FENCE; | 571 | entry->flags |= __EXEC_OBJECT_HAS_FENCE; |
| 575 | } | 572 | } |
| 576 | 573 | ||
| 574 | entry->flags |= __EXEC_OBJECT_HAS_PIN; | ||
| 575 | GEM_BUG_ON(eb_vma_misplaced(entry, vma)); | ||
| 576 | |||
| 577 | return 0; | 577 | return 0; |
| 578 | } | 578 | } |
| 579 | 579 | ||
| @@ -1458,7 +1458,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) | |||
| 1458 | * to read. However, if the array is not writable the user loses | 1458 | * to read. However, if the array is not writable the user loses |
| 1459 | * the updated relocation values. | 1459 | * the updated relocation values. |
| 1460 | */ | 1460 | */ |
| 1461 | if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs)))) | 1461 | if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs)))) |
| 1462 | return -EFAULT; | 1462 | return -EFAULT; |
| 1463 | 1463 | ||
| 1464 | do { | 1464 | do { |
| @@ -1775,7 +1775,7 @@ out: | |||
| 1775 | } | 1775 | } |
| 1776 | } | 1776 | } |
| 1777 | 1777 | ||
| 1778 | return err ?: have_copy; | 1778 | return err; |
| 1779 | } | 1779 | } |
| 1780 | 1780 | ||
| 1781 | static int eb_relocate(struct i915_execbuffer *eb) | 1781 | static int eb_relocate(struct i915_execbuffer *eb) |
| @@ -1825,7 +1825,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) | |||
| 1825 | int err; | 1825 | int err; |
| 1826 | 1826 | ||
| 1827 | for (i = 0; i < count; i++) { | 1827 | for (i = 0; i < count; i++) { |
| 1828 | const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; | 1828 | struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; |
| 1829 | struct i915_vma *vma = exec_to_vma(entry); | 1829 | struct i915_vma *vma = exec_to_vma(entry); |
| 1830 | struct drm_i915_gem_object *obj = vma->obj; | 1830 | struct drm_i915_gem_object *obj = vma->obj; |
| 1831 | 1831 | ||
| @@ -1841,12 +1841,14 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) | |||
| 1841 | eb->request->capture_list = capture; | 1841 | eb->request->capture_list = capture; |
| 1842 | } | 1842 | } |
| 1843 | 1843 | ||
| 1844 | if (unlikely(obj->cache_dirty && !obj->cache_coherent)) { | ||
| 1845 | if (i915_gem_clflush_object(obj, 0)) | ||
| 1846 | entry->flags &= ~EXEC_OBJECT_ASYNC; | ||
| 1847 | } | ||
| 1848 | |||
| 1844 | if (entry->flags & EXEC_OBJECT_ASYNC) | 1849 | if (entry->flags & EXEC_OBJECT_ASYNC) |
| 1845 | goto skip_flushes; | 1850 | goto skip_flushes; |
| 1846 | 1851 | ||
| 1847 | if (unlikely(obj->cache_dirty && !obj->cache_coherent)) | ||
| 1848 | i915_gem_clflush_object(obj, 0); | ||
| 1849 | |||
| 1850 | err = i915_gem_request_await_object | 1852 | err = i915_gem_request_await_object |
| 1851 | (eb->request, obj, entry->flags & EXEC_OBJECT_WRITE); | 1853 | (eb->request, obj, entry->flags & EXEC_OBJECT_WRITE); |
| 1852 | if (err) | 1854 | if (err) |
| @@ -2209,7 +2211,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, | |||
| 2209 | goto err_unlock; | 2211 | goto err_unlock; |
| 2210 | 2212 | ||
| 2211 | err = eb_relocate(&eb); | 2213 | err = eb_relocate(&eb); |
| 2212 | if (err) | 2214 | if (err) { |
| 2213 | /* | 2215 | /* |
| 2214 | * If the user expects the execobject.offset and | 2216 | * If the user expects the execobject.offset and |
| 2215 | * reloc.presumed_offset to be an exact match, | 2217 | * reloc.presumed_offset to be an exact match, |
| @@ -2218,8 +2220,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, | |||
| 2218 | * relocation. | 2220 | * relocation. |
| 2219 | */ | 2221 | */ |
| 2220 | args->flags &= ~__EXEC_HAS_RELOC; | 2222 | args->flags &= ~__EXEC_HAS_RELOC; |
| 2221 | if (err < 0) | ||
| 2222 | goto err_vma; | 2223 | goto err_vma; |
| 2224 | } | ||
| 2223 | 2225 | ||
| 2224 | if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) { | 2226 | if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) { |
| 2225 | DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); | 2227 | DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); |
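Among the execbuffer changes above, the clflush result is now used to clear EXEC_OBJECT_ASYNC when a flush was actually queued, and the access_ok() fix is the classic sizeof(pointer) versus sizeof(element) bug: the old expression validated remain pointer-sized bytes instead of remain relocation entries. A standalone illustration of that size difference; the struct layout is made up:

    #include <stdio.h>

    struct reloc_entry {                   /* illustrative layout only */
        unsigned long long target_handle;
        unsigned long long delta;
        unsigned long long offset;
        unsigned long long presumed_offset;
    };

    int main(void)
    {
        struct reloc_entry *urelocs = NULL;
        unsigned long remain = 8;

        printf("buggy: %lu bytes checked\n",
               (unsigned long)(remain * sizeof(urelocs)));
        printf("fixed: %lu bytes checked\n",
               (unsigned long)(remain * sizeof(*urelocs)));
        return 0;
    }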
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 7032c542a9b1..4dd4c2159a92 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c | |||
| @@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req) | |||
| 242 | goto err_unpin; | 242 | goto err_unpin; |
| 243 | } | 243 | } |
| 244 | 244 | ||
| 245 | ret = req->engine->emit_flush(req, EMIT_INVALIDATE); | ||
| 246 | if (ret) | ||
| 247 | goto err_unpin; | ||
| 248 | |||
| 245 | ret = req->engine->emit_bb_start(req, | 249 | ret = req->engine->emit_bb_start(req, |
| 246 | so->batch_offset, so->batch_size, | 250 | so->batch_offset, so->batch_size, |
| 247 | I915_DISPATCH_SECURE); | 251 | I915_DISPATCH_SECURE); |
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 1032f98add11..77fb39808131 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c | |||
| @@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock) | |||
| 43 | return true; | 43 | return true; |
| 44 | 44 | ||
| 45 | case MUTEX_TRYLOCK_FAILED: | 45 | case MUTEX_TRYLOCK_FAILED: |
| 46 | *unlock = false; | ||
| 47 | preempt_disable(); | ||
| 46 | do { | 48 | do { |
| 47 | cpu_relax(); | 49 | cpu_relax(); |
| 48 | if (mutex_trylock(&dev_priv->drm.struct_mutex)) { | 50 | if (mutex_trylock(&dev_priv->drm.struct_mutex)) { |
| 49 | case MUTEX_TRYLOCK_SUCCESS: | ||
| 50 | *unlock = true; | 51 | *unlock = true; |
| 51 | return true; | 52 | break; |
| 52 | } | 53 | } |
| 53 | } while (!need_resched()); | 54 | } while (!need_resched()); |
| 55 | preempt_enable(); | ||
| 56 | return *unlock; | ||
| 54 | 57 | ||
| 55 | return false; | 58 | case MUTEX_TRYLOCK_SUCCESS: |
| 59 | *unlock = true; | ||
| 60 | return true; | ||
| 56 | } | 61 | } |
| 57 | 62 | ||
| 58 | BUG(); | 63 | BUG(); |
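A hedged userspace sketch of the bounded trylock spin the shrinker hunk above restructures; the kernel version additionally disables preemption around the spin and bounds it with need_resched() rather than a fixed count:

    #include <pthread.h>
    #include <sched.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static bool shrinker_lock(bool *unlock)
    {
        for (int spin = 0; spin < 1000; spin++) {
            if (pthread_mutex_trylock(&big_lock) == 0) {
                *unlock = true;
                return true;
            }
            sched_yield();                 /* stand-in for cpu_relax() */
        }
        *unlock = false;
        return false;
    }

    int main(void)
    {
        bool unlock = false;

        if (shrinker_lock(&unlock))
            printf("got the lock\n");
        if (unlock)
            pthread_mutex_unlock(&big_lock);
        return 0;
    }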
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 9cd22f83b0cf..f33d90226704 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c | |||
| @@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req) | |||
| 1601 | u32 *cs; | 1601 | u32 *cs; |
| 1602 | int i; | 1602 | int i; |
| 1603 | 1603 | ||
| 1604 | cs = intel_ring_begin(req, n_flex_regs * 2 + 4); | 1604 | cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4); |
| 1605 | if (IS_ERR(cs)) | 1605 | if (IS_ERR(cs)) |
| 1606 | return PTR_ERR(cs); | 1606 | return PTR_ERR(cs); |
| 1607 | 1607 | ||
| 1608 | *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1); | 1608 | *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1); |
| 1609 | 1609 | ||
| 1610 | *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); | 1610 | *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); |
| 1611 | *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | | 1611 | *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | |
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 4a673fc1a432..20cf272c97b1 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h | |||
| @@ -284,12 +284,12 @@ static inline void __i915_vma_pin(struct i915_vma *vma) | |||
| 284 | 284 | ||
| 285 | static inline void __i915_vma_unpin(struct i915_vma *vma) | 285 | static inline void __i915_vma_unpin(struct i915_vma *vma) |
| 286 | { | 286 | { |
| 287 | GEM_BUG_ON(!i915_vma_is_pinned(vma)); | ||
| 288 | vma->flags--; | 287 | vma->flags--; |
| 289 | } | 288 | } |
| 290 | 289 | ||
| 291 | static inline void i915_vma_unpin(struct i915_vma *vma) | 290 | static inline void i915_vma_unpin(struct i915_vma *vma) |
| 292 | { | 291 | { |
| 292 | GEM_BUG_ON(!i915_vma_is_pinned(vma)); | ||
| 293 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); | 293 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
| 294 | __i915_vma_unpin(vma); | 294 | __i915_vma_unpin(vma); |
| 295 | } | 295 | } |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 639d45c1dd2e..7ea7fd1e8856 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
| @@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | |||
| 1120 | bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; | 1120 | bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; |
| 1121 | uint8_t aux_channel, ddc_pin; | 1121 | uint8_t aux_channel, ddc_pin; |
| 1122 | /* Each DDI port can have more than one value on the "DVO Port" field, | 1122 | /* Each DDI port can have more than one value on the "DVO Port" field, |
| 1123 | * so look for all the possible values for each port and abort if more | 1123 | * so look for all the possible values for each port. |
| 1124 | * than one is found. */ | 1124 | */ |
| 1125 | int dvo_ports[][3] = { | 1125 | int dvo_ports[][3] = { |
| 1126 | {DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, | 1126 | {DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, |
| 1127 | {DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, | 1127 | {DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, |
| @@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | |||
| 1130 | {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, | 1130 | {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, |
| 1131 | }; | 1131 | }; |
| 1132 | 1132 | ||
| 1133 | /* Find the child device to use, abort if more than one found. */ | 1133 | /* |
| 1134 | * Find the first child device to reference the port, report if more | ||
| 1135 | * than one found. | ||
| 1136 | */ | ||
| 1134 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { | 1137 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { |
| 1135 | it = dev_priv->vbt.child_dev + i; | 1138 | it = dev_priv->vbt.child_dev + i; |
| 1136 | 1139 | ||
| @@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | |||
| 1140 | 1143 | ||
| 1141 | if (it->common.dvo_port == dvo_ports[port][j]) { | 1144 | if (it->common.dvo_port == dvo_ports[port][j]) { |
| 1142 | if (child) { | 1145 | if (child) { |
| 1143 | DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n", | 1146 | DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n", |
| 1144 | port_name(port)); | 1147 | port_name(port)); |
| 1145 | return; | 1148 | } else { |
| 1149 | child = it; | ||
| 1146 | } | 1150 | } |
| 1147 | child = it; | ||
| 1148 | } | 1151 | } |
| 1149 | } | 1152 | } |
| 1150 | } | 1153 | } |
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 306c6b06b330..17c4ae7e4e7c 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c | |||
| @@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset) | |||
| 398 | } | 398 | } |
| 399 | 399 | ||
| 400 | /* Program the max register to clamp values > 1.0. */ | 400 | /* Program the max register to clamp values > 1.0. */ |
| 401 | i = lut_size - 1; | ||
| 401 | I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), | 402 | I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), |
| 402 | drm_color_lut_extract(lut[i].red, 16)); | 403 | drm_color_lut_extract(lut[i].red, 16)); |
| 403 | I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), | 404 | I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), |
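The one-line fix above reads as an out-of-bounds repair, assuming the preceding programming loop leaves i equal to lut_size: the max registers should be loaded from the last LUT entry, not from one past the end of the array. A trivial demonstration of the indexing:

    #include <stdio.h>

    int main(void)
    {
        int lut[4] = { 0, 21845, 43690, 65535 };
        int lut_size = 4;
        int i;

        for (i = 0; i < lut_size; i++)
            ;                              /* the loop exits with i == lut_size */

        i = lut_size - 1;                  /* the fix: index the last entry */
        printf("max register clamps at %d\n", lut[i]);
        return 0;
    }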
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 80e96f1f49d2..d3b3252a8742 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -1762,7 +1762,7 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, | |||
| 1762 | if (dev_priv->vbt.edp.low_vswing) { | 1762 | if (dev_priv->vbt.edp.low_vswing) { |
| 1763 | if (voltage == VOLTAGE_INFO_0_85V) { | 1763 | if (voltage == VOLTAGE_INFO_0_85V) { |
| 1764 | *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); | 1764 | *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); |
| 1765 | return cnl_ddi_translations_dp_0_85V; | 1765 | return cnl_ddi_translations_edp_0_85V; |
| 1766 | } else if (voltage == VOLTAGE_INFO_0_95V) { | 1766 | } else if (voltage == VOLTAGE_INFO_0_95V) { |
| 1767 | *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); | 1767 | *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); |
| 1768 | return cnl_ddi_translations_edp_0_95V; | 1768 | return cnl_ddi_translations_edp_0_95V; |
| @@ -1896,8 +1896,8 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level) | |||
| 1896 | val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); | 1896 | val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); |
| 1897 | val &= ~LOADGEN_SELECT; | 1897 | val &= ~LOADGEN_SELECT; |
| 1898 | 1898 | ||
| 1899 | if (((rate < 600000) && (width == 4) && (ln >= 1)) || | 1899 | if ((rate <= 600000 && width == 4 && ln >= 1) || |
| 1900 | ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) { | 1900 | (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { |
| 1901 | val |= LOADGEN_SELECT; | 1901 | val |= LOADGEN_SELECT; |
| 1902 | } | 1902 | } |
| 1903 | I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); | 1903 | I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dec9e58545a1..cc484b56eeaa 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -3427,26 +3427,6 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv) | |||
| 3427 | intel_finish_page_flip_cs(dev_priv, crtc->pipe); | 3427 | intel_finish_page_flip_cs(dev_priv, crtc->pipe); |
| 3428 | } | 3428 | } |
| 3429 | 3429 | ||
| 3430 | static void intel_update_primary_planes(struct drm_device *dev) | ||
| 3431 | { | ||
| 3432 | struct drm_crtc *crtc; | ||
| 3433 | |||
| 3434 | for_each_crtc(dev, crtc) { | ||
| 3435 | struct intel_plane *plane = to_intel_plane(crtc->primary); | ||
| 3436 | struct intel_plane_state *plane_state = | ||
| 3437 | to_intel_plane_state(plane->base.state); | ||
| 3438 | |||
| 3439 | if (plane_state->base.visible) { | ||
| 3440 | trace_intel_update_plane(&plane->base, | ||
| 3441 | to_intel_crtc(crtc)); | ||
| 3442 | |||
| 3443 | plane->update_plane(plane, | ||
| 3444 | to_intel_crtc_state(crtc->state), | ||
| 3445 | plane_state); | ||
| 3446 | } | ||
| 3447 | } | ||
| 3448 | } | ||
| 3449 | |||
| 3450 | static int | 3430 | static int |
| 3451 | __intel_display_resume(struct drm_device *dev, | 3431 | __intel_display_resume(struct drm_device *dev, |
| 3452 | struct drm_atomic_state *state, | 3432 | struct drm_atomic_state *state, |
| @@ -3499,6 +3479,19 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) | |||
| 3499 | struct drm_atomic_state *state; | 3479 | struct drm_atomic_state *state; |
| 3500 | int ret; | 3480 | int ret; |
| 3501 | 3481 | ||
| 3482 | |||
| 3483 | /* reset doesn't touch the display */ | ||
| 3484 | if (!i915.force_reset_modeset_test && | ||
| 3485 | !gpu_reset_clobbers_display(dev_priv)) | ||
| 3486 | return; | ||
| 3487 | |||
| 3488 | /* We have a modeset vs reset deadlock, defensively unbreak it. | ||
| 3489 | * | ||
| 3490 | * FIXME: We can do a _lot_ better, this is just a first iteration. | ||
| 3491 | */ | ||
| 3492 | i915_gem_set_wedged(dev_priv); | ||
| 3493 | DRM_DEBUG_DRIVER("Wedging GPU to avoid deadlocks with pending modeset updates\n"); | ||
| 3494 | |||
| 3502 | /* | 3495 | /* |
| 3503 | * Need mode_config.mutex so that we don't | 3496 | * Need mode_config.mutex so that we don't |
| 3504 | * trample ongoing ->detect() and whatnot. | 3497 | * trample ongoing ->detect() and whatnot. |
| @@ -3512,12 +3505,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) | |||
| 3512 | 3505 | ||
| 3513 | drm_modeset_backoff(ctx); | 3506 | drm_modeset_backoff(ctx); |
| 3514 | } | 3507 | } |
| 3515 | |||
| 3516 | /* reset doesn't touch the display, but flips might get nuked anyway, */ | ||
| 3517 | if (!i915.force_reset_modeset_test && | ||
| 3518 | !gpu_reset_clobbers_display(dev_priv)) | ||
| 3519 | return; | ||
| 3520 | |||
| 3521 | /* | 3508 | /* |
| 3522 | * Disabling the crtcs gracefully seems nicer. Also the | 3509 | * Disabling the crtcs gracefully seems nicer. Also the |
| 3523 | * g33 docs say we should at least disable all the planes. | 3510 | * g33 docs say we should at least disable all the planes. |
| @@ -3547,6 +3534,14 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) | |||
| 3547 | struct drm_atomic_state *state = dev_priv->modeset_restore_state; | 3534 | struct drm_atomic_state *state = dev_priv->modeset_restore_state; |
| 3548 | int ret; | 3535 | int ret; |
| 3549 | 3536 | ||
| 3537 | /* reset doesn't touch the display */ | ||
| 3538 | if (!i915.force_reset_modeset_test && | ||
| 3539 | !gpu_reset_clobbers_display(dev_priv)) | ||
| 3540 | return; | ||
| 3541 | |||
| 3542 | if (!state) | ||
| 3543 | goto unlock; | ||
| 3544 | |||
| 3550 | /* | 3545 | /* |
| 3551 | * Flips in the rings will be nuked by the reset, | 3546 | * Flips in the rings will be nuked by the reset, |
| 3552 | * so complete all pending flips so that user space | 3547 | * so complete all pending flips so that user space |
| @@ -3558,22 +3553,10 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) | |||
| 3558 | 3553 | ||
| 3559 | /* reset doesn't touch the display */ | 3554 | /* reset doesn't touch the display */ |
| 3560 | if (!gpu_reset_clobbers_display(dev_priv)) { | 3555 | if (!gpu_reset_clobbers_display(dev_priv)) { |
| 3561 | if (!state) { | 3556 | /* for testing only restore the display */ |
| 3562 | /* | 3557 | ret = __intel_display_resume(dev, state, ctx); |
| 3563 | * Flips in the rings have been nuked by the reset, | ||
| 3564 | * so update the base address of all primary | ||
| 3565 | * planes to the the last fb to make sure we're | ||
| 3566 | * showing the correct fb after a reset. | ||
| 3567 | * | ||
| 3568 | * FIXME: Atomic will make this obsolete since we won't schedule | ||
| 3569 | * CS-based flips (which might get lost in gpu resets) any more. | ||
| 3570 | */ | ||
| 3571 | intel_update_primary_planes(dev); | ||
| 3572 | } else { | ||
| 3573 | ret = __intel_display_resume(dev, state, ctx); | ||
| 3574 | if (ret) | 3558 | if (ret) |
| 3575 | DRM_ERROR("Restoring old state failed with %i\n", ret); | 3559 | DRM_ERROR("Restoring old state failed with %i\n", ret); |
| 3576 | } | ||
| 3577 | } else { | 3560 | } else { |
| 3578 | /* | 3561 | /* |
| 3579 | * The display has been reset as well, | 3562 | * The display has been reset as well, |
| @@ -3597,8 +3580,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) | |||
| 3597 | intel_hpd_init(dev_priv); | 3580 | intel_hpd_init(dev_priv); |
| 3598 | } | 3581 | } |
| 3599 | 3582 | ||
| 3600 | if (state) | 3583 | drm_atomic_state_put(state); |
| 3601 | drm_atomic_state_put(state); | 3584 | unlock: |
| 3602 | drm_modeset_drop_locks(ctx); | 3585 | drm_modeset_drop_locks(ctx); |
| 3603 | drm_modeset_acquire_fini(ctx); | 3586 | drm_modeset_acquire_fini(ctx); |
| 3604 | mutex_unlock(&dev->mode_config.mutex); | 3587 | mutex_unlock(&dev->mode_config.mutex); |
| @@ -9117,6 +9100,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, | |||
| 9117 | u64 power_domain_mask; | 9100 | u64 power_domain_mask; |
| 9118 | bool active; | 9101 | bool active; |
| 9119 | 9102 | ||
| 9103 | if (INTEL_GEN(dev_priv) >= 9) { | ||
| 9104 | intel_crtc_init_scalers(crtc, pipe_config); | ||
| 9105 | |||
| 9106 | pipe_config->scaler_state.scaler_id = -1; | ||
| 9107 | pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); | ||
| 9108 | } | ||
| 9109 | |||
| 9120 | power_domain = POWER_DOMAIN_PIPE(crtc->pipe); | 9110 | power_domain = POWER_DOMAIN_PIPE(crtc->pipe); |
| 9121 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) | 9111 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
| 9122 | return false; | 9112 | return false; |
| @@ -9145,13 +9135,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, | |||
| 9145 | pipe_config->gamma_mode = | 9135 | pipe_config->gamma_mode = |
| 9146 | I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; | 9136 | I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; |
| 9147 | 9137 | ||
| 9148 | if (INTEL_GEN(dev_priv) >= 9) { | ||
| 9149 | intel_crtc_init_scalers(crtc, pipe_config); | ||
| 9150 | |||
| 9151 | pipe_config->scaler_state.scaler_id = -1; | ||
| 9152 | pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); | ||
| 9153 | } | ||
| 9154 | |||
| 9155 | power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); | 9138 | power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); |
| 9156 | if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { | 9139 | if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { |
| 9157 | power_domain_mask |= BIT_ULL(power_domain); | 9140 | power_domain_mask |= BIT_ULL(power_domain); |
| @@ -9540,7 +9523,16 @@ static void i9xx_update_cursor(struct intel_plane *plane, | |||
| 9540 | * On some platforms writing CURCNTR first will also | 9523 | * On some platforms writing CURCNTR first will also |
| 9541 | * cause CURPOS to be armed by the CURBASE write. | 9524 | * cause CURPOS to be armed by the CURBASE write. |
| 9542 | * Without the CURCNTR write the CURPOS write would | 9525 | * Without the CURCNTR write the CURPOS write would |
| 9543 | * arm itself. | 9526 | * arm itself. Thus we always start the full update |
| 9527 | * with a CURCNTR write. | ||
| 9528 | * | ||
| 9529 | * On other platforms CURPOS always requires the | ||
| 9530 | * CURBASE write to arm the update. Additionally, | ||
| 9531 | * a write to any of the cursor registers will cancel | ||
| 9532 | * an already armed cursor update. Thus leaving out | ||
| 9533 | * the CURBASE write after CURPOS could lead to a | ||
| 9534 | * cursor that doesn't appear to move, or even change | ||
| 9535 | * shape. Thus we always write CURBASE. | ||
| 9544 | * | 9536 | * |
| 9545 | * CURCNTR and CUR_FBC_CTL are always | 9537 | * CURCNTR and CUR_FBC_CTL are always |
| 9546 | * armed by the CURBASE write only. | 9538 | * armed by the CURBASE write only. |
| @@ -9559,6 +9551,7 @@ static void i9xx_update_cursor(struct intel_plane *plane, | |||
| 9559 | plane->cursor.cntl = cntl; | 9551 | plane->cursor.cntl = cntl; |
| 9560 | } else { | 9552 | } else { |
| 9561 | I915_WRITE_FW(CURPOS(pipe), pos); | 9553 | I915_WRITE_FW(CURPOS(pipe), pos); |
| 9554 | I915_WRITE_FW(CURBASE(pipe), base); | ||
| 9562 | } | 9555 | } |
| 9563 | 9556 | ||
| 9564 | POSTING_READ_FW(CURBASE(pipe)); | 9557 | POSTING_READ_FW(CURBASE(pipe)); |
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c index 6e09ceb71500..150a156f3b1e 100644 --- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c | |||
| @@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector) | |||
| 46 | struct intel_encoder *encoder = connector->encoder; | 46 | struct intel_encoder *encoder = connector->encoder; |
| 47 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 47 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
| 48 | struct mipi_dsi_device *dsi_device; | 48 | struct mipi_dsi_device *dsi_device; |
| 49 | u8 data; | 49 | u8 data = 0; |
| 50 | enum port port; | 50 | enum port port; |
| 51 | 51 | ||
| 52 | /* FIXME: Need to take care of 16 bit brightness level */ | 52 | /* FIXME: Need to take care of 16 bit brightness level */ |
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index 7158c7ce9c09..91c07b0c8db9 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c | |||
| @@ -306,7 +306,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv, | |||
| 306 | 306 | ||
| 307 | if (!gpio_desc) { | 307 | if (!gpio_desc) { |
| 308 | gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, | 308 | gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, |
| 309 | "panel", gpio_index, | 309 | NULL, gpio_index, |
| 310 | value ? GPIOD_OUT_LOW : | 310 | value ? GPIOD_OUT_LOW : |
| 311 | GPIOD_OUT_HIGH); | 311 | GPIOD_OUT_HIGH); |
| 312 | 312 | ||
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 52d5b82790d9..c17ed0e62b67 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c | |||
| @@ -45,7 +45,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) | |||
| 45 | return true; | 45 | return true; |
| 46 | if (IS_SKYLAKE(dev_priv)) | 46 | if (IS_SKYLAKE(dev_priv)) |
| 47 | return true; | 47 | return true; |
| 48 | if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D) | 48 | if (IS_KABYLAKE(dev_priv)) |
| 49 | return true; | 49 | return true; |
| 50 | return false; | 50 | return false; |
| 51 | } | 51 | } |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 7404cf2aac28..2afa4daa88e8 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -1221,6 +1221,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) | |||
| 1221 | return ret; | 1221 | return ret; |
| 1222 | } | 1222 | } |
| 1223 | 1223 | ||
| 1224 | static u8 gtiir[] = { | ||
| 1225 | [RCS] = 0, | ||
| 1226 | [BCS] = 0, | ||
| 1227 | [VCS] = 1, | ||
| 1228 | [VCS2] = 1, | ||
| 1229 | [VECS] = 3, | ||
| 1230 | }; | ||
| 1231 | |||
| 1224 | static int gen8_init_common_ring(struct intel_engine_cs *engine) | 1232 | static int gen8_init_common_ring(struct intel_engine_cs *engine) |
| 1225 | { | 1233 | { |
| 1226 | struct drm_i915_private *dev_priv = engine->i915; | 1234 | struct drm_i915_private *dev_priv = engine->i915; |
| @@ -1245,9 +1253,22 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) | |||
| 1245 | 1253 | ||
| 1246 | DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); | 1254 | DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); |
| 1247 | 1255 | ||
| 1248 | /* After a GPU reset, we may have requests to replay */ | 1256 | GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir)); |
| 1257 | |||
| 1258 | /* | ||
| 1259 | * Clear any pending interrupt state. | ||
| 1260 | * | ||
| 1261 | * We do it twice out of paranoia that some of the IIR are double | ||
| 1262 | * buffered, and if we only reset it once there may still be | ||
| 1263 | * an interrupt pending. | ||
| 1264 | */ | ||
| 1265 | I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), | ||
| 1266 | GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); | ||
| 1267 | I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), | ||
| 1268 | GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); | ||
| 1249 | clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); | 1269 | clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); |
| 1250 | 1270 | ||
| 1271 | /* After a GPU reset, we may have requests to replay */ | ||
| 1251 | submit = false; | 1272 | submit = false; |
| 1252 | for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { | 1273 | for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { |
| 1253 | if (!port_isset(&port[n])) | 1274 | if (!port_isset(&port[n])) |
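Put together, the new pre-replay reset amounts to the short sequence below; the duplicated IIR write is intentional, as the comment says, to cover the case where the register is double buffered. This is only a condensed restatement of the hunk, using the gtiir table it introduces:

        u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift;

        GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
        I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), irqs);  /* clear pending state */
        I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), irqs);  /* again, in case IIR is double buffered */
        clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);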
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 52b3a1fd4059..57ef5833c427 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h | |||
| @@ -63,7 +63,6 @@ enum { | |||
| 63 | }; | 63 | }; |
| 64 | 64 | ||
| 65 | /* Logical Rings */ | 65 | /* Logical Rings */ |
| 66 | void intel_logical_ring_stop(struct intel_engine_cs *engine); | ||
| 67 | void intel_logical_ring_cleanup(struct intel_engine_cs *engine); | 66 | void intel_logical_ring_cleanup(struct intel_engine_cs *engine); |
| 68 | int logical_render_ring_init(struct intel_engine_cs *engine); | 67 | int logical_render_ring_init(struct intel_engine_cs *engine); |
| 69 | int logical_xcs_ring_init(struct intel_engine_cs *engine); | 68 | int logical_xcs_ring_init(struct intel_engine_cs *engine); |
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 5abef482eacf..beb9baaf2f2e 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c | |||
| @@ -210,8 +210,8 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port) | |||
| 210 | struct drm_device *dev = intel_dig_port->base.base.dev; | 210 | struct drm_device *dev = intel_dig_port->base.base.dev; |
| 211 | struct drm_i915_private *dev_priv = to_i915(dev); | 211 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 212 | 212 | ||
| 213 | if (!IS_GEN9(dev_priv)) { | 213 | if (!HAS_LSPCON(dev_priv)) { |
| 214 | DRM_ERROR("LSPCON is supported on GEN9 only\n"); | 214 | DRM_ERROR("LSPCON is not supported on this platform\n"); |
| 215 | return false; | 215 | return false; |
| 216 | } | 216 | } |
| 217 | 217 | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 96c2cbd81869..593349be8b9d 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
| @@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, | |||
| 469 | 469 | ||
| 470 | if (i915.invert_brightness > 0 || | 470 | if (i915.invert_brightness > 0 || |
| 471 | dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { | 471 | dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { |
| 472 | return panel->backlight.max - val; | 472 | return panel->backlight.max - val + panel->backlight.min; |
| 473 | } | 473 | } |
| 474 | 474 | ||
| 475 | return val; | 475 | return val; |
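The extra '+ panel->backlight.min' keeps the inverted value inside the panel's valid range when the minimum is non-zero. A small self-contained example with hypothetical limits (min = 10, max = 100): inverting val = 100 used to yield 0, below the minimum, whereas the fixed formula maps max back to min and min back to max:

#include <assert.h>

/* Same inversion as the fixed driver code: reverse [min, max] onto itself. */
static unsigned int invert_brightness(unsigned int val,
                                      unsigned int min, unsigned int max)
{
        return max - val + min;
}

int main(void)
{
        unsigned int min = 10, max = 100;   /* hypothetical panel range */

        assert(invert_brightness(max, min, max) == min);  /* 100 -> 10, not 0 */
        assert(invert_brightness(min, min, max) == max);  /* 10 -> 100 */
        return 0;
}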
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 48ea0fca1f72..40b224b44d1b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -4463,8 +4463,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
| 4463 | if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && | 4463 | if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && |
| 4464 | (plane_bytes_per_line / 512 < 1)) | 4464 | (plane_bytes_per_line / 512 < 1)) |
| 4465 | selected_result = method2; | 4465 | selected_result = method2; |
| 4466 | else if ((ddb_allocation && ddb_allocation / | 4466 | else if (ddb_allocation >= |
| 4467 | fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1) | 4467 | fixed_16_16_to_u32_round_up(plane_blocks_per_line)) |
| 4468 | selected_result = min_fixed_16_16(method1, method2); | 4468 | selected_result = min_fixed_16_16(method1, method2); |
| 4469 | else if (latency >= linetime_us) | 4469 | else if (latency >= linetime_us) |
| 4470 | selected_result = min_fixed_16_16(method1, method2); | 4470 | selected_result = min_fixed_16_16(method1, method2); |
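For clarity: in the old form the logical AND sits inside the parentheses, so (ddb_allocation && ddb_allocation / X) >= 1 compares a 0-or-1 boolean with 1 and performs a division along the way; the new form states the intended test, ddb_allocation >= X, directly and without dividing (X standing in for fixed_16_16_to_u32_round_up(plane_blocks_per_line)). A tiny standalone comparison with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned int ddb = 8, blocks = 3;   /* hypothetical values */

        /* Old form: the && collapses to 0 or 1 before the comparison,
         * and ddb / blocks is evaluated just to feed that boolean. */
        int old_sel = (ddb && ddb / blocks) >= 1;

        /* New form: compare the two quantities directly. */
        int new_sel = ddb >= blocks;

        printf("old=%d new=%d\n", old_sel, new_sel);   /* both 1 here */
        return 0;
}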
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 627e2aa09766..8cdec455cf7d 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c | |||
| @@ -206,7 +206,7 @@ struct drm_i915_private *mock_gem_device(void) | |||
| 206 | mkwrite_device_info(i915)->ring_mask = BIT(0); | 206 | mkwrite_device_info(i915)->ring_mask = BIT(0); |
| 207 | i915->engine[RCS] = mock_engine(i915, "mock"); | 207 | i915->engine[RCS] = mock_engine(i915, "mock"); |
| 208 | if (!i915->engine[RCS]) | 208 | if (!i915->engine[RCS]) |
| 209 | goto err_dependencies; | 209 | goto err_priorities; |
| 210 | 210 | ||
| 211 | i915->kernel_context = mock_context(i915, NULL); | 211 | i915->kernel_context = mock_context(i915, NULL); |
| 212 | if (!i915->kernel_context) | 212 | if (!i915->kernel_context) |
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 6276bb834b4f..d3845989a29d 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c | |||
| @@ -545,15 +545,13 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, | |||
| 545 | return; | 545 | return; |
| 546 | } | 546 | } |
| 547 | 547 | ||
| 548 | ics = ipu_drm_fourcc_to_colorspace(fb->format->format); | ||
| 548 | switch (ipu_plane->dp_flow) { | 549 | switch (ipu_plane->dp_flow) { |
| 549 | case IPU_DP_FLOW_SYNC_BG: | 550 | case IPU_DP_FLOW_SYNC_BG: |
| 550 | ipu_dp_setup_channel(ipu_plane->dp, | 551 | ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB); |
| 551 | IPUV3_COLORSPACE_RGB, | ||
| 552 | IPUV3_COLORSPACE_RGB); | ||
| 553 | ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true); | 552 | ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true); |
| 554 | break; | 553 | break; |
| 555 | case IPU_DP_FLOW_SYNC_FG: | 554 | case IPU_DP_FLOW_SYNC_FG: |
| 556 | ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format); | ||
| 557 | ipu_dp_setup_channel(ipu_plane->dp, ics, | 555 | ipu_dp_setup_channel(ipu_plane->dp, ics, |
| 558 | IPUV3_COLORSPACE_UNKNOWN); | 556 | IPUV3_COLORSPACE_UNKNOWN); |
| 559 | /* Enable local alpha on partial plane */ | 557 | /* Enable local alpha on partial plane */ |
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index b638d192ce5e..99d39b2aefa6 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig | |||
| @@ -5,7 +5,7 @@ config DRM_MSM | |||
| 5 | depends on ARCH_QCOM || (ARM && COMPILE_TEST) | 5 | depends on ARCH_QCOM || (ARM && COMPILE_TEST) |
| 6 | depends on OF && COMMON_CLK | 6 | depends on OF && COMMON_CLK |
| 7 | depends on MMU | 7 | depends on MMU |
| 8 | select QCOM_MDT_LOADER | 8 | select QCOM_MDT_LOADER if ARCH_QCOM |
| 9 | select REGULATOR | 9 | select REGULATOR |
| 10 | select DRM_KMS_HELPER | 10 | select DRM_KMS_HELPER |
| 11 | select DRM_PANEL | 11 | select DRM_PANEL |
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index b4b54f1c24bc..f9eae03aa1dc 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | #include <linux/cpumask.h> | 15 | #include <linux/cpumask.h> |
| 16 | #include <linux/qcom_scm.h> | 16 | #include <linux/qcom_scm.h> |
| 17 | #include <linux/dma-mapping.h> | 17 | #include <linux/dma-mapping.h> |
| 18 | #include <linux/of_reserved_mem.h> | 18 | #include <linux/of_address.h> |
| 19 | #include <linux/soc/qcom/mdt_loader.h> | 19 | #include <linux/soc/qcom/mdt_loader.h> |
| 20 | #include "msm_gem.h" | 20 | #include "msm_gem.h" |
| 21 | #include "msm_mmu.h" | 21 | #include "msm_mmu.h" |
| @@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu); | |||
| 26 | 26 | ||
| 27 | #define GPU_PAS_ID 13 | 27 | #define GPU_PAS_ID 13 |
| 28 | 28 | ||
| 29 | #if IS_ENABLED(CONFIG_QCOM_MDT_LOADER) | ||
| 30 | |||
| 31 | static int zap_shader_load_mdt(struct device *dev, const char *fwname) | 29 | static int zap_shader_load_mdt(struct device *dev, const char *fwname) |
| 32 | { | 30 | { |
| 33 | const struct firmware *fw; | 31 | const struct firmware *fw; |
| 32 | struct device_node *np; | ||
| 33 | struct resource r; | ||
| 34 | phys_addr_t mem_phys; | 34 | phys_addr_t mem_phys; |
| 35 | ssize_t mem_size; | 35 | ssize_t mem_size; |
| 36 | void *mem_region = NULL; | 36 | void *mem_region = NULL; |
| 37 | int ret; | 37 | int ret; |
| 38 | 38 | ||
| 39 | if (!IS_ENABLED(CONFIG_ARCH_QCOM)) | ||
| 40 | return -EINVAL; | ||
| 41 | |||
| 42 | np = of_get_child_by_name(dev->of_node, "zap-shader"); | ||
| 43 | if (!np) | ||
| 44 | return -ENODEV; | ||
| 45 | |||
| 46 | np = of_parse_phandle(np, "memory-region", 0); | ||
| 47 | if (!np) | ||
| 48 | return -EINVAL; | ||
| 49 | |||
| 50 | ret = of_address_to_resource(np, 0, &r); | ||
| 51 | if (ret) | ||
| 52 | return ret; | ||
| 53 | |||
| 54 | mem_phys = r.start; | ||
| 55 | mem_size = resource_size(&r); | ||
| 56 | |||
| 39 | /* Request the MDT file for the firmware */ | 57 | /* Request the MDT file for the firmware */ |
| 40 | ret = request_firmware(&fw, fwname, dev); | 58 | ret = request_firmware(&fw, fwname, dev); |
| 41 | if (ret) { | 59 | if (ret) { |
| @@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname) | |||
| 51 | } | 69 | } |
| 52 | 70 | ||
| 53 | /* Allocate memory for the firmware image */ | 71 | /* Allocate memory for the firmware image */ |
| 54 | mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL); | 72 | mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC); |
| 55 | if (!mem_region) { | 73 | if (!mem_region) { |
| 56 | ret = -ENOMEM; | 74 | ret = -ENOMEM; |
| 57 | goto out; | 75 | goto out; |
| @@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname) | |||
| 69 | DRM_DEV_ERROR(dev, "Unable to authorize the image\n"); | 87 | DRM_DEV_ERROR(dev, "Unable to authorize the image\n"); |
| 70 | 88 | ||
| 71 | out: | 89 | out: |
| 90 | if (mem_region) | ||
| 91 | memunmap(mem_region); | ||
| 92 | |||
| 72 | release_firmware(fw); | 93 | release_firmware(fw); |
| 73 | 94 | ||
| 74 | return ret; | 95 | return ret; |
| 75 | } | 96 | } |
| 76 | #else | ||
| 77 | static int zap_shader_load_mdt(struct device *dev, const char *fwname) | ||
| 78 | { | ||
| 79 | return -ENODEV; | ||
| 80 | } | ||
| 81 | #endif | ||
| 82 | 97 | ||
| 83 | static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | 98 | static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, |
| 84 | struct msm_file_private *ctx) | 99 | struct msm_file_private *ctx) |
| @@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | |||
| 117 | gpu->funcs->flush(gpu); | 132 | gpu->funcs->flush(gpu); |
| 118 | } | 133 | } |
| 119 | 134 | ||
| 120 | struct a5xx_hwcg { | 135 | static const struct { |
| 121 | u32 offset; | 136 | u32 offset; |
| 122 | u32 value; | 137 | u32 value; |
| 123 | }; | 138 | } a5xx_hwcg[] = { |
| 124 | |||
| 125 | static const struct a5xx_hwcg a530_hwcg[] = { | ||
| 126 | {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, | 139 | {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, |
| 127 | {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, | 140 | {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, |
| 128 | {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, | 141 | {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, |
| @@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = { | |||
| 217 | {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} | 230 | {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} |
| 218 | }; | 231 | }; |
| 219 | 232 | ||
| 220 | static const struct { | 233 | void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) |
| 221 | int (*test)(struct adreno_gpu *gpu); | ||
| 222 | const struct a5xx_hwcg *regs; | ||
| 223 | unsigned int count; | ||
| 224 | } a5xx_hwcg_regs[] = { | ||
| 225 | { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), }, | ||
| 226 | }; | ||
| 227 | |||
| 228 | static void _a5xx_enable_hwcg(struct msm_gpu *gpu, | ||
| 229 | const struct a5xx_hwcg *regs, unsigned int count) | ||
| 230 | { | 234 | { |
| 231 | unsigned int i; | 235 | unsigned int i; |
| 232 | 236 | ||
| 233 | for (i = 0; i < count; i++) | 237 | for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++) |
| 234 | gpu_write(gpu, regs[i].offset, regs[i].value); | 238 | gpu_write(gpu, a5xx_hwcg[i].offset, |
| 239 | state ? a5xx_hwcg[i].value : 0); | ||
| 235 | 240 | ||
| 236 | gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00); | 241 | gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0); |
| 237 | gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182); | 242 | gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180); |
| 238 | } | ||
| 239 | |||
| 240 | static void a5xx_enable_hwcg(struct msm_gpu *gpu) | ||
| 241 | { | ||
| 242 | struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); | ||
| 243 | unsigned int i; | ||
| 244 | |||
| 245 | for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) { | ||
| 246 | if (a5xx_hwcg_regs[i].test(adreno_gpu)) { | ||
| 247 | _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs, | ||
| 248 | a5xx_hwcg_regs[i].count); | ||
| 249 | return; | ||
| 250 | } | ||
| 251 | } | ||
| 252 | } | 243 | } |
| 253 | 244 | ||
| 254 | static int a5xx_me_init(struct msm_gpu *gpu) | 245 | static int a5xx_me_init(struct msm_gpu *gpu) |
| @@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu) | |||
| 377 | return ret; | 368 | return ret; |
| 378 | } | 369 | } |
| 379 | 370 | ||
| 380 | /* Set up a child device to "own" the zap shader */ | ||
| 381 | static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev) | ||
| 382 | { | ||
| 383 | struct device_node *node; | ||
| 384 | int ret; | ||
| 385 | |||
| 386 | if (dev->parent) | ||
| 387 | return 0; | ||
| 388 | |||
| 389 | /* Find the sub-node for the zap shader */ | ||
| 390 | node = of_get_child_by_name(parent->of_node, "zap-shader"); | ||
| 391 | if (!node) { | ||
| 392 | DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n"); | ||
| 393 | return -ENODEV; | ||
| 394 | } | ||
| 395 | |||
| 396 | dev->parent = parent; | ||
| 397 | dev->of_node = node; | ||
| 398 | dev_set_name(dev, "adreno_zap_shader"); | ||
| 399 | |||
| 400 | ret = device_register(dev); | ||
| 401 | if (ret) { | ||
| 402 | DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n"); | ||
| 403 | goto out; | ||
| 404 | } | ||
| 405 | |||
| 406 | ret = of_reserved_mem_device_init(dev); | ||
| 407 | if (ret) { | ||
| 408 | DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n"); | ||
| 409 | device_unregister(dev); | ||
| 410 | } | ||
| 411 | |||
| 412 | out: | ||
| 413 | if (ret) | ||
| 414 | dev->parent = NULL; | ||
| 415 | |||
| 416 | return ret; | ||
| 417 | } | ||
| 418 | |||
| 419 | static int a5xx_zap_shader_init(struct msm_gpu *gpu) | 371 | static int a5xx_zap_shader_init(struct msm_gpu *gpu) |
| 420 | { | 372 | { |
| 421 | static bool loaded; | 373 | static bool loaded; |
| @@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu) | |||
| 444 | return -ENODEV; | 396 | return -ENODEV; |
| 445 | } | 397 | } |
| 446 | 398 | ||
| 447 | ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev); | 399 | ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw); |
| 448 | |||
| 449 | if (!ret) | ||
| 450 | ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev, | ||
| 451 | adreno_gpu->info->zapfw); | ||
| 452 | 400 | ||
| 453 | loaded = !ret; | 401 | loaded = !ret; |
| 454 | 402 | ||
| @@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) | |||
| 545 | gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); | 493 | gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); |
| 546 | 494 | ||
| 547 | /* Enable HWCG */ | 495 | /* Enable HWCG */ |
| 548 | a5xx_enable_hwcg(gpu); | 496 | a5xx_set_hwcg(gpu, true); |
| 549 | 497 | ||
| 550 | gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); | 498 | gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); |
| 551 | 499 | ||
| @@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu) | |||
| 691 | 639 | ||
| 692 | DBG("%s", gpu->name); | 640 | DBG("%s", gpu->name); |
| 693 | 641 | ||
| 694 | if (a5xx_gpu->zap_dev.parent) | ||
| 695 | device_unregister(&a5xx_gpu->zap_dev); | ||
| 696 | |||
| 697 | if (a5xx_gpu->pm4_bo) { | 642 | if (a5xx_gpu->pm4_bo) { |
| 698 | if (a5xx_gpu->pm4_iova) | 643 | if (a5xx_gpu->pm4_iova) |
| 699 | msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace); | 644 | msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace); |
| @@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = { | |||
| 920 | 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, | 865 | 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, |
| 921 | 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, | 866 | 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, |
| 922 | 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, | 867 | 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, |
| 923 | 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807, | 868 | 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841, |
| 924 | 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0, | 869 | 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28, |
| 925 | 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, | 870 | 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, |
| 926 | 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82, | 871 | 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98, |
| 927 | 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, | 872 | 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585, |
| 928 | 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, | 873 | 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, |
| 929 | 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, | 874 | 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, |
| 930 | 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145, | 875 | 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545, |
| 931 | 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23, | 876 | 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0, |
| 932 | 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43, | 877 | 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, |
| 933 | 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, | 878 | 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, |
| 934 | 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147, | 879 | 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9, |
| 935 | 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, | 880 | 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201, |
| 936 | 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268, | 881 | 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A, |
| 937 | 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, | 882 | 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F, |
| 938 | 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405, | 883 | 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, |
| 939 | 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3, | 884 | 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947, |
| 940 | 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9, | 885 | 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, |
| 941 | 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, | 886 | 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68, |
| 942 | 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A, | 887 | 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, |
| 943 | 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F, | 888 | 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, |
| 944 | 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0, | 889 | 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3, |
| 945 | 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, | 890 | 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F, |
| 946 | 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF, | 891 | 0xB9A0, 0xB9BF, ~0 |
| 947 | ~0 | ||
| 948 | }; | 892 | }; |
| 949 | 893 | ||
| 950 | static void a5xx_dump(struct msm_gpu *gpu) | 894 | static void a5xx_dump(struct msm_gpu *gpu) |
| @@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) | |||
| 1020 | { | 964 | { |
| 1021 | seq_printf(m, "status: %08x\n", | 965 | seq_printf(m, "status: %08x\n", |
| 1022 | gpu_read(gpu, REG_A5XX_RBBM_STATUS)); | 966 | gpu_read(gpu, REG_A5XX_RBBM_STATUS)); |
| 967 | |||
| 968 | /* | ||
| 969 | * Temporarily disable hardware clock gating before going into | ||
| 970 | * adreno_show to avoid issues while reading the registers | ||
| 971 | */ | ||
| 972 | a5xx_set_hwcg(gpu, false); | ||
| 1023 | adreno_show(gpu, m); | 973 | adreno_show(gpu, m); |
| 974 | a5xx_set_hwcg(gpu, true); | ||
| 1024 | } | 975 | } |
| 1025 | #endif | 976 | #endif |
| 1026 | 977 | ||
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 6638bc85645d..1137092241d5 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h | |||
| @@ -36,8 +36,6 @@ struct a5xx_gpu { | |||
| 36 | uint32_t gpmu_dwords; | 36 | uint32_t gpmu_dwords; |
| 37 | 37 | ||
| 38 | uint32_t lm_leakage; | 38 | uint32_t lm_leakage; |
| 39 | |||
| 40 | struct device zap_dev; | ||
| 41 | }; | 39 | }; |
| 42 | 40 | ||
| 43 | #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) | 41 | #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) |
| @@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, | |||
| 59 | } | 57 | } |
| 60 | 58 | ||
| 61 | bool a5xx_idle(struct msm_gpu *gpu); | 59 | bool a5xx_idle(struct msm_gpu *gpu); |
| 60 | void a5xx_set_hwcg(struct msm_gpu *gpu, bool state); | ||
| 62 | 61 | ||
| 63 | #endif /* __A5XX_GPU_H__ */ | 62 | #endif /* __A5XX_GPU_H__ */ |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index f1ab2703674a..7414c6bbd582 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
| @@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) | |||
| 48 | *value = adreno_gpu->base.fast_rate; | 48 | *value = adreno_gpu->base.fast_rate; |
| 49 | return 0; | 49 | return 0; |
| 50 | case MSM_PARAM_TIMESTAMP: | 50 | case MSM_PARAM_TIMESTAMP: |
| 51 | if (adreno_gpu->funcs->get_timestamp) | 51 | if (adreno_gpu->funcs->get_timestamp) { |
| 52 | return adreno_gpu->funcs->get_timestamp(gpu, value); | 52 | int ret; |
| 53 | |||
| 54 | pm_runtime_get_sync(&gpu->pdev->dev); | ||
| 55 | ret = adreno_gpu->funcs->get_timestamp(gpu, value); | ||
| 56 | pm_runtime_put_autosuspend(&gpu->pdev->dev); | ||
| 57 | |||
| 58 | return ret; | ||
| 59 | } | ||
| 53 | return -EINVAL; | 60 | return -EINVAL; |
| 54 | default: | 61 | default: |
| 55 | DBG("%s: invalid param: %u", gpu->name, param); | 62 | DBG("%s: invalid param: %u", gpu->name, param); |
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 9e9c5696bc03..c7b612c3d771 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c | |||
| @@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, | |||
| 2137 | struct msm_dsi_phy_clk_request *clk_req) | 2137 | struct msm_dsi_phy_clk_request *clk_req) |
| 2138 | { | 2138 | { |
| 2139 | struct msm_dsi_host *msm_host = to_msm_dsi_host(host); | 2139 | struct msm_dsi_host *msm_host = to_msm_dsi_host(host); |
| 2140 | int ret; | ||
| 2141 | |||
| 2142 | ret = dsi_calc_clk_rate(msm_host); | ||
| 2143 | if (ret) { | ||
| 2144 | pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); | ||
| 2145 | return; | ||
| 2146 | } | ||
| 2140 | 2147 | ||
| 2141 | clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; | 2148 | clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; |
| 2142 | clk_req->escclk_rate = msm_host->esc_clk_rate; | 2149 | clk_req->escclk_rate = msm_host->esc_clk_rate; |
| @@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, | |||
| 2280 | struct drm_display_mode *mode) | 2287 | struct drm_display_mode *mode) |
| 2281 | { | 2288 | { |
| 2282 | struct msm_dsi_host *msm_host = to_msm_dsi_host(host); | 2289 | struct msm_dsi_host *msm_host = to_msm_dsi_host(host); |
| 2283 | int ret; | ||
| 2284 | 2290 | ||
| 2285 | if (msm_host->mode) { | 2291 | if (msm_host->mode) { |
| 2286 | drm_mode_destroy(msm_host->dev, msm_host->mode); | 2292 | drm_mode_destroy(msm_host->dev, msm_host->mode); |
| @@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, | |||
| 2293 | return -ENOMEM; | 2299 | return -ENOMEM; |
| 2294 | } | 2300 | } |
| 2295 | 2301 | ||
| 2296 | ret = dsi_calc_clk_rate(msm_host); | ||
| 2297 | if (ret) { | ||
| 2298 | pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); | ||
| 2299 | return ret; | ||
| 2300 | } | ||
| 2301 | |||
| 2302 | return 0; | 2302 | return 0; |
| 2303 | } | 2303 | } |
| 2304 | 2304 | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index cb5415d6c04b..735a87a699fa 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
| @@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc) | |||
| 221 | struct mdp5_ctl *ctl = mdp5_cstate->ctl; | 221 | struct mdp5_ctl *ctl = mdp5_cstate->ctl; |
| 222 | uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; | 222 | uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; |
| 223 | unsigned long flags; | 223 | unsigned long flags; |
| 224 | enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; | 224 | enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; |
| 225 | enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; | 225 | enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } }; |
| 226 | int i, plane_cnt = 0; | 226 | int i, plane_cnt = 0; |
| 227 | bool bg_alpha_enabled = false; | 227 | bool bg_alpha_enabled = false; |
| 228 | u32 mixer_op_mode = 0; | 228 | u32 mixer_op_mode = 0; |
| @@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 753 | if (!handle) { | 753 | if (!handle) { |
| 754 | DBG("Cursor off"); | 754 | DBG("Cursor off"); |
| 755 | cursor_enable = false; | 755 | cursor_enable = false; |
| 756 | mdp5_enable(mdp5_kms); | ||
| 756 | goto set_cursor; | 757 | goto set_cursor; |
| 757 | } | 758 | } |
| 758 | 759 | ||
| @@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 776 | 777 | ||
| 777 | get_roi(crtc, &roi_w, &roi_h); | 778 | get_roi(crtc, &roi_w, &roi_h); |
| 778 | 779 | ||
| 780 | mdp5_enable(mdp5_kms); | ||
| 781 | |||
| 779 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); | 782 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); |
| 780 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), | 783 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), |
| 781 | MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); | 784 | MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); |
| @@ -804,6 +807,7 @@ set_cursor: | |||
| 804 | crtc_flush(crtc, flush_mask); | 807 | crtc_flush(crtc, flush_mask); |
| 805 | 808 | ||
| 806 | end: | 809 | end: |
| 810 | mdp5_disable(mdp5_kms); | ||
| 807 | if (old_bo) { | 811 | if (old_bo) { |
| 808 | drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); | 812 | drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); |
| 809 | /* enable vblank to complete cursor work: */ | 813 | /* enable vblank to complete cursor work: */ |
| @@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |||
| 836 | 840 | ||
| 837 | get_roi(crtc, &roi_w, &roi_h); | 841 | get_roi(crtc, &roi_w, &roi_h); |
| 838 | 842 | ||
| 843 | mdp5_enable(mdp5_kms); | ||
| 844 | |||
| 839 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); | 845 | spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); |
| 840 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), | 846 | mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), |
| 841 | MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | | 847 | MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | |
| @@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |||
| 847 | 853 | ||
| 848 | crtc_flush(crtc, flush_mask); | 854 | crtc_flush(crtc, flush_mask); |
| 849 | 855 | ||
| 856 | mdp5_disable(mdp5_kms); | ||
| 857 | |||
| 850 | return 0; | 858 | return 0; |
| 851 | } | 859 | } |
| 852 | 860 | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index 97f3294fbfc6..70bef51245af 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | |||
| @@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) | |||
| 299 | struct mdp5_interface *intf = mdp5_encoder->intf; | 299 | struct mdp5_interface *intf = mdp5_encoder->intf; |
| 300 | 300 | ||
| 301 | if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) | 301 | if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) |
| 302 | mdp5_cmd_encoder_disable(encoder); | 302 | mdp5_cmd_encoder_enable(encoder); |
| 303 | else | 303 | else |
| 304 | mdp5_vid_encoder_enable(encoder); | 304 | mdp5_vid_encoder_enable(encoder); |
| 305 | } | 305 | } |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 5d13fa5381ee..1c603aef3c59 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | |||
| @@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp, | |||
| 502 | const char *name, bool mandatory) | 502 | const char *name, bool mandatory) |
| 503 | { | 503 | { |
| 504 | struct device *dev = &pdev->dev; | 504 | struct device *dev = &pdev->dev; |
| 505 | struct clk *clk = devm_clk_get(dev, name); | 505 | struct clk *clk = msm_clk_get(pdev, name); |
| 506 | if (IS_ERR(clk) && mandatory) { | 506 | if (IS_ERR(clk) && mandatory) { |
| 507 | dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); | 507 | dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); |
| 508 | return PTR_ERR(clk); | 508 | return PTR_ERR(clk); |
| @@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev) | |||
| 887 | } | 887 | } |
| 888 | 888 | ||
| 889 | /* mandatory clocks: */ | 889 | /* mandatory clocks: */ |
| 890 | ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true); | 890 | ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true); |
| 891 | if (ret) | 891 | if (ret) |
| 892 | goto fail; | 892 | goto fail; |
| 893 | ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true); | 893 | ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true); |
| 894 | if (ret) | 894 | if (ret) |
| 895 | goto fail; | 895 | goto fail; |
| 896 | ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true); | 896 | ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true); |
| 897 | if (ret) | 897 | if (ret) |
| 898 | goto fail; | 898 | goto fail; |
| 899 | ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true); | 899 | ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true); |
| 900 | if (ret) | 900 | if (ret) |
| 901 | goto fail; | 901 | goto fail; |
| 902 | 902 | ||
| 903 | /* optional clocks: */ | 903 | /* optional clocks: */ |
| 904 | get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false); | 904 | get_clk(pdev, &mdp5_kms->lut_clk, "lut", false); |
| 905 | 905 | ||
| 906 | /* we need to set a default rate before enabling. Set a safe | 906 | /* we need to set a default rate before enabling. Set a safe |
| 907 | * rate first, then figure out hw revision, and then set a | 907 | * rate first, then figure out hw revision, and then set a |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index fe3a4de1a433..61f39c86dd09 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | |||
| @@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, | |||
| 890 | struct mdp5_hw_pipe *right_hwpipe; | 890 | struct mdp5_hw_pipe *right_hwpipe; |
| 891 | const struct mdp_format *format; | 891 | const struct mdp_format *format; |
| 892 | uint32_t nplanes, config = 0; | 892 | uint32_t nplanes, config = 0; |
| 893 | struct phase_step step = { 0 }; | 893 | struct phase_step step = { { 0 } }; |
| 894 | struct pixel_ext pe = { 0 }; | 894 | struct pixel_ext pe = { { 0 } }; |
| 895 | uint32_t hdecm = 0, vdecm = 0; | 895 | uint32_t hdecm = 0, vdecm = 0; |
| 896 | uint32_t pix_format; | 896 | uint32_t pix_format; |
| 897 | unsigned int rotation; | 897 | unsigned int rotation; |
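The double braces here and in blend_setup() above are purely about initializer shape: for a struct containing an array, or a two-dimensional array, each aggregate level gets its own brace pair, with everything not mentioned still zero-initialized, so behaviour is unchanged and -Wmissing-braces stays quiet. A standalone illustration with hypothetical types:

struct phase { unsigned int value[2]; };   /* hypothetical, mirrors a struct holding arrays */
enum pipe { PIPE_NONE, PIPE_A, PIPE_B };   /* hypothetical enum */

int main(void)
{
        /* Struct containing an array: outer braces for the struct,
         * inner braces for the array member. */
        struct phase step = { { 0 } };

        /* Two-dimensional array: outer braces for the array of rows,
         * inner braces for row 0; all other elements are zeroed. */
        enum pipe stage[3][2] = { { PIPE_NONE } };

        return step.value[1] + stage[2][1];   /* 0 */
}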
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 65f35544c1ec..a0c60e738db8 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj, | |||
| 383 | struct page **pages; | 383 | struct page **pages; |
| 384 | 384 | ||
| 385 | vma = add_vma(obj, aspace); | 385 | vma = add_vma(obj, aspace); |
| 386 | if (IS_ERR(vma)) | 386 | if (IS_ERR(vma)) { |
| 387 | return PTR_ERR(vma); | 387 | ret = PTR_ERR(vma); |
| 388 | goto unlock; | ||
| 389 | } | ||
| 388 | 390 | ||
| 389 | pages = get_pages(obj); | 391 | pages = get_pages(obj); |
| 390 | if (IS_ERR(pages)) { | 392 | if (IS_ERR(pages)) { |
| @@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, | |||
| 405 | 407 | ||
| 406 | fail: | 408 | fail: |
| 407 | del_vma(vma); | 409 | del_vma(vma); |
| 408 | 410 | unlock: | |
| 409 | mutex_unlock(&msm_obj->lock); | 411 | mutex_unlock(&msm_obj->lock); |
| 410 | return ret; | 412 | return ret; |
| 411 | } | 413 | } |
| @@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, | |||
| 928 | if (use_vram) { | 930 | if (use_vram) { |
| 929 | struct msm_gem_vma *vma; | 931 | struct msm_gem_vma *vma; |
| 930 | struct page **pages; | 932 | struct page **pages; |
| 933 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
| 934 | |||
| 935 | mutex_lock(&msm_obj->lock); | ||
| 931 | 936 | ||
| 932 | vma = add_vma(obj, NULL); | 937 | vma = add_vma(obj, NULL); |
| 938 | mutex_unlock(&msm_obj->lock); | ||
| 933 | if (IS_ERR(vma)) { | 939 | if (IS_ERR(vma)) { |
| 934 | ret = PTR_ERR(vma); | 940 | ret = PTR_ERR(vma); |
| 935 | goto fail; | 941 | goto fail; |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 6bfca7470141..8a75c0bd8a78 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, | |||
| 34 | struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds) | 34 | struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds) |
| 35 | { | 35 | { |
| 36 | struct msm_gem_submit *submit; | 36 | struct msm_gem_submit *submit; |
| 37 | uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) + | 37 | uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) + |
| 38 | (nr_cmds * sizeof(submit->cmd[0])); | 38 | ((u64)nr_cmds * sizeof(submit->cmd[0])); |
| 39 | 39 | ||
| 40 | if (sz > SIZE_MAX) | 40 | if (sz > SIZE_MAX) |
| 41 | return NULL; | 41 | return NULL; |
| @@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 451 | if (ret) | 451 | if (ret) |
| 452 | goto out; | 452 | goto out; |
| 453 | 453 | ||
| 454 | if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) { | 454 | if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { |
| 455 | ret = submit_fence_sync(submit); | 455 | ret = submit_fence_sync(submit); |
| 456 | if (ret) | 456 | if (ret) |
| 457 | goto out; | 457 | goto out; |
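In the first hunk above, the (u64) casts matter because nr_bos and nr_cmds are 32-bit and user-controlled: without widening, the products are computed in 32-bit arithmetic and can wrap to a small value before the SIZE_MAX check ever sees them. A minimal userspace demonstration of the wrap (counts chosen for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t nr_bos = 0x20000000;   /* hypothetical user-supplied count */
        uint32_t elem   = 16;           /* hypothetical per-entry size */

        /* 32-bit multiply wraps: 0x20000000 * 16 == 0 modulo 2^32. */
        uint64_t wrapped = (uint64_t)(nr_bos * elem);

        /* Widen before multiplying, as the fix does: no wrap. */
        uint64_t correct = (uint64_t)nr_bos * elem;

        printf("wrapped=%llu correct=%llu\n",
               (unsigned long long)wrapped, (unsigned long long)correct);
        return 0;
}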
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index c36321bc8714..d34e331554f3 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c | |||
| @@ -42,7 +42,7 @@ void | |||
| 42 | msm_gem_unmap_vma(struct msm_gem_address_space *aspace, | 42 | msm_gem_unmap_vma(struct msm_gem_address_space *aspace, |
| 43 | struct msm_gem_vma *vma, struct sg_table *sgt) | 43 | struct msm_gem_vma *vma, struct sg_table *sgt) |
| 44 | { | 44 | { |
| 45 | if (!vma->iova) | 45 | if (!aspace || !vma->iova) |
| 46 | return; | 46 | return; |
| 47 | 47 | ||
| 48 | if (aspace->mmu) { | 48 | if (aspace->mmu) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 147b22163f9f..dab78c660dd6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
| @@ -1158,8 +1158,6 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg) | |||
| 1158 | return -ENODEV; | 1158 | return -ENODEV; |
| 1159 | if (WARN_ON(msg->size > 16)) | 1159 | if (WARN_ON(msg->size > 16)) |
| 1160 | return -E2BIG; | 1160 | return -E2BIG; |
| 1161 | if (msg->size == 0) | ||
| 1162 | return msg->size; | ||
| 1163 | 1161 | ||
| 1164 | ret = nvkm_i2c_aux_acquire(aux); | 1162 | ret = nvkm_i2c_aux_acquire(aux); |
| 1165 | if (ret) | 1163 | if (ret) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 8d1df5678eaa..f362c9fa8b3b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
| @@ -409,7 +409,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) | |||
| 409 | struct nouveau_display *disp = nouveau_display(dev); | 409 | struct nouveau_display *disp = nouveau_display(dev); |
| 410 | struct nouveau_drm *drm = nouveau_drm(dev); | 410 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 411 | struct drm_connector *connector; | 411 | struct drm_connector *connector; |
| 412 | struct drm_crtc *crtc; | ||
| 413 | 412 | ||
| 414 | if (!suspend) { | 413 | if (!suspend) { |
| 415 | if (drm_drv_uses_atomic_modeset(dev)) | 414 | if (drm_drv_uses_atomic_modeset(dev)) |
| @@ -418,10 +417,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend) | |||
| 418 | drm_crtc_force_disable_all(dev); | 417 | drm_crtc_force_disable_all(dev); |
| 419 | } | 418 | } |
| 420 | 419 | ||
| 421 | /* Make sure that drm and hw vblank irqs get properly disabled. */ | ||
| 422 | drm_for_each_crtc(crtc, dev) | ||
| 423 | drm_crtc_vblank_off(crtc); | ||
| 424 | |||
| 425 | /* disable flip completion events */ | 420 | /* disable flip completion events */ |
| 426 | nvif_notify_put(&drm->flip); | 421 | nvif_notify_put(&drm->flip); |
| 427 | 422 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index e3132a2ce34d..2bc0dc985214 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
| @@ -3674,15 +3674,24 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) | |||
| 3674 | drm_mode_connector_attach_encoder(connector, encoder); | 3674 | drm_mode_connector_attach_encoder(connector, encoder); |
| 3675 | 3675 | ||
| 3676 | if (dcbe->type == DCB_OUTPUT_DP) { | 3676 | if (dcbe->type == DCB_OUTPUT_DP) { |
| 3677 | struct nv50_disp *disp = nv50_disp(encoder->dev); | ||
| 3677 | struct nvkm_i2c_aux *aux = | 3678 | struct nvkm_i2c_aux *aux = |
| 3678 | nvkm_i2c_aux_find(i2c, dcbe->i2c_index); | 3679 | nvkm_i2c_aux_find(i2c, dcbe->i2c_index); |
| 3679 | if (aux) { | 3680 | if (aux) { |
| 3680 | nv_encoder->i2c = &nv_connector->aux.ddc; | 3681 | if (disp->disp->oclass < GF110_DISP) { |
| 3682 | /* HW has no support for address-only | ||
| 3683 | * transactions, so we're required to | ||
| 3684 | * use custom I2C-over-AUX code. | ||
| 3685 | */ | ||
| 3686 | nv_encoder->i2c = &aux->i2c; | ||
| 3687 | } else { | ||
| 3688 | nv_encoder->i2c = &nv_connector->aux.ddc; | ||
| 3689 | } | ||
| 3681 | nv_encoder->aux = aux; | 3690 | nv_encoder->aux = aux; |
| 3682 | } | 3691 | } |
| 3683 | 3692 | ||
| 3684 | /*TODO: Use DP Info Table to check for support. */ | 3693 | /*TODO: Use DP Info Table to check for support. */ |
| 3685 | if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) { | 3694 | if (disp->disp->oclass >= GF110_DISP) { |
| 3686 | ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16, | 3695 | ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16, |
| 3687 | nv_connector->base.base.id, | 3696 | nv_connector->base.base.id, |
| 3688 | &nv_encoder->dp.mstm); | 3697 | &nv_encoder->dp.mstm); |
| @@ -3931,6 +3940,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 3931 | 3940 | ||
| 3932 | NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name, | 3941 | NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name, |
| 3933 | asyh->clr.mask, asyh->set.mask); | 3942 | asyh->clr.mask, asyh->set.mask); |
| 3943 | if (crtc_state->active && !asyh->state.active) | ||
| 3944 | drm_crtc_vblank_off(crtc); | ||
| 3934 | 3945 | ||
| 3935 | if (asyh->clr.mask) { | 3946 | if (asyh->clr.mask) { |
| 3936 | nv50_head_flush_clr(head, asyh, atom->flush_disable); | 3947 | nv50_head_flush_clr(head, asyh, atom->flush_disable); |
| @@ -4016,11 +4027,13 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 4016 | nv50_head_flush_set(head, asyh); | 4027 | nv50_head_flush_set(head, asyh); |
| 4017 | interlock_core = 1; | 4028 | interlock_core = 1; |
| 4018 | } | 4029 | } |
| 4019 | } | ||
| 4020 | 4030 | ||
| 4021 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 4031 | if (asyh->state.active) { |
| 4022 | if (crtc->state->event) | 4032 | if (!crtc_state->active) |
| 4023 | drm_crtc_vblank_get(crtc); | 4033 | drm_crtc_vblank_on(crtc); |
| 4034 | if (asyh->state.event) | ||
| 4035 | drm_crtc_vblank_get(crtc); | ||
| 4036 | } | ||
| 4024 | } | 4037 | } |
| 4025 | 4038 | ||
| 4026 | /* Update plane(s). */ | 4039 | /* Update plane(s). */ |
| @@ -4067,12 +4080,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 4067 | if (crtc->state->event) { | 4080 | if (crtc->state->event) { |
| 4068 | unsigned long flags; | 4081 | unsigned long flags; |
| 4069 | /* Get correct count/ts if racing with vblank irq */ | 4082 | /* Get correct count/ts if racing with vblank irq */ |
| 4070 | drm_accurate_vblank_count(crtc); | 4083 | if (crtc->state->active) |
| 4084 | drm_accurate_vblank_count(crtc); | ||
| 4071 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | 4085 | spin_lock_irqsave(&crtc->dev->event_lock, flags); |
| 4072 | drm_crtc_send_vblank_event(crtc, crtc->state->event); | 4086 | drm_crtc_send_vblank_event(crtc, crtc->state->event); |
| 4073 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | 4087 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); |
| 4074 | crtc->state->event = NULL; | 4088 | crtc->state->event = NULL; |
| 4075 | drm_crtc_vblank_put(crtc); | 4089 | if (crtc->state->active) |
| 4090 | drm_crtc_vblank_put(crtc); | ||
| 4076 | } | 4091 | } |
| 4077 | } | 4092 | } |
| 4078 | 4093 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c index c7c84d34d97e..88582af8bd89 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | |||
| @@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine) | |||
| 267 | /* Create output path objects for each VBIOS display path. */ | 267 | /* Create output path objects for each VBIOS display path. */ |
| 268 | i = -1; | 268 | i = -1; |
| 269 | while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) { | 269 | while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) { |
| 270 | if (ver < 0x40) /* No support for chipsets prior to NV50. */ | ||
| 271 | break; | ||
| 270 | if (dcbE.type == DCB_OUTPUT_UNUSED) | 272 | if (dcbE.type == DCB_OUTPUT_UNUSED) |
| 271 | continue; | 273 | continue; |
| 272 | if (dcbE.type == DCB_OUTPUT_EOL) | 274 | if (dcbE.type == DCB_OUTPUT_EOL) |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h index a24312fb0228..a1e8bf48b778 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | |||
| @@ -22,6 +22,7 @@ struct nvkm_ior { | |||
| 22 | unsigned proto_evo:4; | 22 | unsigned proto_evo:4; |
| 23 | enum nvkm_ior_proto { | 23 | enum nvkm_ior_proto { |
| 24 | CRT, | 24 | CRT, |
| 25 | TV, | ||
| 25 | TMDS, | 26 | TMDS, |
| 26 | LVDS, | 27 | LVDS, |
| 27 | DP, | 28 | DP, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h index 19c635663399..6ea19466f436 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h | |||
| @@ -22,7 +22,7 @@ struct nv50_disp { | |||
| 22 | u8 type[3]; | 22 | u8 type[3]; |
| 23 | } pior; | 23 | } pior; |
| 24 | 24 | ||
| 25 | struct nv50_disp_chan *chan[17]; | 25 | struct nv50_disp_chan *chan[21]; |
| 26 | }; | 26 | }; |
| 27 | 27 | ||
| 28 | void nv50_disp_super_1(struct nv50_disp *); | 28 | void nv50_disp_super_1(struct nv50_disp *); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c index 85aff85394ac..be9e7f8c3b23 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | |||
| @@ -62,6 +62,7 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type) | |||
| 62 | case 0: | 62 | case 0: |
| 63 | switch (outp->info.type) { | 63 | switch (outp->info.type) { |
| 64 | case DCB_OUTPUT_ANALOG: *type = DAC; return CRT; | 64 | case DCB_OUTPUT_ANALOG: *type = DAC; return CRT; |
| 65 | case DCB_OUTPUT_TV : *type = DAC; return TV; | ||
| 65 | case DCB_OUTPUT_TMDS : *type = SOR; return TMDS; | 66 | case DCB_OUTPUT_TMDS : *type = SOR; return TMDS; |
| 66 | case DCB_OUTPUT_LVDS : *type = SOR; return LVDS; | 67 | case DCB_OUTPUT_LVDS : *type = SOR; return LVDS; |
| 67 | case DCB_OUTPUT_DP : *type = SOR; return DP; | 68 | case DCB_OUTPUT_DP : *type = SOR; return DP; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c index c794b2c2d21e..6d8f21290aa2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c | |||
| @@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base) | |||
| 129 | 129 | ||
| 130 | if (bar->bar[0].mem) { | 130 | if (bar->bar[0].mem) { |
| 131 | addr = nvkm_memory_addr(bar->bar[0].mem) >> 12; | 131 | addr = nvkm_memory_addr(bar->bar[0].mem) >> 12; |
| 132 | nvkm_wr32(device, 0x001714, 0xc0000000 | addr); | 132 | nvkm_wr32(device, 0x001714, 0x80000000 | addr); |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | return 0; | 135 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild index 48f01e40b8fc..b768e66a472b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild | |||
| @@ -25,6 +25,7 @@ nvkm-y += nvkm/subdev/i2c/bit.o | |||
| 25 | 25 | ||
| 26 | nvkm-y += nvkm/subdev/i2c/aux.o | 26 | nvkm-y += nvkm/subdev/i2c/aux.o |
| 27 | nvkm-y += nvkm/subdev/i2c/auxg94.o | 27 | nvkm-y += nvkm/subdev/i2c/auxg94.o |
| 28 | nvkm-y += nvkm/subdev/i2c/auxgf119.o | ||
| 28 | nvkm-y += nvkm/subdev/i2c/auxgm200.o | 29 | nvkm-y += nvkm/subdev/i2c/auxgm200.o |
| 29 | 30 | ||
| 30 | nvkm-y += nvkm/subdev/i2c/anx9805.o | 31 | nvkm-y += nvkm/subdev/i2c/anx9805.o |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c index d172e42dd228..4c1f547da463 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c | |||
| @@ -117,6 +117,10 @@ int | |||
| 117 | nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type, | 117 | nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type, |
| 118 | u32 addr, u8 *data, u8 *size) | 118 | u32 addr, u8 *data, u8 *size) |
| 119 | { | 119 | { |
| 120 | if (!*size && !aux->func->address_only) { | ||
| 121 | AUX_ERR(aux, "address-only transaction dropped"); | ||
| 122 | return -ENOSYS; | ||
| 123 | } | ||
| 120 | return aux->func->xfer(aux, retry, type, addr, data, size); | 124 | return aux->func->xfer(aux, retry, type, addr, data, size); |
| 121 | } | 125 | } |
| 122 | 126 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h index 27a4a39c87f0..9587ab456d9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | #include "pad.h" | 3 | #include "pad.h" |
| 4 | 4 | ||
| 5 | struct nvkm_i2c_aux_func { | 5 | struct nvkm_i2c_aux_func { |
| 6 | bool address_only; | ||
| 6 | int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type, | 7 | int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type, |
| 7 | u32 addr, u8 *data, u8 *size); | 8 | u32 addr, u8 *data, u8 *size); |
| 8 | int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw, | 9 | int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw, |
| @@ -17,7 +18,12 @@ void nvkm_i2c_aux_del(struct nvkm_i2c_aux **); | |||
| 17 | int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type, | 18 | int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type, |
| 18 | u32 addr, u8 *data, u8 *size); | 19 | u32 addr, u8 *data, u8 *size); |
| 19 | 20 | ||
| 21 | int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *, | ||
| 22 | int, u8, struct nvkm_i2c_aux **); | ||
| 23 | |||
| 20 | int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); | 24 | int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); |
| 25 | int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *); | ||
| 26 | int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); | ||
| 21 | int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); | 27 | int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); |
| 22 | 28 | ||
| 23 | #define AUX_MSG(b,l,f,a...) do { \ | 29 | #define AUX_MSG(b,l,f,a...) do { \ |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c index ab8cb196c34e..c8ab1b5741a3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c | |||
| @@ -72,7 +72,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux) | |||
| 72 | return 0; | 72 | return 0; |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | static int | 75 | int |
| 76 | g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, | 76 | g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, |
| 77 | u8 type, u32 addr, u8 *data, u8 *size) | 77 | u8 type, u32 addr, u8 *data, u8 *size) |
| 78 | { | 78 | { |
| @@ -105,9 +105,9 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, | |||
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | ctrl = nvkm_rd32(device, 0x00e4e4 + base); | 107 | ctrl = nvkm_rd32(device, 0x00e4e4 + base); |
| 108 | ctrl &= ~0x0001f0ff; | 108 | ctrl &= ~0x0001f1ff; |
| 109 | ctrl |= type << 12; | 109 | ctrl |= type << 12; |
| 110 | ctrl |= *size - 1; | 110 | ctrl |= (*size ? (*size - 1) : 0x00000100); |
| 111 | nvkm_wr32(device, 0x00e4e0 + base, addr); | 111 | nvkm_wr32(device, 0x00e4e0 + base, addr); |
| 112 | 112 | ||
| 113 | /* (maybe) retry transaction a number of times on failure... */ | 113 | /* (maybe) retry transaction a number of times on failure... */ |
| @@ -160,14 +160,10 @@ out: | |||
| 160 | return ret < 0 ? ret : (stat & 0x000f0000) >> 16; | 160 | return ret < 0 ? ret : (stat & 0x000f0000) >> 16; |
| 161 | } | 161 | } |
| 162 | 162 | ||
| 163 | static const struct nvkm_i2c_aux_func | ||
| 164 | g94_i2c_aux_func = { | ||
| 165 | .xfer = g94_i2c_aux_xfer, | ||
| 166 | }; | ||
| 167 | |||
| 168 | int | 163 | int |
| 169 | g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, | 164 | g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func, |
| 170 | struct nvkm_i2c_aux **paux) | 165 | struct nvkm_i2c_pad *pad, int index, u8 drive, |
| 166 | struct nvkm_i2c_aux **paux) | ||
| 171 | { | 167 | { |
| 172 | struct g94_i2c_aux *aux; | 168 | struct g94_i2c_aux *aux; |
| 173 | 169 | ||
| @@ -175,8 +171,20 @@ g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, | |||
| 175 | return -ENOMEM; | 171 | return -ENOMEM; |
| 176 | *paux = &aux->base; | 172 | *paux = &aux->base; |
| 177 | 173 | ||
| 178 | nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base); | 174 | nvkm_i2c_aux_ctor(func, pad, index, &aux->base); |
| 179 | aux->ch = drive; | 175 | aux->ch = drive; |
| 180 | aux->base.intr = 1 << aux->ch; | 176 | aux->base.intr = 1 << aux->ch; |
| 181 | return 0; | 177 | return 0; |
| 182 | } | 178 | } |
| 179 | |||
| 180 | static const struct nvkm_i2c_aux_func | ||
| 181 | g94_i2c_aux = { | ||
| 182 | .xfer = g94_i2c_aux_xfer, | ||
| 183 | }; | ||
| 184 | |||
| 185 | int | ||
| 186 | g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, | ||
| 187 | struct nvkm_i2c_aux **paux) | ||
| 188 | { | ||
| 189 | return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux); | ||
| 190 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c new file mode 100644 index 000000000000..dab40cd8fe3a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c | |||
| @@ -0,0 +1,35 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2017 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | #include "aux.h" | ||
| 23 | |||
| 24 | static const struct nvkm_i2c_aux_func | ||
| 25 | gf119_i2c_aux = { | ||
| 26 | .address_only = true, | ||
| 27 | .xfer = g94_i2c_aux_xfer, | ||
| 28 | }; | ||
| 29 | |||
| 30 | int | ||
| 31 | gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, | ||
| 32 | struct nvkm_i2c_aux **paux) | ||
| 33 | { | ||
| 34 | return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux); | ||
| 35 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c index ee091fa79628..7ef60895f43a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c | |||
| @@ -105,9 +105,9 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, | |||
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | ctrl = nvkm_rd32(device, 0x00d954 + base); | 107 | ctrl = nvkm_rd32(device, 0x00d954 + base); |
| 108 | ctrl &= ~0x0001f0ff; | 108 | ctrl &= ~0x0001f1ff; |
| 109 | ctrl |= type << 12; | 109 | ctrl |= type << 12; |
| 110 | ctrl |= *size - 1; | 110 | ctrl |= (*size ? (*size - 1) : 0x00000100); |
| 111 | nvkm_wr32(device, 0x00d950 + base, addr); | 111 | nvkm_wr32(device, 0x00d950 + base, addr); |
| 112 | 112 | ||
| 113 | /* (maybe) retry transaction a number of times on failure... */ | 113 | /* (maybe) retry transaction a number of times on failure... */ |
| @@ -162,6 +162,7 @@ out: | |||
| 162 | 162 | ||
| 163 | static const struct nvkm_i2c_aux_func | 163 | static const struct nvkm_i2c_aux_func |
| 164 | gm200_i2c_aux_func = { | 164 | gm200_i2c_aux_func = { |
| 165 | .address_only = true, | ||
| 165 | .xfer = gm200_i2c_aux_xfer, | 166 | .xfer = gm200_i2c_aux_xfer, |
| 166 | }; | 167 | }; |
| 167 | 168 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c index d53212f1aa52..3bc4d0310076 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c | |||
| @@ -28,7 +28,7 @@ | |||
| 28 | static const struct nvkm_i2c_pad_func | 28 | static const struct nvkm_i2c_pad_func |
| 29 | gf119_i2c_pad_s_func = { | 29 | gf119_i2c_pad_s_func = { |
| 30 | .bus_new_4 = gf119_i2c_bus_new, | 30 | .bus_new_4 = gf119_i2c_bus_new, |
| 31 | .aux_new_6 = g94_i2c_aux_new, | 31 | .aux_new_6 = gf119_i2c_aux_new, |
| 32 | .mode = g94_i2c_pad_mode, | 32 | .mode = g94_i2c_pad_mode, |
| 33 | }; | 33 | }; |
| 34 | 34 | ||
| @@ -41,7 +41,7 @@ gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad) | |||
| 41 | static const struct nvkm_i2c_pad_func | 41 | static const struct nvkm_i2c_pad_func |
| 42 | gf119_i2c_pad_x_func = { | 42 | gf119_i2c_pad_x_func = { |
| 43 | .bus_new_4 = gf119_i2c_bus_new, | 43 | .bus_new_4 = gf119_i2c_bus_new, |
| 44 | .aux_new_6 = g94_i2c_aux_new, | 44 | .aux_new_6 = gf119_i2c_aux_new, |
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | int | 47 | int |
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 50c41c0a50ef..dcc539ba85d6 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig | |||
| @@ -5,6 +5,10 @@ config DRM_ROCKCHIP | |||
| 5 | select DRM_KMS_HELPER | 5 | select DRM_KMS_HELPER |
| 6 | select DRM_PANEL | 6 | select DRM_PANEL |
| 7 | select VIDEOMODE_HELPERS | 7 | select VIDEOMODE_HELPERS |
| 8 | select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP | ||
| 9 | select DRM_DW_HDMI if ROCKCHIP_DW_HDMI | ||
| 10 | select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI | ||
| 11 | select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC | ||
| 8 | help | 12 | help |
| 9 | Choose this option if you have a Rockchip soc chipset. | 13 | Choose this option if you have a Rockchip soc chipset. |
| 10 | This driver provides kernel mode setting and buffer | 14 | This driver provides kernel mode setting and buffer |
| @@ -12,10 +16,10 @@ config DRM_ROCKCHIP | |||
| 12 | 2D or 3D acceleration; acceleration is performed by other | 16 | 2D or 3D acceleration; acceleration is performed by other |
| 13 | IP found on the SoC. | 17 | IP found on the SoC. |
| 14 | 18 | ||
| 19 | if DRM_ROCKCHIP | ||
| 20 | |||
| 15 | config ROCKCHIP_ANALOGIX_DP | 21 | config ROCKCHIP_ANALOGIX_DP |
| 16 | bool "Rockchip specific extensions for Analogix DP driver" | 22 | bool "Rockchip specific extensions for Analogix DP driver" |
| 17 | depends on DRM_ROCKCHIP | ||
| 18 | select DRM_ANALOGIX_DP | ||
| 19 | help | 23 | help |
| 20 | This selects support for Rockchip SoC specific extensions | 24 | This selects support for Rockchip SoC specific extensions |
| 21 | for the Analogix Core DP driver. If you want to enable DP | 25 | for the Analogix Core DP driver. If you want to enable DP |
| @@ -23,9 +27,7 @@ config ROCKCHIP_ANALOGIX_DP | |||
| 23 | 27 | ||
| 24 | config ROCKCHIP_CDN_DP | 28 | config ROCKCHIP_CDN_DP |
| 25 | bool "Rockchip cdn DP" | 29 | bool "Rockchip cdn DP" |
| 26 | depends on DRM_ROCKCHIP | 30 | depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m) |
| 27 | depends on EXTCON | ||
| 28 | select SND_SOC_HDMI_CODEC if SND_SOC | ||
| 29 | help | 31 | help |
| 30 | This selects support for Rockchip SoC specific extensions | 32 | This selects support for Rockchip SoC specific extensions |
| 31 | for the cdn DP driver. If you want to enable Dp on | 33 | for the cdn DP driver. If you want to enable Dp on |
| @@ -34,8 +36,6 @@ config ROCKCHIP_CDN_DP | |||
| 34 | 36 | ||
| 35 | config ROCKCHIP_DW_HDMI | 37 | config ROCKCHIP_DW_HDMI |
| 36 | bool "Rockchip specific extensions for Synopsys DW HDMI" | 38 | bool "Rockchip specific extensions for Synopsys DW HDMI" |
| 37 | depends on DRM_ROCKCHIP | ||
| 38 | select DRM_DW_HDMI | ||
| 39 | help | 39 | help |
| 40 | This selects support for Rockchip SoC specific extensions | 40 | This selects support for Rockchip SoC specific extensions |
| 41 | for the Synopsys DesignWare HDMI driver. If you want to | 41 | for the Synopsys DesignWare HDMI driver. If you want to |
| @@ -44,8 +44,6 @@ config ROCKCHIP_DW_HDMI | |||
| 44 | 44 | ||
| 45 | config ROCKCHIP_DW_MIPI_DSI | 45 | config ROCKCHIP_DW_MIPI_DSI |
| 46 | bool "Rockchip specific extensions for Synopsys DW MIPI DSI" | 46 | bool "Rockchip specific extensions for Synopsys DW MIPI DSI" |
| 47 | depends on DRM_ROCKCHIP | ||
| 48 | select DRM_MIPI_DSI | ||
| 49 | help | 47 | help |
| 50 | This selects support for Rockchip SoC specific extensions | 48 | This selects support for Rockchip SoC specific extensions |
| 51 | for the Synopsys DesignWare HDMI driver. If you want to | 49 | for the Synopsys DesignWare HDMI driver. If you want to |
| @@ -54,8 +52,9 @@ config ROCKCHIP_DW_MIPI_DSI | |||
| 54 | 52 | ||
| 55 | config ROCKCHIP_INNO_HDMI | 53 | config ROCKCHIP_INNO_HDMI |
| 56 | bool "Rockchip specific extensions for Innosilicon HDMI" | 54 | bool "Rockchip specific extensions for Innosilicon HDMI" |
| 57 | depends on DRM_ROCKCHIP | ||
| 58 | help | 55 | help |
| 59 | This selects support for Rockchip SoC specific extensions | 56 | This selects support for Rockchip SoC specific extensions |
| 60 | for the Innosilicon HDMI driver. If you want to enable | 57 | for the Innosilicon HDMI driver. If you want to enable |
| 61 | HDMI on RK3036 based SoC, you should select this option. | 58 | HDMI on RK3036 based SoC, you should select this option. |
| 59 | |||
| 60 | endif | ||
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index c6b1b7f3a2a3..c16bc0a7115b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c | |||
| @@ -275,11 +275,15 @@ static void rockchip_drm_fb_resume(struct drm_device *drm) | |||
| 275 | static int rockchip_drm_sys_suspend(struct device *dev) | 275 | static int rockchip_drm_sys_suspend(struct device *dev) |
| 276 | { | 276 | { |
| 277 | struct drm_device *drm = dev_get_drvdata(dev); | 277 | struct drm_device *drm = dev_get_drvdata(dev); |
| 278 | struct rockchip_drm_private *priv = drm->dev_private; | 278 | struct rockchip_drm_private *priv; |
| 279 | |||
| 280 | if (!drm) | ||
| 281 | return 0; | ||
| 279 | 282 | ||
| 280 | drm_kms_helper_poll_disable(drm); | 283 | drm_kms_helper_poll_disable(drm); |
| 281 | rockchip_drm_fb_suspend(drm); | 284 | rockchip_drm_fb_suspend(drm); |
| 282 | 285 | ||
| 286 | priv = drm->dev_private; | ||
| 283 | priv->state = drm_atomic_helper_suspend(drm); | 287 | priv->state = drm_atomic_helper_suspend(drm); |
| 284 | if (IS_ERR(priv->state)) { | 288 | if (IS_ERR(priv->state)) { |
| 285 | rockchip_drm_fb_resume(drm); | 289 | rockchip_drm_fb_resume(drm); |
| @@ -293,8 +297,12 @@ static int rockchip_drm_sys_suspend(struct device *dev) | |||
| 293 | static int rockchip_drm_sys_resume(struct device *dev) | 297 | static int rockchip_drm_sys_resume(struct device *dev) |
| 294 | { | 298 | { |
| 295 | struct drm_device *drm = dev_get_drvdata(dev); | 299 | struct drm_device *drm = dev_get_drvdata(dev); |
| 296 | struct rockchip_drm_private *priv = drm->dev_private; | 300 | struct rockchip_drm_private *priv; |
| 301 | |||
| 302 | if (!drm) | ||
| 303 | return 0; | ||
| 297 | 304 | ||
| 305 | priv = drm->dev_private; | ||
| 298 | drm_atomic_helper_resume(drm, priv->state); | 306 | drm_atomic_helper_resume(drm, priv->state); |
| 299 | rockchip_drm_fb_resume(drm); | 307 | rockchip_drm_fb_resume(drm); |
| 300 | drm_kms_helper_poll_enable(drm); | 308 | drm_kms_helper_poll_enable(drm); |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 5d450332c2fd..2900f1410d95 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c | |||
| @@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop) | |||
| 500 | static int vop_enable(struct drm_crtc *crtc) | 500 | static int vop_enable(struct drm_crtc *crtc) |
| 501 | { | 501 | { |
| 502 | struct vop *vop = to_vop(crtc); | 502 | struct vop *vop = to_vop(crtc); |
| 503 | int ret; | 503 | int ret, i; |
| 504 | 504 | ||
| 505 | ret = pm_runtime_get_sync(vop->dev); | 505 | ret = pm_runtime_get_sync(vop->dev); |
| 506 | if (ret < 0) { | 506 | if (ret < 0) { |
| @@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc) | |||
| 533 | } | 533 | } |
| 534 | 534 | ||
| 535 | memcpy(vop->regs, vop->regsbak, vop->len); | 535 | memcpy(vop->regs, vop->regsbak, vop->len); |
| 536 | /* | ||
| 537 | * We need to make sure that all windows are disabled before we | ||
| 538 | * enable the crtc. Otherwise we might try to scan from a destroyed | ||
| 539 | * buffer later. | ||
| 540 | */ | ||
| 541 | for (i = 0; i < vop->data->win_size; i++) { | ||
| 542 | struct vop_win *vop_win = &vop->win[i]; | ||
| 543 | const struct vop_win_data *win = vop_win->data; | ||
| 544 | |||
| 545 | spin_lock(&vop->reg_lock); | ||
| 546 | VOP_WIN_SET(vop, win, enable, 0); | ||
| 547 | spin_unlock(&vop->reg_lock); | ||
| 548 | } | ||
| 549 | |||
| 536 | vop_cfg_done(vop); | 550 | vop_cfg_done(vop); |
| 537 | 551 | ||
| 538 | /* | 552 | /* |
| @@ -566,28 +580,11 @@ err_put_pm_runtime: | |||
| 566 | static void vop_crtc_disable(struct drm_crtc *crtc) | 580 | static void vop_crtc_disable(struct drm_crtc *crtc) |
| 567 | { | 581 | { |
| 568 | struct vop *vop = to_vop(crtc); | 582 | struct vop *vop = to_vop(crtc); |
| 569 | int i; | ||
| 570 | 583 | ||
| 571 | WARN_ON(vop->event); | 584 | WARN_ON(vop->event); |
| 572 | 585 | ||
| 573 | rockchip_drm_psr_deactivate(&vop->crtc); | 586 | rockchip_drm_psr_deactivate(&vop->crtc); |
| 574 | 587 | ||
| 575 | /* | ||
| 576 | * We need to make sure that all windows are disabled before we | ||
| 577 | * disable that crtc. Otherwise we might try to scan from a destroyed | ||
| 578 | * buffer later. | ||
| 579 | */ | ||
| 580 | for (i = 0; i < vop->data->win_size; i++) { | ||
| 581 | struct vop_win *vop_win = &vop->win[i]; | ||
| 582 | const struct vop_win_data *win = vop_win->data; | ||
| 583 | |||
| 584 | spin_lock(&vop->reg_lock); | ||
| 585 | VOP_WIN_SET(vop, win, enable, 0); | ||
| 586 | spin_unlock(&vop->reg_lock); | ||
| 587 | } | ||
| 588 | |||
| 589 | vop_cfg_done(vop); | ||
| 590 | |||
| 591 | drm_crtc_vblank_off(crtc); | 588 | drm_crtc_vblank_off(crtc); |
| 592 | 589 | ||
| 593 | /* | 590 | /* |
| @@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane, | |||
| 682 | * Src.x1 can be odd when do clip, but yuv plane start point | 679 | * Src.x1 can be odd when do clip, but yuv plane start point |
| 683 | * need align with 2 pixel. | 680 | * need align with 2 pixel. |
| 684 | */ | 681 | */ |
| 685 | if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) | 682 | if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) { |
| 683 | DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n"); | ||
| 686 | return -EINVAL; | 684 | return -EINVAL; |
| 685 | } | ||
| 687 | 686 | ||
| 688 | return 0; | 687 | return 0; |
| 689 | } | 688 | } |
| @@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane, | |||
| 764 | spin_lock(&vop->reg_lock); | 763 | spin_lock(&vop->reg_lock); |
| 765 | 764 | ||
| 766 | VOP_WIN_SET(vop, win, format, format); | 765 | VOP_WIN_SET(vop, win, format, format); |
| 767 | VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2); | 766 | VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4)); |
| 768 | VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); | 767 | VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); |
| 769 | if (is_yuv_support(fb->format->format)) { | 768 | if (is_yuv_support(fb->format->format)) { |
| 770 | int hsub = drm_format_horz_chroma_subsampling(fb->format->format); | 769 | int hsub = drm_format_horz_chroma_subsampling(fb->format->format); |
| @@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane, | |||
| 778 | offset += (src->y1 >> 16) * fb->pitches[1] / vsub; | 777 | offset += (src->y1 >> 16) * fb->pitches[1] / vsub; |
| 779 | 778 | ||
| 780 | dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1]; | 779 | dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1]; |
| 781 | VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2); | 780 | VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4)); |
| 782 | VOP_WIN_SET(vop, win, uv_mst, dma_addr); | 781 | VOP_WIN_SET(vop, win, uv_mst, dma_addr); |
| 783 | } | 782 | } |
| 784 | 783 | ||
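Note on the yrgb_vir/uv_vir hunks above: fb->pitches[n] >> 2 is replaced by DIV_ROUND_UP(fb->pitches[n], 4), i.e. the pitch-in-words is now rounded up instead of down. A small standalone sketch (the macro is re-declared here with the kernel's usual definition) shows where the two differ; the pitch values are made up for the demo.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))  /* same rounding as the kernel macro */

    int main(void)
    {
            unsigned int pitches[] = { 1440, 1442 };    /* bytes per line; 1442 is not 4-aligned */
            int i;

            for (i = 0; i < 2; i++)
                    printf("pitch %u: shift -> %u words, DIV_ROUND_UP -> %u words\n",
                           pitches[i], pitches[i] >> 2, DIV_ROUND_UP(pitches[i], 4));
            return 0;
    }

Only pitches that are not a multiple of four bytes are affected; for those, the shift under-counts the virtual stride by one word.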
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index 9979fd0c2282..27eefbfcf3d0 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h | |||
| @@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h, | |||
| 282 | 282 | ||
| 283 | act_height = (src_h + vskiplines - 1) / vskiplines; | 283 | act_height = (src_h + vskiplines - 1) / vskiplines; |
| 284 | 284 | ||
| 285 | if (act_height == dst_h) | ||
| 286 | return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines; | ||
| 287 | |||
| 285 | return GET_SCL_FT_BILI_DN(act_height, dst_h); | 288 | return GET_SCL_FT_BILI_DN(act_height, dst_h); |
| 286 | } | 289 | } |
| 287 | 290 | ||
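Note on the scl_get_bili_dn_vskip() hunk above: a special case is added so that, when the line-skipped height already equals dst_h, the factor is computed from the full src_h and then divided by vskiplines rather than collapsing into a 1:1 factor. The sketch below only shows that the two paths can differ; the real GET_SCL_FT_BILI_DN macro is not visible in this diff, so a plain 20.12 fixed-point ratio is used as a stand-in and all values are made up.

    #include <stdio.h>

    /* Stand-in for GET_SCL_FT_BILI_DN: a simple 20.12 fixed-point ratio.
     * The real macro in rockchip_drm_vop.h may differ; this is only to
     * show the effect of the new special case.
     */
    static unsigned int scl_ft_bili_dn(unsigned int src, unsigned int dst)
    {
            return (src << 12) / dst;
    }

    static unsigned int bili_dn_vskip(unsigned int src_h, unsigned int dst_h,
                                      unsigned int vskiplines)
    {
            unsigned int act_height = (src_h + vskiplines - 1) / vskiplines;

            if (act_height == dst_h)                    /* new special case */
                    return scl_ft_bili_dn(src_h, dst_h) / vskiplines;

            return scl_ft_bili_dn(act_height, dst_h);
    }

    int main(void)
    {
            /* 1918 lines skipped by 4 rounds up to exactly 480: the old path
             * yields a plain 1:1 factor, the new path keeps the fraction. */
            printf("old: 0x%x  new: 0x%x\n",
                   scl_ft_bili_dn(480, 480), bili_dn_vskip(1918, 480, 4));
            return 0;
    }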
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig index 2c4817fb0890..8fe5b184b4e8 100644 --- a/drivers/gpu/drm/stm/Kconfig +++ b/drivers/gpu/drm/stm/Kconfig | |||
| @@ -7,7 +7,6 @@ config DRM_STM | |||
| 7 | select DRM_PANEL | 7 | select DRM_PANEL |
| 8 | select VIDEOMODE_HELPERS | 8 | select VIDEOMODE_HELPERS |
| 9 | select FB_PROVIDE_GET_FB_UNMAPPED_AREA | 9 | select FB_PROVIDE_GET_FB_UNMAPPED_AREA |
| 10 | default y | ||
| 11 | 10 | ||
| 12 | help | 11 | help |
| 13 | Enable support for the on-chip display controller on | 12 | Enable support for the on-chip display controller on |
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index abc7d8fe06b4..a45a627283a1 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c | |||
| @@ -25,12 +25,20 @@ | |||
| 25 | #include "sun4i_framebuffer.h" | 25 | #include "sun4i_framebuffer.h" |
| 26 | #include "sun4i_tcon.h" | 26 | #include "sun4i_tcon.h" |
| 27 | 27 | ||
| 28 | static void sun4i_drv_lastclose(struct drm_device *dev) | ||
| 29 | { | ||
| 30 | struct sun4i_drv *drv = dev->dev_private; | ||
| 31 | |||
| 32 | drm_fbdev_cma_restore_mode(drv->fbdev); | ||
| 33 | } | ||
| 34 | |||
| 28 | DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops); | 35 | DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops); |
| 29 | 36 | ||
| 30 | static struct drm_driver sun4i_drv_driver = { | 37 | static struct drm_driver sun4i_drv_driver = { |
| 31 | .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, | 38 | .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, |
| 32 | 39 | ||
| 33 | /* Generic Operations */ | 40 | /* Generic Operations */ |
| 41 | .lastclose = sun4i_drv_lastclose, | ||
| 34 | .fops = &sun4i_drv_fops, | 42 | .fops = &sun4i_drv_fops, |
| 35 | .name = "sun4i-drm", | 43 | .name = "sun4i-drm", |
| 36 | .desc = "Allwinner sun4i Display Engine", | 44 | .desc = "Allwinner sun4i Display Engine", |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 35bf781e418e..c7056322211c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | |||
| @@ -30,49 +30,49 @@ | |||
| 30 | #include <drm/ttm/ttm_placement.h> | 30 | #include <drm/ttm/ttm_placement.h> |
| 31 | #include <drm/ttm/ttm_page_alloc.h> | 31 | #include <drm/ttm/ttm_page_alloc.h> |
| 32 | 32 | ||
| 33 | static struct ttm_place vram_placement_flags = { | 33 | static const struct ttm_place vram_placement_flags = { |
| 34 | .fpfn = 0, | 34 | .fpfn = 0, |
| 35 | .lpfn = 0, | 35 | .lpfn = 0, |
| 36 | .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | 36 | .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
| 37 | }; | 37 | }; |
| 38 | 38 | ||
| 39 | static struct ttm_place vram_ne_placement_flags = { | 39 | static const struct ttm_place vram_ne_placement_flags = { |
| 40 | .fpfn = 0, | 40 | .fpfn = 0, |
| 41 | .lpfn = 0, | 41 | .lpfn = 0, |
| 42 | .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT | 42 | .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT |
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | static struct ttm_place sys_placement_flags = { | 45 | static const struct ttm_place sys_placement_flags = { |
| 46 | .fpfn = 0, | 46 | .fpfn = 0, |
| 47 | .lpfn = 0, | 47 | .lpfn = 0, |
| 48 | .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | 48 | .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED |
| 49 | }; | 49 | }; |
| 50 | 50 | ||
| 51 | static struct ttm_place sys_ne_placement_flags = { | 51 | static const struct ttm_place sys_ne_placement_flags = { |
| 52 | .fpfn = 0, | 52 | .fpfn = 0, |
| 53 | .lpfn = 0, | 53 | .lpfn = 0, |
| 54 | .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT | 54 | .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT |
| 55 | }; | 55 | }; |
| 56 | 56 | ||
| 57 | static struct ttm_place gmr_placement_flags = { | 57 | static const struct ttm_place gmr_placement_flags = { |
| 58 | .fpfn = 0, | 58 | .fpfn = 0, |
| 59 | .lpfn = 0, | 59 | .lpfn = 0, |
| 60 | .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | 60 | .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
| 61 | }; | 61 | }; |
| 62 | 62 | ||
| 63 | static struct ttm_place gmr_ne_placement_flags = { | 63 | static const struct ttm_place gmr_ne_placement_flags = { |
| 64 | .fpfn = 0, | 64 | .fpfn = 0, |
| 65 | .lpfn = 0, | 65 | .lpfn = 0, |
| 66 | .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT | 66 | .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT |
| 67 | }; | 67 | }; |
| 68 | 68 | ||
| 69 | static struct ttm_place mob_placement_flags = { | 69 | static const struct ttm_place mob_placement_flags = { |
| 70 | .fpfn = 0, | 70 | .fpfn = 0, |
| 71 | .lpfn = 0, | 71 | .lpfn = 0, |
| 72 | .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | 72 | .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED |
| 73 | }; | 73 | }; |
| 74 | 74 | ||
| 75 | static struct ttm_place mob_ne_placement_flags = { | 75 | static const struct ttm_place mob_ne_placement_flags = { |
| 76 | .fpfn = 0, | 76 | .fpfn = 0, |
| 77 | .lpfn = 0, | 77 | .lpfn = 0, |
| 78 | .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT | 78 | .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT |
| @@ -85,7 +85,7 @@ struct ttm_placement vmw_vram_placement = { | |||
| 85 | .busy_placement = &vram_placement_flags | 85 | .busy_placement = &vram_placement_flags |
| 86 | }; | 86 | }; |
| 87 | 87 | ||
| 88 | static struct ttm_place vram_gmr_placement_flags[] = { | 88 | static const struct ttm_place vram_gmr_placement_flags[] = { |
| 89 | { | 89 | { |
| 90 | .fpfn = 0, | 90 | .fpfn = 0, |
| 91 | .lpfn = 0, | 91 | .lpfn = 0, |
| @@ -97,7 +97,7 @@ static struct ttm_place vram_gmr_placement_flags[] = { | |||
| 97 | } | 97 | } |
| 98 | }; | 98 | }; |
| 99 | 99 | ||
| 100 | static struct ttm_place gmr_vram_placement_flags[] = { | 100 | static const struct ttm_place gmr_vram_placement_flags[] = { |
| 101 | { | 101 | { |
| 102 | .fpfn = 0, | 102 | .fpfn = 0, |
| 103 | .lpfn = 0, | 103 | .lpfn = 0, |
| @@ -116,7 +116,7 @@ struct ttm_placement vmw_vram_gmr_placement = { | |||
| 116 | .busy_placement = &gmr_placement_flags | 116 | .busy_placement = &gmr_placement_flags |
| 117 | }; | 117 | }; |
| 118 | 118 | ||
| 119 | static struct ttm_place vram_gmr_ne_placement_flags[] = { | 119 | static const struct ttm_place vram_gmr_ne_placement_flags[] = { |
| 120 | { | 120 | { |
| 121 | .fpfn = 0, | 121 | .fpfn = 0, |
| 122 | .lpfn = 0, | 122 | .lpfn = 0, |
| @@ -165,7 +165,7 @@ struct ttm_placement vmw_sys_ne_placement = { | |||
| 165 | .busy_placement = &sys_ne_placement_flags | 165 | .busy_placement = &sys_ne_placement_flags |
| 166 | }; | 166 | }; |
| 167 | 167 | ||
| 168 | static struct ttm_place evictable_placement_flags[] = { | 168 | static const struct ttm_place evictable_placement_flags[] = { |
| 169 | { | 169 | { |
| 170 | .fpfn = 0, | 170 | .fpfn = 0, |
| 171 | .lpfn = 0, | 171 | .lpfn = 0, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 99a7f4ab7d97..86178796de6c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | |||
| @@ -779,8 +779,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, | |||
| 779 | if (ret) | 779 | if (ret) |
| 780 | return ret; | 780 | return ret; |
| 781 | 781 | ||
| 782 | header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL, | 782 | header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL, |
| 783 | &header->handle); | 783 | &header->handle); |
| 784 | if (!header->cb_header) { | 784 | if (!header->cb_header) { |
| 785 | ret = -ENOMEM; | 785 | ret = -ENOMEM; |
| 786 | goto out_no_cb_header; | 786 | goto out_no_cb_header; |
| @@ -790,7 +790,6 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, | |||
| 790 | cb_hdr = header->cb_header; | 790 | cb_hdr = header->cb_header; |
| 791 | offset = header->node.start << PAGE_SHIFT; | 791 | offset = header->node.start << PAGE_SHIFT; |
| 792 | header->cmd = man->map + offset; | 792 | header->cmd = man->map + offset; |
| 793 | memset(cb_hdr, 0, sizeof(*cb_hdr)); | ||
| 794 | if (man->using_mob) { | 793 | if (man->using_mob) { |
| 795 | cb_hdr->flags = SVGA_CB_FLAG_MOB; | 794 | cb_hdr->flags = SVGA_CB_FLAG_MOB; |
| 796 | cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; | 795 | cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; |
| @@ -827,8 +826,8 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, | |||
| 827 | if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE)) | 826 | if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE)) |
| 828 | return -ENOMEM; | 827 | return -ENOMEM; |
| 829 | 828 | ||
| 830 | dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL, | 829 | dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL, |
| 831 | &header->handle); | 830 | &header->handle); |
| 832 | if (!dheader) | 831 | if (!dheader) |
| 833 | return -ENOMEM; | 832 | return -ENOMEM; |
| 834 | 833 | ||
| @@ -837,7 +836,6 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, | |||
| 837 | cb_hdr = &dheader->cb_header; | 836 | cb_hdr = &dheader->cb_header; |
| 838 | header->cb_header = cb_hdr; | 837 | header->cb_header = cb_hdr; |
| 839 | header->cmd = dheader->cmd; | 838 | header->cmd = dheader->cmd; |
| 840 | memset(dheader, 0, sizeof(*dheader)); | ||
| 841 | cb_hdr->status = SVGA_CB_STATUS_NONE; | 839 | cb_hdr->status = SVGA_CB_STATUS_NONE; |
| 842 | cb_hdr->flags = SVGA_CB_FLAG_NONE; | 840 | cb_hdr->flags = SVGA_CB_FLAG_NONE; |
| 843 | cb_hdr->ptr.pa = (u64)header->handle + | 841 | cb_hdr->ptr.pa = (u64)header->handle + |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 1f013d45c9e9..36c7b6c839c0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | |||
| @@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man, | |||
| 205 | int ret; | 205 | int ret; |
| 206 | 206 | ||
| 207 | cres = kzalloc(sizeof(*cres), GFP_KERNEL); | 207 | cres = kzalloc(sizeof(*cres), GFP_KERNEL); |
| 208 | if (unlikely(cres == NULL)) | 208 | if (unlikely(!cres)) |
| 209 | return -ENOMEM; | 209 | return -ENOMEM; |
| 210 | 210 | ||
| 211 | cres->hash.key = user_key | (res_type << 24); | 211 | cres->hash.key = user_key | (res_type << 24); |
| @@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv) | |||
| 291 | int ret; | 291 | int ret; |
| 292 | 292 | ||
| 293 | man = kzalloc(sizeof(*man), GFP_KERNEL); | 293 | man = kzalloc(sizeof(*man), GFP_KERNEL); |
| 294 | if (man == NULL) | 294 | if (!man) |
| 295 | return ERR_PTR(-ENOMEM); | 295 | return ERR_PTR(-ENOMEM); |
| 296 | 296 | ||
| 297 | man->dev_priv = dev_priv; | 297 | man->dev_priv = dev_priv; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index bcc6d4136c87..4212b3e673bc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c | |||
| @@ -210,8 +210,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv, | |||
| 210 | for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { | 210 | for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { |
| 211 | uctx->cotables[i] = vmw_cotable_alloc(dev_priv, | 211 | uctx->cotables[i] = vmw_cotable_alloc(dev_priv, |
| 212 | &uctx->res, i); | 212 | &uctx->res, i); |
| 213 | if (unlikely(uctx->cotables[i] == NULL)) { | 213 | if (unlikely(IS_ERR(uctx->cotables[i]))) { |
| 214 | ret = -ENOMEM; | 214 | ret = PTR_ERR(uctx->cotables[i]); |
| 215 | goto out_cotables; | 215 | goto out_cotables; |
| 216 | } | 216 | } |
| 217 | } | 217 | } |
| @@ -777,7 +777,7 @@ static int vmw_context_define(struct drm_device *dev, void *data, | |||
| 777 | } | 777 | } |
| 778 | 778 | ||
| 779 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | 779 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
| 780 | if (unlikely(ctx == NULL)) { | 780 | if (unlikely(!ctx)) { |
| 781 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | 781 | ttm_mem_global_free(vmw_mem_glob(dev_priv), |
| 782 | vmw_user_context_size); | 782 | vmw_user_context_size); |
| 783 | ret = -ENOMEM; | 783 | ret = -ENOMEM; |
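Note on the first vmwgfx_context.c hunk above: the NULL test on vmw_cotable_alloc() becomes IS_ERR()/PTR_ERR(), which matches the allocator's ERR_PTR(ret) return visible in the vmwgfx_cotable.c hunk just below. A self-contained userspace sketch of that idiom follows; the helper definitions are simplified stand-ins for the kernel macros and the allocator is invented for the demo.

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    /* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *cotable_alloc(int fail)    /* illustrative allocator */
    {
            if (fail)
                    return ERR_PTR(-ENOMEM);
            return malloc(16);
    }

    int main(void)
    {
            void *p = cotable_alloc(1);

            if (p == NULL)                  /* the old check: never true here */
                    printf("NULL check caught the failure\n");
            if (IS_ERR(p))                  /* the new check */
                    printf("IS_ERR caught the failure: %ld\n", PTR_ERR(p));
            return 0;
    }

Because the allocator never returns NULL on failure, the old check silently accepted an error-encoded pointer; propagating PTR_ERR() also preserves the real error code instead of forcing -ENOMEM.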
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 6c026d75c180..d87861bbe971 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | |||
| @@ -584,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, | |||
| 584 | return ERR_PTR(ret); | 584 | return ERR_PTR(ret); |
| 585 | 585 | ||
| 586 | vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); | 586 | vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); |
| 587 | if (unlikely(vcotbl == NULL)) { | 587 | if (unlikely(!vcotbl)) { |
| 588 | ret = -ENOMEM; | 588 | ret = -ENOMEM; |
| 589 | goto out_no_alloc; | 589 | goto out_no_alloc; |
| 590 | } | 590 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 4a641555b960..4436d53ae16c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -227,7 +227,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { | |||
| 227 | DRM_AUTH | DRM_RENDER_ALLOW), | 227 | DRM_AUTH | DRM_RENDER_ALLOW), |
| 228 | }; | 228 | }; |
| 229 | 229 | ||
| 230 | static struct pci_device_id vmw_pci_id_list[] = { | 230 | static const struct pci_device_id vmw_pci_id_list[] = { |
| 231 | {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, | 231 | {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, |
| 232 | {0, 0, 0} | 232 | {0, 0, 0} |
| 233 | }; | 233 | }; |
| @@ -630,7 +630,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
| 630 | char host_log[100] = {0}; | 630 | char host_log[100] = {0}; |
| 631 | 631 | ||
| 632 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); | 632 | dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); |
| 633 | if (unlikely(dev_priv == NULL)) { | 633 | if (unlikely(!dev_priv)) { |
| 634 | DRM_ERROR("Failed allocating a device private struct.\n"); | 634 | DRM_ERROR("Failed allocating a device private struct.\n"); |
| 635 | return -ENOMEM; | 635 | return -ENOMEM; |
| 636 | } | 636 | } |
| @@ -1035,7 +1035,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) | |||
| 1035 | int ret = -ENOMEM; | 1035 | int ret = -ENOMEM; |
| 1036 | 1036 | ||
| 1037 | vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); | 1037 | vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); |
| 1038 | if (unlikely(vmw_fp == NULL)) | 1038 | if (unlikely(!vmw_fp)) |
| 1039 | return ret; | 1039 | return ret; |
| 1040 | 1040 | ||
| 1041 | vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); | 1041 | vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); |
| @@ -1196,7 +1196,7 @@ static int vmw_master_create(struct drm_device *dev, | |||
| 1196 | struct vmw_master *vmaster; | 1196 | struct vmw_master *vmaster; |
| 1197 | 1197 | ||
| 1198 | vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); | 1198 | vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); |
| 1199 | if (unlikely(vmaster == NULL)) | 1199 | if (unlikely(!vmaster)) |
| 1200 | return -ENOMEM; | 1200 | return -ENOMEM; |
| 1201 | 1201 | ||
| 1202 | vmw_master_init(vmaster); | 1202 | vmw_master_init(vmaster); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index c7b53d987f06..2cfb3c93f42a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -264,7 +264,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context, | |||
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | node = kzalloc(sizeof(*node), GFP_KERNEL); | 266 | node = kzalloc(sizeof(*node), GFP_KERNEL); |
| 267 | if (unlikely(node == NULL)) { | 267 | if (unlikely(!node)) { |
| 268 | DRM_ERROR("Failed to allocate a resource validation " | 268 | DRM_ERROR("Failed to allocate a resource validation " |
| 269 | "entry.\n"); | 269 | "entry.\n"); |
| 270 | return -ENOMEM; | 270 | return -ENOMEM; |
| @@ -452,7 +452,7 @@ static int vmw_resource_relocation_add(struct list_head *list, | |||
| 452 | struct vmw_resource_relocation *rel; | 452 | struct vmw_resource_relocation *rel; |
| 453 | 453 | ||
| 454 | rel = kmalloc(sizeof(*rel), GFP_KERNEL); | 454 | rel = kmalloc(sizeof(*rel), GFP_KERNEL); |
| 455 | if (unlikely(rel == NULL)) { | 455 | if (unlikely(!rel)) { |
| 456 | DRM_ERROR("Failed to allocate a resource relocation.\n"); | 456 | DRM_ERROR("Failed to allocate a resource relocation.\n"); |
| 457 | return -ENOMEM; | 457 | return -ENOMEM; |
| 458 | } | 458 | } |
| @@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv, | |||
| 519 | struct vmw_sw_context *sw_context, | 519 | struct vmw_sw_context *sw_context, |
| 520 | SVGA3dCmdHeader *header) | 520 | SVGA3dCmdHeader *header) |
| 521 | { | 521 | { |
| 522 | return capable(CAP_SYS_ADMIN) ? : -EINVAL; | 522 | return -EINVAL; |
| 523 | } | 523 | } |
| 524 | 524 | ||
| 525 | static int vmw_cmd_ok(struct vmw_private *dev_priv, | 525 | static int vmw_cmd_ok(struct vmw_private *dev_priv, |
| @@ -2584,7 +2584,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, | |||
| 2584 | 2584 | ||
| 2585 | /** | 2585 | /** |
| 2586 | * vmw_cmd_dx_ia_set_vertex_buffers - Validate an | 2586 | * vmw_cmd_dx_ia_set_vertex_buffers - Validate an |
| 2587 | * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command. | 2587 | * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command. |
| 2588 | * | 2588 | * |
| 2589 | * @dev_priv: Pointer to a device private struct. | 2589 | * @dev_priv: Pointer to a device private struct. |
| 2590 | * @sw_context: The software context being used for this batch. | 2590 | * @sw_context: The software context being used for this batch. |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 6b2708b4eafe..b8bc5bc7de7e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |||
| @@ -284,7 +284,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv) | |||
| 284 | { | 284 | { |
| 285 | struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL); | 285 | struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL); |
| 286 | 286 | ||
| 287 | if (unlikely(fman == NULL)) | 287 | if (unlikely(!fman)) |
| 288 | return NULL; | 288 | return NULL; |
| 289 | 289 | ||
| 290 | fman->dev_priv = dev_priv; | 290 | fman->dev_priv = dev_priv; |
| @@ -541,7 +541,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman, | |||
| 541 | int ret; | 541 | int ret; |
| 542 | 542 | ||
| 543 | fence = kzalloc(sizeof(*fence), GFP_KERNEL); | 543 | fence = kzalloc(sizeof(*fence), GFP_KERNEL); |
| 544 | if (unlikely(fence == NULL)) | 544 | if (unlikely(!fence)) |
| 545 | return -ENOMEM; | 545 | return -ENOMEM; |
| 546 | 546 | ||
| 547 | ret = vmw_fence_obj_init(fman, fence, seqno, | 547 | ret = vmw_fence_obj_init(fman, fence, seqno, |
| @@ -606,7 +606,7 @@ int vmw_user_fence_create(struct drm_file *file_priv, | |||
| 606 | return ret; | 606 | return ret; |
| 607 | 607 | ||
| 608 | ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); | 608 | ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); |
| 609 | if (unlikely(ufence == NULL)) { | 609 | if (unlikely(!ufence)) { |
| 610 | ret = -ENOMEM; | 610 | ret = -ENOMEM; |
| 611 | goto out_no_object; | 611 | goto out_no_object; |
| 612 | } | 612 | } |
| @@ -966,7 +966,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv, | |||
| 966 | struct vmw_fence_manager *fman = fman_from_fence(fence); | 966 | struct vmw_fence_manager *fman = fman_from_fence(fence); |
| 967 | 967 | ||
| 968 | eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); | 968 | eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); |
| 969 | if (unlikely(eaction == NULL)) | 969 | if (unlikely(!eaction)) |
| 970 | return -ENOMEM; | 970 | return -ENOMEM; |
| 971 | 971 | ||
| 972 | eaction->event = event; | 972 | eaction->event = event; |
| @@ -1002,7 +1002,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv, | |||
| 1002 | int ret; | 1002 | int ret; |
| 1003 | 1003 | ||
| 1004 | event = kzalloc(sizeof(*event), GFP_KERNEL); | 1004 | event = kzalloc(sizeof(*event), GFP_KERNEL); |
| 1005 | if (unlikely(event == NULL)) { | 1005 | if (unlikely(!event)) { |
| 1006 | DRM_ERROR("Failed to allocate an event.\n"); | 1006 | DRM_ERROR("Failed to allocate an event.\n"); |
| 1007 | ret = -ENOMEM; | 1007 | ret = -ENOMEM; |
| 1008 | goto out_no_space; | 1008 | goto out_no_space; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index c1900f4390a4..d2b03d4a3c86 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | |||
| @@ -121,7 +121,7 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man, | |||
| 121 | struct vmwgfx_gmrid_man *gman = | 121 | struct vmwgfx_gmrid_man *gman = |
| 122 | kzalloc(sizeof(*gman), GFP_KERNEL); | 122 | kzalloc(sizeof(*gman), GFP_KERNEL); |
| 123 | 123 | ||
| 124 | if (unlikely(gman == NULL)) | 124 | if (unlikely(!gman)) |
| 125 | return -ENOMEM; | 125 | return -ENOMEM; |
| 126 | 126 | ||
| 127 | spin_lock_init(&gman->lock); | 127 | spin_lock_init(&gman->lock); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 3d94ea67a825..625ba24f143f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, | |||
| 384 | 384 | ||
| 385 | hotspot_x = du->hotspot_x; | 385 | hotspot_x = du->hotspot_x; |
| 386 | hotspot_y = du->hotspot_y; | 386 | hotspot_y = du->hotspot_y; |
| 387 | |||
| 388 | if (plane->fb) { | ||
| 389 | hotspot_x += plane->fb->hot_x; | ||
| 390 | hotspot_y += plane->fb->hot_y; | ||
| 391 | } | ||
| 392 | |||
| 387 | du->cursor_surface = vps->surf; | 393 | du->cursor_surface = vps->surf; |
| 388 | du->cursor_dmabuf = vps->dmabuf; | 394 | du->cursor_dmabuf = vps->dmabuf; |
| 389 | 395 | ||
| @@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, | |||
| 411 | vmw_cursor_update_position(dev_priv, true, | 417 | vmw_cursor_update_position(dev_priv, true, |
| 412 | du->cursor_x + hotspot_x, | 418 | du->cursor_x + hotspot_x, |
| 413 | du->cursor_y + hotspot_y); | 419 | du->cursor_y + hotspot_y); |
| 420 | |||
| 421 | du->core_hotspot_x = hotspot_x - du->hotspot_x; | ||
| 422 | du->core_hotspot_y = hotspot_y - du->hotspot_y; | ||
| 414 | } else { | 423 | } else { |
| 415 | DRM_ERROR("Failed to update cursor image\n"); | 424 | DRM_ERROR("Failed to update cursor image\n"); |
| 416 | } | 425 | } |
| @@ -1558,10 +1567,34 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev, | |||
| 1558 | } | 1567 | } |
| 1559 | 1568 | ||
| 1560 | 1569 | ||
| 1570 | /** | ||
| 1571 | * vmw_kms_atomic_commit - Perform an atomic state commit | ||
| 1572 | * | ||
| 1573 | * @dev: DRM device | ||
| 1574 | * @state: the driver state object | ||
| 1575 | * @nonblock: Whether nonblocking behaviour is requested | ||
| 1576 | * | ||
| 1577 | * This is a simple wrapper around drm_atomic_helper_commit() for | ||
| 1578 | * us to clear the nonblocking value. | ||
| 1579 | * | ||
| 1580 | * Nonblocking commits currently cause synchronization issues | ||
| 1581 | * for vmwgfx. | ||
| 1582 | * | ||
| 1583 | * RETURNS | ||
| 1584 | * Zero for success or negative error code on failure. | ||
| 1585 | */ | ||
| 1586 | int vmw_kms_atomic_commit(struct drm_device *dev, | ||
| 1587 | struct drm_atomic_state *state, | ||
| 1588 | bool nonblock) | ||
| 1589 | { | ||
| 1590 | return drm_atomic_helper_commit(dev, state, false); | ||
| 1591 | } | ||
| 1592 | |||
| 1593 | |||
| 1561 | static const struct drm_mode_config_funcs vmw_kms_funcs = { | 1594 | static const struct drm_mode_config_funcs vmw_kms_funcs = { |
| 1562 | .fb_create = vmw_kms_fb_create, | 1595 | .fb_create = vmw_kms_fb_create, |
| 1563 | .atomic_check = vmw_kms_atomic_check_modeset, | 1596 | .atomic_check = vmw_kms_atomic_check_modeset, |
| 1564 | .atomic_commit = drm_atomic_helper_commit, | 1597 | .atomic_commit = vmw_kms_atomic_commit, |
| 1565 | }; | 1598 | }; |
| 1566 | 1599 | ||
| 1567 | static int vmw_kms_generic_present(struct vmw_private *dev_priv, | 1600 | static int vmw_kms_generic_present(struct vmw_private *dev_priv, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 941bcfd131ff..b17f08fc50d3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | |||
| @@ -320,14 +320,14 @@ int vmw_otables_setup(struct vmw_private *dev_priv) | |||
| 320 | 320 | ||
| 321 | if (dev_priv->has_dx) { | 321 | if (dev_priv->has_dx) { |
| 322 | *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL); | 322 | *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL); |
| 323 | if (*otables == NULL) | 323 | if (!(*otables)) |
| 324 | return -ENOMEM; | 324 | return -ENOMEM; |
| 325 | 325 | ||
| 326 | dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); | 326 | dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); |
| 327 | } else { | 327 | } else { |
| 328 | *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables), | 328 | *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables), |
| 329 | GFP_KERNEL); | 329 | GFP_KERNEL); |
| 330 | if (*otables == NULL) | 330 | if (!(*otables)) |
| 331 | return -ENOMEM; | 331 | return -ENOMEM; |
| 332 | 332 | ||
| 333 | dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); | 333 | dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); |
| @@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages) | |||
| 407 | { | 407 | { |
| 408 | struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); | 408 | struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); |
| 409 | 409 | ||
| 410 | if (unlikely(mob == NULL)) | 410 | if (unlikely(!mob)) |
| 411 | return NULL; | 411 | return NULL; |
| 412 | 412 | ||
| 413 | mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); | 413 | mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 6063c9636d4a..97000996b8dc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | |||
| @@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, | |||
| 244 | 244 | ||
| 245 | reply_len = ebx; | 245 | reply_len = ebx; |
| 246 | reply = kzalloc(reply_len + 1, GFP_KERNEL); | 246 | reply = kzalloc(reply_len + 1, GFP_KERNEL); |
| 247 | if (reply == NULL) { | 247 | if (!reply) { |
| 248 | DRM_ERROR("Cannot allocate memory for reply\n"); | 248 | DRM_ERROR("Cannot allocate memory for reply\n"); |
| 249 | return -ENOMEM; | 249 | return -ENOMEM; |
| 250 | } | 250 | } |
| @@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param, | |||
| 340 | 340 | ||
| 341 | msg_len = strlen(guest_info_param) + strlen("info-get ") + 1; | 341 | msg_len = strlen(guest_info_param) + strlen("info-get ") + 1; |
| 342 | msg = kzalloc(msg_len, GFP_KERNEL); | 342 | msg = kzalloc(msg_len, GFP_KERNEL); |
| 343 | if (msg == NULL) { | 343 | if (!msg) { |
| 344 | DRM_ERROR("Cannot allocate memory to get %s", guest_info_param); | 344 | DRM_ERROR("Cannot allocate memory to get %s", guest_info_param); |
| 345 | return -ENOMEM; | 345 | return -ENOMEM; |
| 346 | } | 346 | } |
| @@ -400,7 +400,7 @@ int vmw_host_log(const char *log) | |||
| 400 | 400 | ||
| 401 | msg_len = strlen(log) + strlen("log ") + 1; | 401 | msg_len = strlen(log) + strlen("log ") + 1; |
| 402 | msg = kzalloc(msg_len, GFP_KERNEL); | 402 | msg = kzalloc(msg_len, GFP_KERNEL); |
| 403 | if (msg == NULL) { | 403 | if (!msg) { |
| 404 | DRM_ERROR("Cannot allocate memory for log message\n"); | 404 | DRM_ERROR("Cannot allocate memory for log message\n"); |
| 405 | return -ENOMEM; | 405 | return -ENOMEM; |
| 406 | } | 406 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 7d591f653dfa..a96f90f017d1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
| @@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, | |||
| 446 | int ret; | 446 | int ret; |
| 447 | 447 | ||
| 448 | user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); | 448 | user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); |
| 449 | if (unlikely(user_bo == NULL)) { | 449 | if (unlikely(!user_bo)) { |
| 450 | DRM_ERROR("Failed to allocate a buffer.\n"); | 450 | DRM_ERROR("Failed to allocate a buffer.\n"); |
| 451 | return -ENOMEM; | 451 | return -ENOMEM; |
| 452 | } | 452 | } |
| @@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, | |||
| 836 | } | 836 | } |
| 837 | 837 | ||
| 838 | backup = kzalloc(sizeof(*backup), GFP_KERNEL); | 838 | backup = kzalloc(sizeof(*backup), GFP_KERNEL); |
| 839 | if (unlikely(backup == NULL)) | 839 | if (unlikely(!backup)) |
| 840 | return -ENOMEM; | 840 | return -ENOMEM; |
| 841 | 841 | ||
| 842 | ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size, | 842 | ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 68f135c5b0d8..9b832f136813 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | |||
| @@ -751,7 +751,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv, | |||
| 751 | } | 751 | } |
| 752 | 752 | ||
| 753 | ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); | 753 | ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); |
| 754 | if (unlikely(ushader == NULL)) { | 754 | if (unlikely(!ushader)) { |
| 755 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | 755 | ttm_mem_global_free(vmw_mem_glob(dev_priv), |
| 756 | vmw_user_shader_size); | 756 | vmw_user_shader_size); |
| 757 | ret = -ENOMEM; | 757 | ret = -ENOMEM; |
| @@ -821,7 +821,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv, | |||
| 821 | } | 821 | } |
| 822 | 822 | ||
| 823 | shader = kzalloc(sizeof(*shader), GFP_KERNEL); | 823 | shader = kzalloc(sizeof(*shader), GFP_KERNEL); |
| 824 | if (unlikely(shader == NULL)) { | 824 | if (unlikely(!shader)) { |
| 825 | ttm_mem_global_free(vmw_mem_glob(dev_priv), | 825 | ttm_mem_global_free(vmw_mem_glob(dev_priv), |
| 826 | vmw_shader_size); | 826 | vmw_shader_size); |
| 827 | ret = -ENOMEM; | 827 | ret = -ENOMEM; |
| @@ -981,7 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv, | |||
| 981 | 981 | ||
| 982 | /* Allocate and pin a DMA buffer */ | 982 | /* Allocate and pin a DMA buffer */ |
| 983 | buf = kzalloc(sizeof(*buf), GFP_KERNEL); | 983 | buf = kzalloc(sizeof(*buf), GFP_KERNEL); |
| 984 | if (unlikely(buf == NULL)) | 984 | if (unlikely(!buf)) |
| 985 | return -ENOMEM; | 985 | return -ENOMEM; |
| 986 | 986 | ||
| 987 | ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement, | 987 | ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 50be1f034f9e..5284e8d2f7ba 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | |||
| @@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) | |||
| 1640 | * something arbitrarily large and we will reject any layout | 1640 | * something arbitrarily large and we will reject any layout |
| 1641 | * that doesn't fit prim_bb_mem later | 1641 | * that doesn't fit prim_bb_mem later |
| 1642 | */ | 1642 | */ |
| 1643 | dev->mode_config.max_width = 16384; | 1643 | dev->mode_config.max_width = 8192; |
| 1644 | dev->mode_config.max_height = 16384; | 1644 | dev->mode_config.max_height = 8192; |
| 1645 | } | 1645 | } |
| 1646 | 1646 | ||
| 1647 | vmw_kms_create_implicit_placement_property(dev_priv, false); | 1647 | vmw_kms_create_implicit_placement_property(dev_priv, false); |
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 2c58a390123a..778272514164 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c | |||
| @@ -186,8 +186,13 @@ static int host1x_probe(struct platform_device *pdev) | |||
| 186 | return -ENOMEM; | 186 | return -ENOMEM; |
| 187 | 187 | ||
| 188 | err = iommu_attach_device(host->domain, &pdev->dev); | 188 | err = iommu_attach_device(host->domain, &pdev->dev); |
| 189 | if (err) | 189 | if (err == -ENODEV) { |
| 190 | iommu_domain_free(host->domain); | ||
| 191 | host->domain = NULL; | ||
| 192 | goto skip_iommu; | ||
| 193 | } else if (err) { | ||
| 190 | goto fail_free_domain; | 194 | goto fail_free_domain; |
| 195 | } | ||
| 191 | 196 | ||
| 192 | geometry = &host->domain->geometry; | 197 | geometry = &host->domain->geometry; |
| 193 | 198 | ||
| @@ -198,6 +203,7 @@ static int host1x_probe(struct platform_device *pdev) | |||
| 198 | host->iova_end = geometry->aperture_end; | 203 | host->iova_end = geometry->aperture_end; |
| 199 | } | 204 | } |
| 200 | 205 | ||
| 206 | skip_iommu: | ||
| 201 | err = host1x_channel_list_init(&host->channel_list, | 207 | err = host1x_channel_list_init(&host->channel_list, |
| 202 | host->info->nb_channels); | 208 | host->info->nb_channels); |
| 203 | if (err) { | 209 | if (err) { |
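Note on the host1x probe hunk above: -ENODEV from iommu_attach_device() is downgraded to "continue without an IOMMU domain", while any other error still fails the probe. The shape of that triage, reduced to a runnable sketch with invented helpers:

    #include <stdio.h>
    #include <errno.h>

    static int attach(int simulated_err) { return simulated_err; }  /* stand-in */

    static int probe(int simulated_err)
    {
            int err = attach(simulated_err);

            if (err == -ENODEV) {
                    printf("no IOMMU behind this device, continuing without it\n");
                    goto skip_iommu;
            } else if (err) {
                    return err;             /* real failure: unwind as before */
            }

            printf("IOMMU domain attached\n");
    skip_iommu:
            return 0;
    }

    int main(void)
    {
            printf("probe(0)       -> %d\n", probe(0));
            printf("probe(-ENODEV) -> %d\n", probe(-ENODEV));
            printf("probe(-ENOMEM) -> %d\n", probe(-ENOMEM));
            return 0;
    }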
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig index 08766c6e7856..87a20b3dcf7a 100644 --- a/drivers/gpu/ipu-v3/Kconfig +++ b/drivers/gpu/ipu-v3/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | config IMX_IPUV3_CORE | 1 | config IMX_IPUV3_CORE |
| 2 | tristate "IPUv3 core support" | 2 | tristate "IPUv3 core support" |
| 3 | depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM | 3 | depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM |
| 4 | depends on DRM || !DRM # if DRM=m, this can't be 'y' | ||
| 4 | select GENERIC_IRQ_CHIP | 5 | select GENERIC_IRQ_CHIP |
| 5 | help | 6 | help |
| 6 | Choose this if you have a i.MX5/6 system and want to use the Image | 7 | Choose this if you have a i.MX5/6 system and want to use the Image |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 6fd01a692197..9017dcc14502 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
| @@ -2216,6 +2216,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 2216 | #if IS_ENABLED(CONFIG_HID_ORTEK) | 2216 | #if IS_ENABLED(CONFIG_HID_ORTEK) |
| 2217 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, | 2217 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, |
| 2218 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, | 2218 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, |
| 2219 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) }, | ||
| 2219 | { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, | 2220 | { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, |
| 2220 | #endif | 2221 | #endif |
| 2221 | #if IS_ENABLED(CONFIG_HID_PANTHERLORD) | 2222 | #if IS_ENABLED(CONFIG_HID_PANTHERLORD) |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 3d911bfd91cf..c9ba4c6db74c 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -824,6 +824,7 @@ | |||
| 824 | #define USB_VENDOR_ID_ORTEK 0x05a4 | 824 | #define USB_VENDOR_ID_ORTEK 0x05a4 |
| 825 | #define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 | 825 | #define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 |
| 826 | #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 | 826 | #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 |
| 827 | #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003 | ||
| 827 | 828 | ||
| 828 | #define USB_VENDOR_ID_PLANTRONICS 0x047f | 829 | #define USB_VENDOR_ID_PLANTRONICS 0x047f |
| 829 | 830 | ||
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c index 6620f15fec22..8783a064cdcf 100644 --- a/drivers/hid/hid-ortek.c +++ b/drivers/hid/hid-ortek.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | * | 5 | * |
| 6 | * Ortek PKB-1700 | 6 | * Ortek PKB-1700 |
| 7 | * Ortek WKB-2000 | 7 | * Ortek WKB-2000 |
| 8 | * iHome IMAC-A210S | ||
| 8 | * Skycable wireless presenter | 9 | * Skycable wireless presenter |
| 9 | * | 10 | * |
| 10 | * Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com> | 11 | * Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com> |
| @@ -28,10 +29,10 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
| 28 | unsigned int *rsize) | 29 | unsigned int *rsize) |
| 29 | { | 30 | { |
| 30 | if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) { | 31 | if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) { |
| 31 | hid_info(hdev, "Fixing up logical minimum in report descriptor (Ortek)\n"); | 32 | hid_info(hdev, "Fixing up logical maximum in report descriptor (Ortek)\n"); |
| 32 | rdesc[55] = 0x92; | 33 | rdesc[55] = 0x92; |
| 33 | } else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) { | 34 | } else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) { |
| 34 | hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n"); | 35 | hid_info(hdev, "Fixing up logical maximum in report descriptor (Skycable)\n"); |
| 35 | rdesc[53] = 0x65; | 36 | rdesc[53] = 0x65; |
| 36 | } | 37 | } |
| 37 | return rdesc; | 38 | return rdesc; |
| @@ -40,6 +41,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
| 40 | static const struct hid_device_id ortek_devices[] = { | 41 | static const struct hid_device_id ortek_devices[] = { |
| 41 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, | 42 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, |
| 42 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, | 43 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, |
| 44 | { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) }, | ||
| 43 | { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, | 45 | { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, |
| 44 | { } | 46 | { } |
| 45 | }; | 47 | }; |
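Note on the hid-ortek.c hunk above: only the log text changes ("logical minimum" becomes "logical maximum"), but the fixup itself is worth spelling out: prefix byte 0x25 is a one-byte Logical Maximum item, and the bogus value 0x01 is patched to 0x92 for the Ortek layout or 0x65 for the Skycable one. A runnable sketch with a made-up descriptor buffer; the offsets mirror the driver.

    #include <stdio.h>

    static void fixup(unsigned char *rdesc, unsigned int rsize)
    {
            if (rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
                    printf("fixing up logical maximum (Ortek)\n");
                    rdesc[55] = 0x92;
            } else if (rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) {
                    printf("fixing up logical maximum (Skycable)\n");
                    rdesc[53] = 0x65;
            }
    }

    int main(void)
    {
            unsigned char rdesc[56] = { 0 };

            rdesc[54] = 0x25;       /* Logical Maximum, one byte of data */
            rdesc[55] = 0x01;       /* bogus: reports only one usage     */
            fixup(rdesc, sizeof(rdesc));
            printf("rdesc[55] is now 0x%02x\n", rdesc[55]);
            return 0;
    }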
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 76013eb5cb7f..c008847e0b20 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c | |||
| @@ -680,18 +680,21 @@ static int usbhid_open(struct hid_device *hid) | |||
| 680 | struct usbhid_device *usbhid = hid->driver_data; | 680 | struct usbhid_device *usbhid = hid->driver_data; |
| 681 | int res; | 681 | int res; |
| 682 | 682 | ||
| 683 | set_bit(HID_OPENED, &usbhid->iofl); | ||
| 684 | |||
| 683 | if (hid->quirks & HID_QUIRK_ALWAYS_POLL) | 685 | if (hid->quirks & HID_QUIRK_ALWAYS_POLL) |
| 684 | return 0; | 686 | return 0; |
| 685 | 687 | ||
| 686 | res = usb_autopm_get_interface(usbhid->intf); | 688 | res = usb_autopm_get_interface(usbhid->intf); |
| 687 | /* the device must be awake to reliably request remote wakeup */ | 689 | /* the device must be awake to reliably request remote wakeup */ |
| 688 | if (res < 0) | 690 | if (res < 0) { |
| 691 | clear_bit(HID_OPENED, &usbhid->iofl); | ||
| 689 | return -EIO; | 692 | return -EIO; |
| 693 | } | ||
| 690 | 694 | ||
| 691 | usbhid->intf->needs_remote_wakeup = 1; | 695 | usbhid->intf->needs_remote_wakeup = 1; |
| 692 | 696 | ||
| 693 | set_bit(HID_RESUME_RUNNING, &usbhid->iofl); | 697 | set_bit(HID_RESUME_RUNNING, &usbhid->iofl); |
| 694 | set_bit(HID_OPENED, &usbhid->iofl); | ||
| 695 | set_bit(HID_IN_POLLING, &usbhid->iofl); | 698 | set_bit(HID_IN_POLLING, &usbhid->iofl); |
| 696 | 699 | ||
| 697 | res = hid_start_in(hid); | 700 | res = hid_start_in(hid); |
| @@ -727,19 +730,20 @@ static void usbhid_close(struct hid_device *hid) | |||
| 727 | { | 730 | { |
| 728 | struct usbhid_device *usbhid = hid->driver_data; | 731 | struct usbhid_device *usbhid = hid->driver_data; |
| 729 | 732 | ||
| 730 | if (hid->quirks & HID_QUIRK_ALWAYS_POLL) | ||
| 731 | return; | ||
| 732 | |||
| 733 | /* | 733 | /* |
| 734 | * Make sure we don't restart data acquisition due to | 734 | * Make sure we don't restart data acquisition due to |
| 735 | * a resumption we no longer care about by avoiding racing | 735 | * a resumption we no longer care about by avoiding racing |
| 736 | * with hid_start_in(). | 736 | * with hid_start_in(). |
| 737 | */ | 737 | */ |
| 738 | spin_lock_irq(&usbhid->lock); | 738 | spin_lock_irq(&usbhid->lock); |
| 739 | clear_bit(HID_IN_POLLING, &usbhid->iofl); | ||
| 740 | clear_bit(HID_OPENED, &usbhid->iofl); | 739 | clear_bit(HID_OPENED, &usbhid->iofl); |
| 740 | if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) | ||
| 741 | clear_bit(HID_IN_POLLING, &usbhid->iofl); | ||
| 741 | spin_unlock_irq(&usbhid->lock); | 742 | spin_unlock_irq(&usbhid->lock); |
| 742 | 743 | ||
| 744 | if (hid->quirks & HID_QUIRK_ALWAYS_POLL) | ||
| 745 | return; | ||
| 746 | |||
| 743 | hid_cancel_delayed_stuff(usbhid); | 747 | hid_cancel_delayed_stuff(usbhid); |
| 744 | usb_kill_urb(usbhid->urbin); | 748 | usb_kill_urb(usbhid->urbin); |
| 745 | usbhid->intf->needs_remote_wakeup = 0; | 749 | usbhid->intf->needs_remote_wakeup = 0; |
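
The usbhid hunk reorders the HID_OPENED bookkeeping: the bit is set before the HID_QUIRK_ALWAYS_POLL early return, cleared again if usb_autopm_get_interface() fails, and usbhid_close() now clears it unconditionally while HID_IN_POLLING is only touched when the device was really being polled. A small userspace model of that set-then-undo pattern (flag values and the failing call are stand-ins, not the real kernel bit numbers):

    #include <stdio.h>
    #include <stdbool.h>

    #define FL_OPENED      (1u << 0)   /* stands in for HID_OPENED */
    #define FL_IN_POLLING  (1u << 1)   /* stands in for HID_IN_POLLING */

    static unsigned int iofl;          /* models usbhid->iofl */
    static bool always_poll;           /* models HID_QUIRK_ALWAYS_POLL */

    static int autopm_get(void) { return -1; }  /* pretend autopm fails */

    static int model_open(void)
    {
        iofl |= FL_OPENED;             /* mark opened first ... */

        if (always_poll)
            return 0;                  /* ... so this early return is fine */

        if (autopm_get() < 0) {
            iofl &= ~FL_OPENED;        /* undo on the error path */
            return -5;                 /* -EIO */
        }

        iofl |= FL_IN_POLLING;
        return 0;
    }

    static void model_close(void)
    {
        iofl &= ~FL_OPENED;            /* always cleared now */
        if (!always_poll)
            iofl &= ~FL_IN_POLLING;    /* only cleared when we were polling */
    }

    int main(void)
    {
        int ret = model_open();

        printf("open returned %d, iofl=0x%x\n", ret, iofl);
        model_close();
        printf("after close, iofl=0x%x\n", iofl);
        return 0;
    }
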
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 1006b230b236..65fa29591d21 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
| @@ -983,7 +983,7 @@ config I2C_UNIPHIER_F | |||
| 983 | 983 | ||
| 984 | config I2C_VERSATILE | 984 | config I2C_VERSATILE |
| 985 | tristate "ARM Versatile/Realview I2C bus support" | 985 | tristate "ARM Versatile/Realview I2C bus support" |
| 986 | depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST | 986 | depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST |
| 987 | select I2C_ALGOBIT | 987 | select I2C_ALGOBIT |
| 988 | help | 988 | help |
| 989 | Say yes if you want to support the I2C serial bus on ARMs Versatile | 989 | Say yes if you want to support the I2C serial bus on ARMs Versatile |
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index f19348328a71..6fdf9231c23c 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c | |||
| @@ -410,10 +410,11 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus) | |||
| 410 | } | 410 | } |
| 411 | 411 | ||
| 412 | /* We are in an invalid state; reset bus to a known state. */ | 412 | /* We are in an invalid state; reset bus to a known state. */ |
| 413 | if (!bus->msgs && bus->master_state != ASPEED_I2C_MASTER_STOP) { | 413 | if (!bus->msgs) { |
| 414 | dev_err(bus->dev, "bus in unknown state"); | 414 | dev_err(bus->dev, "bus in unknown state"); |
| 415 | bus->cmd_err = -EIO; | 415 | bus->cmd_err = -EIO; |
| 416 | aspeed_i2c_do_stop(bus); | 416 | if (bus->master_state != ASPEED_I2C_MASTER_STOP) |
| 417 | aspeed_i2c_do_stop(bus); | ||
| 417 | goto out_no_complete; | 418 | goto out_no_complete; |
| 418 | } | 419 | } |
| 419 | msg = &bus->msgs[bus->msgs_index]; | 420 | msg = &bus->msgs[bus->msgs_index]; |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 2ea6d0d25a01..2b98a173136f 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
| @@ -198,8 +198,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev) | |||
| 198 | dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY; | 198 | dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY; |
| 199 | 199 | ||
| 200 | dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL | | 200 | dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL | |
| 201 | DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED | | 201 | DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED; |
| 202 | DW_IC_CON_SPEED_FAST; | ||
| 203 | 202 | ||
| 204 | dev->mode = DW_IC_SLAVE; | 203 | dev->mode = DW_IC_SLAVE; |
| 205 | 204 | ||
| @@ -257,7 +256,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) | |||
| 257 | struct dw_i2c_dev *dev; | 256 | struct dw_i2c_dev *dev; |
| 258 | u32 acpi_speed, ht = 0; | 257 | u32 acpi_speed, ht = 0; |
| 259 | struct resource *mem; | 258 | struct resource *mem; |
| 260 | int irq, ret; | 259 | int i, irq, ret; |
| 260 | const int supported_speeds[] = { 0, 100000, 400000, 1000000, 3400000 }; | ||
| 261 | 261 | ||
| 262 | irq = platform_get_irq(pdev, 0); | 262 | irq = platform_get_irq(pdev, 0); |
| 263 | if (irq < 0) | 263 | if (irq < 0) |
| @@ -299,6 +299,16 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) | |||
| 299 | 299 | ||
| 300 | acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); | 300 | acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); |
| 301 | /* | 301 | /* |
| 302 | * Some DSDTs use a non-standard speed, round down to the lowest | ||
| 303 | * standard speed. | ||
| 304 | */ | ||
| 305 | for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) { | ||
| 306 | if (acpi_speed < supported_speeds[i]) | ||
| 307 | break; | ||
| 308 | } | ||
| 309 | acpi_speed = supported_speeds[i - 1]; | ||
| 310 | |||
| 311 | /* | ||
| 302 | * Find bus speed from the "clock-frequency" device property, ACPI | 312 | * Find bus speed from the "clock-frequency" device property, ACPI |
| 303 | * or by using fast mode if neither is set. | 313 | * or by using fast mode if neither is set. |
| 304 | */ | 314 | */ |
| @@ -319,7 +329,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) | |||
| 319 | if (dev->clk_freq != 100000 && dev->clk_freq != 400000 | 329 | if (dev->clk_freq != 100000 && dev->clk_freq != 400000 |
| 320 | && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { | 330 | && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { |
| 321 | dev_err(&pdev->dev, | 331 | dev_err(&pdev->dev, |
| 322 | "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported"); | 332 | "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", |
| 333 | dev->clk_freq); | ||
| 323 | ret = -EINVAL; | 334 | ret = -EINVAL; |
| 324 | goto exit_reset; | 335 | goto exit_reset; |
| 325 | } | 336 | } |
| @@ -426,7 +437,7 @@ static void dw_i2c_plat_complete(struct device *dev) | |||
| 426 | #endif | 437 | #endif |
| 427 | 438 | ||
| 428 | #ifdef CONFIG_PM | 439 | #ifdef CONFIG_PM |
| 429 | static int dw_i2c_plat_suspend(struct device *dev) | 440 | static int dw_i2c_plat_runtime_suspend(struct device *dev) |
| 430 | { | 441 | { |
| 431 | struct platform_device *pdev = to_platform_device(dev); | 442 | struct platform_device *pdev = to_platform_device(dev); |
| 432 | struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); | 443 | struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); |
| @@ -448,11 +459,21 @@ static int dw_i2c_plat_resume(struct device *dev) | |||
| 448 | return 0; | 459 | return 0; |
| 449 | } | 460 | } |
| 450 | 461 | ||
| 462 | #ifdef CONFIG_PM_SLEEP | ||
| 463 | static int dw_i2c_plat_suspend(struct device *dev) | ||
| 464 | { | ||
| 465 | pm_runtime_resume(dev); | ||
| 466 | return dw_i2c_plat_runtime_suspend(dev); | ||
| 467 | } | ||
| 468 | #endif | ||
| 469 | |||
| 451 | static const struct dev_pm_ops dw_i2c_dev_pm_ops = { | 470 | static const struct dev_pm_ops dw_i2c_dev_pm_ops = { |
| 452 | .prepare = dw_i2c_plat_prepare, | 471 | .prepare = dw_i2c_plat_prepare, |
| 453 | .complete = dw_i2c_plat_complete, | 472 | .complete = dw_i2c_plat_complete, |
| 454 | SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) | 473 | SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) |
| 455 | SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL) | 474 | SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, |
| 475 | dw_i2c_plat_resume, | ||
| 476 | NULL) | ||
| 456 | }; | 477 | }; |
| 457 | 478 | ||
| 458 | #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops) | 479 | #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops) |
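
The supported_speeds walk added to dw_i2c_plat_probe() rounds whatever the firmware reports down to the nearest standard I2C rate (100 kHz, 400 kHz, 1 MHz or 3.4 MHz), dropping to 0 when the value is below 100 kHz so the later fallback logic treats it as unset. The same loop, lifted into a plain C program for illustration:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Round an arbitrary bus speed (Hz) down to the nearest standard
     * speed, mirroring the loop added to dw_i2c_plat_probe(). */
    static unsigned int round_down_to_standard(unsigned int speed)
    {
        const unsigned int supported_speeds[] = {
            0, 100000, 400000, 1000000, 3400000
        };
        unsigned int i;

        for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) {
            if (speed < supported_speeds[i])
                break;
        }
        return supported_speeds[i - 1];
    }

    int main(void)
    {
        /* e.g. a DSDT reporting 1.8 MHz ends up at the 1 MHz standard rate */
        printf("1800000 -> %u\n", round_down_to_standard(1800000));
        printf("  50000 -> %u\n", round_down_to_standard(50000));
        return 0;
    }
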
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 0548c7ea578c..78d8fb73927d 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c | |||
| @@ -177,6 +177,8 @@ static int i2c_dw_reg_slave(struct i2c_client *slave) | |||
| 177 | return -EBUSY; | 177 | return -EBUSY; |
| 178 | if (slave->flags & I2C_CLIENT_TEN) | 178 | if (slave->flags & I2C_CLIENT_TEN) |
| 179 | return -EAFNOSUPPORT; | 179 | return -EAFNOSUPPORT; |
| 180 | pm_runtime_get_sync(dev->dev); | ||
| 181 | |||
| 180 | /* | 182 | /* |
| 181 | * Set slave address in the IC_SAR register, | 183 | * Set slave address in the IC_SAR register, |
| 182 | * the address to which the DW_apb_i2c responds. | 184 | * the address to which the DW_apb_i2c responds. |
| @@ -205,6 +207,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave) | |||
| 205 | dev->disable_int(dev); | 207 | dev->disable_int(dev); |
| 206 | dev->disable(dev); | 208 | dev->disable(dev); |
| 207 | dev->slave = NULL; | 209 | dev->slave = NULL; |
| 210 | pm_runtime_put(dev->dev); | ||
| 208 | 211 | ||
| 209 | return 0; | 212 | return 0; |
| 210 | } | 213 | } |
| @@ -272,7 +275,7 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) | |||
| 272 | slave_activity = ((dw_readl(dev, DW_IC_STATUS) & | 275 | slave_activity = ((dw_readl(dev, DW_IC_STATUS) & |
| 273 | DW_IC_STATUS_SLAVE_ACTIVITY) >> 6); | 276 | DW_IC_STATUS_SLAVE_ACTIVITY) >> 6); |
| 274 | 277 | ||
| 275 | if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY)) | 278 | if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave) |
| 276 | return 0; | 279 | return 0; |
| 277 | 280 | ||
| 278 | dev_dbg(dev->dev, | 281 | dev_dbg(dev->dev, |
| @@ -382,7 +385,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev) | |||
| 382 | ret = i2c_add_numbered_adapter(adap); | 385 | ret = i2c_add_numbered_adapter(adap); |
| 383 | if (ret) | 386 | if (ret) |
| 384 | dev_err(dev->dev, "failure adding adapter: %d\n", ret); | 387 | dev_err(dev->dev, "failure adding adapter: %d\n", ret); |
| 385 | pm_runtime_put_noidle(dev->dev); | ||
| 386 | 388 | ||
| 387 | return ret; | 389 | return ret; |
| 388 | } | 390 | } |
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index e98e44e584a4..22ffcb73c185 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c | |||
| @@ -341,8 +341,10 @@ static int ismt_process_desc(const struct ismt_desc *desc, | |||
| 341 | break; | 341 | break; |
| 342 | case I2C_SMBUS_BLOCK_DATA: | 342 | case I2C_SMBUS_BLOCK_DATA: |
| 343 | case I2C_SMBUS_I2C_BLOCK_DATA: | 343 | case I2C_SMBUS_I2C_BLOCK_DATA: |
| 344 | memcpy(&data->block[1], dma_buffer, desc->rxbytes); | 344 | if (desc->rxbytes != dma_buffer[0] + 1) |
| 345 | data->block[0] = desc->rxbytes; | 345 | return -EMSGSIZE; |
| 346 | |||
| 347 | memcpy(data->block, dma_buffer, desc->rxbytes); | ||
| 346 | break; | 348 | break; |
| 347 | } | 349 | } |
| 348 | return 0; | 350 | return 0; |
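
The i2c-ismt.c change stops trusting the hardware byte count blindly: for SMBus block reads, byte 0 of the DMA buffer is the block length, so desc->rxbytes must be exactly that length plus one, and the header plus payload are then copied in one go. A hedged standalone model of that validation:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <errno.h>

    #define I2C_SMBUS_BLOCK_MAX 32

    /* Models the fixed ismt_process_desc() block-read path: reject the
     * transfer when the received byte count does not match the length
     * byte the device put at the front of the block. */
    static int copy_block(uint8_t *block, const uint8_t *dma_buffer,
                          uint8_t rxbytes)
    {
        if (rxbytes != dma_buffer[0] + 1)
            return -EMSGSIZE;

        memcpy(block, dma_buffer, rxbytes);  /* block[0] = length, then data */
        return 0;
    }

    int main(void)
    {
        uint8_t dma_buffer[I2C_SMBUS_BLOCK_MAX + 1] = { 4, 0xde, 0xad, 0xbe, 0xef };
        uint8_t block[I2C_SMBUS_BLOCK_MAX + 1];

        printf("well-formed: %d\n", copy_block(block, dma_buffer, 5));
        printf("short read:  %d\n", copy_block(block, dma_buffer, 3));
        return 0;
    }
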
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c index b4685bb9b5d7..adca51a99487 100644 --- a/drivers/i2c/busses/i2c-simtec.c +++ b/drivers/i2c/busses/i2c-simtec.c | |||
| @@ -127,8 +127,7 @@ static int simtec_i2c_probe(struct platform_device *dev) | |||
| 127 | iounmap(pd->reg); | 127 | iounmap(pd->reg); |
| 128 | 128 | ||
| 129 | err_res: | 129 | err_res: |
| 130 | release_resource(pd->ioarea); | 130 | release_mem_region(pd->ioarea->start, size); |
| 131 | kfree(pd->ioarea); | ||
| 132 | 131 | ||
| 133 | err: | 132 | err: |
| 134 | kfree(pd); | 133 | kfree(pd); |
| @@ -142,8 +141,7 @@ static int simtec_i2c_remove(struct platform_device *dev) | |||
| 142 | i2c_del_adapter(&pd->adap); | 141 | i2c_del_adapter(&pd->adap); |
| 143 | 142 | ||
| 144 | iounmap(pd->reg); | 143 | iounmap(pd->reg); |
| 145 | release_resource(pd->ioarea); | 144 | release_mem_region(pd->ioarea->start, resource_size(pd->ioarea)); |
| 146 | kfree(pd->ioarea); | ||
| 147 | kfree(pd); | 145 | kfree(pd); |
| 148 | 146 | ||
| 149 | return 0; | 147 | return 0; |
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 4842ec3a5451..a9126b3cda61 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c | |||
| @@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap) | |||
| 230 | dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); | 230 | dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); |
| 231 | } | 231 | } |
| 232 | 232 | ||
| 233 | const struct acpi_device_id * | ||
| 234 | i2c_acpi_match_device(const struct acpi_device_id *matches, | ||
| 235 | struct i2c_client *client) | ||
| 236 | { | ||
| 237 | if (!(client && matches)) | ||
| 238 | return NULL; | ||
| 239 | |||
| 240 | return acpi_match_device(matches, &client->dev); | ||
| 241 | } | ||
| 242 | |||
| 233 | static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, | 243 | static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, |
| 234 | void *data, void **return_value) | 244 | void *data, void **return_value) |
| 235 | { | 245 | { |
| @@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev) | |||
| 289 | } | 299 | } |
| 290 | EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); | 300 | EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); |
| 291 | 301 | ||
| 292 | static int i2c_acpi_match_adapter(struct device *dev, void *data) | 302 | static int i2c_acpi_find_match_adapter(struct device *dev, void *data) |
| 293 | { | 303 | { |
| 294 | struct i2c_adapter *adapter = i2c_verify_adapter(dev); | 304 | struct i2c_adapter *adapter = i2c_verify_adapter(dev); |
| 295 | 305 | ||
| @@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data) | |||
| 299 | return ACPI_HANDLE(dev) == (acpi_handle)data; | 309 | return ACPI_HANDLE(dev) == (acpi_handle)data; |
| 300 | } | 310 | } |
| 301 | 311 | ||
| 302 | static int i2c_acpi_match_device(struct device *dev, void *data) | 312 | static int i2c_acpi_find_match_device(struct device *dev, void *data) |
| 303 | { | 313 | { |
| 304 | return ACPI_COMPANION(dev) == data; | 314 | return ACPI_COMPANION(dev) == data; |
| 305 | } | 315 | } |
| @@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) | |||
| 309 | struct device *dev; | 319 | struct device *dev; |
| 310 | 320 | ||
| 311 | dev = bus_find_device(&i2c_bus_type, NULL, handle, | 321 | dev = bus_find_device(&i2c_bus_type, NULL, handle, |
| 312 | i2c_acpi_match_adapter); | 322 | i2c_acpi_find_match_adapter); |
| 313 | return dev ? i2c_verify_adapter(dev) : NULL; | 323 | return dev ? i2c_verify_adapter(dev) : NULL; |
| 314 | } | 324 | } |
| 315 | 325 | ||
| @@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev) | |||
| 317 | { | 327 | { |
| 318 | struct device *dev; | 328 | struct device *dev; |
| 319 | 329 | ||
| 320 | dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device); | 330 | dev = bus_find_device(&i2c_bus_type, NULL, adev, |
| 331 | i2c_acpi_find_match_device); | ||
| 321 | return dev ? i2c_verify_client(dev) : NULL; | 332 | return dev ? i2c_verify_client(dev) : NULL; |
| 322 | } | 333 | } |
| 323 | 334 | ||
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index c89dac7fd2e7..56e46581b84b 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c | |||
| @@ -353,10 +353,11 @@ static int i2c_device_probe(struct device *dev) | |||
| 353 | } | 353 | } |
| 354 | 354 | ||
| 355 | /* | 355 | /* |
| 356 | * An I2C ID table is not mandatory, if and only if, a suitable Device | 356 | * An I2C ID table is not mandatory, if and only if, a suitable OF |
| 357 | * Tree match table entry is supplied for the probing device. | 357 | * or ACPI ID table is supplied for the probing device. |
| 358 | */ | 358 | */ |
| 359 | if (!driver->id_table && | 359 | if (!driver->id_table && |
| 360 | !i2c_acpi_match_device(dev->driver->acpi_match_table, client) && | ||
| 360 | !i2c_of_match_device(dev->driver->of_match_table, client)) | 361 | !i2c_of_match_device(dev->driver->of_match_table, client)) |
| 361 | return -ENODEV; | 362 | return -ENODEV; |
| 362 | 363 | ||
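
With the i2c-core-base.c change, a client driver may drop its legacy i2c_device_id table as long as either an OF match table or an ACPI match table can identify the device; only a driver with none of the three is rejected with -ENODEV. A toy model of the relaxed condition:

    #include <stdio.h>
    #include <stdbool.h>

    /* Models the check in i2c_device_probe() after the change above:
     * the legacy id_table is optional when OF *or* ACPI matching works. */
    static bool may_probe(bool has_id_table, bool acpi_match, bool of_match)
    {
        return has_id_table || acpi_match || of_match;
    }

    int main(void)
    {
        printf("ACPI-only driver:  %s\n",
               may_probe(false, true, false) ? "probes" : "-ENODEV");
        printf("no match at all:   %s\n",
               may_probe(false, false, false) ? "probes" : "-ENODEV");
        return 0;
    }
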
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h index 3b63f5e5b89c..3d3d9bf02101 100644 --- a/drivers/i2c/i2c-core.h +++ b/drivers/i2c/i2c-core.h | |||
| @@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags); | |||
| 31 | int i2c_check_7bit_addr_validity_strict(unsigned short addr); | 31 | int i2c_check_7bit_addr_validity_strict(unsigned short addr); |
| 32 | 32 | ||
| 33 | #ifdef CONFIG_ACPI | 33 | #ifdef CONFIG_ACPI |
| 34 | const struct acpi_device_id * | ||
| 35 | i2c_acpi_match_device(const struct acpi_device_id *matches, | ||
| 36 | struct i2c_client *client); | ||
| 34 | void i2c_acpi_register_devices(struct i2c_adapter *adap); | 37 | void i2c_acpi_register_devices(struct i2c_adapter *adap); |
| 35 | #else /* CONFIG_ACPI */ | 38 | #else /* CONFIG_ACPI */ |
| 36 | static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { } | 39 | static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { } |
| 40 | static inline const struct acpi_device_id * | ||
| 41 | i2c_acpi_match_device(const struct acpi_device_id *matches, | ||
| 42 | struct i2c_client *client) | ||
| 43 | { | ||
| 44 | return NULL; | ||
| 45 | } | ||
| 37 | #endif /* CONFIG_ACPI */ | 46 | #endif /* CONFIG_ACPI */ |
| 38 | extern struct notifier_block i2c_acpi_notifier; | 47 | extern struct notifier_block i2c_acpi_notifier; |
| 39 | 48 | ||
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index 2c64d0e0740f..17121329bb79 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig | |||
| @@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL | |||
| 83 | different sets of pins at run-time. | 83 | different sets of pins at run-time. |
| 84 | 84 | ||
| 85 | This driver can also be built as a module. If so, the module will be | 85 | This driver can also be built as a module. If so, the module will be |
| 86 | called pinctrl-i2cmux. | 86 | called i2c-mux-pinctrl. |
| 87 | 87 | ||
| 88 | config I2C_MUX_REG | 88 | config I2C_MUX_REG |
| 89 | tristate "Register-based I2C multiplexer" | 89 | tristate "Register-based I2C multiplexer" |
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 6b5d3be283c4..807299dd45eb 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c | |||
| @@ -193,7 +193,6 @@ struct bmc150_accel_data { | |||
| 193 | struct regmap *regmap; | 193 | struct regmap *regmap; |
| 194 | int irq; | 194 | int irq; |
| 195 | struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; | 195 | struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; |
| 196 | atomic_t active_intr; | ||
| 197 | struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS]; | 196 | struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS]; |
| 198 | struct mutex mutex; | 197 | struct mutex mutex; |
| 199 | u8 fifo_mode, watermark; | 198 | u8 fifo_mode, watermark; |
| @@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i, | |||
| 493 | goto out_fix_power_state; | 492 | goto out_fix_power_state; |
| 494 | } | 493 | } |
| 495 | 494 | ||
| 496 | if (state) | ||
| 497 | atomic_inc(&data->active_intr); | ||
| 498 | else | ||
| 499 | atomic_dec(&data->active_intr); | ||
| 500 | |||
| 501 | return 0; | 495 | return 0; |
| 502 | 496 | ||
| 503 | out_fix_power_state: | 497 | out_fix_power_state: |
| @@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev) | |||
| 1710 | struct bmc150_accel_data *data = iio_priv(indio_dev); | 1704 | struct bmc150_accel_data *data = iio_priv(indio_dev); |
| 1711 | 1705 | ||
| 1712 | mutex_lock(&data->mutex); | 1706 | mutex_lock(&data->mutex); |
| 1713 | if (atomic_read(&data->active_intr)) | 1707 | bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); |
| 1714 | bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); | ||
| 1715 | bmc150_accel_fifo_set_mode(data); | 1708 | bmc150_accel_fifo_set_mode(data); |
| 1716 | mutex_unlock(&data->mutex); | 1709 | mutex_unlock(&data->mutex); |
| 1717 | 1710 | ||
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 07d1489cd457..e44f62bf9caa 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c | |||
| @@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 166 | .mask_ihl = 0x02, | 166 | .mask_ihl = 0x02, |
| 167 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 167 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 168 | }, | 168 | }, |
| 169 | .sim = { | ||
| 170 | .addr = 0x23, | ||
| 171 | .value = BIT(0), | ||
| 172 | }, | ||
| 169 | .multi_read_bit = true, | 173 | .multi_read_bit = true, |
| 170 | .bootime = 2, | 174 | .bootime = 2, |
| 171 | }, | 175 | }, |
| @@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 234 | .mask_od = 0x40, | 238 | .mask_od = 0x40, |
| 235 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 239 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 236 | }, | 240 | }, |
| 241 | .sim = { | ||
| 242 | .addr = 0x23, | ||
| 243 | .value = BIT(0), | ||
| 244 | }, | ||
| 237 | .multi_read_bit = true, | 245 | .multi_read_bit = true, |
| 238 | .bootime = 2, | 246 | .bootime = 2, |
| 239 | }, | 247 | }, |
| @@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 316 | .en_mask = 0x08, | 324 | .en_mask = 0x08, |
| 317 | }, | 325 | }, |
| 318 | }, | 326 | }, |
| 327 | .sim = { | ||
| 328 | .addr = 0x24, | ||
| 329 | .value = BIT(0), | ||
| 330 | }, | ||
| 319 | .multi_read_bit = false, | 331 | .multi_read_bit = false, |
| 320 | .bootime = 2, | 332 | .bootime = 2, |
| 321 | }, | 333 | }, |
| @@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 379 | .mask_int1 = 0x04, | 391 | .mask_int1 = 0x04, |
| 380 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 392 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 381 | }, | 393 | }, |
| 394 | .sim = { | ||
| 395 | .addr = 0x21, | ||
| 396 | .value = BIT(1), | ||
| 397 | }, | ||
| 382 | .multi_read_bit = true, | 398 | .multi_read_bit = true, |
| 383 | .bootime = 2, /* guess */ | 399 | .bootime = 2, /* guess */ |
| 384 | }, | 400 | }, |
| @@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 437 | .mask_od = 0x40, | 453 | .mask_od = 0x40, |
| 438 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 454 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 439 | }, | 455 | }, |
| 456 | .sim = { | ||
| 457 | .addr = 0x21, | ||
| 458 | .value = BIT(7), | ||
| 459 | }, | ||
| 440 | .multi_read_bit = false, | 460 | .multi_read_bit = false, |
| 441 | .bootime = 2, /* guess */ | 461 | .bootime = 2, /* guess */ |
| 442 | }, | 462 | }, |
| @@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 499 | .addr_ihl = 0x22, | 519 | .addr_ihl = 0x22, |
| 500 | .mask_ihl = 0x80, | 520 | .mask_ihl = 0x80, |
| 501 | }, | 521 | }, |
| 522 | .sim = { | ||
| 523 | .addr = 0x23, | ||
| 524 | .value = BIT(0), | ||
| 525 | }, | ||
| 502 | .multi_read_bit = true, | 526 | .multi_read_bit = true, |
| 503 | .bootime = 2, | 527 | .bootime = 2, |
| 504 | }, | 528 | }, |
| @@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 547 | .mask_int1 = 0x04, | 571 | .mask_int1 = 0x04, |
| 548 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 572 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 549 | }, | 573 | }, |
| 574 | .sim = { | ||
| 575 | .addr = 0x21, | ||
| 576 | .value = BIT(1), | ||
| 577 | }, | ||
| 550 | .multi_read_bit = false, | 578 | .multi_read_bit = false, |
| 551 | .bootime = 2, | 579 | .bootime = 2, |
| 552 | }, | 580 | }, |
| @@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { | |||
| 614 | .mask_ihl = 0x02, | 642 | .mask_ihl = 0x02, |
| 615 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 643 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 616 | }, | 644 | }, |
| 645 | .sim = { | ||
| 646 | .addr = 0x23, | ||
| 647 | .value = BIT(0), | ||
| 648 | }, | ||
| 617 | .multi_read_bit = true, | 649 | .multi_read_bit = true, |
| 618 | .bootime = 2, | 650 | .bootime = 2, |
| 619 | }, | 651 | }, |
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c index e0ea411a0b2d..c02b23d675cb 100644 --- a/drivers/iio/adc/aspeed_adc.c +++ b/drivers/iio/adc/aspeed_adc.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | 22 | ||
| 23 | #include <linux/iio/iio.h> | 23 | #include <linux/iio/iio.h> |
| 24 | #include <linux/iio/driver.h> | 24 | #include <linux/iio/driver.h> |
| 25 | #include <linux/iopoll.h> | ||
| 25 | 26 | ||
| 26 | #define ASPEED_RESOLUTION_BITS 10 | 27 | #define ASPEED_RESOLUTION_BITS 10 |
| 27 | #define ASPEED_CLOCKS_PER_SAMPLE 12 | 28 | #define ASPEED_CLOCKS_PER_SAMPLE 12 |
| @@ -38,11 +39,17 @@ | |||
| 38 | 39 | ||
| 39 | #define ASPEED_ENGINE_ENABLE BIT(0) | 40 | #define ASPEED_ENGINE_ENABLE BIT(0) |
| 40 | 41 | ||
| 42 | #define ASPEED_ADC_CTRL_INIT_RDY BIT(8) | ||
| 43 | |||
| 44 | #define ASPEED_ADC_INIT_POLLING_TIME 500 | ||
| 45 | #define ASPEED_ADC_INIT_TIMEOUT 500000 | ||
| 46 | |||
| 41 | struct aspeed_adc_model_data { | 47 | struct aspeed_adc_model_data { |
| 42 | const char *model_name; | 48 | const char *model_name; |
| 43 | unsigned int min_sampling_rate; // Hz | 49 | unsigned int min_sampling_rate; // Hz |
| 44 | unsigned int max_sampling_rate; // Hz | 50 | unsigned int max_sampling_rate; // Hz |
| 45 | unsigned int vref_voltage; // mV | 51 | unsigned int vref_voltage; // mV |
| 52 | bool wait_init_sequence; | ||
| 46 | }; | 53 | }; |
| 47 | 54 | ||
| 48 | struct aspeed_adc_data { | 55 | struct aspeed_adc_data { |
| @@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev) | |||
| 211 | goto scaler_error; | 218 | goto scaler_error; |
| 212 | } | 219 | } |
| 213 | 220 | ||
| 221 | model_data = of_device_get_match_data(&pdev->dev); | ||
| 222 | |||
| 223 | if (model_data->wait_init_sequence) { | ||
| 224 | /* Enable engine in normal mode. */ | ||
| 225 | writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE, | ||
| 226 | data->base + ASPEED_REG_ENGINE_CONTROL); | ||
| 227 | |||
| 228 | /* Wait for initial sequence complete. */ | ||
| 229 | ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL, | ||
| 230 | adc_engine_control_reg_val, | ||
| 231 | adc_engine_control_reg_val & | ||
| 232 | ASPEED_ADC_CTRL_INIT_RDY, | ||
| 233 | ASPEED_ADC_INIT_POLLING_TIME, | ||
| 234 | ASPEED_ADC_INIT_TIMEOUT); | ||
| 235 | if (ret) | ||
| 236 | goto scaler_error; | ||
| 237 | } | ||
| 238 | |||
| 214 | /* Start all channels in normal mode. */ | 239 | /* Start all channels in normal mode. */ |
| 215 | ret = clk_prepare_enable(data->clk_scaler->clk); | 240 | ret = clk_prepare_enable(data->clk_scaler->clk); |
| 216 | if (ret) | 241 | if (ret) |
| @@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = { | |||
| 274 | .vref_voltage = 1800, // mV | 299 | .vref_voltage = 1800, // mV |
| 275 | .min_sampling_rate = 1, | 300 | .min_sampling_rate = 1, |
| 276 | .max_sampling_rate = 1000000, | 301 | .max_sampling_rate = 1000000, |
| 302 | .wait_init_sequence = true, | ||
| 277 | }; | 303 | }; |
| 278 | 304 | ||
| 279 | static const struct of_device_id aspeed_adc_matches[] = { | 305 | static const struct of_device_id aspeed_adc_matches[] = { |
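
On the AST2500 the ADC engine must finish its initialization sequence before the channels are started, which the hunk above handles with readl_poll_timeout() on bit 8 of the engine control register (500 us poll interval, 500 ms budget). A rough userspace model of that poll loop, with a stub standing in for the MMIO read:

    #include <stdio.h>
    #include <unistd.h>
    #include <errno.h>

    #define INIT_RDY    (1u << 8)   /* models ASPEED_ADC_CTRL_INIT_RDY */
    #define POLL_US     500         /* models ASPEED_ADC_INIT_POLLING_TIME */
    #define TIMEOUT_US  500000      /* models ASPEED_ADC_INIT_TIMEOUT */

    /* Stub for readl(): pretend the hardware raises the ready bit after
     * a few polls. */
    static unsigned int read_engine_control(void)
    {
        static int polls;
        return ++polls > 3 ? INIT_RDY : 0;
    }

    /* Models readl_poll_timeout(): re-read until the condition holds or
     * the timeout budget is spent. */
    static int wait_for_init_ready(void)
    {
        unsigned int val, waited = 0;

        for (;;) {
            val = read_engine_control();
            if (val & INIT_RDY)
                return 0;
            if (waited >= TIMEOUT_US)
                return -ETIMEDOUT;
            usleep(POLL_US);
            waited += POLL_US;
        }
    }

    int main(void)
    {
        printf("wait_for_init_ready() = %d\n", wait_for_init_ready());
        return 0;
    }
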
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 64799ad7ebad..462a99c13e7a 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c | |||
| @@ -28,6 +28,8 @@ | |||
| 28 | #include <linux/iio/driver.h> | 28 | #include <linux/iio/driver.h> |
| 29 | 29 | ||
| 30 | #define AXP288_ADC_EN_MASK 0xF1 | 30 | #define AXP288_ADC_EN_MASK 0xF1 |
| 31 | #define AXP288_ADC_TS_PIN_GPADC 0xF2 | ||
| 32 | #define AXP288_ADC_TS_PIN_ON 0xF3 | ||
| 31 | 33 | ||
| 32 | enum axp288_adc_id { | 34 | enum axp288_adc_id { |
| 33 | AXP288_ADC_TS, | 35 | AXP288_ADC_TS, |
| @@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address, | |||
| 121 | return IIO_VAL_INT; | 123 | return IIO_VAL_INT; |
| 122 | } | 124 | } |
| 123 | 125 | ||
| 126 | static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, | ||
| 127 | unsigned long address) | ||
| 128 | { | ||
| 129 | int ret; | ||
| 130 | |||
| 131 | /* channels other than GPADC do not need to switch TS pin */ | ||
| 132 | if (address != AXP288_GP_ADC_H) | ||
| 133 | return 0; | ||
| 134 | |||
| 135 | ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); | ||
| 136 | if (ret) | ||
| 137 | return ret; | ||
| 138 | |||
| 139 | /* When switching to the GPADC pin give things some time to settle */ | ||
| 140 | if (mode == AXP288_ADC_TS_PIN_GPADC) | ||
| 141 | usleep_range(6000, 10000); | ||
| 142 | |||
| 143 | return 0; | ||
| 144 | } | ||
| 145 | |||
| 124 | static int axp288_adc_read_raw(struct iio_dev *indio_dev, | 146 | static int axp288_adc_read_raw(struct iio_dev *indio_dev, |
| 125 | struct iio_chan_spec const *chan, | 147 | struct iio_chan_spec const *chan, |
| 126 | int *val, int *val2, long mask) | 148 | int *val, int *val2, long mask) |
| @@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, | |||
| 131 | mutex_lock(&indio_dev->mlock); | 153 | mutex_lock(&indio_dev->mlock); |
| 132 | switch (mask) { | 154 | switch (mask) { |
| 133 | case IIO_CHAN_INFO_RAW: | 155 | case IIO_CHAN_INFO_RAW: |
| 156 | if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, | ||
| 157 | chan->address)) { | ||
| 158 | dev_err(&indio_dev->dev, "GPADC mode\n"); | ||
| 159 | ret = -EINVAL; | ||
| 160 | break; | ||
| 161 | } | ||
| 134 | ret = axp288_adc_read_channel(val, chan->address, info->regmap); | 162 | ret = axp288_adc_read_channel(val, chan->address, info->regmap); |
| 163 | if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, | ||
| 164 | chan->address)) | ||
| 165 | dev_err(&indio_dev->dev, "TS pin restore\n"); | ||
| 135 | break; | 166 | break; |
| 136 | default: | 167 | default: |
| 137 | ret = -EINVAL; | 168 | ret = -EINVAL; |
| @@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, | |||
| 141 | return ret; | 172 | return ret; |
| 142 | } | 173 | } |
| 143 | 174 | ||
| 175 | static int axp288_adc_set_state(struct regmap *regmap) | ||
| 176 | { | ||
| 177 | /* ADC should be always enabled for internal FG to function */ | ||
| 178 | if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) | ||
| 179 | return -EIO; | ||
| 180 | |||
| 181 | return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); | ||
| 182 | } | ||
| 183 | |||
| 144 | static const struct iio_info axp288_adc_iio_info = { | 184 | static const struct iio_info axp288_adc_iio_info = { |
| 145 | .read_raw = &axp288_adc_read_raw, | 185 | .read_raw = &axp288_adc_read_raw, |
| 146 | .driver_module = THIS_MODULE, | 186 | .driver_module = THIS_MODULE, |
| @@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev) | |||
| 169 | * Set ADC to enabled state at all time, including system suspend. | 209 | * Set ADC to enabled state at all time, including system suspend. |
| 170 | * otherwise internal fuel gauge functionality may be affected. | 210 | * otherwise internal fuel gauge functionality may be affected. |
| 171 | */ | 211 | */ |
| 172 | ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); | 212 | ret = axp288_adc_set_state(axp20x->regmap); |
| 173 | if (ret) { | 213 | if (ret) { |
| 174 | dev_err(&pdev->dev, "unable to enable ADC device\n"); | 214 | dev_err(&pdev->dev, "unable to enable ADC device\n"); |
| 175 | return ret; | 215 | return ret; |
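
The axp288_adc.c hunks switch the TS pin control register to GPADC mode before sampling the GPADC channel, give the input time to settle, and always restore the always-on setting afterwards. A stubbed sketch of that switch/read/restore sequence (only the 0xF2/0xF3 mode values come from the hunk; the register index and the sample are made up):

    #include <stdio.h>

    #define TS_PIN_CTRL   0x84       /* illustrative register index */
    #define TS_PIN_GPADC  0xF2       /* models AXP288_ADC_TS_PIN_GPADC */
    #define TS_PIN_ON     0xF3       /* models AXP288_ADC_TS_PIN_ON */

    static unsigned char regs[256];  /* stands in for the PMIC regmap */

    static int reg_write(unsigned char reg, unsigned char val)
    {
        regs[reg] = val;
        return 0;
    }

    /* Models the fixed read path: switch the TS pin to GPADC mode,
     * sample, then restore the always-on setting no matter what. */
    static int read_gpadc(int *val)
    {
        int ret = reg_write(TS_PIN_CTRL, TS_PIN_GPADC);

        if (ret)
            return ret;

        *val = 42;                   /* pretend this is the ADC sample */

        return reg_write(TS_PIN_CTRL, TS_PIN_ON);
    }

    int main(void)
    {
        int v, ret = read_gpadc(&v);

        printf("read_gpadc() = %d, sample = %d, ctrl = 0x%02x\n",
               ret, v, regs[TS_PIN_CTRL]);
        return 0;
    }
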
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index 232c0b80d658..c3f86138cb55 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c | |||
| @@ -644,7 +644,7 @@ static int ina2xx_capture_thread(void *data) | |||
| 644 | { | 644 | { |
| 645 | struct iio_dev *indio_dev = data; | 645 | struct iio_dev *indio_dev = data; |
| 646 | struct ina2xx_chip_info *chip = iio_priv(indio_dev); | 646 | struct ina2xx_chip_info *chip = iio_priv(indio_dev); |
| 647 | unsigned int sampling_us = SAMPLING_PERIOD(chip); | 647 | int sampling_us = SAMPLING_PERIOD(chip); |
| 648 | int buffer_us; | 648 | int buffer_us; |
| 649 | 649 | ||
| 650 | /* | 650 | /* |
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index e09233b03c05..609676384f5e 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c | |||
| @@ -64,7 +64,7 @@ | |||
| 64 | #define STM32H7_CKMODE_MASK GENMASK(17, 16) | 64 | #define STM32H7_CKMODE_MASK GENMASK(17, 16) |
| 65 | 65 | ||
| 66 | /* STM32 H7 maximum analog clock rate (from datasheet) */ | 66 | /* STM32 H7 maximum analog clock rate (from datasheet) */ |
| 67 | #define STM32H7_ADC_MAX_CLK_RATE 72000000 | 67 | #define STM32H7_ADC_MAX_CLK_RATE 36000000 |
| 68 | 68 | ||
| 69 | /** | 69 | /** |
| 70 | * stm32_adc_common_regs - stm32 common registers, compatible dependent data | 70 | * stm32_adc_common_regs - stm32 common registers, compatible dependent data |
| @@ -148,14 +148,14 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev, | |||
| 148 | return -EINVAL; | 148 | return -EINVAL; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | priv->common.rate = rate; | 151 | priv->common.rate = rate / stm32f4_pclk_div[i]; |
| 152 | val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR); | 152 | val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR); |
| 153 | val &= ~STM32F4_ADC_ADCPRE_MASK; | 153 | val &= ~STM32F4_ADC_ADCPRE_MASK; |
| 154 | val |= i << STM32F4_ADC_ADCPRE_SHIFT; | 154 | val |= i << STM32F4_ADC_ADCPRE_SHIFT; |
| 155 | writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR); | 155 | writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR); |
| 156 | 156 | ||
| 157 | dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n", | 157 | dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n", |
| 158 | rate / (stm32f4_pclk_div[i] * 1000)); | 158 | priv->common.rate / 1000); |
| 159 | 159 | ||
| 160 | return 0; | 160 | return 0; |
| 161 | } | 161 | } |
| @@ -250,7 +250,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev, | |||
| 250 | 250 | ||
| 251 | out: | 251 | out: |
| 252 | /* rate used later by each ADC instance to control BOOST mode */ | 252 | /* rate used later by each ADC instance to control BOOST mode */ |
| 253 | priv->common.rate = rate; | 253 | priv->common.rate = rate / div; |
| 254 | 254 | ||
| 255 | /* Set common clock mode and prescaler */ | 255 | /* Set common clock mode and prescaler */ |
| 256 | val = readl_relaxed(priv->common.base + STM32H7_ADC_CCR); | 256 | val = readl_relaxed(priv->common.base + STM32H7_ADC_CCR); |
| @@ -260,7 +260,7 @@ out: | |||
| 260 | writel_relaxed(val, priv->common.base + STM32H7_ADC_CCR); | 260 | writel_relaxed(val, priv->common.base + STM32H7_ADC_CCR); |
| 261 | 261 | ||
| 262 | dev_dbg(&pdev->dev, "Using %s clock/%d source at %ld kHz\n", | 262 | dev_dbg(&pdev->dev, "Using %s clock/%d source at %ld kHz\n", |
| 263 | ckmode ? "bus" : "adc", div, rate / (div * 1000)); | 263 | ckmode ? "bus" : "adc", div, priv->common.rate / 1000); |
| 264 | 264 | ||
| 265 | return 0; | 265 | return 0; |
| 266 | } | 266 | } |
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c index 81d4c39e414a..137f577d9432 100644 --- a/drivers/iio/adc/sun4i-gpadc-iio.c +++ b/drivers/iio/adc/sun4i-gpadc-iio.c | |||
| @@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val, | |||
| 256 | 256 | ||
| 257 | err: | 257 | err: |
| 258 | pm_runtime_put_autosuspend(indio_dev->dev.parent); | 258 | pm_runtime_put_autosuspend(indio_dev->dev.parent); |
| 259 | disable_irq(irq); | ||
| 259 | mutex_unlock(&info->mutex); | 260 | mutex_unlock(&info->mutex); |
| 260 | 261 | ||
| 261 | return ret; | 262 | return ret; |
| @@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id) | |||
| 365 | complete(&info->completion); | 366 | complete(&info->completion); |
| 366 | 367 | ||
| 367 | out: | 368 | out: |
| 368 | disable_irq_nosync(info->temp_data_irq); | ||
| 369 | return IRQ_HANDLED; | 369 | return IRQ_HANDLED; |
| 370 | } | 370 | } |
| 371 | 371 | ||
| @@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id) | |||
| 380 | complete(&info->completion); | 380 | complete(&info->completion); |
| 381 | 381 | ||
| 382 | out: | 382 | out: |
| 383 | disable_irq_nosync(info->fifo_data_irq); | ||
| 384 | return IRQ_HANDLED; | 383 | return IRQ_HANDLED; |
| 385 | } | 384 | } |
| 386 | 385 | ||
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 01fc76f7d660..c168e0db329a 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c | |||
| @@ -77,7 +77,7 @@ | |||
| 77 | #define VF610_ADC_ADSTS_MASK 0x300 | 77 | #define VF610_ADC_ADSTS_MASK 0x300 |
| 78 | #define VF610_ADC_ADLPC_EN 0x80 | 78 | #define VF610_ADC_ADLPC_EN 0x80 |
| 79 | #define VF610_ADC_ADHSC_EN 0x400 | 79 | #define VF610_ADC_ADHSC_EN 0x400 |
| 80 | #define VF610_ADC_REFSEL_VALT 0x100 | 80 | #define VF610_ADC_REFSEL_VALT 0x800 |
| 81 | #define VF610_ADC_REFSEL_VBG 0x1000 | 81 | #define VF610_ADC_REFSEL_VBG 0x1000 |
| 82 | #define VF610_ADC_ADTRG_HARD 0x2000 | 82 | #define VF610_ADC_ADTRG_HARD 0x2000 |
| 83 | #define VF610_ADC_AVGS_8 0x4000 | 83 | #define VF610_ADC_AVGS_8 0x4000 |
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index 16ade0a0327b..0e4b379ada45 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c | |||
| @@ -111,8 +111,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) | |||
| 111 | s32 poll_value = 0; | 111 | s32 poll_value = 0; |
| 112 | 112 | ||
| 113 | if (state) { | 113 | if (state) { |
| 114 | if (!atomic_read(&st->user_requested_state)) | ||
| 115 | return 0; | ||
| 116 | if (sensor_hub_device_open(st->hsdev)) | 114 | if (sensor_hub_device_open(st->hsdev)) |
| 117 | return -EIO; | 115 | return -EIO; |
| 118 | 116 | ||
| @@ -161,6 +159,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) | |||
| 161 | &report_val); | 159 | &report_val); |
| 162 | } | 160 | } |
| 163 | 161 | ||
| 162 | pr_debug("HID_SENSOR %s set power_state %d report_state %d\n", | ||
| 163 | st->pdev->name, state_val, report_val); | ||
| 164 | |||
| 164 | sensor_hub_get_feature(st->hsdev, st->power_state.report_id, | 165 | sensor_hub_get_feature(st->hsdev, st->power_state.report_id, |
| 165 | st->power_state.index, | 166 | st->power_state.index, |
| 166 | sizeof(state_val), &state_val); | 167 | sizeof(state_val), &state_val); |
| @@ -182,6 +183,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) | |||
| 182 | ret = pm_runtime_get_sync(&st->pdev->dev); | 183 | ret = pm_runtime_get_sync(&st->pdev->dev); |
| 183 | else { | 184 | else { |
| 184 | pm_runtime_mark_last_busy(&st->pdev->dev); | 185 | pm_runtime_mark_last_busy(&st->pdev->dev); |
| 186 | pm_runtime_use_autosuspend(&st->pdev->dev); | ||
| 185 | ret = pm_runtime_put_autosuspend(&st->pdev->dev); | 187 | ret = pm_runtime_put_autosuspend(&st->pdev->dev); |
| 186 | } | 188 | } |
| 187 | if (ret < 0) { | 189 | if (ret < 0) { |
| @@ -285,8 +287,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, | |||
| 285 | /* Default to 3 seconds, but can be changed from sysfs */ | 287 | /* Default to 3 seconds, but can be changed from sysfs */ |
| 286 | pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, | 288 | pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, |
| 287 | 3000); | 289 | 3000); |
| 288 | pm_runtime_use_autosuspend(&attrb->pdev->dev); | ||
| 289 | |||
| 290 | return ret; | 290 | return ret; |
| 291 | error_unreg_trigger: | 291 | error_unreg_trigger: |
| 292 | iio_trigger_unregister(trig); | 292 | iio_trigger_unregister(trig); |
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 79c8c7cd70d5..6e6a1ecc99dd 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c | |||
| @@ -550,6 +550,31 @@ out: | |||
| 550 | } | 550 | } |
| 551 | EXPORT_SYMBOL(st_sensors_read_info_raw); | 551 | EXPORT_SYMBOL(st_sensors_read_info_raw); |
| 552 | 552 | ||
| 553 | static int st_sensors_init_interface_mode(struct iio_dev *indio_dev, | ||
| 554 | const struct st_sensor_settings *sensor_settings) | ||
| 555 | { | ||
| 556 | struct st_sensor_data *sdata = iio_priv(indio_dev); | ||
| 557 | struct device_node *np = sdata->dev->of_node; | ||
| 558 | struct st_sensors_platform_data *pdata; | ||
| 559 | |||
| 560 | pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data; | ||
| 561 | if (((np && of_property_read_bool(np, "spi-3wire")) || | ||
| 562 | (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) { | ||
| 563 | int err; | ||
| 564 | |||
| 565 | err = sdata->tf->write_byte(&sdata->tb, sdata->dev, | ||
| 566 | sensor_settings->sim.addr, | ||
| 567 | sensor_settings->sim.value); | ||
| 568 | if (err < 0) { | ||
| 569 | dev_err(&indio_dev->dev, | ||
| 570 | "failed to init interface mode\n"); | ||
| 571 | return err; | ||
| 572 | } | ||
| 573 | } | ||
| 574 | |||
| 575 | return 0; | ||
| 576 | } | ||
| 577 | |||
| 553 | int st_sensors_check_device_support(struct iio_dev *indio_dev, | 578 | int st_sensors_check_device_support(struct iio_dev *indio_dev, |
| 554 | int num_sensors_list, | 579 | int num_sensors_list, |
| 555 | const struct st_sensor_settings *sensor_settings) | 580 | const struct st_sensor_settings *sensor_settings) |
| @@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev, | |||
| 574 | return -ENODEV; | 599 | return -ENODEV; |
| 575 | } | 600 | } |
| 576 | 601 | ||
| 602 | err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]); | ||
| 603 | if (err < 0) | ||
| 604 | return err; | ||
| 605 | |||
| 577 | if (sensor_settings[i].wai_addr) { | 606 | if (sensor_settings[i].wai_addr) { |
| 578 | err = sdata->tf->read_byte(&sdata->tb, sdata->dev, | 607 | err = sdata->tf->read_byte(&sdata->tb, sdata->dev, |
| 579 | sensor_settings[i].wai_addr, &wai); | 608 | sensor_settings[i].wai_addr, &wai); |
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index 8cf84d3488b2..12898424d838 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c | |||
| @@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { | |||
| 696 | .gyro_max_val = IIO_RAD_TO_DEGREE(22500), | 696 | .gyro_max_val = IIO_RAD_TO_DEGREE(22500), |
| 697 | .gyro_max_scale = 450, | 697 | .gyro_max_scale = 450, |
| 698 | .accel_max_val = IIO_M_S_2_TO_G(12500), | 698 | .accel_max_val = IIO_M_S_2_TO_G(12500), |
| 699 | .accel_max_scale = 5, | 699 | .accel_max_scale = 10, |
| 700 | }, | 700 | }, |
| 701 | [ADIS16485] = { | 701 | [ADIS16485] = { |
| 702 | .channels = adis16485_channels, | 702 | .channels = adis16485_channels, |
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c index e7d4ea75e007..7599693f7fe9 100644 --- a/drivers/iio/light/tsl2563.c +++ b/drivers/iio/light/tsl2563.c | |||
| @@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private) | |||
| 626 | struct tsl2563_chip *chip = iio_priv(dev_info); | 626 | struct tsl2563_chip *chip = iio_priv(dev_info); |
| 627 | 627 | ||
| 628 | iio_push_event(dev_info, | 628 | iio_push_event(dev_info, |
| 629 | IIO_UNMOD_EVENT_CODE(IIO_LIGHT, | 629 | IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, |
| 630 | 0, | 630 | 0, |
| 631 | IIO_EV_TYPE_THRESH, | 631 | IIO_EV_TYPE_THRESH, |
| 632 | IIO_EV_DIR_EITHER), | 632 | IIO_EV_DIR_EITHER), |
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 8e1b0861fbe4..c38563699984 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c | |||
| @@ -356,9 +356,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { | |||
| 356 | .drdy_irq = { | 356 | .drdy_irq = { |
| 357 | .addr = 0x62, | 357 | .addr = 0x62, |
| 358 | .mask_int1 = 0x01, | 358 | .mask_int1 = 0x01, |
| 359 | .addr_ihl = 0x63, | 359 | .addr_stat_drdy = 0x67, |
| 360 | .mask_ihl = 0x04, | ||
| 361 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | ||
| 362 | }, | 360 | }, |
| 363 | .multi_read_bit = false, | 361 | .multi_read_bit = false, |
| 364 | .bootime = 2, | 362 | .bootime = 2, |
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index d82b788374b6..0d2ea3ee371b 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c | |||
| @@ -282,6 +282,11 @@ static int bmp280_read_temp(struct bmp280_data *data, | |||
| 282 | } | 282 | } |
| 283 | 283 | ||
| 284 | adc_temp = be32_to_cpu(tmp) >> 12; | 284 | adc_temp = be32_to_cpu(tmp) >> 12; |
| 285 | if (adc_temp == BMP280_TEMP_SKIPPED) { | ||
| 286 | /* reading was skipped */ | ||
| 287 | dev_err(data->dev, "reading temperature skipped\n"); | ||
| 288 | return -EIO; | ||
| 289 | } | ||
| 285 | comp_temp = bmp280_compensate_temp(data, adc_temp); | 290 | comp_temp = bmp280_compensate_temp(data, adc_temp); |
| 286 | 291 | ||
| 287 | /* | 292 | /* |
| @@ -317,6 +322,11 @@ static int bmp280_read_press(struct bmp280_data *data, | |||
| 317 | } | 322 | } |
| 318 | 323 | ||
| 319 | adc_press = be32_to_cpu(tmp) >> 12; | 324 | adc_press = be32_to_cpu(tmp) >> 12; |
| 325 | if (adc_press == BMP280_PRESS_SKIPPED) { | ||
| 326 | /* reading was skipped */ | ||
| 327 | dev_err(data->dev, "reading pressure skipped\n"); | ||
| 328 | return -EIO; | ||
| 329 | } | ||
| 320 | comp_press = bmp280_compensate_press(data, adc_press); | 330 | comp_press = bmp280_compensate_press(data, adc_press); |
| 321 | 331 | ||
| 322 | *val = comp_press; | 332 | *val = comp_press; |
| @@ -345,6 +355,11 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2) | |||
| 345 | } | 355 | } |
| 346 | 356 | ||
| 347 | adc_humidity = be16_to_cpu(tmp); | 357 | adc_humidity = be16_to_cpu(tmp); |
| 358 | if (adc_humidity == BMP280_HUMIDITY_SKIPPED) { | ||
| 359 | /* reading was skipped */ | ||
| 360 | dev_err(data->dev, "reading humidity skipped\n"); | ||
| 361 | return -EIO; | ||
| 362 | } | ||
| 348 | comp_humidity = bmp280_compensate_humidity(data, adc_humidity); | 363 | comp_humidity = bmp280_compensate_humidity(data, adc_humidity); |
| 349 | 364 | ||
| 350 | *val = comp_humidity; | 365 | *val = comp_humidity; |
| @@ -597,14 +612,20 @@ static const struct bmp280_chip_info bmp280_chip_info = { | |||
| 597 | 612 | ||
| 598 | static int bme280_chip_config(struct bmp280_data *data) | 613 | static int bme280_chip_config(struct bmp280_data *data) |
| 599 | { | 614 | { |
| 600 | int ret = bmp280_chip_config(data); | 615 | int ret; |
| 601 | u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1); | 616 | u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1); |
| 602 | 617 | ||
| 618 | /* | ||
| 619 | * Oversampling of humidity must be set before oversampling of | ||
| 620 | * temperature/pressure is set to become effective. | ||
| 621 | */ | ||
| 622 | ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY, | ||
| 623 | BMP280_OSRS_HUMIDITY_MASK, osrs); | ||
| 624 | |||
| 603 | if (ret < 0) | 625 | if (ret < 0) |
| 604 | return ret; | 626 | return ret; |
| 605 | 627 | ||
| 606 | return regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY, | 628 | return bmp280_chip_config(data); |
| 607 | BMP280_OSRS_HUMIDITY_MASK, osrs); | ||
| 608 | } | 629 | } |
| 609 | 630 | ||
| 610 | static const struct bmp280_chip_info bme280_chip_info = { | 631 | static const struct bmp280_chip_info bme280_chip_info = { |
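
The bmp280-core.c hunks add two things: the humidity oversampling register is written before the temperature/pressure control register (it only becomes effective then), and raw readings equal to the "skipped" sentinels defined in bmp280.h are rejected with -EIO instead of being fed to the compensation code. A standalone model of the sentinel check for the 20-bit temperature word (the raw byte patterns here are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define BMP280_TEMP_SKIPPED 0x80000  /* measurement never ran */

    /* Models the fixed bmp280_read_temp(): the 20-bit ADC value lives in
     * the top bits of a 3-byte big-endian burst read; 0x80000 means the
     * conversion was skipped and must not be compensated. */
    static int decode_temp(const uint8_t raw[3], uint32_t *adc_temp)
    {
        uint32_t tmp = ((uint32_t)raw[0] << 24) |
                       ((uint32_t)raw[1] << 16) |
                       ((uint32_t)raw[2] << 8);

        *adc_temp = tmp >> 12;
        if (*adc_temp == BMP280_TEMP_SKIPPED)
            return -5;               /* -EIO in the driver */
        return 0;
    }

    int main(void)
    {
        uint8_t skipped[3] = { 0x80, 0x00, 0x00 };
        uint8_t normal[3]  = { 0x65, 0x4e, 0x00 };
        uint32_t adc;
        int ret;

        ret = decode_temp(skipped, &adc);
        printf("skipped sample: %d\n", ret);

        ret = decode_temp(normal, &adc);
        printf("normal sample:  %d (adc=0x%x)\n", ret, adc);
        return 0;
    }
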
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h index 2c770e13be0e..61347438b779 100644 --- a/drivers/iio/pressure/bmp280.h +++ b/drivers/iio/pressure/bmp280.h | |||
| @@ -96,6 +96,11 @@ | |||
| 96 | #define BME280_CHIP_ID 0x60 | 96 | #define BME280_CHIP_ID 0x60 |
| 97 | #define BMP280_SOFT_RESET_VAL 0xB6 | 97 | #define BMP280_SOFT_RESET_VAL 0xB6 |
| 98 | 98 | ||
| 99 | /* BMP280 register skipped special values */ | ||
| 100 | #define BMP280_TEMP_SKIPPED 0x80000 | ||
| 101 | #define BMP280_PRESS_SKIPPED 0x80000 | ||
| 102 | #define BMP280_HUMIDITY_SKIPPED 0x8000 | ||
| 103 | |||
| 99 | /* Regmap configurations */ | 104 | /* Regmap configurations */ |
| 100 | extern const struct regmap_config bmp180_regmap_config; | 105 | extern const struct regmap_config bmp180_regmap_config; |
| 101 | extern const struct regmap_config bmp280_regmap_config; | 106 | extern const struct regmap_config bmp280_regmap_config; |
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index aa61ec15c139..f1bce05ffa13 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c | |||
| @@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { | |||
| 456 | .mask_od = 0x40, | 456 | .mask_od = 0x40, |
| 457 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, | 457 | .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, |
| 458 | }, | 458 | }, |
| 459 | .multi_read_bit = true, | 459 | .multi_read_bit = false, |
| 460 | .bootime = 2, | 460 | .bootime = 2, |
| 461 | }, | 461 | }, |
| 462 | }; | 462 | }; |
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c index d22bc56dd9fc..25ad6abfee22 100644 --- a/drivers/iio/trigger/stm32-timer-trigger.c +++ b/drivers/iio/trigger/stm32-timer-trigger.c | |||
| @@ -366,34 +366,32 @@ static int stm32_counter_read_raw(struct iio_dev *indio_dev, | |||
| 366 | int *val, int *val2, long mask) | 366 | int *val, int *val2, long mask) |
| 367 | { | 367 | { |
| 368 | struct stm32_timer_trigger *priv = iio_priv(indio_dev); | 368 | struct stm32_timer_trigger *priv = iio_priv(indio_dev); |
| 369 | u32 dat; | ||
| 369 | 370 | ||
| 370 | switch (mask) { | 371 | switch (mask) { |
| 371 | case IIO_CHAN_INFO_RAW: | 372 | case IIO_CHAN_INFO_RAW: |
| 372 | { | 373 | regmap_read(priv->regmap, TIM_CNT, &dat); |
| 373 | u32 cnt; | 374 | *val = dat; |
| 374 | 375 | return IIO_VAL_INT; | |
| 375 | regmap_read(priv->regmap, TIM_CNT, &cnt); | ||
| 376 | *val = cnt; | ||
| 377 | 376 | ||
| 377 | case IIO_CHAN_INFO_ENABLE: | ||
| 378 | regmap_read(priv->regmap, TIM_CR1, &dat); | ||
| 379 | *val = (dat & TIM_CR1_CEN) ? 1 : 0; | ||
| 378 | return IIO_VAL_INT; | 380 | return IIO_VAL_INT; |
| 379 | } | ||
| 380 | case IIO_CHAN_INFO_SCALE: | ||
| 381 | { | ||
| 382 | u32 smcr; | ||
| 383 | 381 | ||
| 384 | regmap_read(priv->regmap, TIM_SMCR, &smcr); | 382 | case IIO_CHAN_INFO_SCALE: |
| 385 | smcr &= TIM_SMCR_SMS; | 383 | regmap_read(priv->regmap, TIM_SMCR, &dat); |
| 384 | dat &= TIM_SMCR_SMS; | ||
| 386 | 385 | ||
| 387 | *val = 1; | 386 | *val = 1; |
| 388 | *val2 = 0; | 387 | *val2 = 0; |
| 389 | 388 | ||
| 390 | /* in quadrature case scale = 0.25 */ | 389 | /* in quadrature case scale = 0.25 */ |
| 391 | if (smcr == 3) | 390 | if (dat == 3) |
| 392 | *val2 = 2; | 391 | *val2 = 2; |
| 393 | 392 | ||
| 394 | return IIO_VAL_FRACTIONAL_LOG2; | 393 | return IIO_VAL_FRACTIONAL_LOG2; |
| 395 | } | 394 | } |
| 396 | } | ||
| 397 | 395 | ||
| 398 | return -EINVAL; | 396 | return -EINVAL; |
| 399 | } | 397 | } |
| @@ -403,15 +401,31 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev, | |||
| 403 | int val, int val2, long mask) | 401 | int val, int val2, long mask) |
| 404 | { | 402 | { |
| 405 | struct stm32_timer_trigger *priv = iio_priv(indio_dev); | 403 | struct stm32_timer_trigger *priv = iio_priv(indio_dev); |
| 404 | u32 dat; | ||
| 406 | 405 | ||
| 407 | switch (mask) { | 406 | switch (mask) { |
| 408 | case IIO_CHAN_INFO_RAW: | 407 | case IIO_CHAN_INFO_RAW: |
| 409 | regmap_write(priv->regmap, TIM_CNT, val); | 408 | return regmap_write(priv->regmap, TIM_CNT, val); |
| 410 | 409 | ||
| 411 | return IIO_VAL_INT; | ||
| 412 | case IIO_CHAN_INFO_SCALE: | 410 | case IIO_CHAN_INFO_SCALE: |
| 413 | /* fixed scale */ | 411 | /* fixed scale */ |
| 414 | return -EINVAL; | 412 | return -EINVAL; |
| 413 | |||
| 414 | case IIO_CHAN_INFO_ENABLE: | ||
| 415 | if (val) { | ||
| 416 | regmap_read(priv->regmap, TIM_CR1, &dat); | ||
| 417 | if (!(dat & TIM_CR1_CEN)) | ||
| 418 | clk_enable(priv->clk); | ||
| 419 | regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, | ||
| 420 | TIM_CR1_CEN); | ||
| 421 | } else { | ||
| 422 | regmap_read(priv->regmap, TIM_CR1, &dat); | ||
| 423 | regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, | ||
| 424 | 0); | ||
| 425 | if (dat & TIM_CR1_CEN) | ||
| 426 | clk_disable(priv->clk); | ||
| 427 | } | ||
| 428 | return 0; | ||
| 415 | } | 429 | } |
| 416 | 430 | ||
| 417 | return -EINVAL; | 431 | return -EINVAL; |
| @@ -471,7 +485,7 @@ static int stm32_get_trigger_mode(struct iio_dev *indio_dev, | |||
| 471 | 485 | ||
| 472 | regmap_read(priv->regmap, TIM_SMCR, &smcr); | 486 | regmap_read(priv->regmap, TIM_SMCR, &smcr); |
| 473 | 487 | ||
| 474 | return smcr == TIM_SMCR_SMS ? 0 : -EINVAL; | 488 | return (smcr & TIM_SMCR_SMS) == TIM_SMCR_SMS ? 0 : -EINVAL; |
| 475 | } | 489 | } |
| 476 | 490 | ||
| 477 | static const struct iio_enum stm32_trigger_mode_enum = { | 491 | static const struct iio_enum stm32_trigger_mode_enum = { |
| @@ -507,9 +521,19 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev, | |||
| 507 | { | 521 | { |
| 508 | struct stm32_timer_trigger *priv = iio_priv(indio_dev); | 522 | struct stm32_timer_trigger *priv = iio_priv(indio_dev); |
| 509 | int sms = stm32_enable_mode2sms(mode); | 523 | int sms = stm32_enable_mode2sms(mode); |
| 524 | u32 val; | ||
| 510 | 525 | ||
| 511 | if (sms < 0) | 526 | if (sms < 0) |
| 512 | return sms; | 527 | return sms; |
| 528 | /* | ||
| 529 | * Triggered mode sets CEN bit automatically by hardware. So, first | ||
| 530 | * enable counter clock, so it can use it. Keeps it in sync with CEN. | ||
| 531 | */ | ||
| 532 | if (sms == 6) { | ||
| 533 | regmap_read(priv->regmap, TIM_CR1, &val); | ||
| 534 | if (!(val & TIM_CR1_CEN)) | ||
| 535 | clk_enable(priv->clk); | ||
| 536 | } | ||
| 513 | 537 | ||
| 514 | regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms); | 538 | regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms); |
| 515 | 539 | ||
| @@ -571,11 +595,14 @@ static int stm32_get_quadrature_mode(struct iio_dev *indio_dev, | |||
| 571 | { | 595 | { |
| 572 | struct stm32_timer_trigger *priv = iio_priv(indio_dev); | 596 | struct stm32_timer_trigger *priv = iio_priv(indio_dev); |
| 573 | u32 smcr; | 597 | u32 smcr; |
| 598 | int mode; | ||
| 574 | 599 | ||
| 575 | regmap_read(priv->regmap, TIM_SMCR, &smcr); | 600 | regmap_read(priv->regmap, TIM_SMCR, &smcr); |
| 576 | smcr &= TIM_SMCR_SMS; | 601 | mode = (smcr & TIM_SMCR_SMS) - 1; |
| 602 | if ((mode < 0) || (mode > ARRAY_SIZE(stm32_quadrature_modes))) | ||
| 603 | return -EINVAL; | ||
| 577 | 604 | ||
| 578 | return smcr - 1; | 605 | return mode; |
| 579 | } | 606 | } |
| 580 | 607 | ||
| 581 | static const struct iio_enum stm32_quadrature_mode_enum = { | 608 | static const struct iio_enum stm32_quadrature_mode_enum = { |
| @@ -592,13 +619,20 @@ static const char *const stm32_count_direction_states[] = { | |||
| 592 | 619 | ||
| 593 | static int stm32_set_count_direction(struct iio_dev *indio_dev, | 620 | static int stm32_set_count_direction(struct iio_dev *indio_dev, |
| 594 | const struct iio_chan_spec *chan, | 621 | const struct iio_chan_spec *chan, |
| 595 | unsigned int mode) | 622 | unsigned int dir) |
| 596 | { | 623 | { |
| 597 | struct stm32_timer_trigger *priv = iio_priv(indio_dev); | 624 | struct stm32_timer_trigger *priv = iio_priv(indio_dev); |
| 625 | u32 val; | ||
| 626 | int mode; | ||
| 598 | 627 | ||
| 599 | regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR, mode); | 628 | /* In encoder mode, direction is RO (given by TI1/TI2 signals) */ |
| 629 | regmap_read(priv->regmap, TIM_SMCR, &val); | ||
| 630 | mode = (val & TIM_SMCR_SMS) - 1; | ||
| 631 | if ((mode >= 0) && (mode < ARRAY_SIZE(stm32_quadrature_modes))) | ||
| 632 | return -EBUSY; | ||
| 600 | 633 | ||
| 601 | return 0; | 634 | return regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR, |
| 635 | dir ? TIM_CR1_DIR : 0); | ||
| 602 | } | 636 | } |
| 603 | 637 | ||
| 604 | static int stm32_get_count_direction(struct iio_dev *indio_dev, | 638 | static int stm32_get_count_direction(struct iio_dev *indio_dev, |
| @@ -609,7 +643,7 @@ static int stm32_get_count_direction(struct iio_dev *indio_dev, | |||
| 609 | 643 | ||
| 610 | regmap_read(priv->regmap, TIM_CR1, &cr1); | 644 | regmap_read(priv->regmap, TIM_CR1, &cr1); |
| 611 | 645 | ||
| 612 | return (cr1 & TIM_CR1_DIR); | 646 | return ((cr1 & TIM_CR1_DIR) ? 1 : 0); |
| 613 | } | 647 | } |
| 614 | 648 | ||
| 615 | static const struct iio_enum stm32_count_direction_enum = { | 649 | static const struct iio_enum stm32_count_direction_enum = { |
| @@ -672,7 +706,9 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = { | |||
| 672 | static const struct iio_chan_spec stm32_trigger_channel = { | 706 | static const struct iio_chan_spec stm32_trigger_channel = { |
| 673 | .type = IIO_COUNT, | 707 | .type = IIO_COUNT, |
| 674 | .channel = 0, | 708 | .channel = 0, |
| 675 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), | 709 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | |
| 710 | BIT(IIO_CHAN_INFO_ENABLE) | | ||
| 711 | BIT(IIO_CHAN_INFO_SCALE), | ||
| 676 | .ext_info = stm32_trigger_count_info, | 712 | .ext_info = stm32_trigger_count_info, |
| 677 | .indexed = 1 | 713 | .indexed = 1 |
| 678 | }; | 714 | }; |
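The IIO_CHAN_INFO_ENABLE bit added to the count channel above lets user space start and stop the counter, with the timer clock kept running only while CEN is set. A minimal user-space sketch of that flow follows; the iio:device0 path and the in_count0_* attribute names are assumptions based on the usual IIO sysfs naming for an indexed IIO_COUNT channel, not something stated in this patch.

/* Sketch only: start the counter, read it, stop it (paths are assumed). */
#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	char buf[32];
	FILE *f;

	/* IIO_CHAN_INFO_ENABLE: enables the clock, then sets TIM_CR1.CEN */
	write_attr("/sys/bus/iio/devices/iio:device0/in_count0_en", "1");

	/* IIO_CHAN_INFO_RAW: reads (or writes) TIM_CNT */
	f = fopen("/sys/bus/iio/devices/iio:device0/in_count0_raw", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("count: %s", buf);
		fclose(f);
	}

	/* Clears CEN, then gates the clock again */
	write_attr("/sys/bus/iio/devices/iio:device0/in_count0_en", "0");
	return 0;
}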
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 01236cef7bfb..437522ca97b4 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
| @@ -61,6 +61,7 @@ struct addr_req { | |||
| 61 | void (*callback)(int status, struct sockaddr *src_addr, | 61 | void (*callback)(int status, struct sockaddr *src_addr, |
| 62 | struct rdma_dev_addr *addr, void *context); | 62 | struct rdma_dev_addr *addr, void *context); |
| 63 | unsigned long timeout; | 63 | unsigned long timeout; |
| 64 | struct delayed_work work; | ||
| 64 | int status; | 65 | int status; |
| 65 | u32 seq; | 66 | u32 seq; |
| 66 | }; | 67 | }; |
| @@ -295,7 +296,7 @@ int rdma_translate_ip(const struct sockaddr *addr, | |||
| 295 | } | 296 | } |
| 296 | EXPORT_SYMBOL(rdma_translate_ip); | 297 | EXPORT_SYMBOL(rdma_translate_ip); |
| 297 | 298 | ||
| 298 | static void set_timeout(unsigned long time) | 299 | static void set_timeout(struct delayed_work *delayed_work, unsigned long time) |
| 299 | { | 300 | { |
| 300 | unsigned long delay; | 301 | unsigned long delay; |
| 301 | 302 | ||
| @@ -303,7 +304,7 @@ static void set_timeout(unsigned long time) | |||
| 303 | if ((long)delay < 0) | 304 | if ((long)delay < 0) |
| 304 | delay = 0; | 305 | delay = 0; |
| 305 | 306 | ||
| 306 | mod_delayed_work(addr_wq, &work, delay); | 307 | mod_delayed_work(addr_wq, delayed_work, delay); |
| 307 | } | 308 | } |
| 308 | 309 | ||
| 309 | static void queue_req(struct addr_req *req) | 310 | static void queue_req(struct addr_req *req) |
| @@ -318,8 +319,7 @@ static void queue_req(struct addr_req *req) | |||
| 318 | 319 | ||
| 319 | list_add(&req->list, &temp_req->list); | 320 | list_add(&req->list, &temp_req->list); |
| 320 | 321 | ||
| 321 | if (req_list.next == &req->list) | 322 | set_timeout(&req->work, req->timeout); |
| 322 | set_timeout(req->timeout); | ||
| 323 | mutex_unlock(&lock); | 323 | mutex_unlock(&lock); |
| 324 | } | 324 | } |
| 325 | 325 | ||
| @@ -574,6 +574,37 @@ static int addr_resolve(struct sockaddr *src_in, | |||
| 574 | return ret; | 574 | return ret; |
| 575 | } | 575 | } |
| 576 | 576 | ||
| 577 | static void process_one_req(struct work_struct *_work) | ||
| 578 | { | ||
| 579 | struct addr_req *req; | ||
| 580 | struct sockaddr *src_in, *dst_in; | ||
| 581 | |||
| 582 | mutex_lock(&lock); | ||
| 583 | req = container_of(_work, struct addr_req, work.work); | ||
| 584 | |||
| 585 | if (req->status == -ENODATA) { | ||
| 586 | src_in = (struct sockaddr *)&req->src_addr; | ||
| 587 | dst_in = (struct sockaddr *)&req->dst_addr; | ||
| 588 | req->status = addr_resolve(src_in, dst_in, req->addr, | ||
| 589 | true, req->seq); | ||
| 590 | if (req->status && time_after_eq(jiffies, req->timeout)) { | ||
| 591 | req->status = -ETIMEDOUT; | ||
| 592 | } else if (req->status == -ENODATA) { | ||
| 593 | /* requeue the work for retrying again */ | ||
| 594 | set_timeout(&req->work, req->timeout); | ||
| 595 | mutex_unlock(&lock); | ||
| 596 | return; | ||
| 597 | } | ||
| 598 | } | ||
| 599 | list_del(&req->list); | ||
| 600 | mutex_unlock(&lock); | ||
| 601 | |||
| 602 | req->callback(req->status, (struct sockaddr *)&req->src_addr, | ||
| 603 | req->addr, req->context); | ||
| 604 | put_client(req->client); | ||
| 605 | kfree(req); | ||
| 606 | } | ||
| 607 | |||
| 577 | static void process_req(struct work_struct *work) | 608 | static void process_req(struct work_struct *work) |
| 578 | { | 609 | { |
| 579 | struct addr_req *req, *temp_req; | 610 | struct addr_req *req, *temp_req; |
| @@ -591,20 +622,23 @@ static void process_req(struct work_struct *work) | |||
| 591 | true, req->seq); | 622 | true, req->seq); |
| 592 | if (req->status && time_after_eq(jiffies, req->timeout)) | 623 | if (req->status && time_after_eq(jiffies, req->timeout)) |
| 593 | req->status = -ETIMEDOUT; | 624 | req->status = -ETIMEDOUT; |
| 594 | else if (req->status == -ENODATA) | 625 | else if (req->status == -ENODATA) { |
| 626 | set_timeout(&req->work, req->timeout); | ||
| 595 | continue; | 627 | continue; |
| 628 | } | ||
| 596 | } | 629 | } |
| 597 | list_move_tail(&req->list, &done_list); | 630 | list_move_tail(&req->list, &done_list); |
| 598 | } | 631 | } |
| 599 | 632 | ||
| 600 | if (!list_empty(&req_list)) { | ||
| 601 | req = list_entry(req_list.next, struct addr_req, list); | ||
| 602 | set_timeout(req->timeout); | ||
| 603 | } | ||
| 604 | mutex_unlock(&lock); | 633 | mutex_unlock(&lock); |
| 605 | 634 | ||
| 606 | list_for_each_entry_safe(req, temp_req, &done_list, list) { | 635 | list_for_each_entry_safe(req, temp_req, &done_list, list) { |
| 607 | list_del(&req->list); | 636 | list_del(&req->list); |
| 637 | /* It is safe to cancel other work items from this work item | ||
| 638 | * because at a time there can be only one work item running | ||
| 639 | * with this single threaded work queue. | ||
| 640 | */ | ||
| 641 | cancel_delayed_work(&req->work); | ||
| 608 | req->callback(req->status, (struct sockaddr *) &req->src_addr, | 642 | req->callback(req->status, (struct sockaddr *) &req->src_addr, |
| 609 | req->addr, req->context); | 643 | req->addr, req->context); |
| 610 | put_client(req->client); | 644 | put_client(req->client); |
| @@ -647,6 +681,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client, | |||
| 647 | req->context = context; | 681 | req->context = context; |
| 648 | req->client = client; | 682 | req->client = client; |
| 649 | atomic_inc(&client->refcount); | 683 | atomic_inc(&client->refcount); |
| 684 | INIT_DELAYED_WORK(&req->work, process_one_req); | ||
| 650 | req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); | 685 | req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq); |
| 651 | 686 | ||
| 652 | req->status = addr_resolve(src_in, dst_in, addr, true, req->seq); | 687 | req->status = addr_resolve(src_in, dst_in, addr, true, req->seq); |
| @@ -701,7 +736,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr) | |||
| 701 | req->status = -ECANCELED; | 736 | req->status = -ECANCELED; |
| 702 | req->timeout = jiffies; | 737 | req->timeout = jiffies; |
| 703 | list_move(&req->list, &req_list); | 738 | list_move(&req->list, &req_list); |
| 704 | set_timeout(req->timeout); | 739 | set_timeout(&req->work, req->timeout); |
| 705 | break; | 740 | break; |
| 706 | } | 741 | } |
| 707 | } | 742 | } |
| @@ -807,9 +842,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event, | |||
| 807 | if (event == NETEVENT_NEIGH_UPDATE) { | 842 | if (event == NETEVENT_NEIGH_UPDATE) { |
| 808 | struct neighbour *neigh = ctx; | 843 | struct neighbour *neigh = ctx; |
| 809 | 844 | ||
| 810 | if (neigh->nud_state & NUD_VALID) { | 845 | if (neigh->nud_state & NUD_VALID) |
| 811 | set_timeout(jiffies); | 846 | set_timeout(&work, jiffies); |
| 812 | } | ||
| 813 | } | 847 | } |
| 814 | return 0; | 848 | return 0; |
| 815 | } | 849 | } |
| @@ -820,7 +854,7 @@ static struct notifier_block nb = { | |||
| 820 | 854 | ||
| 821 | int addr_init(void) | 855 | int addr_init(void) |
| 822 | { | 856 | { |
| 823 | addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0); | 857 | addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM); |
| 824 | if (!addr_wq) | 858 | if (!addr_wq) |
| 825 | return -ENOMEM; | 859 | return -ENOMEM; |
| 826 | 860 | ||
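The addr.c rework above drops the single shared delayed work item that rescanned the whole request list and instead embeds a delayed_work in each request, queued on an ordered workqueue so only one handler runs at a time and each request re-arms only itself. A minimal sketch of that per-request delayed-work pattern is below; the my_* names are hypothetical, and the list/mutex bookkeeping the real code still keeps around process_req() is omitted.

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_req {
	struct delayed_work dwork;
	unsigned long deadline;		/* absolute jiffies limit */
};

static struct workqueue_struct *my_wq;

static void my_handler(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, dwork.work);

	if (time_after_eq(jiffies, req->deadline)) {
		kfree(req);		/* give up: deadline reached */
		return;
	}
	/* Not done yet: re-arm this request only; others are untouched. */
	mod_delayed_work(my_wq, &req->dwork, msecs_to_jiffies(100));
}

static int my_submit(unsigned int timeout_ms)
{
	struct my_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;
	req->deadline = jiffies + msecs_to_jiffies(timeout_ms);
	INIT_DELAYED_WORK(&req->dwork, my_handler);
	queue_delayed_work(my_wq, &req->dwork, 0);
	return 0;
}

static int my_init(void)
{
	/* Ordered queue: at most one work item runs, as with "ib_addr" now. */
	my_wq = alloc_ordered_workqueue("my_wq", WQ_MEM_RECLAIM);
	return my_wq ? 0 : -ENOMEM;
}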
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index a5dfab6adf49..221468f77128 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
| @@ -537,10 +537,11 @@ void ib_unregister_device(struct ib_device *device) | |||
| 537 | } | 537 | } |
| 538 | up_read(&lists_rwsem); | 538 | up_read(&lists_rwsem); |
| 539 | 539 | ||
| 540 | mutex_unlock(&device_mutex); | ||
| 541 | |||
| 542 | ib_device_unregister_rdmacg(device); | 540 | ib_device_unregister_rdmacg(device); |
| 543 | ib_device_unregister_sysfs(device); | 541 | ib_device_unregister_sysfs(device); |
| 542 | |||
| 543 | mutex_unlock(&device_mutex); | ||
| 544 | |||
| 544 | ib_cache_cleanup_one(device); | 545 | ib_cache_cleanup_one(device); |
| 545 | 546 | ||
| 546 | ib_security_destroy_port_pkey_list(device); | 547 | ib_security_destroy_port_pkey_list(device); |
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 8c4ec564e495..55e8f5ed8b3c 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c | |||
| @@ -166,24 +166,6 @@ static int invalidate_page_trampoline(struct ib_umem *item, u64 start, | |||
| 166 | return 0; | 166 | return 0; |
| 167 | } | 167 | } |
| 168 | 168 | ||
| 169 | static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn, | ||
| 170 | struct mm_struct *mm, | ||
| 171 | unsigned long address) | ||
| 172 | { | ||
| 173 | struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn); | ||
| 174 | |||
| 175 | if (!context->invalidate_range) | ||
| 176 | return; | ||
| 177 | |||
| 178 | ib_ucontext_notifier_start_account(context); | ||
| 179 | down_read(&context->umem_rwsem); | ||
| 180 | rbt_ib_umem_for_each_in_range(&context->umem_tree, address, | ||
| 181 | address + PAGE_SIZE, | ||
| 182 | invalidate_page_trampoline, NULL); | ||
| 183 | up_read(&context->umem_rwsem); | ||
| 184 | ib_ucontext_notifier_end_account(context); | ||
| 185 | } | ||
| 186 | |||
| 187 | static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start, | 169 | static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start, |
| 188 | u64 end, void *cookie) | 170 | u64 end, void *cookie) |
| 189 | { | 171 | { |
| @@ -237,7 +219,6 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn, | |||
| 237 | 219 | ||
| 238 | static const struct mmu_notifier_ops ib_umem_notifiers = { | 220 | static const struct mmu_notifier_ops ib_umem_notifiers = { |
| 239 | .release = ib_umem_notifier_release, | 221 | .release = ib_umem_notifier_release, |
| 240 | .invalidate_page = ib_umem_notifier_invalidate_page, | ||
| 241 | .invalidate_range_start = ib_umem_notifier_invalidate_range_start, | 222 | .invalidate_range_start = ib_umem_notifier_invalidate_range_start, |
| 242 | .invalidate_range_end = ib_umem_notifier_invalidate_range_end, | 223 | .invalidate_range_end = ib_umem_notifier_invalidate_range_end, |
| 243 | }; | 224 | }; |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 2c98533a0203..739bd69ef1d4 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
| @@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, | |||
| 1015 | cq->uobject = &obj->uobject; | 1015 | cq->uobject = &obj->uobject; |
| 1016 | cq->comp_handler = ib_uverbs_comp_handler; | 1016 | cq->comp_handler = ib_uverbs_comp_handler; |
| 1017 | cq->event_handler = ib_uverbs_cq_event_handler; | 1017 | cq->event_handler = ib_uverbs_cq_event_handler; |
| 1018 | cq->cq_context = &ev_file->ev_queue; | 1018 | cq->cq_context = ev_file ? &ev_file->ev_queue : NULL; |
| 1019 | atomic_set(&cq->usecnt, 0); | 1019 | atomic_set(&cq->usecnt, 0); |
| 1020 | 1020 | ||
| 1021 | obj->uobject.object = cq; | 1021 | obj->uobject.object = cq; |
| @@ -1153,7 +1153,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, | |||
| 1153 | int out_len) | 1153 | int out_len) |
| 1154 | { | 1154 | { |
| 1155 | struct ib_uverbs_resize_cq cmd; | 1155 | struct ib_uverbs_resize_cq cmd; |
| 1156 | struct ib_uverbs_resize_cq_resp resp; | 1156 | struct ib_uverbs_resize_cq_resp resp = {}; |
| 1157 | struct ib_udata udata; | 1157 | struct ib_udata udata; |
| 1158 | struct ib_cq *cq; | 1158 | struct ib_cq *cq; |
| 1159 | int ret = -EINVAL; | 1159 | int ret = -EINVAL; |
| @@ -1522,6 +1522,7 @@ static int create_qp(struct ib_uverbs_file *file, | |||
| 1522 | qp->qp_type = attr.qp_type; | 1522 | qp->qp_type = attr.qp_type; |
| 1523 | atomic_set(&qp->usecnt, 0); | 1523 | atomic_set(&qp->usecnt, 0); |
| 1524 | atomic_inc(&pd->usecnt); | 1524 | atomic_inc(&pd->usecnt); |
| 1525 | qp->port = 0; | ||
| 1525 | if (attr.send_cq) | 1526 | if (attr.send_cq) |
| 1526 | atomic_inc(&attr.send_cq->usecnt); | 1527 | atomic_inc(&attr.send_cq->usecnt); |
| 1527 | if (attr.recv_cq) | 1528 | if (attr.recv_cq) |
| @@ -1962,8 +1963,9 @@ static int modify_qp(struct ib_uverbs_file *file, | |||
| 1962 | attr->alt_timeout = cmd->base.alt_timeout; | 1963 | attr->alt_timeout = cmd->base.alt_timeout; |
| 1963 | attr->rate_limit = cmd->rate_limit; | 1964 | attr->rate_limit = cmd->rate_limit; |
| 1964 | 1965 | ||
| 1965 | attr->ah_attr.type = rdma_ah_find_type(qp->device, | 1966 | if (cmd->base.attr_mask & IB_QP_AV) |
| 1966 | cmd->base.dest.port_num); | 1967 | attr->ah_attr.type = rdma_ah_find_type(qp->device, |
| 1968 | cmd->base.dest.port_num); | ||
| 1967 | if (cmd->base.dest.is_global) { | 1969 | if (cmd->base.dest.is_global) { |
| 1968 | rdma_ah_set_grh(&attr->ah_attr, NULL, | 1970 | rdma_ah_set_grh(&attr->ah_attr, NULL, |
| 1969 | cmd->base.dest.flow_label, | 1971 | cmd->base.dest.flow_label, |
| @@ -1981,8 +1983,9 @@ static int modify_qp(struct ib_uverbs_file *file, | |||
| 1981 | rdma_ah_set_port_num(&attr->ah_attr, | 1983 | rdma_ah_set_port_num(&attr->ah_attr, |
| 1982 | cmd->base.dest.port_num); | 1984 | cmd->base.dest.port_num); |
| 1983 | 1985 | ||
| 1984 | attr->alt_ah_attr.type = rdma_ah_find_type(qp->device, | 1986 | if (cmd->base.attr_mask & IB_QP_ALT_PATH) |
| 1985 | cmd->base.dest.port_num); | 1987 | attr->alt_ah_attr.type = |
| 1988 | rdma_ah_find_type(qp->device, cmd->base.dest.port_num); | ||
| 1986 | if (cmd->base.alt_dest.is_global) { | 1989 | if (cmd->base.alt_dest.is_global) { |
| 1987 | rdma_ah_set_grh(&attr->alt_ah_attr, NULL, | 1990 | rdma_ah_set_grh(&attr->alt_ah_attr, NULL, |
| 1988 | cmd->base.alt_dest.flow_label, | 1991 | cmd->base.alt_dest.flow_label, |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 3d2609608f58..5e530d2bee44 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
| @@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref) | |||
| 250 | if (atomic_dec_and_test(&file->device->refcount)) | 250 | if (atomic_dec_and_test(&file->device->refcount)) |
| 251 | ib_uverbs_comp_dev(file->device); | 251 | ib_uverbs_comp_dev(file->device); |
| 252 | 252 | ||
| 253 | kobject_put(&file->device->kobj); | ||
| 253 | kfree(file); | 254 | kfree(file); |
| 254 | } | 255 | } |
| 255 | 256 | ||
| @@ -917,7 +918,6 @@ err: | |||
| 917 | static int ib_uverbs_close(struct inode *inode, struct file *filp) | 918 | static int ib_uverbs_close(struct inode *inode, struct file *filp) |
| 918 | { | 919 | { |
| 919 | struct ib_uverbs_file *file = filp->private_data; | 920 | struct ib_uverbs_file *file = filp->private_data; |
| 920 | struct ib_uverbs_device *dev = file->device; | ||
| 921 | 921 | ||
| 922 | mutex_lock(&file->cleanup_mutex); | 922 | mutex_lock(&file->cleanup_mutex); |
| 923 | if (file->ucontext) { | 923 | if (file->ucontext) { |
| @@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) | |||
| 939 | ib_uverbs_release_async_event_file); | 939 | ib_uverbs_release_async_event_file); |
| 940 | 940 | ||
| 941 | kref_put(&file->ref, ib_uverbs_release_file); | 941 | kref_put(&file->ref, ib_uverbs_release_file); |
| 942 | kobject_put(&dev->kobj); | ||
| 943 | 942 | ||
| 944 | return 0; | 943 | return 0; |
| 945 | } | 944 | } |
| @@ -1154,7 +1153,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, | |||
| 1154 | kref_get(&file->ref); | 1153 | kref_get(&file->ref); |
| 1155 | mutex_unlock(&uverbs_dev->lists_mutex); | 1154 | mutex_unlock(&uverbs_dev->lists_mutex); |
| 1156 | 1155 | ||
| 1157 | ib_uverbs_event_handler(&file->event_handler, &event); | ||
| 1158 | 1156 | ||
| 1159 | mutex_lock(&file->cleanup_mutex); | 1157 | mutex_lock(&file->cleanup_mutex); |
| 1160 | ucontext = file->ucontext; | 1158 | ucontext = file->ucontext; |
| @@ -1171,6 +1169,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev, | |||
| 1171 | * for example due to freeing the resources | 1169 | * for example due to freeing the resources |
| 1172 | * (e.g mmput). | 1170 | * (e.g mmput). |
| 1173 | */ | 1171 | */ |
| 1172 | ib_uverbs_event_handler(&file->event_handler, &event); | ||
| 1174 | ib_dev->disassociate_ucontext(ucontext); | 1173 | ib_dev->disassociate_ucontext(ucontext); |
| 1175 | mutex_lock(&file->cleanup_mutex); | 1174 | mutex_lock(&file->cleanup_mutex); |
| 1176 | ib_uverbs_cleanup_ucontext(file, ucontext, true); | 1175 | ib_uverbs_cleanup_ucontext(file, ucontext, true); |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index fb98ed67d5bc..b456e3ca1876 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
| @@ -838,6 +838,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, | |||
| 838 | spin_lock_init(&qp->mr_lock); | 838 | spin_lock_init(&qp->mr_lock); |
| 839 | INIT_LIST_HEAD(&qp->rdma_mrs); | 839 | INIT_LIST_HEAD(&qp->rdma_mrs); |
| 840 | INIT_LIST_HEAD(&qp->sig_mrs); | 840 | INIT_LIST_HEAD(&qp->sig_mrs); |
| 841 | qp->port = 0; | ||
| 841 | 842 | ||
| 842 | if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) | 843 | if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) |
| 843 | return ib_create_xrc_qp(qp, qp_init_attr); | 844 | return ib_create_xrc_qp(qp, qp_init_attr); |
| @@ -895,7 +896,6 @@ static const struct { | |||
| 895 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { | 896 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { |
| 896 | [IB_QPS_RESET] = { | 897 | [IB_QPS_RESET] = { |
| 897 | [IB_QPS_RESET] = { .valid = 1 }, | 898 | [IB_QPS_RESET] = { .valid = 1 }, |
| 898 | [IB_QPS_ERR] = { .valid = 1 }, | ||
| 899 | [IB_QPS_INIT] = { | 899 | [IB_QPS_INIT] = { |
| 900 | .valid = 1, | 900 | .valid = 1, |
| 901 | .req_param = { | 901 | .req_param = { |
| @@ -1298,7 +1298,11 @@ int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr, | |||
| 1298 | if (ret) | 1298 | if (ret) |
| 1299 | return ret; | 1299 | return ret; |
| 1300 | } | 1300 | } |
| 1301 | return ib_security_modify_qp(qp, attr, attr_mask, udata); | 1301 | ret = ib_security_modify_qp(qp, attr, attr_mask, udata); |
| 1302 | if (!ret && (attr_mask & IB_QP_PORT)) | ||
| 1303 | qp->port = attr->port_num; | ||
| 1304 | |||
| 1305 | return ret; | ||
| 1302 | } | 1306 | } |
| 1303 | EXPORT_SYMBOL(ib_modify_qp_with_udata); | 1307 | EXPORT_SYMBOL(ib_modify_qp_with_udata); |
| 1304 | 1308 | ||
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 5332f06b99ba..c2fba76becd4 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
| @@ -661,7 +661,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, | |||
| 661 | rhp = php->rhp; | 661 | rhp = php->rhp; |
| 662 | 662 | ||
| 663 | if (mr_type != IB_MR_TYPE_MEM_REG || | 663 | if (mr_type != IB_MR_TYPE_MEM_REG || |
| 664 | max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl && | 664 | max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl && |
| 665 | use_dsgl)) | 665 | use_dsgl)) |
| 666 | return ERR_PTR(-EINVAL); | 666 | return ERR_PTR(-EINVAL); |
| 667 | 667 | ||
diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index ccbf52c8ff6f..e4b56a0dd6d0 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c | |||
| @@ -67,8 +67,6 @@ struct mmu_rb_handler { | |||
| 67 | 67 | ||
| 68 | static unsigned long mmu_node_start(struct mmu_rb_node *); | 68 | static unsigned long mmu_node_start(struct mmu_rb_node *); |
| 69 | static unsigned long mmu_node_last(struct mmu_rb_node *); | 69 | static unsigned long mmu_node_last(struct mmu_rb_node *); |
| 70 | static inline void mmu_notifier_page(struct mmu_notifier *, struct mm_struct *, | ||
| 71 | unsigned long); | ||
| 72 | static inline void mmu_notifier_range_start(struct mmu_notifier *, | 70 | static inline void mmu_notifier_range_start(struct mmu_notifier *, |
| 73 | struct mm_struct *, | 71 | struct mm_struct *, |
| 74 | unsigned long, unsigned long); | 72 | unsigned long, unsigned long); |
| @@ -82,7 +80,6 @@ static void do_remove(struct mmu_rb_handler *handler, | |||
| 82 | static void handle_remove(struct work_struct *work); | 80 | static void handle_remove(struct work_struct *work); |
| 83 | 81 | ||
| 84 | static const struct mmu_notifier_ops mn_opts = { | 82 | static const struct mmu_notifier_ops mn_opts = { |
| 85 | .invalidate_page = mmu_notifier_page, | ||
| 86 | .invalidate_range_start = mmu_notifier_range_start, | 83 | .invalidate_range_start = mmu_notifier_range_start, |
| 87 | }; | 84 | }; |
| 88 | 85 | ||
| @@ -285,12 +282,6 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler, | |||
| 285 | handler->ops->remove(handler->ops_arg, node); | 282 | handler->ops->remove(handler->ops_arg, node); |
| 286 | } | 283 | } |
| 287 | 284 | ||
| 288 | static inline void mmu_notifier_page(struct mmu_notifier *mn, | ||
| 289 | struct mm_struct *mm, unsigned long addr) | ||
| 290 | { | ||
| 291 | mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE); | ||
| 292 | } | ||
| 293 | |||
| 294 | static inline void mmu_notifier_range_start(struct mmu_notifier *mn, | 285 | static inline void mmu_notifier_range_start(struct mmu_notifier *mn, |
| 295 | struct mm_struct *mm, | 286 | struct mm_struct *mm, |
| 296 | unsigned long start, | 287 | unsigned long start, |
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index f78a733a63ec..d545302b8ef8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c | |||
| @@ -64,8 +64,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, | |||
| 64 | } else { | 64 | } else { |
| 65 | u8 *dmac = rdma_ah_retrieve_dmac(ah_attr); | 65 | u8 *dmac = rdma_ah_retrieve_dmac(ah_attr); |
| 66 | 66 | ||
| 67 | if (!dmac) | 67 | if (!dmac) { |
| 68 | kfree(ah); | ||
| 68 | return ERR_PTR(-EINVAL); | 69 | return ERR_PTR(-EINVAL); |
| 70 | } | ||
| 69 | memcpy(ah->av.mac, dmac, ETH_ALEN); | 71 | memcpy(ah->av.mac, dmac, ETH_ALEN); |
| 70 | } | 72 | } |
| 71 | 73 | ||
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 23fad6d96944..2540b65e242c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c | |||
| @@ -733,7 +733,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) | |||
| 733 | continue; | 733 | continue; |
| 734 | 734 | ||
| 735 | free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); | 735 | free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); |
| 736 | if (IS_ERR(free_mr->mr_free_qp[i])) { | 736 | if (!free_mr->mr_free_qp[i]) { |
| 737 | dev_err(dev, "Create loop qp failed!\n"); | 737 | dev_err(dev, "Create loop qp failed!\n"); |
| 738 | goto create_lp_qp_failed; | 738 | goto create_lp_qp_failed; |
| 739 | } | 739 | } |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 9ec1ae9a82c9..a49ff2eb6fb3 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c | |||
| @@ -130,20 +130,32 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf( | |||
| 130 | u64 base = 0; | 130 | u64 base = 0; |
| 131 | u32 i, j; | 131 | u32 i, j; |
| 132 | u32 k = 0; | 132 | u32 k = 0; |
| 133 | u32 low; | ||
| 134 | 133 | ||
| 135 | /* copy base values in obj_info */ | 134 | /* copy base values in obj_info */ |
| 136 | for (i = I40IW_HMC_IW_QP, j = 0; | 135 | for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) { |
| 137 | i <= I40IW_HMC_IW_PBLE; i++, j += 8) { | 136 | if ((i == I40IW_HMC_IW_SRQ) || |
| 137 | (i == I40IW_HMC_IW_FSIMC) || | ||
| 138 | (i == I40IW_HMC_IW_FSIAV)) { | ||
| 139 | info[i].base = 0; | ||
| 140 | info[i].cnt = 0; | ||
| 141 | continue; | ||
| 142 | } | ||
| 138 | get_64bit_val(buf, j, &temp); | 143 | get_64bit_val(buf, j, &temp); |
| 139 | info[i].base = RS_64_1(temp, 32) * 512; | 144 | info[i].base = RS_64_1(temp, 32) * 512; |
| 140 | if (info[i].base > base) { | 145 | if (info[i].base > base) { |
| 141 | base = info[i].base; | 146 | base = info[i].base; |
| 142 | k = i; | 147 | k = i; |
| 143 | } | 148 | } |
| 144 | low = (u32)(temp); | 149 | if (i == I40IW_HMC_IW_APBVT_ENTRY) { |
| 145 | if (low) | 150 | info[i].cnt = 1; |
| 146 | info[i].cnt = low; | 151 | continue; |
| 152 | } | ||
| 153 | if (i == I40IW_HMC_IW_QP) | ||
| 154 | info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); | ||
| 155 | else if (i == I40IW_HMC_IW_CQ) | ||
| 156 | info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); | ||
| 157 | else | ||
| 158 | info[i].cnt = (u32)(temp); | ||
| 147 | } | 159 | } |
| 148 | size = info[k].cnt * info[k].size + info[k].base; | 160 | size = info[k].cnt * info[k].size + info[k].base; |
| 149 | if (size & 0x1FFFFF) | 161 | if (size & 0x1FFFFF) |
| @@ -155,6 +167,31 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf( | |||
| 155 | } | 167 | } |
| 156 | 168 | ||
| 157 | /** | 169 | /** |
| 170 | * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size | ||
| 171 | * @buf: ptr to fpm query buffer | ||
| 172 | * @buf_idx: index into buf | ||
| 173 | * @info: ptr to i40iw_hmc_obj_info struct | ||
| 174 | * @rsrc_idx: resource index into info | ||
| 175 | * | ||
| 176 | * Decode a 64 bit value from fpm query buffer into max count and size | ||
| 177 | */ | ||
| 178 | static u64 i40iw_sc_decode_fpm_query(u64 *buf, | ||
| 179 | u32 buf_idx, | ||
| 180 | struct i40iw_hmc_obj_info *obj_info, | ||
| 181 | u32 rsrc_idx) | ||
| 182 | { | ||
| 183 | u64 temp; | ||
| 184 | u32 size; | ||
| 185 | |||
| 186 | get_64bit_val(buf, buf_idx, &temp); | ||
| 187 | obj_info[rsrc_idx].max_cnt = (u32)temp; | ||
| 188 | size = (u32)RS_64_1(temp, 32); | ||
| 189 | obj_info[rsrc_idx].size = LS_64_1(1, size); | ||
| 190 | |||
| 191 | return temp; | ||
| 192 | } | ||
| 193 | |||
| 194 | /** | ||
| 158 | * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer | 195 | * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer |
| 159 | * @buf: ptr to fpm query buffer | 196 | * @buf: ptr to fpm query buffer |
| 160 | * @info: ptr to i40iw_hmc_obj_info struct | 197 | * @info: ptr to i40iw_hmc_obj_info struct |
| @@ -168,9 +205,9 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf( | |||
| 168 | struct i40iw_hmc_info *hmc_info, | 205 | struct i40iw_hmc_info *hmc_info, |
| 169 | struct i40iw_hmc_fpm_misc *hmc_fpm_misc) | 206 | struct i40iw_hmc_fpm_misc *hmc_fpm_misc) |
| 170 | { | 207 | { |
| 171 | u64 temp; | ||
| 172 | struct i40iw_hmc_obj_info *obj_info; | 208 | struct i40iw_hmc_obj_info *obj_info; |
| 173 | u32 i, j, size; | 209 | u64 temp; |
| 210 | u32 size; | ||
| 174 | u16 max_pe_sds; | 211 | u16 max_pe_sds; |
| 175 | 212 | ||
| 176 | obj_info = hmc_info->hmc_obj; | 213 | obj_info = hmc_info->hmc_obj; |
| @@ -185,41 +222,52 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf( | |||
| 185 | hmc_fpm_misc->max_sds = max_pe_sds; | 222 | hmc_fpm_misc->max_sds = max_pe_sds; |
| 186 | hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index; | 223 | hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index; |
| 187 | 224 | ||
| 188 | for (i = I40IW_HMC_IW_QP, j = 8; | 225 | get_64bit_val(buf, 8, &temp); |
| 189 | i <= I40IW_HMC_IW_ARP; i++, j += 8) { | 226 | obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); |
| 190 | get_64bit_val(buf, j, &temp); | 227 | size = (u32)RS_64_1(temp, 32); |
| 191 | if (i == I40IW_HMC_IW_QP) | 228 | obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size); |
| 192 | obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); | ||
| 193 | else if (i == I40IW_HMC_IW_CQ) | ||
| 194 | obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); | ||
| 195 | else | ||
| 196 | obj_info[i].max_cnt = (u32)temp; | ||
| 197 | 229 | ||
| 198 | size = (u32)RS_64_1(temp, 32); | 230 | get_64bit_val(buf, 16, &temp); |
| 199 | obj_info[i].size = ((u64)1 << size); | 231 | obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); |
| 200 | } | 232 | size = (u32)RS_64_1(temp, 32); |
| 201 | for (i = I40IW_HMC_IW_MR, j = 48; | 233 | obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size); |
| 202 | i <= I40IW_HMC_IW_PBLE; i++, j += 8) { | 234 | |
| 203 | get_64bit_val(buf, j, &temp); | 235 | i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE); |
| 204 | obj_info[i].max_cnt = (u32)temp; | 236 | i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP); |
| 205 | size = (u32)RS_64_1(temp, 32); | 237 | |
| 206 | obj_info[i].size = LS_64_1(1, size); | 238 | obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192; |
| 207 | } | 239 | obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1; |
| 240 | |||
| 241 | i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR); | ||
| 242 | i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF); | ||
| 208 | 243 | ||
| 209 | get_64bit_val(buf, 120, &temp); | ||
| 210 | hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS); | ||
| 211 | get_64bit_val(buf, 120, &temp); | ||
| 212 | hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER); | ||
| 213 | get_64bit_val(buf, 120, &temp); | ||
| 214 | hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET); | ||
| 215 | get_64bit_val(buf, 64, &temp); | 244 | get_64bit_val(buf, 64, &temp); |
| 245 | obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp; | ||
| 246 | obj_info[I40IW_HMC_IW_XFFL].size = 4; | ||
| 216 | hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE); | 247 | hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE); |
| 217 | if (!hmc_fpm_misc->xf_block_size) | 248 | if (!hmc_fpm_misc->xf_block_size) |
| 218 | return I40IW_ERR_INVALID_SIZE; | 249 | return I40IW_ERR_INVALID_SIZE; |
| 250 | |||
| 251 | i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1); | ||
| 252 | |||
| 219 | get_64bit_val(buf, 80, &temp); | 253 | get_64bit_val(buf, 80, &temp); |
| 254 | obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp; | ||
| 255 | obj_info[I40IW_HMC_IW_Q1FL].size = 4; | ||
| 220 | hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE); | 256 | hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE); |
| 221 | if (!hmc_fpm_misc->q1_block_size) | 257 | if (!hmc_fpm_misc->q1_block_size) |
| 222 | return I40IW_ERR_INVALID_SIZE; | 258 | return I40IW_ERR_INVALID_SIZE; |
| 259 | |||
| 260 | i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER); | ||
| 261 | |||
| 262 | get_64bit_val(buf, 112, &temp); | ||
| 263 | obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp; | ||
| 264 | obj_info[I40IW_HMC_IW_PBLE].size = 8; | ||
| 265 | |||
| 266 | get_64bit_val(buf, 120, &temp); | ||
| 267 | hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS); | ||
| 268 | hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER); | ||
| 269 | hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET); | ||
| 270 | |||
| 223 | return 0; | 271 | return 0; |
| 224 | } | 272 | } |
| 225 | 273 | ||
| @@ -3392,13 +3440,6 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_ | |||
| 3392 | hmc_info->sd_table.sd_entry = virt_mem.va; | 3440 | hmc_info->sd_table.sd_entry = virt_mem.va; |
| 3393 | } | 3441 | } |
| 3394 | 3442 | ||
| 3395 | /* fill size of objects which are fixed */ | ||
| 3396 | hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4; | ||
| 3397 | hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4; | ||
| 3398 | hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8; | ||
| 3399 | hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192; | ||
| 3400 | hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1; | ||
| 3401 | |||
| 3402 | return ret_code; | 3443 | return ret_code; |
| 3403 | } | 3444 | } |
| 3404 | 3445 | ||
| @@ -4840,7 +4881,7 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi) | |||
| 4840 | { | 4881 | { |
| 4841 | u8 fcn_id = vsi->fcn_id; | 4882 | u8 fcn_id = vsi->fcn_id; |
| 4842 | 4883 | ||
| 4843 | if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID)) | 4884 | if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT) |
| 4844 | vsi->dev->fcn_id_array[fcn_id] = false; | 4885 | vsi->dev->fcn_id_array[fcn_id] = false; |
| 4845 | i40iw_hw_stats_stop_timer(vsi); | 4886 | i40iw_hw_stats_stop_timer(vsi); |
| 4846 | } | 4887 | } |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h index a39ac12b6a7e..2ebaadbed379 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_d.h +++ b/drivers/infiniband/hw/i40iw/i40iw_d.h | |||
| @@ -1507,8 +1507,8 @@ enum { | |||
| 1507 | I40IW_CQ0_ALIGNMENT_MASK = (256 - 1), | 1507 | I40IW_CQ0_ALIGNMENT_MASK = (256 - 1), |
| 1508 | I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1), | 1508 | I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1), |
| 1509 | I40IW_SHADOWAREA_MASK = (128 - 1), | 1509 | I40IW_SHADOWAREA_MASK = (128 - 1), |
| 1510 | I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0, | 1510 | I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = (4 - 1), |
| 1511 | I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0 | 1511 | I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = (4 - 1) |
| 1512 | }; | 1512 | }; |
| 1513 | 1513 | ||
| 1514 | enum i40iw_alignment { | 1514 | enum i40iw_alignment { |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index 71050c5d29a0..7f5583d83622 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c | |||
| @@ -685,7 +685,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc) | |||
| 685 | cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe)); | 685 | cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe)); |
| 686 | tsize = cqsize + sizeof(struct i40iw_cq_shadow_area); | 686 | tsize = cqsize + sizeof(struct i40iw_cq_shadow_area); |
| 687 | ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize, | 687 | ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize, |
| 688 | I40IW_CQ0_ALIGNMENT_MASK); | 688 | I40IW_CQ0_ALIGNMENT); |
| 689 | if (ret) | 689 | if (ret) |
| 690 | return ret; | 690 | return ret; |
| 691 | 691 | ||
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h index 91c421762f06..f7013f11d808 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_status.h +++ b/drivers/infiniband/hw/i40iw/i40iw_status.h | |||
| @@ -62,7 +62,7 @@ enum i40iw_status_code { | |||
| 62 | I40IW_ERR_INVALID_ALIGNMENT = -23, | 62 | I40IW_ERR_INVALID_ALIGNMENT = -23, |
| 63 | I40IW_ERR_FLUSHED_QUEUE = -24, | 63 | I40IW_ERR_FLUSHED_QUEUE = -24, |
| 64 | I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25, | 64 | I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25, |
| 65 | I40IW_ERR_INVALID_IMM_DATA_SIZE = -26, | 65 | I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26, |
| 66 | I40IW_ERR_TIMEOUT = -27, | 66 | I40IW_ERR_TIMEOUT = -27, |
| 67 | I40IW_ERR_OPCODE_MISMATCH = -28, | 67 | I40IW_ERR_OPCODE_MISMATCH = -28, |
| 68 | I40IW_ERR_CQP_COMPL_ERROR = -29, | 68 | I40IW_ERR_CQP_COMPL_ERROR = -29, |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c index b0d3a0e8a9b5..1060725d18bc 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_uk.c +++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c | |||
| @@ -435,7 +435,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp, | |||
| 435 | 435 | ||
| 436 | op_info = &info->op.inline_rdma_write; | 436 | op_info = &info->op.inline_rdma_write; |
| 437 | if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) | 437 | if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) |
| 438 | return I40IW_ERR_INVALID_IMM_DATA_SIZE; | 438 | return I40IW_ERR_INVALID_INLINE_DATA_SIZE; |
| 439 | 439 | ||
| 440 | ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); | 440 | ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); |
| 441 | if (ret_code) | 441 | if (ret_code) |
| @@ -511,7 +511,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp, | |||
| 511 | 511 | ||
| 512 | op_info = &info->op.inline_send; | 512 | op_info = &info->op.inline_send; |
| 513 | if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) | 513 | if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) |
| 514 | return I40IW_ERR_INVALID_IMM_DATA_SIZE; | 514 | return I40IW_ERR_INVALID_INLINE_DATA_SIZE; |
| 515 | 515 | ||
| 516 | ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); | 516 | ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); |
| 517 | if (ret_code) | 517 | if (ret_code) |
| @@ -784,7 +784,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq, | |||
| 784 | get_64bit_val(cqe, 0, &qword0); | 784 | get_64bit_val(cqe, 0, &qword0); |
| 785 | get_64bit_val(cqe, 16, &qword2); | 785 | get_64bit_val(cqe, 16, &qword2); |
| 786 | 786 | ||
| 787 | info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM); | 787 | info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM); |
| 788 | 788 | ||
| 789 | info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID); | 789 | info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID); |
| 790 | 790 | ||
| @@ -1187,7 +1187,7 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size, | |||
| 1187 | u8 *wqe_size) | 1187 | u8 *wqe_size) |
| 1188 | { | 1188 | { |
| 1189 | if (data_size > I40IW_MAX_INLINE_DATA_SIZE) | 1189 | if (data_size > I40IW_MAX_INLINE_DATA_SIZE) |
| 1190 | return I40IW_ERR_INVALID_IMM_DATA_SIZE; | 1190 | return I40IW_ERR_INVALID_INLINE_DATA_SIZE; |
| 1191 | 1191 | ||
| 1192 | if (data_size <= 16) | 1192 | if (data_size <= 16) |
| 1193 | *wqe_size = I40IW_QP_WQE_MIN_SIZE; | 1193 | *wqe_size = I40IW_QP_WQE_MIN_SIZE; |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index a7f2e60085c4..f7fcde1ff0aa 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -1085,6 +1085,12 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, | |||
| 1085 | bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) == | 1085 | bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) == |
| 1086 | IB_LINK_LAYER_INFINIBAND); | 1086 | IB_LINK_LAYER_INFINIBAND); |
| 1087 | 1087 | ||
| 1088 | /* CM layer calls ib_modify_port() regardless of the link layer. For | ||
| 1089 | * Ethernet ports, qkey violation and Port capabilities are meaningless. | ||
| 1090 | */ | ||
| 1091 | if (!is_ib) | ||
| 1092 | return 0; | ||
| 1093 | |||
| 1088 | if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) { | 1094 | if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) { |
| 1089 | change_mask = props->clr_port_cap_mask | props->set_port_cap_mask; | 1095 | change_mask = props->clr_port_cap_mask | props->set_port_cap_mask; |
| 1090 | value = ~props->clr_port_cap_mask | props->set_port_cap_mask; | 1096 | value = ~props->clr_port_cap_mask | props->set_port_cap_mask; |
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index ae0746754008..3d701c7a4c91 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c | |||
| @@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler( | |||
| 939 | 939 | ||
| 940 | if (qp->ibqp.qp_type != IB_QPT_RC) { | 940 | if (qp->ibqp.qp_type != IB_QPT_RC) { |
| 941 | av = *wqe; | 941 | av = *wqe; |
| 942 | if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT)) | 942 | if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV)) |
| 943 | *wqe += sizeof(struct mlx5_av); | 943 | *wqe += sizeof(struct mlx5_av); |
| 944 | else | 944 | else |
| 945 | *wqe += sizeof(struct mlx5_base_av); | 945 | *wqe += sizeof(struct mlx5_base_av); |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 0889ff367c86..f58f8f5f3ebe 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
| @@ -1238,6 +1238,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 1238 | goto err_destroy_tis; | 1238 | goto err_destroy_tis; |
| 1239 | 1239 | ||
| 1240 | sq->base.container_mibqp = qp; | 1240 | sq->base.container_mibqp = qp; |
| 1241 | sq->base.mqp.event = mlx5_ib_qp_event; | ||
| 1241 | } | 1242 | } |
| 1242 | 1243 | ||
| 1243 | if (qp->rq.wqe_cnt) { | 1244 | if (qp->rq.wqe_cnt) { |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index 69bda611d313..90aa326fd7c0 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | |||
| @@ -65,13 +65,28 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq, | |||
| 65 | struct pvrdma_dev *dev = to_vdev(ibcq->device); | 65 | struct pvrdma_dev *dev = to_vdev(ibcq->device); |
| 66 | struct pvrdma_cq *cq = to_vcq(ibcq); | 66 | struct pvrdma_cq *cq = to_vcq(ibcq); |
| 67 | u32 val = cq->cq_handle; | 67 | u32 val = cq->cq_handle; |
| 68 | unsigned long flags; | ||
| 69 | int has_data = 0; | ||
| 68 | 70 | ||
| 69 | val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? | 71 | val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? |
| 70 | PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM; | 72 | PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM; |
| 71 | 73 | ||
| 74 | spin_lock_irqsave(&cq->cq_lock, flags); | ||
| 75 | |||
| 72 | pvrdma_write_uar_cq(dev, val); | 76 | pvrdma_write_uar_cq(dev, val); |
| 73 | 77 | ||
| 74 | return 0; | 78 | if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) { |
| 79 | unsigned int head; | ||
| 80 | |||
| 81 | has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx, | ||
| 82 | cq->ibcq.cqe, &head); | ||
| 83 | if (unlikely(has_data == PVRDMA_INVALID_IDX)) | ||
| 84 | dev_err(&dev->pdev->dev, "CQ ring state invalid\n"); | ||
| 85 | } | ||
| 86 | |||
| 87 | spin_unlock_irqrestore(&cq->cq_lock, flags); | ||
| 88 | |||
| 89 | return has_data; | ||
| 75 | } | 90 | } |
| 76 | 91 | ||
| 77 | /** | 92 | /** |
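With the pvrdma change above, ib_req_notify_cq() on this device now honors IB_CQ_REPORT_MISSED_EVENTS: it returns a positive value when completions were already queued at re-arm time instead of always returning 0. A minimal sketch of the standard consumer loop that relies on this return value is below; handle_wc() is a hypothetical consumer, not part of the patch.

#include <rdma/ib_verbs.h>

static void handle_wc(struct ib_wc *wc)
{
	pr_info("wc: status %d opcode %d\n", wc->status, wc->opcode);
}

static void drain_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0)
			handle_wc(&wc);
		/*
		 * A positive return means completions arrived between the
		 * last poll and the re-arm, so poll again rather than wait
		 * for an interrupt that will not be delivered for them.
		 */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}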
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index ff50a7bd66d8..7ac25059c40f 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
| @@ -336,6 +336,7 @@ struct ipoib_dev_priv { | |||
| 336 | unsigned long flags; | 336 | unsigned long flags; |
| 337 | 337 | ||
| 338 | struct rw_semaphore vlan_rwsem; | 338 | struct rw_semaphore vlan_rwsem; |
| 339 | struct mutex mcast_mutex; | ||
| 339 | 340 | ||
| 340 | struct rb_root path_tree; | 341 | struct rb_root path_tree; |
| 341 | struct list_head path_list; | 342 | struct list_head path_list; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index f87d104837dc..d69410c2ed97 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
| @@ -511,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id, | |||
| 511 | case IB_CM_REQ_RECEIVED: | 511 | case IB_CM_REQ_RECEIVED: |
| 512 | return ipoib_cm_req_handler(cm_id, event); | 512 | return ipoib_cm_req_handler(cm_id, event); |
| 513 | case IB_CM_DREQ_RECEIVED: | 513 | case IB_CM_DREQ_RECEIVED: |
| 514 | p = cm_id->context; | ||
| 515 | ib_send_cm_drep(cm_id, NULL, 0); | 514 | ib_send_cm_drep(cm_id, NULL, 0); |
| 516 | /* Fall through */ | 515 | /* Fall through */ |
| 517 | case IB_CM_REJ_RECEIVED: | 516 | case IB_CM_REJ_RECEIVED: |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 7871379342f4..184a22f48027 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | |||
| @@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = { | |||
| 52 | IPOIB_NETDEV_STAT(tx_bytes), | 52 | IPOIB_NETDEV_STAT(tx_bytes), |
| 53 | IPOIB_NETDEV_STAT(tx_errors), | 53 | IPOIB_NETDEV_STAT(tx_errors), |
| 54 | IPOIB_NETDEV_STAT(rx_dropped), | 54 | IPOIB_NETDEV_STAT(rx_dropped), |
| 55 | IPOIB_NETDEV_STAT(tx_dropped) | 55 | IPOIB_NETDEV_STAT(tx_dropped), |
| 56 | IPOIB_NETDEV_STAT(multicast), | ||
| 56 | }; | 57 | }; |
| 57 | 58 | ||
| 58 | #define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats) | 59 | #define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 57a9655e844d..2e075377242e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
| 256 | 256 | ||
| 257 | ++dev->stats.rx_packets; | 257 | ++dev->stats.rx_packets; |
| 258 | dev->stats.rx_bytes += skb->len; | 258 | dev->stats.rx_bytes += skb->len; |
| 259 | if (skb->pkt_type == PACKET_MULTICAST) | ||
| 260 | dev->stats.multicast++; | ||
| 259 | 261 | ||
| 260 | skb->dev = dev; | 262 | skb->dev = dev; |
| 261 | if ((dev->features & NETIF_F_RXCSUM) && | 263 | if ((dev->features & NETIF_F_RXCSUM) && |
| @@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev) | |||
| 709 | return pending; | 711 | return pending; |
| 710 | } | 712 | } |
| 711 | 713 | ||
| 714 | static void check_qp_movement_and_print(struct ipoib_dev_priv *priv, | ||
| 715 | struct ib_qp *qp, | ||
| 716 | enum ib_qp_state new_state) | ||
| 717 | { | ||
| 718 | struct ib_qp_attr qp_attr; | ||
| 719 | struct ib_qp_init_attr query_init_attr; | ||
| 720 | int ret; | ||
| 721 | |||
| 722 | ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr); | ||
| 723 | if (ret) { | ||
| 724 | ipoib_warn(priv, "%s: Failed to query QP\n", __func__); | ||
| 725 | return; | ||
| 726 | } | ||
| 727 | /* print according to the new-state and the previous state.*/ | ||
| 728 | if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET) | ||
| 729 | ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n"); | ||
| 730 | else | ||
| 731 | ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n", | ||
| 732 | new_state, qp_attr.qp_state); | ||
| 733 | } | ||
| 734 | |||
| 712 | int ipoib_ib_dev_stop_default(struct net_device *dev) | 735 | int ipoib_ib_dev_stop_default(struct net_device *dev) |
| 713 | { | 736 | { |
| 714 | struct ipoib_dev_priv *priv = ipoib_priv(dev); | 737 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
| @@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev) | |||
| 728 | */ | 751 | */ |
| 729 | qp_attr.qp_state = IB_QPS_ERR; | 752 | qp_attr.qp_state = IB_QPS_ERR; |
| 730 | if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) | 753 | if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) |
| 731 | ipoib_warn(priv, "Failed to modify QP to ERROR state\n"); | 754 | check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR); |
| 732 | 755 | ||
| 733 | /* Wait for all sends and receives to complete */ | 756 | /* Wait for all sends and receives to complete */ |
| 734 | begin = jiffies; | 757 | begin = jiffies; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 4ce315c92b48..6c77df34869d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
| @@ -1560,6 +1560,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv) | |||
| 1560 | int i, wait_flushed = 0; | 1560 | int i, wait_flushed = 0; |
| 1561 | 1561 | ||
| 1562 | init_completion(&priv->ntbl.flushed); | 1562 | init_completion(&priv->ntbl.flushed); |
| 1563 | set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); | ||
| 1563 | 1564 | ||
| 1564 | spin_lock_irqsave(&priv->lock, flags); | 1565 | spin_lock_irqsave(&priv->lock, flags); |
| 1565 | 1566 | ||
| @@ -1604,7 +1605,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev) | |||
| 1604 | 1605 | ||
| 1605 | ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); | 1606 | ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); |
| 1606 | init_completion(&priv->ntbl.deleted); | 1607 | init_completion(&priv->ntbl.deleted); |
| 1607 | set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); | ||
| 1608 | 1608 | ||
| 1609 | /* Stop GC if called at init fail need to cancel work */ | 1609 | /* Stop GC if called at init fail need to cancel work */ |
| 1610 | stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); | 1610 | stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); |
| @@ -1847,6 +1847,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = { | |||
| 1847 | .ndo_tx_timeout = ipoib_timeout, | 1847 | .ndo_tx_timeout = ipoib_timeout, |
| 1848 | .ndo_set_rx_mode = ipoib_set_mcast_list, | 1848 | .ndo_set_rx_mode = ipoib_set_mcast_list, |
| 1849 | .ndo_get_iflink = ipoib_get_iflink, | 1849 | .ndo_get_iflink = ipoib_get_iflink, |
| 1850 | .ndo_get_stats64 = ipoib_get_stats, | ||
| 1850 | }; | 1851 | }; |
| 1851 | 1852 | ||
| 1852 | void ipoib_setup_common(struct net_device *dev) | 1853 | void ipoib_setup_common(struct net_device *dev) |
| @@ -1877,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev) | |||
| 1877 | priv->dev = dev; | 1878 | priv->dev = dev; |
| 1878 | spin_lock_init(&priv->lock); | 1879 | spin_lock_init(&priv->lock); |
| 1879 | init_rwsem(&priv->vlan_rwsem); | 1880 | init_rwsem(&priv->vlan_rwsem); |
| 1881 | mutex_init(&priv->mcast_mutex); | ||
| 1880 | 1882 | ||
| 1881 | INIT_LIST_HEAD(&priv->path_list); | 1883 | INIT_LIST_HEAD(&priv->path_list); |
| 1882 | INIT_LIST_HEAD(&priv->child_intfs); | 1884 | INIT_LIST_HEAD(&priv->child_intfs); |
| @@ -2173,14 +2175,14 @@ static struct net_device *ipoib_add_port(const char *format, | |||
| 2173 | priv->dev->dev_id = port - 1; | 2175 | priv->dev->dev_id = port - 1; |
| 2174 | 2176 | ||
| 2175 | result = ib_query_port(hca, port, &attr); | 2177 | result = ib_query_port(hca, port, &attr); |
| 2176 | if (!result) | 2178 | if (result) { |
| 2177 | priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); | ||
| 2178 | else { | ||
| 2179 | printk(KERN_WARNING "%s: ib_query_port %d failed\n", | 2179 | printk(KERN_WARNING "%s: ib_query_port %d failed\n", |
| 2180 | hca->name, port); | 2180 | hca->name, port); |
| 2181 | goto device_init_failed; | 2181 | goto device_init_failed; |
| 2182 | } | 2182 | } |
| 2183 | 2183 | ||
| 2184 | priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); | ||
| 2185 | |||
| 2184 | /* MTU will be reset when mcast join happens */ | 2186 | /* MTU will be reset when mcast join happens */ |
| 2185 | priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); | 2187 | priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); |
| 2186 | priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; | 2188 | priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; |
| @@ -2211,12 +2213,14 @@ static struct net_device *ipoib_add_port(const char *format, | |||
| 2211 | printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", | 2213 | printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", |
| 2212 | hca->name, port, result); | 2214 | hca->name, port, result); |
| 2213 | goto device_init_failed; | 2215 | goto device_init_failed; |
| 2214 | } else | 2216 | } |
| 2215 | memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); | 2217 | |
| 2218 | memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, | ||
| 2219 | sizeof(union ib_gid)); | ||
| 2216 | set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); | 2220 | set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); |
| 2217 | 2221 | ||
| 2218 | result = ipoib_dev_init(priv->dev, hca, port); | 2222 | result = ipoib_dev_init(priv->dev, hca, port); |
| 2219 | if (result < 0) { | 2223 | if (result) { |
| 2220 | printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", | 2224 | printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", |
| 2221 | hca->name, port, result); | 2225 | hca->name, port, result); |
| 2222 | goto device_init_failed; | 2226 | goto device_init_failed; |
| @@ -2365,6 +2369,7 @@ static int __init ipoib_init_module(void) | |||
| 2365 | ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); | 2369 | ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); |
| 2366 | #ifdef CONFIG_INFINIBAND_IPOIB_CM | 2370 | #ifdef CONFIG_INFINIBAND_IPOIB_CM |
| 2367 | ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); | 2371 | ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); |
| 2372 | ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0); | ||
| 2368 | #endif | 2373 | #endif |
| 2369 | 2374 | ||
| 2370 | /* | 2375 | /* |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 057f58e6afca..93e149efc1f5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
| @@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev) | |||
| 684 | int ipoib_mcast_stop_thread(struct net_device *dev) | 684 | int ipoib_mcast_stop_thread(struct net_device *dev) |
| 685 | { | 685 | { |
| 686 | struct ipoib_dev_priv *priv = ipoib_priv(dev); | 686 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
| 687 | unsigned long flags; | ||
| 688 | 687 | ||
| 689 | ipoib_dbg_mcast(priv, "stopping multicast thread\n"); | 688 | ipoib_dbg_mcast(priv, "stopping multicast thread\n"); |
| 690 | 689 | ||
| 691 | spin_lock_irqsave(&priv->lock, flags); | 690 | cancel_delayed_work_sync(&priv->mcast_task); |
| 692 | cancel_delayed_work(&priv->mcast_task); | ||
| 693 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 694 | |||
| 695 | flush_workqueue(priv->wq); | ||
| 696 | 691 | ||
| 697 | return 0; | 692 | return 0; |
| 698 | } | 693 | } |
| @@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list) | |||
| 748 | { | 743 | { |
| 749 | struct ipoib_mcast *mcast, *tmcast; | 744 | struct ipoib_mcast *mcast, *tmcast; |
| 750 | 745 | ||
| 746 | /* | ||
| 747 | * make sure the in-flight joins have finished before we attempt | ||
| 748 | * to leave | ||
| 749 | */ | ||
| 750 | list_for_each_entry_safe(mcast, tmcast, remove_list, list) | ||
| 751 | if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) | ||
| 752 | wait_for_completion(&mcast->done); | ||
| 753 | |||
| 751 | list_for_each_entry_safe(mcast, tmcast, remove_list, list) { | 754 | list_for_each_entry_safe(mcast, tmcast, remove_list, list) { |
| 752 | ipoib_mcast_leave(mcast->dev, mcast); | 755 | ipoib_mcast_leave(mcast->dev, mcast); |
| 753 | ipoib_mcast_free(mcast); | 756 | ipoib_mcast_free(mcast); |
| @@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev) | |||
| 838 | struct ipoib_mcast *mcast, *tmcast; | 841 | struct ipoib_mcast *mcast, *tmcast; |
| 839 | unsigned long flags; | 842 | unsigned long flags; |
| 840 | 843 | ||
| 844 | mutex_lock(&priv->mcast_mutex); | ||
| 841 | ipoib_dbg_mcast(priv, "flushing multicast list\n"); | 845 | ipoib_dbg_mcast(priv, "flushing multicast list\n"); |
| 842 | 846 | ||
| 843 | spin_lock_irqsave(&priv->lock, flags); | 847 | spin_lock_irqsave(&priv->lock, flags); |
| @@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev) | |||
| 856 | 860 | ||
| 857 | spin_unlock_irqrestore(&priv->lock, flags); | 861 | spin_unlock_irqrestore(&priv->lock, flags); |
| 858 | 862 | ||
| 859 | /* | ||
| 860 | * make sure the in-flight joins have finished before we attempt | ||
| 861 | * to leave | ||
| 862 | */ | ||
| 863 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) | ||
| 864 | if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) | ||
| 865 | wait_for_completion(&mcast->done); | ||
| 866 | |||
| 867 | ipoib_mcast_remove_list(&remove_list); | 863 | ipoib_mcast_remove_list(&remove_list); |
| 864 | mutex_unlock(&priv->mcast_mutex); | ||
| 868 | } | 865 | } |
| 869 | 866 | ||
| 870 | static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) | 867 | static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) |
| @@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work) | |||
| 982 | netif_addr_unlock(dev); | 979 | netif_addr_unlock(dev); |
| 983 | local_irq_restore(flags); | 980 | local_irq_restore(flags); |
| 984 | 981 | ||
| 985 | /* | ||
| 986 | * make sure the in-flight joins have finished before we attempt | ||
| 987 | * to leave | ||
| 988 | */ | ||
| 989 | list_for_each_entry_safe(mcast, tmcast, &remove_list, list) | ||
| 990 | if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) | ||
| 991 | wait_for_completion(&mcast->done); | ||
| 992 | |||
| 993 | ipoib_mcast_remove_list(&remove_list); | 982 | ipoib_mcast_remove_list(&remove_list); |
| 994 | 983 | ||
| 995 | /* | 984 | /* |
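
The multicast changes above move the "wait for in-flight joins" loop into ipoib_mcast_remove_list(), so every caller waits on mcast->done before an entry is torn down. The sketch below models that ordering rule in user space, with a pthread condition variable standing in for the kernel's struct completion; all names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mcast_entry {
    int busy;                   /* models IPOIB_MCAST_FLAG_BUSY */
    pthread_mutex_t lock;
    pthread_cond_t done;        /* models struct completion */
};

static void *join_worker(void *arg)
{
    struct mcast_entry *m = arg;

    /* ... the asynchronous join would run here ... */
    pthread_mutex_lock(&m->lock);
    m->busy = 0;                /* complete(&mcast->done) */
    pthread_cond_signal(&m->done);
    pthread_mutex_unlock(&m->lock);
    return NULL;
}

static void remove_entry(struct mcast_entry *m)
{
    pthread_mutex_lock(&m->lock);
    while (m->busy)             /* wait_for_completion(&mcast->done) */
        pthread_cond_wait(&m->done, &m->lock);
    pthread_mutex_unlock(&m->lock);
    free(m);                    /* only now is it safe to tear down */
}

int main(void)
{
    struct mcast_entry *m = calloc(1, sizeof(*m));
    pthread_t t;

    if (!m)
        return 1;
    pthread_mutex_init(&m->lock, NULL);
    pthread_cond_init(&m->done, NULL);
    m->busy = 1;
    pthread_create(&t, NULL, join_worker, m);
    remove_entry(m);
    pthread_join(t, NULL);
    puts("entry freed only after the in-flight join finished");
    return 0;
}
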
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 298a6ba51411..ca0e19ae7a90 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
| @@ -476,10 +476,21 @@ static const u8 xboxone_hori_init[] = { | |||
| 476 | }; | 476 | }; |
| 477 | 477 | ||
| 478 | /* | 478 | /* |
| 479 | * A rumble packet is required for some PowerA pads to start | 479 | * A specific rumble packet is required for some PowerA pads to start |
| 480 | * sending input reports. One of those pads is (0x24c6:0x543a). | 480 | * sending input reports. One of those pads is (0x24c6:0x543a). |
| 481 | */ | 481 | */ |
| 482 | static const u8 xboxone_zerorumble_init[] = { | 482 | static const u8 xboxone_rumblebegin_init[] = { |
| 483 | 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00, | ||
| 484 | 0x1D, 0x1D, 0xFF, 0x00, 0x00 | ||
| 485 | }; | ||
| 486 | |||
| 487 | /* | ||
| 488 | * A rumble packet with zero FF intensity will immediately | ||
| 489 | * terminate the rumbling required to init PowerA pads. | ||
| 490 | * This should happen fast enough that the motors don't | ||
| 491 | * spin up to enough speed to actually vibrate the gamepad. | ||
| 492 | */ | ||
| 493 | static const u8 xboxone_rumbleend_init[] = { | ||
| 483 | 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00, | 494 | 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00, |
| 484 | 0x00, 0x00, 0x00, 0x00, 0x00 | 495 | 0x00, 0x00, 0x00, 0x00, 0x00 |
| 485 | }; | 496 | }; |
| @@ -494,9 +505,12 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { | |||
| 494 | XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), | 505 | XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), |
| 495 | XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), | 506 | XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), |
| 496 | XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), | 507 | XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), |
| 497 | XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_zerorumble_init), | 508 | XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), |
| 498 | XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_zerorumble_init), | 509 | XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), |
| 499 | XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_zerorumble_init), | 510 | XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), |
| 511 | XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumbleend_init), | ||
| 512 | XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumbleend_init), | ||
| 513 | XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumbleend_init), | ||
| 500 | }; | 514 | }; |
| 501 | 515 | ||
| 502 | struct xpad_output_packet { | 516 | struct xpad_output_packet { |
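
The xpad.c change above replaces the single zero-rumble packet with a rumble-on packet immediately followed by a rumble-off packet for the same PowerA VID/PID pairs, relying on the init table being sent in order (as the duplicate VID/PID entries suggest). The fragment below sketches that walk in stand-alone C; the structures are simplified stand-ins for the driver's xboxone_init_packet table, not its exact layout.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct init_packet {
    uint16_t vid;
    uint16_t pid;
    const uint8_t *data;
    uint8_t len;
};

static const uint8_t rumble_begin[] = {
    0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
    0x1D, 0x1D, 0xFF, 0x00, 0x00
};
static const uint8_t rumble_end[] = {
    0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00
};

/* Entries are sent in table order; a zero vid/pid would act as a wildcard. */
static const struct init_packet init_packets[] = {
    { 0x24c6, 0x543a, rumble_begin, sizeof(rumble_begin) },
    { 0x24c6, 0x543a, rumble_end,   sizeof(rumble_end)   },
};

static void send_init_sequence(uint16_t vid, uint16_t pid)
{
    for (size_t i = 0; i < sizeof(init_packets) / sizeof(init_packets[0]); i++) {
        const struct init_packet *p = &init_packets[i];

        if ((p->vid && p->vid != vid) || (p->pid && p->pid != pid))
            continue;
        printf("send init packet %zu (%u bytes, last FF byte 0x%02x)\n",
               i, p->len, p->data[10]);
    }
}

int main(void)
{
    send_init_sequence(0x24c6, 0x543a);  /* rumble on, then rumble off */
    return 0;
}
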
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index f600f3a7a3c6..23520df7650f 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c | |||
| @@ -331,7 +331,7 @@ static int soc_button_probe(struct platform_device *pdev) | |||
| 331 | error = gpiod_count(dev, NULL); | 331 | error = gpiod_count(dev, NULL); |
| 332 | if (error < 0) { | 332 | if (error < 0) { |
| 333 | dev_dbg(dev, "no GPIO attached, ignoring...\n"); | 333 | dev_dbg(dev, "no GPIO attached, ignoring...\n"); |
| 334 | return error; | 334 | return -ENODEV; |
| 335 | } | 335 | } |
| 336 | 336 | ||
| 337 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | 337 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); |
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 262d1057c1da..850b00e3ad8e 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c | |||
| @@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f, | |||
| 1215 | 1215 | ||
| 1216 | case SS4_PACKET_ID_TWO: | 1216 | case SS4_PACKET_ID_TWO: |
| 1217 | if (priv->flags & ALPS_BUTTONPAD) { | 1217 | if (priv->flags & ALPS_BUTTONPAD) { |
| 1218 | f->mt[0].x = SS4_BTL_MF_X_V2(p, 0); | 1218 | if (IS_SS4PLUS_DEV(priv->dev_id)) { |
| 1219 | f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); | ||
| 1220 | f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); | ||
| 1221 | } else { | ||
| 1222 | f->mt[0].x = SS4_BTL_MF_X_V2(p, 0); | ||
| 1223 | f->mt[1].x = SS4_BTL_MF_X_V2(p, 1); | ||
| 1224 | } | ||
| 1219 | f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0); | 1225 | f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0); |
| 1220 | f->mt[1].x = SS4_BTL_MF_X_V2(p, 1); | ||
| 1221 | f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1); | 1226 | f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1); |
| 1222 | } else { | 1227 | } else { |
| 1223 | f->mt[0].x = SS4_STD_MF_X_V2(p, 0); | 1228 | if (IS_SS4PLUS_DEV(priv->dev_id)) { |
| 1229 | f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); | ||
| 1230 | f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); | ||
| 1231 | } else { | ||
| 1232 | f->mt[0].x = SS4_STD_MF_X_V2(p, 0); | ||
| 1233 | f->mt[1].x = SS4_STD_MF_X_V2(p, 1); | ||
| 1234 | } | ||
| 1224 | f->mt[0].y = SS4_STD_MF_Y_V2(p, 0); | 1235 | f->mt[0].y = SS4_STD_MF_Y_V2(p, 0); |
| 1225 | f->mt[1].x = SS4_STD_MF_X_V2(p, 1); | ||
| 1226 | f->mt[1].y = SS4_STD_MF_Y_V2(p, 1); | 1236 | f->mt[1].y = SS4_STD_MF_Y_V2(p, 1); |
| 1227 | } | 1237 | } |
| 1228 | f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0; | 1238 | f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0; |
| @@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f, | |||
| 1239 | 1249 | ||
| 1240 | case SS4_PACKET_ID_MULTI: | 1250 | case SS4_PACKET_ID_MULTI: |
| 1241 | if (priv->flags & ALPS_BUTTONPAD) { | 1251 | if (priv->flags & ALPS_BUTTONPAD) { |
| 1242 | f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); | 1252 | if (IS_SS4PLUS_DEV(priv->dev_id)) { |
| 1253 | f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); | ||
| 1254 | f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); | ||
| 1255 | } else { | ||
| 1256 | f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); | ||
| 1257 | f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); | ||
| 1258 | } | ||
| 1259 | |||
| 1243 | f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0); | 1260 | f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0); |
| 1244 | f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); | ||
| 1245 | f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1); | 1261 | f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1); |
| 1246 | no_data_x = SS4_MFPACKET_NO_AX_BL; | 1262 | no_data_x = SS4_MFPACKET_NO_AX_BL; |
| 1247 | no_data_y = SS4_MFPACKET_NO_AY_BL; | 1263 | no_data_y = SS4_MFPACKET_NO_AY_BL; |
| 1248 | } else { | 1264 | } else { |
| 1249 | f->mt[2].x = SS4_STD_MF_X_V2(p, 0); | 1265 | if (IS_SS4PLUS_DEV(priv->dev_id)) { |
| 1266 | f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); | ||
| 1267 | f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); | ||
| 1268 | } else { | ||
| 1269 | f->mt[0].x = SS4_STD_MF_X_V2(p, 0); | ||
| 1270 | f->mt[1].x = SS4_STD_MF_X_V2(p, 1); | ||
| 1271 | } | ||
| 1250 | f->mt[2].y = SS4_STD_MF_Y_V2(p, 0); | 1272 | f->mt[2].y = SS4_STD_MF_Y_V2(p, 0); |
| 1251 | f->mt[3].x = SS4_STD_MF_X_V2(p, 1); | ||
| 1252 | f->mt[3].y = SS4_STD_MF_Y_V2(p, 1); | 1273 | f->mt[3].y = SS4_STD_MF_Y_V2(p, 1); |
| 1253 | no_data_x = SS4_MFPACKET_NO_AX; | 1274 | no_data_x = SS4_MFPACKET_NO_AX; |
| 1254 | no_data_y = SS4_MFPACKET_NO_AY; | 1275 | no_data_y = SS4_MFPACKET_NO_AY; |
| @@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, | |||
| 2541 | 2562 | ||
| 2542 | memset(otp, 0, sizeof(otp)); | 2563 | memset(otp, 0, sizeof(otp)); |
| 2543 | 2564 | ||
| 2544 | if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) || | 2565 | if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) || |
| 2545 | alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0])) | 2566 | alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0])) |
| 2546 | return -1; | 2567 | return -1; |
| 2547 | 2568 | ||
| 2548 | alps_update_device_area_ss4_v2(otp, priv); | 2569 | alps_update_device_area_ss4_v2(otp, priv); |
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index ed2d6879fa52..c80a7c76cb76 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h | |||
| @@ -100,6 +100,10 @@ enum SS4_PACKET_ID { | |||
| 100 | ((_b[1 + _i * 3] << 5) & 0x1F00) \ | 100 | ((_b[1 + _i * 3] << 5) & 0x1F00) \ |
| 101 | ) | 101 | ) |
| 102 | 102 | ||
| 103 | #define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \ | ||
| 104 | ((_b[1 + (_i) * 3] << 4) & 0x0F80) \ | ||
| 105 | ) | ||
| 106 | |||
| 103 | #define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \ | 107 | #define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \ |
| 104 | ((_b[2 + (_i) * 3] << 5) & 0x01E0) | \ | 108 | ((_b[2 + (_i) * 3] << 5) & 0x01E0) | \ |
| 105 | ((_b[2 + (_i) * 3] << 4) & 0x0E00) \ | 109 | ((_b[2 + (_i) * 3] << 4) & 0x0E00) \ |
| @@ -109,6 +113,10 @@ enum SS4_PACKET_ID { | |||
| 109 | ((_b[0 + (_i) * 3] >> 3) & 0x0010) \ | 113 | ((_b[0 + (_i) * 3] >> 3) & 0x0010) \ |
| 110 | ) | 114 | ) |
| 111 | 115 | ||
| 116 | #define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \ | ||
| 117 | ((_b[0 + (_i) * 3] >> 4) & 0x0008) \ | ||
| 118 | ) | ||
| 119 | |||
| 112 | #define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \ | 120 | #define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \ |
| 113 | ((_b[0 + (_i) * 3] >> 3) & 0x0008) \ | 121 | ((_b[0 + (_i) * 3] >> 3) & 0x0008) \ |
| 114 | ) | 122 | ) |
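
The new SS4_PLUS X macros above shift coordinate bits out of two packet bytes per finger. The snippet below exercises the SS4_PLUS_STD_MF_X_V2 definition exactly as added, with made-up packet bytes, so the bit layout can be checked in user space.

#include <stdint.h>
#include <stdio.h>

/* Macro body copied from the alps.h hunk above; the sample bytes are
 * invented for illustration only. */
#define SS4_PLUS_STD_MF_X_V2(_b, _i)    (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
                                         ((_b[1 + (_i) * 3] << 4) & 0x0F80))

int main(void)
{
    uint8_t pkt[6] = { 0x37, 0xA5, 0x00, 0x12, 0x3C, 0x00 };

    printf("finger 0 x = 0x%03x\n", SS4_PLUS_STD_MF_X_V2(pkt, 0));
    printf("finger 1 x = 0x%03x\n", SS4_PLUS_STD_MF_X_V2(pkt, 1));
    return 0;
}
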
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 3b616cb7c67f..cfbc8ba4c96c 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
| @@ -1247,7 +1247,12 @@ static const struct acpi_device_id elan_acpi_id[] = { | |||
| 1247 | { "ELAN0000", 0 }, | 1247 | { "ELAN0000", 0 }, |
| 1248 | { "ELAN0100", 0 }, | 1248 | { "ELAN0100", 0 }, |
| 1249 | { "ELAN0600", 0 }, | 1249 | { "ELAN0600", 0 }, |
| 1250 | { "ELAN0602", 0 }, | ||
| 1250 | { "ELAN0605", 0 }, | 1251 | { "ELAN0605", 0 }, |
| 1252 | { "ELAN0608", 0 }, | ||
| 1253 | { "ELAN0605", 0 }, | ||
| 1254 | { "ELAN0609", 0 }, | ||
| 1255 | { "ELAN060B", 0 }, | ||
| 1251 | { "ELAN1000", 0 }, | 1256 | { "ELAN1000", 0 }, |
| 1252 | { } | 1257 | { } |
| 1253 | }; | 1258 | }; |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 16c30460ef04..5af0b7d200bc 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
| @@ -535,16 +535,17 @@ static void synaptics_apply_quirks(struct psmouse *psmouse, | |||
| 535 | } | 535 | } |
| 536 | } | 536 | } |
| 537 | 537 | ||
| 538 | static bool synaptics_has_agm(struct synaptics_data *priv) | ||
| 539 | { | ||
| 540 | return (SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) || | ||
| 541 | SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)); | ||
| 542 | } | ||
| 543 | |||
| 538 | static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse) | 544 | static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse) |
| 539 | { | 545 | { |
| 540 | static u8 param = 0xc8; | 546 | static u8 param = 0xc8; |
| 541 | struct synaptics_data *priv = psmouse->private; | ||
| 542 | int error; | 547 | int error; |
| 543 | 548 | ||
| 544 | if (!(SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) || | ||
| 545 | SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c))) | ||
| 546 | return 0; | ||
| 547 | |||
| 548 | error = psmouse_sliced_command(psmouse, SYN_QUE_MODEL); | 549 | error = psmouse_sliced_command(psmouse, SYN_QUE_MODEL); |
| 549 | if (error) | 550 | if (error) |
| 550 | return error; | 551 | return error; |
| @@ -553,9 +554,6 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse) | |||
| 553 | if (error) | 554 | if (error) |
| 554 | return error; | 555 | return error; |
| 555 | 556 | ||
| 556 | /* Advanced gesture mode also sends multi finger data */ | ||
| 557 | priv->info.capabilities |= BIT(1); | ||
| 558 | |||
| 559 | return 0; | 557 | return 0; |
| 560 | } | 558 | } |
| 561 | 559 | ||
| @@ -578,7 +576,7 @@ static int synaptics_set_mode(struct psmouse *psmouse) | |||
| 578 | if (error) | 576 | if (error) |
| 579 | return error; | 577 | return error; |
| 580 | 578 | ||
| 581 | if (priv->absolute_mode) { | 579 | if (priv->absolute_mode && synaptics_has_agm(priv)) { |
| 582 | error = synaptics_set_advanced_gesture_mode(psmouse); | 580 | error = synaptics_set_advanced_gesture_mode(psmouse); |
| 583 | if (error) { | 581 | if (error) { |
| 584 | psmouse_err(psmouse, | 582 | psmouse_err(psmouse, |
| @@ -766,9 +764,7 @@ static int synaptics_parse_hw_state(const u8 buf[], | |||
| 766 | ((buf[0] & 0x04) >> 1) | | 764 | ((buf[0] & 0x04) >> 1) | |
| 767 | ((buf[3] & 0x04) >> 2)); | 765 | ((buf[3] & 0x04) >> 2)); |
| 768 | 766 | ||
| 769 | if ((SYN_CAP_ADV_GESTURE(priv->info.ext_cap_0c) || | 767 | if (synaptics_has_agm(priv) && hw->w == 2) { |
| 770 | SYN_CAP_IMAGE_SENSOR(priv->info.ext_cap_0c)) && | ||
| 771 | hw->w == 2) { | ||
| 772 | synaptics_parse_agm(buf, priv, hw); | 768 | synaptics_parse_agm(buf, priv, hw); |
| 773 | return 1; | 769 | return 1; |
| 774 | } | 770 | } |
| @@ -1033,6 +1029,15 @@ static void synaptics_image_sensor_process(struct psmouse *psmouse, | |||
| 1033 | synaptics_report_mt_data(psmouse, sgm, num_fingers); | 1029 | synaptics_report_mt_data(psmouse, sgm, num_fingers); |
| 1034 | } | 1030 | } |
| 1035 | 1031 | ||
| 1032 | static bool synaptics_has_multifinger(struct synaptics_data *priv) | ||
| 1033 | { | ||
| 1034 | if (SYN_CAP_MULTIFINGER(priv->info.capabilities)) | ||
| 1035 | return true; | ||
| 1036 | |||
| 1037 | /* Advanced gesture mode also sends multi finger data */ | ||
| 1038 | return synaptics_has_agm(priv); | ||
| 1039 | } | ||
| 1040 | |||
| 1036 | /* | 1041 | /* |
| 1037 | * called for each full received packet from the touchpad | 1042 | * called for each full received packet from the touchpad |
| 1038 | */ | 1043 | */ |
| @@ -1079,7 +1084,7 @@ static void synaptics_process_packet(struct psmouse *psmouse) | |||
| 1079 | if (SYN_CAP_EXTENDED(info->capabilities)) { | 1084 | if (SYN_CAP_EXTENDED(info->capabilities)) { |
| 1080 | switch (hw.w) { | 1085 | switch (hw.w) { |
| 1081 | case 0 ... 1: | 1086 | case 0 ... 1: |
| 1082 | if (SYN_CAP_MULTIFINGER(info->capabilities)) | 1087 | if (synaptics_has_multifinger(priv)) |
| 1083 | num_fingers = hw.w + 2; | 1088 | num_fingers = hw.w + 2; |
| 1084 | break; | 1089 | break; |
| 1085 | case 2: | 1090 | case 2: |
| @@ -1123,7 +1128,7 @@ static void synaptics_process_packet(struct psmouse *psmouse) | |||
| 1123 | input_report_abs(dev, ABS_TOOL_WIDTH, finger_width); | 1128 | input_report_abs(dev, ABS_TOOL_WIDTH, finger_width); |
| 1124 | 1129 | ||
| 1125 | input_report_key(dev, BTN_TOOL_FINGER, num_fingers == 1); | 1130 | input_report_key(dev, BTN_TOOL_FINGER, num_fingers == 1); |
| 1126 | if (SYN_CAP_MULTIFINGER(info->capabilities)) { | 1131 | if (synaptics_has_multifinger(priv)) { |
| 1127 | input_report_key(dev, BTN_TOOL_DOUBLETAP, num_fingers == 2); | 1132 | input_report_key(dev, BTN_TOOL_DOUBLETAP, num_fingers == 2); |
| 1128 | input_report_key(dev, BTN_TOOL_TRIPLETAP, num_fingers == 3); | 1133 | input_report_key(dev, BTN_TOOL_TRIPLETAP, num_fingers == 3); |
| 1129 | } | 1134 | } |
| @@ -1283,7 +1288,7 @@ static void set_input_params(struct psmouse *psmouse, | |||
| 1283 | __set_bit(BTN_TOUCH, dev->keybit); | 1288 | __set_bit(BTN_TOUCH, dev->keybit); |
| 1284 | __set_bit(BTN_TOOL_FINGER, dev->keybit); | 1289 | __set_bit(BTN_TOOL_FINGER, dev->keybit); |
| 1285 | 1290 | ||
| 1286 | if (SYN_CAP_MULTIFINGER(info->capabilities)) { | 1291 | if (synaptics_has_multifinger(priv)) { |
| 1287 | __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit); | 1292 | __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit); |
| 1288 | __set_bit(BTN_TOOL_TRIPLETAP, dev->keybit); | 1293 | __set_bit(BTN_TOOL_TRIPLETAP, dev->keybit); |
| 1289 | } | 1294 | } |
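
The synaptics.c changes above fold the repeated "advanced gesture or image sensor" capability test into synaptics_has_agm() and derive multi-finger support from it, instead of patching the capability bits during setup. A simplified stand-alone rendering of that refactor follows; the bit positions are placeholders, not the real Synaptics register layout.

#include <stdbool.h>
#include <stdio.h>

#define CAP_ADV_GESTURE(c)   ((c) & 0x01)
#define CAP_IMAGE_SENSOR(c)  ((c) & 0x02)
#define CAP_MULTIFINGER(c)   ((c) & 0x04)

struct info { unsigned int ext_cap_0c, capabilities; };

static bool has_agm(const struct info *i)
{
    return CAP_ADV_GESTURE(i->ext_cap_0c) || CAP_IMAGE_SENSOR(i->ext_cap_0c);
}

static bool has_multifinger(const struct info *i)
{
    if (CAP_MULTIFINGER(i->capabilities))
        return true;
    /* advanced gesture mode also sends multi-finger data */
    return has_agm(i);
}

int main(void)
{
    struct info pad = { .ext_cap_0c = 0x01, .capabilities = 0 };

    printf("agm=%d multifinger=%d\n", has_agm(&pad), has_multifinger(&pad));
    return 0;
}
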
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 922ea02edcc3..0871010f18d5 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c | |||
| @@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir | |||
| 265 | if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) | 265 | if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) |
| 266 | return -1; | 266 | return -1; |
| 267 | 267 | ||
| 268 | if (param[0] != TP_MAGIC_IDENT) | 268 | /* Accept the newer TrackPoint IDs: TP_MAGIC_IDENT now acts as a bit mask */ |
| 269 | if (!(param[0] & TP_MAGIC_IDENT)) | ||
| 269 | return -1; | 270 | return -1; |
| 270 | 271 | ||
| 271 | if (firmware_id) | 272 | if (firmware_id) |
| @@ -380,8 +381,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties) | |||
| 380 | return 0; | 381 | return 0; |
| 381 | 382 | ||
| 382 | if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) { | 383 | if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) { |
| 383 | psmouse_warn(psmouse, "failed to get extended button data\n"); | 384 | psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n"); |
| 384 | button_info = 0; | 385 | button_info = 0x33; |
| 385 | } | 386 | } |
| 386 | 387 | ||
| 387 | psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL); | 388 | psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL); |
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 5617ed3a7d7a..88055755f82e 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h | |||
| @@ -21,8 +21,9 @@ | |||
| 21 | #define TP_COMMAND 0xE2 /* Commands start with this */ | 21 | #define TP_COMMAND 0xE2 /* Commands start with this */ |
| 22 | 22 | ||
| 23 | #define TP_READ_ID 0xE1 /* Sent for device identification */ | 23 | #define TP_READ_ID 0xE1 /* Sent for device identification */ |
| 24 | #define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */ | 24 | #define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */ |
| 25 | /* by the firmware ID */ | 25 | /* by the firmware ID */ |
| 26 | /* Firmware ID includes 0x1, 0x2, 0x3 */ | ||
| 26 | 27 | ||
| 27 | 28 | ||
| 28 | /* | 29 | /* |
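
With TP_MAGIC_IDENT widened to 0x03 and the equality test turned into a mask test, firmware IDs 0x1, 0x2 and 0x3 are all accepted while 0x0 is still rejected. A quick stand-alone check of that behaviour:

#include <stdio.h>

#define TP_MAGIC_IDENT 0x03    /* value from the trackpoint.h hunk above */

static int id_ok(unsigned char id)
{
    return !!(id & TP_MAGIC_IDENT);
}

int main(void)
{
    for (unsigned char id = 0; id <= 3; id++)
        printf("id 0x%02x -> %s\n", id, id_ok(id) ? "accepted" : "rejected");
    return 0;
}
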
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 688e77576e5a..354cbd6392cd 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -4452,6 +4452,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) | |||
| 4452 | /* Setting */ | 4452 | /* Setting */ |
| 4453 | irte->hi.fields.ga_root_ptr = (pi_data->base >> 12); | 4453 | irte->hi.fields.ga_root_ptr = (pi_data->base >> 12); |
| 4454 | irte->hi.fields.vector = vcpu_pi_info->vector; | 4454 | irte->hi.fields.vector = vcpu_pi_info->vector; |
| 4455 | irte->lo.fields_vapic.ga_log_intr = 1; | ||
| 4455 | irte->lo.fields_vapic.guest_mode = 1; | 4456 | irte->lo.fields_vapic.guest_mode = 1; |
| 4456 | irte->lo.fields_vapic.ga_tag = pi_data->ga_tag; | 4457 | irte->lo.fields_vapic.ga_tag = pi_data->ga_tag; |
| 4457 | 4458 | ||
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 5cc597b383c7..372303700566 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
| @@ -2440,11 +2440,11 @@ static int __init state_next(void) | |||
| 2440 | break; | 2440 | break; |
| 2441 | case IOMMU_ACPI_FINISHED: | 2441 | case IOMMU_ACPI_FINISHED: |
| 2442 | early_enable_iommus(); | 2442 | early_enable_iommus(); |
| 2443 | register_syscore_ops(&amd_iommu_syscore_ops); | ||
| 2444 | x86_platform.iommu_shutdown = disable_iommus; | 2443 | x86_platform.iommu_shutdown = disable_iommus; |
| 2445 | init_state = IOMMU_ENABLED; | 2444 | init_state = IOMMU_ENABLED; |
| 2446 | break; | 2445 | break; |
| 2447 | case IOMMU_ENABLED: | 2446 | case IOMMU_ENABLED: |
| 2447 | register_syscore_ops(&amd_iommu_syscore_ops); | ||
| 2448 | ret = amd_iommu_init_pci(); | 2448 | ret = amd_iommu_init_pci(); |
| 2449 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; | 2449 | init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; |
| 2450 | enable_iommus_v2(); | 2450 | enable_iommus_v2(); |
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 294a409e283b..d6b873b57054 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h | |||
| @@ -574,7 +574,9 @@ struct amd_iommu { | |||
| 574 | 574 | ||
| 575 | static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) | 575 | static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) |
| 576 | { | 576 | { |
| 577 | return container_of(dev, struct amd_iommu, iommu.dev); | 577 | struct iommu_device *iommu = dev_to_iommu_device(dev); |
| 578 | |||
| 579 | return container_of(iommu, struct amd_iommu, iommu); | ||
| 578 | } | 580 | } |
| 579 | 581 | ||
| 580 | #define ACPIHID_UID_LEN 256 | 582 | #define ACPIHID_UID_LEN 256 |
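
dev_to_amd_iommu() (and the matching Intel helper further down) now goes through dev_to_iommu_device() plus container_of() instead of assuming the sysfs struct device is embedded directly in the driver structure; the iommu-sysfs.c hunk later in this series backs that lookup with dev_set_drvdata(). The stand-alone model below shows only the container_of() recovery step, with simplified stand-in types.

#include <stddef.h>
#include <stdio.h>

/* Local copy of the container_of idiom for a user-space build. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct iommu_device { int dummy; };

struct amd_iommu {
    int index;
    struct iommu_device iommu;   /* embedded generic part */
};

static struct amd_iommu *to_amd_iommu(struct iommu_device *iommu)
{
    return container_of(iommu, struct amd_iommu, iommu);
}

int main(void)
{
    struct amd_iommu amd = { .index = 7 };

    printf("recovered index = %d\n", to_amd_iommu(&amd.iommu)->index);
    return 0;
}
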
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 6629c472eafd..dccf5b76eff2 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c | |||
| @@ -391,13 +391,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn, | |||
| 391 | return 0; | 391 | return 0; |
| 392 | } | 392 | } |
| 393 | 393 | ||
| 394 | static void mn_invalidate_page(struct mmu_notifier *mn, | ||
| 395 | struct mm_struct *mm, | ||
| 396 | unsigned long address) | ||
| 397 | { | ||
| 398 | __mn_flush_page(mn, address); | ||
| 399 | } | ||
| 400 | |||
| 401 | static void mn_invalidate_range(struct mmu_notifier *mn, | 394 | static void mn_invalidate_range(struct mmu_notifier *mn, |
| 402 | struct mm_struct *mm, | 395 | struct mm_struct *mm, |
| 403 | unsigned long start, unsigned long end) | 396 | unsigned long start, unsigned long end) |
| @@ -436,7 +429,6 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) | |||
| 436 | static const struct mmu_notifier_ops iommu_mn = { | 429 | static const struct mmu_notifier_ops iommu_mn = { |
| 437 | .release = mn_release, | 430 | .release = mn_release, |
| 438 | .clear_flush_young = mn_clear_flush_young, | 431 | .clear_flush_young = mn_clear_flush_young, |
| 439 | .invalidate_page = mn_invalidate_page, | ||
| 440 | .invalidate_range = mn_invalidate_range, | 432 | .invalidate_range = mn_invalidate_range, |
| 441 | }; | 433 | }; |
| 442 | 434 | ||
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index bc89b4d6c043..2d80fa8a0634 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
| @@ -400,6 +400,8 @@ struct arm_smmu_device { | |||
| 400 | 400 | ||
| 401 | u32 cavium_id_base; /* Specific to Cavium */ | 401 | u32 cavium_id_base; /* Specific to Cavium */ |
| 402 | 402 | ||
| 403 | spinlock_t global_sync_lock; | ||
| 404 | |||
| 403 | /* IOMMU core code handle */ | 405 | /* IOMMU core code handle */ |
| 404 | struct iommu_device iommu; | 406 | struct iommu_device iommu; |
| 405 | }; | 407 | }; |
| @@ -436,7 +438,7 @@ struct arm_smmu_domain { | |||
| 436 | struct arm_smmu_cfg cfg; | 438 | struct arm_smmu_cfg cfg; |
| 437 | enum arm_smmu_domain_stage stage; | 439 | enum arm_smmu_domain_stage stage; |
| 438 | struct mutex init_mutex; /* Protects smmu pointer */ | 440 | struct mutex init_mutex; /* Protects smmu pointer */ |
| 439 | spinlock_t cb_lock; /* Serialises ATS1* ops */ | 441 | spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */ |
| 440 | struct iommu_domain domain; | 442 | struct iommu_domain domain; |
| 441 | }; | 443 | }; |
| 442 | 444 | ||
| @@ -602,9 +604,12 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, | |||
| 602 | static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) | 604 | static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu) |
| 603 | { | 605 | { |
| 604 | void __iomem *base = ARM_SMMU_GR0(smmu); | 606 | void __iomem *base = ARM_SMMU_GR0(smmu); |
| 607 | unsigned long flags; | ||
| 605 | 608 | ||
| 609 | spin_lock_irqsave(&smmu->global_sync_lock, flags); | ||
| 606 | __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC, | 610 | __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC, |
| 607 | base + ARM_SMMU_GR0_sTLBGSTATUS); | 611 | base + ARM_SMMU_GR0_sTLBGSTATUS); |
| 612 | spin_unlock_irqrestore(&smmu->global_sync_lock, flags); | ||
| 608 | } | 613 | } |
| 609 | 614 | ||
| 610 | static void arm_smmu_tlb_sync_context(void *cookie) | 615 | static void arm_smmu_tlb_sync_context(void *cookie) |
| @@ -612,9 +617,12 @@ static void arm_smmu_tlb_sync_context(void *cookie) | |||
| 612 | struct arm_smmu_domain *smmu_domain = cookie; | 617 | struct arm_smmu_domain *smmu_domain = cookie; |
| 613 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 618 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
| 614 | void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); | 619 | void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx); |
| 620 | unsigned long flags; | ||
| 615 | 621 | ||
| 622 | spin_lock_irqsave(&smmu_domain->cb_lock, flags); | ||
| 616 | __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC, | 623 | __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC, |
| 617 | base + ARM_SMMU_CB_TLBSTATUS); | 624 | base + ARM_SMMU_CB_TLBSTATUS); |
| 625 | spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); | ||
| 618 | } | 626 | } |
| 619 | 627 | ||
| 620 | static void arm_smmu_tlb_sync_vmid(void *cookie) | 628 | static void arm_smmu_tlb_sync_vmid(void *cookie) |
| @@ -1511,6 +1519,12 @@ static int arm_smmu_add_device(struct device *dev) | |||
| 1511 | 1519 | ||
| 1512 | if (using_legacy_binding) { | 1520 | if (using_legacy_binding) { |
| 1513 | ret = arm_smmu_register_legacy_master(dev, &smmu); | 1521 | ret = arm_smmu_register_legacy_master(dev, &smmu); |
| 1522 | |||
| 1523 | /* | ||
| 1524 | * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master() | ||
| 1525 | * will allocate/initialise a new one. Thus we need to update fwspec for | ||
| 1526 | * later use. | ||
| 1527 | */ | ||
| 1514 | fwspec = dev->iommu_fwspec; | 1528 | fwspec = dev->iommu_fwspec; |
| 1515 | if (ret) | 1529 | if (ret) |
| 1516 | goto out_free; | 1530 | goto out_free; |
| @@ -1550,15 +1564,15 @@ static int arm_smmu_add_device(struct device *dev) | |||
| 1550 | 1564 | ||
| 1551 | ret = arm_smmu_master_alloc_smes(dev); | 1565 | ret = arm_smmu_master_alloc_smes(dev); |
| 1552 | if (ret) | 1566 | if (ret) |
| 1553 | goto out_free; | 1567 | goto out_cfg_free; |
| 1554 | 1568 | ||
| 1555 | iommu_device_link(&smmu->iommu, dev); | 1569 | iommu_device_link(&smmu->iommu, dev); |
| 1556 | 1570 | ||
| 1557 | return 0; | 1571 | return 0; |
| 1558 | 1572 | ||
| 1573 | out_cfg_free: | ||
| 1574 | kfree(cfg); | ||
| 1559 | out_free: | 1575 | out_free: |
| 1560 | if (fwspec) | ||
| 1561 | kfree(fwspec->iommu_priv); | ||
| 1562 | iommu_fwspec_free(dev); | 1576 | iommu_fwspec_free(dev); |
| 1563 | return ret; | 1577 | return ret; |
| 1564 | } | 1578 | } |
| @@ -1925,6 +1939,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
| 1925 | 1939 | ||
| 1926 | smmu->num_mapping_groups = size; | 1940 | smmu->num_mapping_groups = size; |
| 1927 | mutex_init(&smmu->stream_map_mutex); | 1941 | mutex_init(&smmu->stream_map_mutex); |
| 1942 | spin_lock_init(&smmu->global_sync_lock); | ||
| 1928 | 1943 | ||
| 1929 | if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) { | 1944 | if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) { |
| 1930 | smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; | 1945 | smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; |
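
The out_cfg_free label added to arm_smmu_add_device() follows the usual kernel error-unwind ladder: each failure label frees exactly what had been allocated by that point, and later allocations jump to their own label above the earlier ones. A toy stand-alone rendering of the pattern, with purely illustrative names and allocations:

#include <stdio.h>
#include <stdlib.h>

static int example_probe(int fail_late)
{
    char *fwspec, *cfg;

    fwspec = malloc(16);
    if (!fwspec)
        return -1;

    cfg = malloc(32);
    if (!cfg)
        goto out_free;          /* only fwspec exists so far */

    if (fail_late)              /* e.g. a late resource allocation failing */
        goto out_cfg_free;      /* cfg must be released before fwspec */

    free(cfg);                  /* success path (kept trivial here) */
    free(fwspec);
    return 0;

out_cfg_free:
    free(cfg);
out_free:
    free(fwspec);
    return -1;
}

int main(void)
{
    printf("probe(fail_late=1) -> %d\n", example_probe(1));
    printf("probe(fail_late=0) -> %d\n", example_probe(0));
    return 0;
}
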
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 687f18f65cea..3e8636f1220e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -4736,7 +4736,9 @@ static void intel_disable_iommus(void) | |||
| 4736 | 4736 | ||
| 4737 | static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) | 4737 | static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) |
| 4738 | { | 4738 | { |
| 4739 | return container_of(dev, struct intel_iommu, iommu.dev); | 4739 | struct iommu_device *iommu_dev = dev_to_iommu_device(dev); |
| 4740 | |||
| 4741 | return container_of(iommu_dev, struct intel_iommu, iommu); | ||
| 4740 | } | 4742 | } |
| 4741 | 4743 | ||
| 4742 | static ssize_t intel_iommu_show_version(struct device *dev, | 4744 | static ssize_t intel_iommu_show_version(struct device *dev, |
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index f167c0d84ebf..f620dccec8ee 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c | |||
| @@ -223,14 +223,6 @@ static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm, | |||
| 223 | intel_flush_svm_range(svm, address, 1, 1, 0); | 223 | intel_flush_svm_range(svm, address, 1, 1, 0); |
| 224 | } | 224 | } |
| 225 | 225 | ||
| 226 | static void intel_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm, | ||
| 227 | unsigned long address) | ||
| 228 | { | ||
| 229 | struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); | ||
| 230 | |||
| 231 | intel_flush_svm_range(svm, address, 1, 1, 0); | ||
| 232 | } | ||
| 233 | |||
| 234 | /* Pages have been freed at this point */ | 226 | /* Pages have been freed at this point */ |
| 235 | static void intel_invalidate_range(struct mmu_notifier *mn, | 227 | static void intel_invalidate_range(struct mmu_notifier *mn, |
| 236 | struct mm_struct *mm, | 228 | struct mm_struct *mm, |
| @@ -285,7 +277,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) | |||
| 285 | static const struct mmu_notifier_ops intel_mmuops = { | 277 | static const struct mmu_notifier_ops intel_mmuops = { |
| 286 | .release = intel_mm_release, | 278 | .release = intel_mm_release, |
| 287 | .change_pte = intel_change_pte, | 279 | .change_pte = intel_change_pte, |
| 288 | .invalidate_page = intel_invalidate_page, | ||
| 289 | .invalidate_range = intel_invalidate_range, | 280 | .invalidate_range = intel_invalidate_range, |
| 290 | }; | 281 | }; |
| 291 | 282 | ||
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index af330f513653..d665d0dc16e8 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c | |||
| @@ -479,6 +479,9 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, | |||
| 479 | if (!(prot & (IOMMU_READ | IOMMU_WRITE))) | 479 | if (!(prot & (IOMMU_READ | IOMMU_WRITE))) |
| 480 | return 0; | 480 | return 0; |
| 481 | 481 | ||
| 482 | if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr))) | ||
| 483 | return -ERANGE; | ||
| 484 | |||
| 482 | ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd); | 485 | ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd); |
| 483 | /* | 486 | /* |
| 484 | * Synchronise all PTE updates for the new mapping before there's | 487 | * Synchronise all PTE updates for the new mapping before there's |
| @@ -659,6 +662,9 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, | |||
| 659 | struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); | 662 | struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); |
| 660 | size_t unmapped; | 663 | size_t unmapped; |
| 661 | 664 | ||
| 665 | if (WARN_ON(upper_32_bits(iova))) | ||
| 666 | return 0; | ||
| 667 | |||
| 662 | unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd); | 668 | unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd); |
| 663 | if (unmapped) | 669 | if (unmapped) |
| 664 | io_pgtable_tlb_sync(&data->iop); | 670 | io_pgtable_tlb_sync(&data->iop); |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index b182039862c5..e8018a308868 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
| @@ -452,6 +452,10 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, | |||
| 452 | if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) | 452 | if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) |
| 453 | return 0; | 453 | return 0; |
| 454 | 454 | ||
| 455 | if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || | ||
| 456 | paddr >= (1ULL << data->iop.cfg.oas))) | ||
| 457 | return -ERANGE; | ||
| 458 | |||
| 455 | prot = arm_lpae_prot_to_pte(data, iommu_prot); | 459 | prot = arm_lpae_prot_to_pte(data, iommu_prot); |
| 456 | ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); | 460 | ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); |
| 457 | /* | 461 | /* |
| @@ -610,6 +614,9 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, | |||
| 610 | arm_lpae_iopte *ptep = data->pgd; | 614 | arm_lpae_iopte *ptep = data->pgd; |
| 611 | int lvl = ARM_LPAE_START_LVL(data); | 615 | int lvl = ARM_LPAE_START_LVL(data); |
| 612 | 616 | ||
| 617 | if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias))) | ||
| 618 | return 0; | ||
| 619 | |||
| 613 | unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep); | 620 | unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep); |
| 614 | if (unmapped) | 621 | if (unmapped) |
| 615 | io_pgtable_tlb_sync(&data->iop); | 622 | io_pgtable_tlb_sync(&data->iop); |
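
The WARN_ON checks added to arm_lpae_map()/arm_lpae_unmap() reject any IOVA or physical address that does not fit the configured input/output address sizes before the upper bits could be silently truncated by the table walk. The same test in stand-alone form, with example address sizes:

#include <stdint.h>
#include <stdio.h>

static int check_map(uint64_t iova, uint64_t paddr,
                     unsigned int ias, unsigned int oas)
{
    if (iova >= (1ULL << ias) || paddr >= (1ULL << oas))
        return -1;              /* the driver returns -ERANGE here */
    return 0;
}

int main(void)
{
    /* 48-bit input space, 40-bit output space, purely as an example */
    printf("%d\n", check_map(0x0000ffff0000ULL, 0x1000, 48, 40)); /* ok */
    printf("%d\n", check_map(1ULL << 49, 0x1000, 48, 40));        /* rejected */
    return 0;
}
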
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index 524263a7ae6f..a3e667077b14 100644 --- a/drivers/iommu/io-pgtable.h +++ b/drivers/iommu/io-pgtable.h | |||
| @@ -158,14 +158,12 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops); | |||
| 158 | * @fmt: The page table format. | 158 | * @fmt: The page table format. |
| 159 | * @cookie: An opaque token provided by the IOMMU driver and passed back to | 159 | * @cookie: An opaque token provided by the IOMMU driver and passed back to |
| 160 | * any callback routines. | 160 | * any callback routines. |
| 161 | * @tlb_sync_pending: Private flag for optimising out redundant syncs. | ||
| 162 | * @cfg: A copy of the page table configuration. | 161 | * @cfg: A copy of the page table configuration. |
| 163 | * @ops: The page table operations in use for this set of page tables. | 162 | * @ops: The page table operations in use for this set of page tables. |
| 164 | */ | 163 | */ |
| 165 | struct io_pgtable { | 164 | struct io_pgtable { |
| 166 | enum io_pgtable_fmt fmt; | 165 | enum io_pgtable_fmt fmt; |
| 167 | void *cookie; | 166 | void *cookie; |
| 168 | bool tlb_sync_pending; | ||
| 169 | struct io_pgtable_cfg cfg; | 167 | struct io_pgtable_cfg cfg; |
| 170 | struct io_pgtable_ops ops; | 168 | struct io_pgtable_ops ops; |
| 171 | }; | 169 | }; |
| @@ -175,22 +173,17 @@ struct io_pgtable { | |||
| 175 | static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) | 173 | static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) |
| 176 | { | 174 | { |
| 177 | iop->cfg.tlb->tlb_flush_all(iop->cookie); | 175 | iop->cfg.tlb->tlb_flush_all(iop->cookie); |
| 178 | iop->tlb_sync_pending = true; | ||
| 179 | } | 176 | } |
| 180 | 177 | ||
| 181 | static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, | 178 | static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, |
| 182 | unsigned long iova, size_t size, size_t granule, bool leaf) | 179 | unsigned long iova, size_t size, size_t granule, bool leaf) |
| 183 | { | 180 | { |
| 184 | iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); | 181 | iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); |
| 185 | iop->tlb_sync_pending = true; | ||
| 186 | } | 182 | } |
| 187 | 183 | ||
| 188 | static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) | 184 | static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) |
| 189 | { | 185 | { |
| 190 | if (iop->tlb_sync_pending) { | 186 | iop->cfg.tlb->tlb_sync(iop->cookie); |
| 191 | iop->cfg.tlb->tlb_sync(iop->cookie); | ||
| 192 | iop->tlb_sync_pending = false; | ||
| 193 | } | ||
| 194 | } | 187 | } |
| 195 | 188 | ||
| 196 | /** | 189 | /** |
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c index c58351ed61c1..36d1a7ce7fc4 100644 --- a/drivers/iommu/iommu-sysfs.c +++ b/drivers/iommu/iommu-sysfs.c | |||
| @@ -62,32 +62,40 @@ int iommu_device_sysfs_add(struct iommu_device *iommu, | |||
| 62 | va_list vargs; | 62 | va_list vargs; |
| 63 | int ret; | 63 | int ret; |
| 64 | 64 | ||
| 65 | device_initialize(&iommu->dev); | 65 | iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL); |
| 66 | if (!iommu->dev) | ||
| 67 | return -ENOMEM; | ||
| 66 | 68 | ||
| 67 | iommu->dev.class = &iommu_class; | 69 | device_initialize(iommu->dev); |
| 68 | iommu->dev.parent = parent; | 70 | |
| 69 | iommu->dev.groups = groups; | 71 | iommu->dev->class = &iommu_class; |
| 72 | iommu->dev->parent = parent; | ||
| 73 | iommu->dev->groups = groups; | ||
| 70 | 74 | ||
| 71 | va_start(vargs, fmt); | 75 | va_start(vargs, fmt); |
| 72 | ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs); | 76 | ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs); |
| 73 | va_end(vargs); | 77 | va_end(vargs); |
| 74 | if (ret) | 78 | if (ret) |
| 75 | goto error; | 79 | goto error; |
| 76 | 80 | ||
| 77 | ret = device_add(&iommu->dev); | 81 | ret = device_add(iommu->dev); |
| 78 | if (ret) | 82 | if (ret) |
| 79 | goto error; | 83 | goto error; |
| 80 | 84 | ||
| 85 | dev_set_drvdata(iommu->dev, iommu); | ||
| 86 | |||
| 81 | return 0; | 87 | return 0; |
| 82 | 88 | ||
| 83 | error: | 89 | error: |
| 84 | put_device(&iommu->dev); | 90 | put_device(iommu->dev); |
| 85 | return ret; | 91 | return ret; |
| 86 | } | 92 | } |
| 87 | 93 | ||
| 88 | void iommu_device_sysfs_remove(struct iommu_device *iommu) | 94 | void iommu_device_sysfs_remove(struct iommu_device *iommu) |
| 89 | { | 95 | { |
| 90 | device_unregister(&iommu->dev); | 96 | dev_set_drvdata(iommu->dev, NULL); |
| 97 | device_unregister(iommu->dev); | ||
| 98 | iommu->dev = NULL; | ||
| 91 | } | 99 | } |
| 92 | /* | 100 | /* |
| 93 | * IOMMU drivers can indicate a device is managed by a given IOMMU using | 101 | * IOMMU drivers can indicate a device is managed by a given IOMMU using |
| @@ -102,14 +110,14 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link) | |||
| 102 | if (!iommu || IS_ERR(iommu)) | 110 | if (!iommu || IS_ERR(iommu)) |
| 103 | return -ENODEV; | 111 | return -ENODEV; |
| 104 | 112 | ||
| 105 | ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices", | 113 | ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices", |
| 106 | &link->kobj, dev_name(link)); | 114 | &link->kobj, dev_name(link)); |
| 107 | if (ret) | 115 | if (ret) |
| 108 | return ret; | 116 | return ret; |
| 109 | 117 | ||
| 110 | ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu"); | 118 | ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev->kobj, "iommu"); |
| 111 | if (ret) | 119 | if (ret) |
| 112 | sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", | 120 | sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", |
| 113 | dev_name(link)); | 121 | dev_name(link)); |
| 114 | 122 | ||
| 115 | return ret; | 123 | return ret; |
| @@ -121,5 +129,5 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link) | |||
| 121 | return; | 129 | return; |
| 122 | 130 | ||
| 123 | sysfs_remove_link(&link->kobj, "iommu"); | 131 | sysfs_remove_link(&link->kobj, "iommu"); |
| 124 | sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link)); | 132 | sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link)); |
| 125 | } | 133 | } |
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 5d14cd15198d..91c6d367ab35 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c | |||
| @@ -129,6 +129,7 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size, | |||
| 129 | writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A); | 129 | writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A); |
| 130 | writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A); | 130 | writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A); |
| 131 | writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE); | 131 | writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE); |
| 132 | data->tlb_flush_active = true; | ||
| 132 | } | 133 | } |
| 133 | 134 | ||
| 134 | static void mtk_iommu_tlb_sync(void *cookie) | 135 | static void mtk_iommu_tlb_sync(void *cookie) |
| @@ -137,6 +138,10 @@ static void mtk_iommu_tlb_sync(void *cookie) | |||
| 137 | int ret; | 138 | int ret; |
| 138 | u32 tmp; | 139 | u32 tmp; |
| 139 | 140 | ||
| 141 | /* Avoid timing out if there's nothing to wait for */ | ||
| 142 | if (!data->tlb_flush_active) | ||
| 143 | return; | ||
| 144 | |||
| 140 | ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp, | 145 | ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp, |
| 141 | tmp != 0, 10, 100000); | 146 | tmp != 0, 10, 100000); |
| 142 | if (ret) { | 147 | if (ret) { |
| @@ -146,6 +151,7 @@ static void mtk_iommu_tlb_sync(void *cookie) | |||
| 146 | } | 151 | } |
| 147 | /* Clear the CPE status */ | 152 | /* Clear the CPE status */ |
| 148 | writel_relaxed(0, data->base + REG_MMU_CPE_DONE); | 153 | writel_relaxed(0, data->base + REG_MMU_CPE_DONE); |
| 154 | data->tlb_flush_active = false; | ||
| 149 | } | 155 | } |
| 150 | 156 | ||
| 151 | static const struct iommu_gather_ops mtk_iommu_gather_ops = { | 157 | static const struct iommu_gather_ops mtk_iommu_gather_ops = { |
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 2a28eadeea0e..c06cc91b5d9a 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h | |||
| @@ -47,6 +47,7 @@ struct mtk_iommu_data { | |||
| 47 | struct iommu_group *m4u_group; | 47 | struct iommu_group *m4u_group; |
| 48 | struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ | 48 | struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ |
| 49 | bool enable_4GB; | 49 | bool enable_4GB; |
| 50 | bool tlb_flush_active; | ||
| 50 | 51 | ||
| 51 | struct iommu_device iommu; | 52 | struct iommu_device iommu; |
| 52 | }; | 53 | }; |
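
The new tlb_flush_active flag makes mtk_iommu_tlb_sync() a no-op when no flush has actually been queued, instead of polling REG_MMU_CPE_DONE until the poll times out. A stand-alone model of that guard; the "hardware" below is just a flag and every name is illustrative.

#include <stdbool.h>
#include <stdio.h>

static bool tlb_flush_active;
static bool cpe_done;           /* models REG_MMU_CPE_DONE */

static void tlb_add_flush(void)
{
    /* ... write the invalidation range registers ... */
    cpe_done = true;            /* hardware raises this when finished */
    tlb_flush_active = true;
}

static void tlb_sync(void)
{
    if (!tlb_flush_active)      /* nothing queued: skip the poll entirely */
        return;

    while (!cpe_done)           /* real code: readl_poll_timeout_atomic() */
        ;
    cpe_done = false;           /* clear the completion status */
    tlb_flush_active = false;
}

int main(void)
{
    tlb_sync();                 /* returns immediately, no timeout */
    tlb_add_flush();
    tlb_sync();                 /* waits for the (already raised) completion */
    puts("both syncs returned promptly");
    return 0;
}
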
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index 28b26c80f4cf..072bd227b6c6 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c | |||
| @@ -137,14 +137,14 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain) | |||
| 137 | #define AT91_RTC_IMR 0x28 | 137 | #define AT91_RTC_IMR 0x28 |
| 138 | #define AT91_RTC_IRQ_MASK 0x1f | 138 | #define AT91_RTC_IRQ_MASK 0x1f |
| 139 | 139 | ||
| 140 | void __init aic_common_rtc_irq_fixup(struct device_node *root) | 140 | void __init aic_common_rtc_irq_fixup(void) |
| 141 | { | 141 | { |
| 142 | struct device_node *np; | 142 | struct device_node *np; |
| 143 | void __iomem *regs; | 143 | void __iomem *regs; |
| 144 | 144 | ||
| 145 | np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc"); | 145 | np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc"); |
| 146 | if (!np) | 146 | if (!np) |
| 147 | np = of_find_compatible_node(root, NULL, | 147 | np = of_find_compatible_node(NULL, NULL, |
| 148 | "atmel,at91sam9x5-rtc"); | 148 | "atmel,at91sam9x5-rtc"); |
| 149 | 149 | ||
| 150 | if (!np) | 150 | if (!np) |
| @@ -165,7 +165,7 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root) | |||
| 165 | #define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */ | 165 | #define AT91_RTT_ALMIEN (1 << 16) /* Alarm Interrupt Enable */ |
| 166 | #define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */ | 166 | #define AT91_RTT_RTTINCIEN (1 << 17) /* Real Time Timer Increment Interrupt Enable */ |
| 167 | 167 | ||
| 168 | void __init aic_common_rtt_irq_fixup(struct device_node *root) | 168 | void __init aic_common_rtt_irq_fixup(void) |
| 169 | { | 169 | { |
| 170 | struct device_node *np; | 170 | struct device_node *np; |
| 171 | void __iomem *regs; | 171 | void __iomem *regs; |
| @@ -196,11 +196,10 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches) | |||
| 196 | return; | 196 | return; |
| 197 | 197 | ||
| 198 | match = of_match_node(matches, root); | 198 | match = of_match_node(matches, root); |
| 199 | of_node_put(root); | ||
| 200 | 199 | ||
| 201 | if (match) { | 200 | if (match) { |
| 202 | void (*fixup)(struct device_node *) = match->data; | 201 | void (*fixup)(void) = match->data; |
| 203 | fixup(root); | 202 | fixup(); |
| 204 | } | 203 | } |
| 205 | 204 | ||
| 206 | of_node_put(root); | 205 | of_node_put(root); |
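
aic_common_irq_fixup() was dropping its reference to the root node twice: once right after of_match_node() and again at the end of the function. With the early put removed (and the fixups no longer taking the node at all), exactly one find/put pair remains. The toy counter below makes the imbalance visible; of_node_get/of_node_put are modelled as a plain counter.

#include <stdio.h>

static int refcount;

static void node_get(void) { refcount++; }   /* of_find_node_by_path("/") */
static void node_put(void) { refcount--; }   /* of_node_put(root) */

static void irq_fixup(int early_put)
{
    node_get();

    /* of_match_node() and the fixup callback would run here */
    if (early_put)
        node_put();             /* the extra put that was removed */

    node_put();                 /* the put kept at the end */
}

int main(void)
{
    irq_fixup(1);
    printf("with the early put: refcount = %d (underflow)\n", refcount);

    refcount = 0;
    irq_fixup(0);
    printf("fixed version:      refcount = %d\n", refcount);
    return 0;
}
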
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h index af60376d50de..242e62c1851e 100644 --- a/drivers/irqchip/irq-atmel-aic-common.h +++ b/drivers/irqchip/irq-atmel-aic-common.h | |||
| @@ -33,8 +33,8 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node, | |||
| 33 | const char *name, int nirqs, | 33 | const char *name, int nirqs, |
| 34 | const struct of_device_id *matches); | 34 | const struct of_device_id *matches); |
| 35 | 35 | ||
| 36 | void __init aic_common_rtc_irq_fixup(struct device_node *root); | 36 | void __init aic_common_rtc_irq_fixup(void); |
| 37 | 37 | ||
| 38 | void __init aic_common_rtt_irq_fixup(struct device_node *root); | 38 | void __init aic_common_rtt_irq_fixup(void); |
| 39 | 39 | ||
| 40 | #endif /* __IRQ_ATMEL_AIC_COMMON_H */ | 40 | #endif /* __IRQ_ATMEL_AIC_COMMON_H */ |
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c index 37f952dd9fc9..bb1ad451392f 100644 --- a/drivers/irqchip/irq-atmel-aic.c +++ b/drivers/irqchip/irq-atmel-aic.c | |||
| @@ -209,20 +209,20 @@ static const struct irq_domain_ops aic_irq_ops = { | |||
| 209 | .xlate = aic_irq_domain_xlate, | 209 | .xlate = aic_irq_domain_xlate, |
| 210 | }; | 210 | }; |
| 211 | 211 | ||
| 212 | static void __init at91rm9200_aic_irq_fixup(struct device_node *root) | 212 | static void __init at91rm9200_aic_irq_fixup(void) |
| 213 | { | 213 | { |
| 214 | aic_common_rtc_irq_fixup(root); | 214 | aic_common_rtc_irq_fixup(); |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | static void __init at91sam9260_aic_irq_fixup(struct device_node *root) | 217 | static void __init at91sam9260_aic_irq_fixup(void) |
| 218 | { | 218 | { |
| 219 | aic_common_rtt_irq_fixup(root); | 219 | aic_common_rtt_irq_fixup(); |
| 220 | } | 220 | } |
| 221 | 221 | ||
| 222 | static void __init at91sam9g45_aic_irq_fixup(struct device_node *root) | 222 | static void __init at91sam9g45_aic_irq_fixup(void) |
| 223 | { | 223 | { |
| 224 | aic_common_rtc_irq_fixup(root); | 224 | aic_common_rtc_irq_fixup(); |
| 225 | aic_common_rtt_irq_fixup(root); | 225 | aic_common_rtt_irq_fixup(); |
| 226 | } | 226 | } |
| 227 | 227 | ||
| 228 | static const struct of_device_id aic_irq_fixups[] __initconst = { | 228 | static const struct of_device_id aic_irq_fixups[] __initconst = { |
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index c04ee9a23d09..6acad2ea0fb3 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c | |||
| @@ -305,9 +305,9 @@ static const struct irq_domain_ops aic5_irq_ops = { | |||
| 305 | .xlate = aic5_irq_domain_xlate, | 305 | .xlate = aic5_irq_domain_xlate, |
| 306 | }; | 306 | }; |
| 307 | 307 | ||
| 308 | static void __init sama5d3_aic_irq_fixup(struct device_node *root) | 308 | static void __init sama5d3_aic_irq_fixup(void) |
| 309 | { | 309 | { |
| 310 | aic_common_rtc_irq_fixup(root); | 310 | aic_common_rtc_irq_fixup(); |
| 311 | } | 311 | } |
| 312 | 312 | ||
| 313 | static const struct of_device_id aic5_irq_fixups[] __initconst = { | 313 | static const struct of_device_id aic5_irq_fixups[] __initconst = { |
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index bddf169c4b37..b009b916a292 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c | |||
| @@ -189,6 +189,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, | |||
| 189 | 189 | ||
| 190 | ct->chip.irq_suspend = brcmstb_l2_intc_suspend; | 190 | ct->chip.irq_suspend = brcmstb_l2_intc_suspend; |
| 191 | ct->chip.irq_resume = brcmstb_l2_intc_resume; | 191 | ct->chip.irq_resume = brcmstb_l2_intc_resume; |
| 192 | ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend; | ||
| 192 | 193 | ||
| 193 | if (data->can_wake) { | 194 | if (data->can_wake) { |
| 194 | /* This IRQ chip can wake the system, set all child interrupts | 195 | /* This IRQ chip can wake the system, set all child interrupts |
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index 249240d9a425..833a90fe33ae 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c | |||
| @@ -43,6 +43,7 @@ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev, | |||
| 43 | *dev_id = args.args[0]; | 43 | *dev_id = args.args[0]; |
| 44 | break; | 44 | break; |
| 45 | } | 45 | } |
| 46 | index++; | ||
| 46 | } while (!ret); | 47 | } while (!ret); |
| 47 | 48 | ||
| 48 | return ret; | 49 | return ret; |
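
The one-line index++ above is what lets of_pmsi_get_dev_id() walk past the first msi-parent entry; without it the do/while keeps re-parsing index 0. A stand-alone model of the corrected loop, with parse_entry() standing in for the OF phandle iterator the driver actually calls:

#include <stdio.h>

static const int entries[] = { 11, 22, 33 };

/* Returns 0 and fills *out while index is in range, like the OF helper. */
static int parse_entry(int index, int *out)
{
    if (index >= (int)(sizeof(entries) / sizeof(entries[0])))
        return -1;
    *out = entries[index];
    return 0;
}

int main(void)
{
    int index = 0, ret, val;

    do {
        ret = parse_entry(index, &val);
        if (ret)
            break;
        printf("entry %d: %d\n", index, val);
        index++;                /* the missing increment */
    } while (!ret);

    return 0;
}
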
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 68932873eebc..284738add89b 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
| @@ -1835,7 +1835,7 @@ static int __init its_of_probe(struct device_node *node) | |||
| 1835 | 1835 | ||
| 1836 | #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) | 1836 | #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) |
| 1837 | 1837 | ||
| 1838 | #if defined(CONFIG_ACPI_NUMA) && (ACPI_CA_VERSION >= 0x20170531) | 1838 | #ifdef CONFIG_ACPI_NUMA |
| 1839 | struct its_srat_map { | 1839 | struct its_srat_map { |
| 1840 | /* numa node id */ | 1840 | /* numa node id */ |
| 1841 | u32 numa_node; | 1841 | u32 numa_node; |
| @@ -1843,7 +1843,7 @@ struct its_srat_map { | |||
| 1843 | u32 its_id; | 1843 | u32 its_id; |
| 1844 | }; | 1844 | }; |
| 1845 | 1845 | ||
| 1846 | static struct its_srat_map its_srat_maps[MAX_NUMNODES] __initdata; | 1846 | static struct its_srat_map *its_srat_maps __initdata; |
| 1847 | static int its_in_srat __initdata; | 1847 | static int its_in_srat __initdata; |
| 1848 | 1848 | ||
| 1849 | static int __init acpi_get_its_numa_node(u32 its_id) | 1849 | static int __init acpi_get_its_numa_node(u32 its_id) |
| @@ -1857,6 +1857,12 @@ static int __init acpi_get_its_numa_node(u32 its_id) | |||
| 1857 | return NUMA_NO_NODE; | 1857 | return NUMA_NO_NODE; |
| 1858 | } | 1858 | } |
| 1859 | 1859 | ||
| 1860 | static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header, | ||
| 1861 | const unsigned long end) | ||
| 1862 | { | ||
| 1863 | return 0; | ||
| 1864 | } | ||
| 1865 | |||
| 1860 | static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, | 1866 | static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, |
| 1861 | const unsigned long end) | 1867 | const unsigned long end) |
| 1862 | { | 1868 | { |
| @@ -1873,12 +1879,6 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, | |||
| 1873 | return -EINVAL; | 1879 | return -EINVAL; |
| 1874 | } | 1880 | } |
| 1875 | 1881 | ||
| 1876 | if (its_in_srat >= MAX_NUMNODES) { | ||
| 1877 | pr_err("SRAT: ITS affinity exceeding max count[%d]\n", | ||
| 1878 | MAX_NUMNODES); | ||
| 1879 | return -EINVAL; | ||
| 1880 | } | ||
| 1881 | |||
| 1882 | node = acpi_map_pxm_to_node(its_affinity->proximity_domain); | 1882 | node = acpi_map_pxm_to_node(its_affinity->proximity_domain); |
| 1883 | 1883 | ||
| 1884 | if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { | 1884 | if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { |
| @@ -1897,14 +1897,37 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header, | |||
| 1897 | 1897 | ||
| 1898 | static void __init acpi_table_parse_srat_its(void) | 1898 | static void __init acpi_table_parse_srat_its(void) |
| 1899 | { | 1899 | { |
| 1900 | int count; | ||
| 1901 | |||
| 1902 | count = acpi_table_parse_entries(ACPI_SIG_SRAT, | ||
| 1903 | sizeof(struct acpi_table_srat), | ||
| 1904 | ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, | ||
| 1905 | gic_acpi_match_srat_its, 0); | ||
| 1906 | if (count <= 0) | ||
| 1907 | return; | ||
| 1908 | |||
| 1909 | its_srat_maps = kmalloc(count * sizeof(struct its_srat_map), | ||
| 1910 | GFP_KERNEL); | ||
| 1911 | if (!its_srat_maps) { | ||
| 1912 | pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n"); | ||
| 1913 | return; | ||
| 1914 | } | ||
| 1915 | |||
| 1900 | acpi_table_parse_entries(ACPI_SIG_SRAT, | 1916 | acpi_table_parse_entries(ACPI_SIG_SRAT, |
| 1901 | sizeof(struct acpi_table_srat), | 1917 | sizeof(struct acpi_table_srat), |
| 1902 | ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, | 1918 | ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, |
| 1903 | gic_acpi_parse_srat_its, 0); | 1919 | gic_acpi_parse_srat_its, 0); |
| 1904 | } | 1920 | } |
| 1921 | |||
| 1922 | /* free the its_srat_maps after ITS probing */ | ||
| 1923 | static void __init acpi_its_srat_maps_free(void) | ||
| 1924 | { | ||
| 1925 | kfree(its_srat_maps); | ||
| 1926 | } | ||
| 1905 | #else | 1927 | #else |
| 1906 | static void __init acpi_table_parse_srat_its(void) { } | 1928 | static void __init acpi_table_parse_srat_its(void) { } |
| 1907 | static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } | 1929 | static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } |
| 1930 | static void __init acpi_its_srat_maps_free(void) { } | ||
| 1908 | #endif | 1931 | #endif |
| 1909 | 1932 | ||
| 1910 | static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, | 1933 | static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header, |
| @@ -1951,6 +1974,7 @@ static void __init its_acpi_probe(void) | |||
| 1951 | acpi_table_parse_srat_its(); | 1974 | acpi_table_parse_srat_its(); |
| 1952 | acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, | 1975 | acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, |
| 1953 | gic_acpi_parse_madt_its, 0); | 1976 | gic_acpi_parse_madt_its, 0); |
| 1977 | acpi_its_srat_maps_free(); | ||
| 1954 | } | 1978 | } |
| 1955 | #else | 1979 | #else |
| 1956 | static void __init its_acpi_probe(void) { } | 1980 | static void __init its_acpi_probe(void) { } |
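
The static MAX_NUMNODES-sized its_srat_maps[] is replaced above by a two-pass parse: a first scan whose callback only matches entries (to obtain a count), a single allocation of that exact size, then a second scan that fills the array, freed once probing is done. The stand-alone sketch below mirrors the count-then-allocate idea; the "table" is a plain int array and type-1 records stand in for the GIC ITS affinity entries.

#include <stdio.h>
#include <stdlib.h>

static const int table[] = { 0, 1, 0, 1, 1, 2 };
#define TABLE_LEN (sizeof(table) / sizeof(table[0]))

typedef int (*entry_cb)(int entry, void *ctx);

/* Walk the table, invoke cb (if any) on matching entries, return the count. */
static int parse_entries(entry_cb cb, void *ctx)
{
    int matched = 0;

    for (size_t i = 0; i < TABLE_LEN; i++) {
        if (table[i] != 1)
            continue;
        matched++;
        if (cb)
            cb(table[i], ctx);
    }
    return matched;
}

struct fill_ctx { int *maps; int n; };

static int record(int entry, void *ctx)
{
    struct fill_ctx *f = ctx;

    f->maps[f->n++] = entry;
    return 0;
}

int main(void)
{
    int count = parse_entries(NULL, NULL);                   /* pass 1: count */
    struct fill_ctx f = { .maps = malloc(count * sizeof(int)), .n = 0 };

    if (!f.maps)
        return 1;
    parse_entries(record, &f);                               /* pass 2: fill */
    printf("allocated %d slots, stored %d entries\n", count, f.n);
    free(f.maps);
    return 0;
}
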
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index dbffb7ab6203..984c3ecfd22c 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
| @@ -353,6 +353,8 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs | |||
| 353 | 353 | ||
| 354 | if (static_key_true(&supports_deactivate)) | 354 | if (static_key_true(&supports_deactivate)) |
| 355 | gic_write_eoir(irqnr); | 355 | gic_write_eoir(irqnr); |
| 356 | else | ||
| 357 | isb(); | ||
| 356 | 358 | ||
| 357 | err = handle_domain_irq(gic_data.domain, irqnr, regs); | 359 | err = handle_domain_irq(gic_data.domain, irqnr, regs); |
| 358 | if (err) { | 360 | if (err) { |
| @@ -640,11 +642,16 @@ static void gic_smp_init(void) | |||
| 640 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | 642 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
| 641 | bool force) | 643 | bool force) |
| 642 | { | 644 | { |
| 643 | unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); | 645 | unsigned int cpu; |
| 644 | void __iomem *reg; | 646 | void __iomem *reg; |
| 645 | int enabled; | 647 | int enabled; |
| 646 | u64 val; | 648 | u64 val; |
| 647 | 649 | ||
| 650 | if (force) | ||
| 651 | cpu = cpumask_first(mask_val); | ||
| 652 | else | ||
| 653 | cpu = cpumask_any_and(mask_val, cpu_online_mask); | ||
| 654 | |||
| 648 | if (cpu >= nr_cpu_ids) | 655 | if (cpu >= nr_cpu_ids) |
| 649 | return -EINVAL; | 656 | return -EINVAL; |
| 650 | 657 | ||
| @@ -831,8 +838,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
| 831 | if (ret) | 838 | if (ret) |
| 832 | return ret; | 839 | return ret; |
| 833 | 840 | ||
| 834 | for (i = 0; i < nr_irqs; i++) | 841 | for (i = 0; i < nr_irqs; i++) { |
| 835 | gic_irq_domain_map(domain, virq + i, hwirq + i); | 842 | ret = gic_irq_domain_map(domain, virq + i, hwirq + i); |
| 843 | if (ret) | ||
| 844 | return ret; | ||
| 845 | } | ||
| 836 | 846 | ||
| 837 | return 0; | 847 | return 0; |
| 838 | } | 848 | } |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 1b1df4f770bd..d3e7c43718b8 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
| @@ -361,6 +361,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) | |||
| 361 | if (likely(irqnr > 15 && irqnr < 1020)) { | 361 | if (likely(irqnr > 15 && irqnr < 1020)) { |
| 362 | if (static_key_true(&supports_deactivate)) | 362 | if (static_key_true(&supports_deactivate)) |
| 363 | writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); | 363 | writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); |
| 364 | isb(); | ||
| 364 | handle_domain_irq(gic->domain, irqnr, regs); | 365 | handle_domain_irq(gic->domain, irqnr, regs); |
| 365 | continue; | 366 | continue; |
| 366 | } | 367 | } |
| @@ -401,10 +402,12 @@ static void gic_handle_cascade_irq(struct irq_desc *desc) | |||
| 401 | goto out; | 402 | goto out; |
| 402 | 403 | ||
| 403 | cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); | 404 | cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); |
| 404 | if (unlikely(gic_irq < 32 || gic_irq > 1020)) | 405 | if (unlikely(gic_irq < 32 || gic_irq > 1020)) { |
| 405 | handle_bad_irq(desc); | 406 | handle_bad_irq(desc); |
| 406 | else | 407 | } else { |
| 408 | isb(); | ||
| 407 | generic_handle_irq(cascade_irq); | 409 | generic_handle_irq(cascade_irq); |
| 410 | } | ||
| 408 | 411 | ||
| 409 | out: | 412 | out: |
| 410 | chained_irq_exit(chip, desc); | 413 | chained_irq_exit(chip, desc); |
| @@ -1027,8 +1030,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
| 1027 | if (ret) | 1030 | if (ret) |
| 1028 | return ret; | 1031 | return ret; |
| 1029 | 1032 | ||
| 1030 | for (i = 0; i < nr_irqs; i++) | 1033 | for (i = 0; i < nr_irqs; i++) { |
| 1031 | gic_irq_domain_map(domain, virq + i, hwirq + i); | 1034 | ret = gic_irq_domain_map(domain, virq + i, hwirq + i); |
| 1035 | if (ret) | ||
| 1036 | return ret; | ||
| 1037 | } | ||
| 1032 | 1038 | ||
| 1033 | return 0; | 1039 | return 0; |
| 1034 | } | 1040 | } |
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 6ab1d3afec02..48ee1bad473f 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
| @@ -1020,8 +1020,11 @@ static int __init gic_of_init(struct device_node *node, | |||
| 1020 | gic_len = resource_size(&res); | 1020 | gic_len = resource_size(&res); |
| 1021 | } | 1021 | } |
| 1022 | 1022 | ||
| 1023 | if (mips_cm_present()) | 1023 | if (mips_cm_present()) { |
| 1024 | write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK); | 1024 | write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK); |
| 1025 | /* Ensure GIC region is enabled before trying to access it */ | ||
| 1026 | __sync(); | ||
| 1027 | } | ||
| 1025 | gic_present = true; | 1028 | gic_present = true; |
| 1026 | 1029 | ||
| 1027 | __gic_init(gic_base, gic_len, cpu_vec, 0, node); | 1030 | __gic_init(gic_base, gic_len, cpu_vec, 0, node); |
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c index 7b5fd8fb1761..aaca0b3d662e 100644 --- a/drivers/isdn/hysdn/hysdn_proclog.c +++ b/drivers/isdn/hysdn/hysdn_proclog.c | |||
| @@ -44,7 +44,6 @@ struct procdata { | |||
| 44 | char log_name[15]; /* log filename */ | 44 | char log_name[15]; /* log filename */ |
| 45 | struct log_data *log_head, *log_tail; /* head and tail for queue */ | 45 | struct log_data *log_head, *log_tail; /* head and tail for queue */ |
| 46 | int if_used; /* open count for interface */ | 46 | int if_used; /* open count for interface */ |
| 47 | int volatile del_lock; /* lock for delete operations */ | ||
| 48 | unsigned char logtmp[LOG_MAX_LINELEN]; | 47 | unsigned char logtmp[LOG_MAX_LINELEN]; |
| 49 | wait_queue_head_t rd_queue; | 48 | wait_queue_head_t rd_queue; |
| 50 | }; | 49 | }; |
| @@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp) | |||
| 102 | { | 101 | { |
| 103 | struct log_data *ib; | 102 | struct log_data *ib; |
| 104 | struct procdata *pd = card->proclog; | 103 | struct procdata *pd = card->proclog; |
| 105 | int i; | ||
| 106 | unsigned long flags; | 104 | unsigned long flags; |
| 107 | 105 | ||
| 108 | if (!pd) | 106 | if (!pd) |
| @@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp) | |||
| 126 | else | 124 | else |
| 127 | pd->log_tail->next = ib; /* follows existing messages */ | 125 | pd->log_tail->next = ib; /* follows existing messages */ |
| 128 | pd->log_tail = ib; /* new tail */ | 126 | pd->log_tail = ib; /* new tail */ |
| 129 | i = pd->del_lock++; /* get lock state */ | ||
| 130 | spin_unlock_irqrestore(&card->hysdn_lock, flags); | ||
| 131 | 127 | ||
| 132 | /* delete old entrys */ | 128 | /* delete old entrys */ |
| 133 | if (!i) | 129 | while (pd->log_head->next) { |
| 134 | while (pd->log_head->next) { | 130 | if ((pd->log_head->usage_cnt <= 0) && |
| 135 | if ((pd->log_head->usage_cnt <= 0) && | 131 | (pd->log_head->next->usage_cnt <= 0)) { |
| 136 | (pd->log_head->next->usage_cnt <= 0)) { | 132 | ib = pd->log_head; |
| 137 | ib = pd->log_head; | 133 | pd->log_head = pd->log_head->next; |
| 138 | pd->log_head = pd->log_head->next; | 134 | kfree(ib); |
| 139 | kfree(ib); | 135 | } else { |
| 140 | } else | 136 | break; |
| 141 | break; | 137 | } |
| 142 | } /* pd->log_head->next */ | 138 | } /* pd->log_head->next */ |
| 143 | pd->del_lock--; /* release lock level */ | 139 | |
| 140 | spin_unlock_irqrestore(&card->hysdn_lock, flags); | ||
| 141 | |||
| 144 | wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ | 142 | wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ |
| 145 | } /* put_log_buffer */ | 143 | } /* put_log_buffer */ |
| 146 | 144 | ||
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 89b09c51ab7c..38a5bb764c7b 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c | |||
| @@ -1376,6 +1376,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) | |||
| 1376 | if (arg) { | 1376 | if (arg) { |
| 1377 | if (copy_from_user(bname, argp, sizeof(bname) - 1)) | 1377 | if (copy_from_user(bname, argp, sizeof(bname) - 1)) |
| 1378 | return -EFAULT; | 1378 | return -EFAULT; |
| 1379 | bname[sizeof(bname)-1] = 0; | ||
| 1379 | } else | 1380 | } else |
| 1380 | return -EINVAL; | 1381 | return -EINVAL; |
| 1381 | ret = mutex_lock_interruptible(&dev->mtx); | 1382 | ret = mutex_lock_interruptible(&dev->mtx); |
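The one-byte fix above matters because copy_from_user() never NUL-terminates what it copies, so a buffer later used as a C string needs an explicit terminator. A self-contained sketch of the same pattern (the helper name is invented, not part of the driver):

#include <linux/uaccess.h>

/*
 * Copy at most len - 1 bytes from user space and force NUL termination,
 * so the destination is always a valid C string even if the user buffer
 * was not terminated.
 */
static int copy_user_name(char *dst, size_t len, const char __user *src)
{
	if (copy_from_user(dst, src, len - 1))
		return -EFAULT;
	dst[len - 1] = '\0';
	return 0;
}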
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index c151c6daa67e..f63a110b7bcb 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c | |||
| @@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm) | |||
| 2611 | char newname[10]; | 2611 | char newname[10]; |
| 2612 | 2612 | ||
| 2613 | if (p) { | 2613 | if (p) { |
| 2614 | /* Slave-Name MUST not be empty */ | 2614 | /* Slave-Name MUST not be empty or overflow 'newname' */ |
| 2615 | if (!strlen(p + 1)) | 2615 | if (strscpy(newname, p + 1, sizeof(newname)) <= 0) |
| 2616 | return NULL; | 2616 | return NULL; |
| 2617 | strcpy(newname, p + 1); | ||
| 2618 | *p = 0; | 2617 | *p = 0; |
| 2619 | /* Master must already exist */ | 2618 | /* Master must already exist */ |
| 2620 | if (!(n = isdn_net_findif(parm))) | 2619 | if (!(n = isdn_net_findif(parm))) |
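The isdn_net_newslave() change above leans on strscpy() semantics: it returns the number of characters copied (excluding the trailing NUL), or -E2BIG when the source does not fit, so a single "<= 0" test rejects both an empty slave name and one that would overflow the 10-byte buffer. A small illustrative helper (not driver code):

#include <linux/string.h>
#include <linux/types.h>

/* Returns true only when src is non-empty and fits dst entirely. */
static bool copy_name_checked(char *dst, size_t dst_len, const char *src)
{
	return strscpy(dst, src, dst_len) > 0;
}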
diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c index 78fc5d5e9051..92e6570b1143 100644 --- a/drivers/isdn/mISDN/fsm.c +++ b/drivers/isdn/mISDN/fsm.c | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | 26 | ||
| 27 | #define FSM_TIMER_DEBUG 0 | 27 | #define FSM_TIMER_DEBUG 0 |
| 28 | 28 | ||
| 29 | void | 29 | int |
| 30 | mISDN_FsmNew(struct Fsm *fsm, | 30 | mISDN_FsmNew(struct Fsm *fsm, |
| 31 | struct FsmNode *fnlist, int fncount) | 31 | struct FsmNode *fnlist, int fncount) |
| 32 | { | 32 | { |
| @@ -34,6 +34,8 @@ mISDN_FsmNew(struct Fsm *fsm, | |||
| 34 | 34 | ||
| 35 | fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count * | 35 | fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count * |
| 36 | fsm->event_count, GFP_KERNEL); | 36 | fsm->event_count, GFP_KERNEL); |
| 37 | if (fsm->jumpmatrix == NULL) | ||
| 38 | return -ENOMEM; | ||
| 37 | 39 | ||
| 38 | for (i = 0; i < fncount; i++) | 40 | for (i = 0; i < fncount; i++) |
| 39 | if ((fnlist[i].state >= fsm->state_count) || | 41 | if ((fnlist[i].state >= fsm->state_count) || |
| @@ -45,6 +47,7 @@ mISDN_FsmNew(struct Fsm *fsm, | |||
| 45 | } else | 47 | } else |
| 46 | fsm->jumpmatrix[fsm->state_count * fnlist[i].event + | 48 | fsm->jumpmatrix[fsm->state_count * fnlist[i].event + |
| 47 | fnlist[i].state] = (FSMFNPTR) fnlist[i].routine; | 49 | fnlist[i].state] = (FSMFNPTR) fnlist[i].routine; |
| 50 | return 0; | ||
| 48 | } | 51 | } |
| 49 | EXPORT_SYMBOL(mISDN_FsmNew); | 52 | EXPORT_SYMBOL(mISDN_FsmNew); |
| 50 | 53 | ||
diff --git a/drivers/isdn/mISDN/fsm.h b/drivers/isdn/mISDN/fsm.h index 928f5be192c1..e1def8490221 100644 --- a/drivers/isdn/mISDN/fsm.h +++ b/drivers/isdn/mISDN/fsm.h | |||
| @@ -55,7 +55,7 @@ struct FsmTimer { | |||
| 55 | void *arg; | 55 | void *arg; |
| 56 | }; | 56 | }; |
| 57 | 57 | ||
| 58 | extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int); | 58 | extern int mISDN_FsmNew(struct Fsm *, struct FsmNode *, int); |
| 59 | extern void mISDN_FsmFree(struct Fsm *); | 59 | extern void mISDN_FsmFree(struct Fsm *); |
| 60 | extern int mISDN_FsmEvent(struct FsmInst *, int , void *); | 60 | extern int mISDN_FsmEvent(struct FsmInst *, int , void *); |
| 61 | extern void mISDN_FsmChangeState(struct FsmInst *, int); | 61 | extern void mISDN_FsmChangeState(struct FsmInst *, int); |
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c index bebc57b72138..3192b0eb3944 100644 --- a/drivers/isdn/mISDN/layer1.c +++ b/drivers/isdn/mISDN/layer1.c | |||
| @@ -414,8 +414,7 @@ l1_init(u_int *deb) | |||
| 414 | l1fsm_s.event_count = L1_EVENT_COUNT; | 414 | l1fsm_s.event_count = L1_EVENT_COUNT; |
| 415 | l1fsm_s.strEvent = strL1Event; | 415 | l1fsm_s.strEvent = strL1Event; |
| 416 | l1fsm_s.strState = strL1SState; | 416 | l1fsm_s.strState = strL1SState; |
| 417 | mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList)); | 417 | return mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList)); |
| 418 | return 0; | ||
| 419 | } | 418 | } |
| 420 | 419 | ||
| 421 | void | 420 | void |
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c index 7243a6746f8b..9ff0903a0e89 100644 --- a/drivers/isdn/mISDN/layer2.c +++ b/drivers/isdn/mISDN/layer2.c | |||
| @@ -2247,15 +2247,26 @@ static struct Bprotocol X75SLP = { | |||
| 2247 | int | 2247 | int |
| 2248 | Isdnl2_Init(u_int *deb) | 2248 | Isdnl2_Init(u_int *deb) |
| 2249 | { | 2249 | { |
| 2250 | int res; | ||
| 2250 | debug = deb; | 2251 | debug = deb; |
| 2251 | mISDN_register_Bprotocol(&X75SLP); | 2252 | mISDN_register_Bprotocol(&X75SLP); |
| 2252 | l2fsm.state_count = L2_STATE_COUNT; | 2253 | l2fsm.state_count = L2_STATE_COUNT; |
| 2253 | l2fsm.event_count = L2_EVENT_COUNT; | 2254 | l2fsm.event_count = L2_EVENT_COUNT; |
| 2254 | l2fsm.strEvent = strL2Event; | 2255 | l2fsm.strEvent = strL2Event; |
| 2255 | l2fsm.strState = strL2State; | 2256 | l2fsm.strState = strL2State; |
| 2256 | mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList)); | 2257 | res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList)); |
| 2257 | TEIInit(deb); | 2258 | if (res) |
| 2259 | goto error; | ||
| 2260 | res = TEIInit(deb); | ||
| 2261 | if (res) | ||
| 2262 | goto error_fsm; | ||
| 2258 | return 0; | 2263 | return 0; |
| 2264 | |||
| 2265 | error_fsm: | ||
| 2266 | mISDN_FsmFree(&l2fsm); | ||
| 2267 | error: | ||
| 2268 | mISDN_unregister_Bprotocol(&X75SLP); | ||
| 2269 | return res; | ||
| 2259 | } | 2270 | } |
| 2260 | 2271 | ||
| 2261 | void | 2272 | void |
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c index 908127efccf8..12d9e5f4beb1 100644 --- a/drivers/isdn/mISDN/tei.c +++ b/drivers/isdn/mISDN/tei.c | |||
| @@ -1387,23 +1387,37 @@ create_teimanager(struct mISDNdevice *dev) | |||
| 1387 | 1387 | ||
| 1388 | int TEIInit(u_int *deb) | 1388 | int TEIInit(u_int *deb) |
| 1389 | { | 1389 | { |
| 1390 | int res; | ||
| 1390 | debug = deb; | 1391 | debug = deb; |
| 1391 | teifsmu.state_count = TEI_STATE_COUNT; | 1392 | teifsmu.state_count = TEI_STATE_COUNT; |
| 1392 | teifsmu.event_count = TEI_EVENT_COUNT; | 1393 | teifsmu.event_count = TEI_EVENT_COUNT; |
| 1393 | teifsmu.strEvent = strTeiEvent; | 1394 | teifsmu.strEvent = strTeiEvent; |
| 1394 | teifsmu.strState = strTeiState; | 1395 | teifsmu.strState = strTeiState; |
| 1395 | mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser)); | 1396 | res = mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser)); |
| 1397 | if (res) | ||
| 1398 | goto error; | ||
| 1396 | teifsmn.state_count = TEI_STATE_COUNT; | 1399 | teifsmn.state_count = TEI_STATE_COUNT; |
| 1397 | teifsmn.event_count = TEI_EVENT_COUNT; | 1400 | teifsmn.event_count = TEI_EVENT_COUNT; |
| 1398 | teifsmn.strEvent = strTeiEvent; | 1401 | teifsmn.strEvent = strTeiEvent; |
| 1399 | teifsmn.strState = strTeiState; | 1402 | teifsmn.strState = strTeiState; |
| 1400 | mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet)); | 1403 | res = mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet)); |
| 1404 | if (res) | ||
| 1405 | goto error_smn; | ||
| 1401 | deactfsm.state_count = DEACT_STATE_COUNT; | 1406 | deactfsm.state_count = DEACT_STATE_COUNT; |
| 1402 | deactfsm.event_count = DEACT_EVENT_COUNT; | 1407 | deactfsm.event_count = DEACT_EVENT_COUNT; |
| 1403 | deactfsm.strEvent = strDeactEvent; | 1408 | deactfsm.strEvent = strDeactEvent; |
| 1404 | deactfsm.strState = strDeactState; | 1409 | deactfsm.strState = strDeactState; |
| 1405 | mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList)); | 1410 | res = mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList)); |
| 1411 | if (res) | ||
| 1412 | goto error_deact; | ||
| 1406 | return 0; | 1413 | return 0; |
| 1414 | |||
| 1415 | error_deact: | ||
| 1416 | mISDN_FsmFree(&teifsmn); | ||
| 1417 | error_smn: | ||
| 1418 | mISDN_FsmFree(&teifsmu); | ||
| 1419 | error: | ||
| 1420 | return res; | ||
| 1407 | } | 1421 | } |
| 1408 | 1422 | ||
| 1409 | void TEIFree(void) | 1423 | void TEIFree(void) |
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c index 5ecc154f6831..9bc32578a766 100644 --- a/drivers/lightnvm/pblk-rb.c +++ b/drivers/lightnvm/pblk-rb.c | |||
| @@ -657,7 +657,7 @@ try: | |||
| 657 | * be directed to disk. | 657 | * be directed to disk. |
| 658 | */ | 658 | */ |
| 659 | int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, | 659 | int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, |
| 660 | struct ppa_addr ppa, int bio_iter) | 660 | struct ppa_addr ppa, int bio_iter, bool advanced_bio) |
| 661 | { | 661 | { |
| 662 | struct pblk *pblk = container_of(rb, struct pblk, rwb); | 662 | struct pblk *pblk = container_of(rb, struct pblk, rwb); |
| 663 | struct pblk_rb_entry *entry; | 663 | struct pblk_rb_entry *entry; |
| @@ -694,7 +694,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, | |||
| 694 | * filled with data from the cache). If part of the data resides on the | 694 | * filled with data from the cache). If part of the data resides on the |
| 695 | * media, we will read later on | 695 | * media, we will read later on |
| 696 | */ | 696 | */ |
| 697 | if (unlikely(!bio->bi_iter.bi_idx)) | 697 | if (unlikely(!advanced_bio)) |
| 698 | bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE); | 698 | bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE); |
| 699 | 699 | ||
| 700 | data = bio_data(bio); | 700 | data = bio_data(bio); |
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index 4e5c48f3de62..d682e89e6493 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | */ | 26 | */ |
| 27 | static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, | 27 | static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, |
| 28 | sector_t lba, struct ppa_addr ppa, | 28 | sector_t lba, struct ppa_addr ppa, |
| 29 | int bio_iter) | 29 | int bio_iter, bool advanced_bio) |
| 30 | { | 30 | { |
| 31 | #ifdef CONFIG_NVM_DEBUG | 31 | #ifdef CONFIG_NVM_DEBUG |
| 32 | /* Callers must ensure that the ppa points to a cache address */ | 32 | /* Callers must ensure that the ppa points to a cache address */ |
| @@ -34,7 +34,8 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, | |||
| 34 | BUG_ON(!pblk_addr_in_cache(ppa)); | 34 | BUG_ON(!pblk_addr_in_cache(ppa)); |
| 35 | #endif | 35 | #endif |
| 36 | 36 | ||
| 37 | return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter); | 37 | return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, |
| 38 | bio_iter, advanced_bio); | ||
| 38 | } | 39 | } |
| 39 | 40 | ||
| 40 | static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, | 41 | static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, |
| @@ -44,7 +45,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, | |||
| 44 | struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS]; | 45 | struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS]; |
| 45 | sector_t blba = pblk_get_lba(bio); | 46 | sector_t blba = pblk_get_lba(bio); |
| 46 | int nr_secs = rqd->nr_ppas; | 47 | int nr_secs = rqd->nr_ppas; |
| 47 | int advanced_bio = 0; | 48 | bool advanced_bio = false; |
| 48 | int i, j = 0; | 49 | int i, j = 0; |
| 49 | 50 | ||
| 50 | /* logic error: lba out-of-bounds. Ignore read request */ | 51 | /* logic error: lba out-of-bounds. Ignore read request */ |
| @@ -62,19 +63,26 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, | |||
| 62 | retry: | 63 | retry: |
| 63 | if (pblk_ppa_empty(p)) { | 64 | if (pblk_ppa_empty(p)) { |
| 64 | WARN_ON(test_and_set_bit(i, read_bitmap)); | 65 | WARN_ON(test_and_set_bit(i, read_bitmap)); |
| 65 | continue; | 66 | |
| 67 | if (unlikely(!advanced_bio)) { | ||
| 68 | bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE); | ||
| 69 | advanced_bio = true; | ||
| 70 | } | ||
| 71 | |||
| 72 | goto next; | ||
| 66 | } | 73 | } |
| 67 | 74 | ||
| 68 | /* Try to read from write buffer. The address is later checked | 75 | /* Try to read from write buffer. The address is later checked |
| 69 | * on the write buffer to prevent retrieving overwritten data. | 76 | * on the write buffer to prevent retrieving overwritten data. |
| 70 | */ | 77 | */ |
| 71 | if (pblk_addr_in_cache(p)) { | 78 | if (pblk_addr_in_cache(p)) { |
| 72 | if (!pblk_read_from_cache(pblk, bio, lba, p, i)) { | 79 | if (!pblk_read_from_cache(pblk, bio, lba, p, i, |
| 80 | advanced_bio)) { | ||
| 73 | pblk_lookup_l2p_seq(pblk, &p, lba, 1); | 81 | pblk_lookup_l2p_seq(pblk, &p, lba, 1); |
| 74 | goto retry; | 82 | goto retry; |
| 75 | } | 83 | } |
| 76 | WARN_ON(test_and_set_bit(i, read_bitmap)); | 84 | WARN_ON(test_and_set_bit(i, read_bitmap)); |
| 77 | advanced_bio = 1; | 85 | advanced_bio = true; |
| 78 | #ifdef CONFIG_NVM_DEBUG | 86 | #ifdef CONFIG_NVM_DEBUG |
| 79 | atomic_long_inc(&pblk->cache_reads); | 87 | atomic_long_inc(&pblk->cache_reads); |
| 80 | #endif | 88 | #endif |
| @@ -83,6 +91,7 @@ retry: | |||
| 83 | rqd->ppa_list[j++] = p; | 91 | rqd->ppa_list[j++] = p; |
| 84 | } | 92 | } |
| 85 | 93 | ||
| 94 | next: | ||
| 86 | if (advanced_bio) | 95 | if (advanced_bio) |
| 87 | bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE); | 96 | bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE); |
| 88 | } | 97 | } |
| @@ -282,7 +291,7 @@ retry: | |||
| 282 | * write buffer to prevent retrieving overwritten data. | 291 | * write buffer to prevent retrieving overwritten data. |
| 283 | */ | 292 | */ |
| 284 | if (pblk_addr_in_cache(ppa)) { | 293 | if (pblk_addr_in_cache(ppa)) { |
| 285 | if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) { | 294 | if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) { |
| 286 | pblk_lookup_l2p_seq(pblk, &ppa, lba, 1); | 295 | pblk_lookup_l2p_seq(pblk, &ppa, lba, 1); |
| 287 | goto retry; | 296 | goto retry; |
| 288 | } | 297 | } |
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 0c5692cc2f60..67e623bd5c2d 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h | |||
| @@ -670,7 +670,7 @@ unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio, | |||
| 670 | struct list_head *list, | 670 | struct list_head *list, |
| 671 | unsigned int max); | 671 | unsigned int max); |
| 672 | int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, | 672 | int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, |
| 673 | struct ppa_addr ppa, int bio_iter); | 673 | struct ppa_addr ppa, int bio_iter, bool advanced_bio); |
| 674 | unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries); | 674 | unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries); |
| 675 | 675 | ||
| 676 | unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags); | 676 | unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags); |
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index ac91fd0d62c6..cbca5e51b975 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c | |||
| @@ -92,7 +92,7 @@ static struct mbox_controller pcc_mbox_ctrl = {}; | |||
| 92 | */ | 92 | */ |
| 93 | static struct mbox_chan *get_pcc_channel(int id) | 93 | static struct mbox_chan *get_pcc_channel(int id) |
| 94 | { | 94 | { |
| 95 | if (id < 0 || id > pcc_mbox_ctrl.num_chans) | 95 | if (id < 0 || id >= pcc_mbox_ctrl.num_chans) |
| 96 | return ERR_PTR(-ENOENT); | 96 | return ERR_PTR(-ENOENT); |
| 97 | 97 | ||
| 98 | return &pcc_mbox_channels[id]; | 98 | return &pcc_mbox_channels[id]; |
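The PCC fix above is a classic off-by-one: with num_chans channels the valid indices run from 0 to num_chans - 1, so the upper bound must be rejected with ">=" rather than ">". Shown in isolation as an illustrative helper (not the driver's code):

#include <linux/err.h>
#include <linux/mailbox_controller.h>

static struct mbox_chan *chan_lookup(struct mbox_chan *chans, int nchans,
				     int id)
{
	if (id < 0 || id >= nchans)	/* id == nchans is one past the end */
		return ERR_PTR(-ENOENT);
	return &chans[id];
}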
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 850ff6c67994..44f4a8ac95bd 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
| @@ -1258,8 +1258,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async); | |||
| 1258 | */ | 1258 | */ |
| 1259 | int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) | 1259 | int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) |
| 1260 | { | 1260 | { |
| 1261 | blk_status_t a; | 1261 | int a, f; |
| 1262 | int f; | ||
| 1263 | unsigned long buffers_processed = 0; | 1262 | unsigned long buffers_processed = 0; |
| 1264 | struct dm_buffer *b, *tmp; | 1263 | struct dm_buffer *b, *tmp; |
| 1265 | 1264 | ||
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 1b224aa9cf15..3acce09bba35 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c | |||
| @@ -1587,16 +1587,18 @@ retry: | |||
| 1587 | if (likely(ic->mode == 'J')) { | 1587 | if (likely(ic->mode == 'J')) { |
| 1588 | if (dio->write) { | 1588 | if (dio->write) { |
| 1589 | unsigned next_entry, i, pos; | 1589 | unsigned next_entry, i, pos; |
| 1590 | unsigned ws, we; | 1590 | unsigned ws, we, range_sectors; |
| 1591 | 1591 | ||
| 1592 | dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors); | 1592 | dio->range.n_sectors = min(dio->range.n_sectors, |
| 1593 | ic->free_sectors << ic->sb->log2_sectors_per_block); | ||
| 1593 | if (unlikely(!dio->range.n_sectors)) | 1594 | if (unlikely(!dio->range.n_sectors)) |
| 1594 | goto sleep; | 1595 | goto sleep; |
| 1595 | ic->free_sectors -= dio->range.n_sectors; | 1596 | range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block; |
| 1597 | ic->free_sectors -= range_sectors; | ||
| 1596 | journal_section = ic->free_section; | 1598 | journal_section = ic->free_section; |
| 1597 | journal_entry = ic->free_section_entry; | 1599 | journal_entry = ic->free_section_entry; |
| 1598 | 1600 | ||
| 1599 | next_entry = ic->free_section_entry + dio->range.n_sectors; | 1601 | next_entry = ic->free_section_entry + range_sectors; |
| 1600 | ic->free_section_entry = next_entry % ic->journal_section_entries; | 1602 | ic->free_section_entry = next_entry % ic->journal_section_entries; |
| 1601 | ic->free_section += next_entry / ic->journal_section_entries; | 1603 | ic->free_section += next_entry / ic->journal_section_entries; |
| 1602 | ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; | 1604 | ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; |
| @@ -1727,6 +1729,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic) | |||
| 1727 | wraparound_section(ic, &ic->free_section); | 1729 | wraparound_section(ic, &ic->free_section); |
| 1728 | ic->n_uncommitted_sections++; | 1730 | ic->n_uncommitted_sections++; |
| 1729 | } | 1731 | } |
| 1732 | WARN_ON(ic->journal_sections * ic->journal_section_entries != | ||
| 1733 | (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors); | ||
| 1730 | } | 1734 | } |
| 1731 | 1735 | ||
| 1732 | static void integrity_commit(struct work_struct *w) | 1736 | static void integrity_commit(struct work_struct *w) |
| @@ -1821,6 +1825,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, | |||
| 1821 | { | 1825 | { |
| 1822 | unsigned i, j, n; | 1826 | unsigned i, j, n; |
| 1823 | struct journal_completion comp; | 1827 | struct journal_completion comp; |
| 1828 | struct blk_plug plug; | ||
| 1829 | |||
| 1830 | blk_start_plug(&plug); | ||
| 1824 | 1831 | ||
| 1825 | comp.ic = ic; | 1832 | comp.ic = ic; |
| 1826 | comp.in_flight = (atomic_t)ATOMIC_INIT(1); | 1833 | comp.in_flight = (atomic_t)ATOMIC_INIT(1); |
| @@ -1945,6 +1952,8 @@ skip_io: | |||
| 1945 | 1952 | ||
| 1946 | dm_bufio_write_dirty_buffers_async(ic->bufio); | 1953 | dm_bufio_write_dirty_buffers_async(ic->bufio); |
| 1947 | 1954 | ||
| 1955 | blk_finish_plug(&plug); | ||
| 1956 | |||
| 1948 | complete_journal_op(&comp); | 1957 | complete_journal_op(&comp); |
| 1949 | wait_for_completion_io(&comp.comp); | 1958 | wait_for_completion_io(&comp.comp); |
| 1950 | 1959 | ||
| @@ -3019,6 +3028,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
| 3019 | ti->error = "Block size doesn't match the information in superblock"; | 3028 | ti->error = "Block size doesn't match the information in superblock"; |
| 3020 | goto bad; | 3029 | goto bad; |
| 3021 | } | 3030 | } |
| 3031 | if (!le32_to_cpu(ic->sb->journal_sections)) { | ||
| 3032 | r = -EINVAL; | ||
| 3033 | ti->error = "Corrupted superblock, journal_sections is 0"; | ||
| 3034 | goto bad; | ||
| 3035 | } | ||
| 3022 | /* make sure that ti->max_io_len doesn't overflow */ | 3036 | /* make sure that ti->max_io_len doesn't overflow */ |
| 3023 | if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || | 3037 | if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || |
| 3024 | ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { | 3038 | ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 0e8ab5bb3575..d24e4b05f5da 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
| @@ -504,7 +504,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, | |||
| 504 | if (queue_dying) { | 504 | if (queue_dying) { |
| 505 | atomic_inc(&m->pg_init_in_progress); | 505 | atomic_inc(&m->pg_init_in_progress); |
| 506 | activate_or_offline_path(pgpath); | 506 | activate_or_offline_path(pgpath); |
| 507 | return DM_MAPIO_REQUEUE; | ||
| 508 | } | 507 | } |
| 509 | return DM_MAPIO_DELAY_REQUEUE; | 508 | return DM_MAPIO_DELAY_REQUEUE; |
| 510 | } | 509 | } |
| @@ -1458,7 +1457,6 @@ static int noretry_error(blk_status_t error) | |||
| 1458 | case BLK_STS_TARGET: | 1457 | case BLK_STS_TARGET: |
| 1459 | case BLK_STS_NEXUS: | 1458 | case BLK_STS_NEXUS: |
| 1460 | case BLK_STS_MEDIUM: | 1459 | case BLK_STS_MEDIUM: |
| 1461 | case BLK_STS_RESOURCE: | ||
| 1462 | return 1; | 1460 | return 1; |
| 1463 | } | 1461 | } |
| 1464 | 1462 | ||
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 2e10c2f13a34..5bfe285ea9d1 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
| @@ -208,6 +208,7 @@ struct raid_dev { | |||
| 208 | #define RT_FLAG_RS_BITMAP_LOADED 2 | 208 | #define RT_FLAG_RS_BITMAP_LOADED 2 |
| 209 | #define RT_FLAG_UPDATE_SBS 3 | 209 | #define RT_FLAG_UPDATE_SBS 3 |
| 210 | #define RT_FLAG_RESHAPE_RS 4 | 210 | #define RT_FLAG_RESHAPE_RS 4 |
| 211 | #define RT_FLAG_RS_SUSPENDED 5 | ||
| 211 | 212 | ||
| 212 | /* Array elements of 64 bit needed for rebuild/failed disk bits */ | 213 | /* Array elements of 64 bit needed for rebuild/failed disk bits */ |
| 213 | #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) | 214 | #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) |
| @@ -564,9 +565,10 @@ static const char *raid10_md_layout_to_format(int layout) | |||
| 564 | if (__raid10_near_copies(layout) > 1) | 565 | if (__raid10_near_copies(layout) > 1) |
| 565 | return "near"; | 566 | return "near"; |
| 566 | 567 | ||
| 567 | WARN_ON(__raid10_far_copies(layout) < 2); | 568 | if (__raid10_far_copies(layout) > 1) |
| 569 | return "far"; | ||
| 568 | 570 | ||
| 569 | return "far"; | 571 | return "unknown"; |
| 570 | } | 572 | } |
| 571 | 573 | ||
| 572 | /* Return md raid10 algorithm for @name */ | 574 | /* Return md raid10 algorithm for @name */ |
| @@ -2540,11 +2542,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | |||
| 2540 | if (!freshest) | 2542 | if (!freshest) |
| 2541 | return 0; | 2543 | return 0; |
| 2542 | 2544 | ||
| 2543 | if (validate_raid_redundancy(rs)) { | ||
| 2544 | rs->ti->error = "Insufficient redundancy to activate array"; | ||
| 2545 | return -EINVAL; | ||
| 2546 | } | ||
| 2547 | |||
| 2548 | /* | 2545 | /* |
| 2549 | * Validation of the freshest device provides the source of | 2546 | * Validation of the freshest device provides the source of |
| 2550 | * validation for the remaining devices. | 2547 | * validation for the remaining devices. |
| @@ -2553,6 +2550,11 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | |||
| 2553 | if (super_validate(rs, freshest)) | 2550 | if (super_validate(rs, freshest)) |
| 2554 | return -EINVAL; | 2551 | return -EINVAL; |
| 2555 | 2552 | ||
| 2553 | if (validate_raid_redundancy(rs)) { | ||
| 2554 | rs->ti->error = "Insufficient redundancy to activate array"; | ||
| 2555 | return -EINVAL; | ||
| 2556 | } | ||
| 2557 | |||
| 2556 | rdev_for_each(rdev, mddev) | 2558 | rdev_for_each(rdev, mddev) |
| 2557 | if (!test_bit(Journal, &rdev->flags) && | 2559 | if (!test_bit(Journal, &rdev->flags) && |
| 2558 | rdev != freshest && | 2560 | rdev != freshest && |
| @@ -3168,6 +3170,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
| 3168 | } | 3170 | } |
| 3169 | 3171 | ||
| 3170 | mddev_suspend(&rs->md); | 3172 | mddev_suspend(&rs->md); |
| 3173 | set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags); | ||
| 3171 | 3174 | ||
| 3172 | /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */ | 3175 | /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */ |
| 3173 | if (rs_is_raid456(rs)) { | 3176 | if (rs_is_raid456(rs)) { |
| @@ -3625,7 +3628,7 @@ static void raid_postsuspend(struct dm_target *ti) | |||
| 3625 | { | 3628 | { |
| 3626 | struct raid_set *rs = ti->private; | 3629 | struct raid_set *rs = ti->private; |
| 3627 | 3630 | ||
| 3628 | if (!rs->md.suspended) | 3631 | if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) |
| 3629 | mddev_suspend(&rs->md); | 3632 | mddev_suspend(&rs->md); |
| 3630 | 3633 | ||
| 3631 | rs->md.ro = 1; | 3634 | rs->md.ro = 1; |
| @@ -3759,7 +3762,7 @@ static int rs_start_reshape(struct raid_set *rs) | |||
| 3759 | return r; | 3762 | return r; |
| 3760 | 3763 | ||
| 3761 | /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */ | 3764 | /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */ |
| 3762 | if (mddev->suspended) | 3765 | if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) |
| 3763 | mddev_resume(mddev); | 3766 | mddev_resume(mddev); |
| 3764 | 3767 | ||
| 3765 | /* | 3768 | /* |
| @@ -3786,8 +3789,8 @@ static int rs_start_reshape(struct raid_set *rs) | |||
| 3786 | } | 3789 | } |
| 3787 | 3790 | ||
| 3788 | /* Suspend because a resume will happen in raid_resume() */ | 3791 | /* Suspend because a resume will happen in raid_resume() */ |
| 3789 | if (!mddev->suspended) | 3792 | set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags); |
| 3790 | mddev_suspend(mddev); | 3793 | mddev_suspend(mddev); |
| 3791 | 3794 | ||
| 3792 | /* | 3795 | /* |
| 3793 | * Now reshape got set up, update superblocks to | 3796 | * Now reshape got set up, update superblocks to |
| @@ -3883,13 +3886,13 @@ static void raid_resume(struct dm_target *ti) | |||
| 3883 | if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS)) | 3886 | if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS)) |
| 3884 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | 3887 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
| 3885 | 3888 | ||
| 3886 | if (mddev->suspended) | 3889 | if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) |
| 3887 | mddev_resume(mddev); | 3890 | mddev_resume(mddev); |
| 3888 | } | 3891 | } |
| 3889 | 3892 | ||
| 3890 | static struct target_type raid_target = { | 3893 | static struct target_type raid_target = { |
| 3891 | .name = "raid", | 3894 | .name = "raid", |
| 3892 | .version = {1, 11, 1}, | 3895 | .version = {1, 12, 1}, |
| 3893 | .module = THIS_MODULE, | 3896 | .module = THIS_MODULE, |
| 3894 | .ctr = raid_ctr, | 3897 | .ctr = raid_ctr, |
| 3895 | .dtr = raid_dtr, | 3898 | .dtr = raid_dtr, |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index a39bcd9b982a..28a4071cdf85 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/atomic.h> | 20 | #include <linux/atomic.h> |
| 21 | #include <linux/blk-mq.h> | 21 | #include <linux/blk-mq.h> |
| 22 | #include <linux/mount.h> | 22 | #include <linux/mount.h> |
| 23 | #include <linux/dax.h> | ||
| 23 | 24 | ||
| 24 | #define DM_MSG_PREFIX "table" | 25 | #define DM_MSG_PREFIX "table" |
| 25 | 26 | ||
| @@ -1630,6 +1631,37 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) | |||
| 1630 | return false; | 1631 | return false; |
| 1631 | } | 1632 | } |
| 1632 | 1633 | ||
| 1634 | static int device_dax_write_cache_enabled(struct dm_target *ti, | ||
| 1635 | struct dm_dev *dev, sector_t start, | ||
| 1636 | sector_t len, void *data) | ||
| 1637 | { | ||
| 1638 | struct dax_device *dax_dev = dev->dax_dev; | ||
| 1639 | |||
| 1640 | if (!dax_dev) | ||
| 1641 | return false; | ||
| 1642 | |||
| 1643 | if (dax_write_cache_enabled(dax_dev)) | ||
| 1644 | return true; | ||
| 1645 | return false; | ||
| 1646 | } | ||
| 1647 | |||
| 1648 | static int dm_table_supports_dax_write_cache(struct dm_table *t) | ||
| 1649 | { | ||
| 1650 | struct dm_target *ti; | ||
| 1651 | unsigned i; | ||
| 1652 | |||
| 1653 | for (i = 0; i < dm_table_get_num_targets(t); i++) { | ||
| 1654 | ti = dm_table_get_target(t, i); | ||
| 1655 | |||
| 1656 | if (ti->type->iterate_devices && | ||
| 1657 | ti->type->iterate_devices(ti, | ||
| 1658 | device_dax_write_cache_enabled, NULL)) | ||
| 1659 | return true; | ||
| 1660 | } | ||
| 1661 | |||
| 1662 | return false; | ||
| 1663 | } | ||
| 1664 | |||
| 1633 | static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, | 1665 | static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, |
| 1634 | sector_t start, sector_t len, void *data) | 1666 | sector_t start, sector_t len, void *data) |
| 1635 | { | 1667 | { |
| @@ -1785,6 +1817,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | |||
| 1785 | } | 1817 | } |
| 1786 | blk_queue_write_cache(q, wc, fua); | 1818 | blk_queue_write_cache(q, wc, fua); |
| 1787 | 1819 | ||
| 1820 | if (dm_table_supports_dax_write_cache(t)) | ||
| 1821 | dax_write_cache(t->md->dax_dev, true); | ||
| 1822 | |||
| 1788 | /* Ensure that all underlying devices are non-rotational. */ | 1823 | /* Ensure that all underlying devices are non-rotational. */ |
| 1789 | if (dm_table_all_devices_attribute(t, device_is_nonrot)) | 1824 | if (dm_table_all_devices_attribute(t, device_is_nonrot)) |
| 1790 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); | 1825 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); |
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 504ba3fa328b..e13f90832b6b 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c | |||
| @@ -308,19 +308,14 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio) | |||
| 308 | { | 308 | { |
| 309 | unsigned n; | 309 | unsigned n; |
| 310 | 310 | ||
| 311 | if (!fio->rs) { | 311 | if (!fio->rs) |
| 312 | fio->rs = mempool_alloc(v->fec->rs_pool, 0); | 312 | fio->rs = mempool_alloc(v->fec->rs_pool, GFP_NOIO); |
| 313 | if (unlikely(!fio->rs)) { | ||
| 314 | DMERR("failed to allocate RS"); | ||
| 315 | return -ENOMEM; | ||
| 316 | } | ||
| 317 | } | ||
| 318 | 313 | ||
| 319 | fec_for_each_prealloc_buffer(n) { | 314 | fec_for_each_prealloc_buffer(n) { |
| 320 | if (fio->bufs[n]) | 315 | if (fio->bufs[n]) |
| 321 | continue; | 316 | continue; |
| 322 | 317 | ||
| 323 | fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO); | 318 | fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOWAIT); |
| 324 | if (unlikely(!fio->bufs[n])) { | 319 | if (unlikely(!fio->bufs[n])) { |
| 325 | DMERR("failed to allocate FEC buffer"); | 320 | DMERR("failed to allocate FEC buffer"); |
| 326 | return -ENOMEM; | 321 | return -ENOMEM; |
| @@ -332,22 +327,16 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio) | |||
| 332 | if (fio->bufs[n]) | 327 | if (fio->bufs[n]) |
| 333 | continue; | 328 | continue; |
| 334 | 329 | ||
| 335 | fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO); | 330 | fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOWAIT); |
| 336 | /* we can manage with even one buffer if necessary */ | 331 | /* we can manage with even one buffer if necessary */ |
| 337 | if (unlikely(!fio->bufs[n])) | 332 | if (unlikely(!fio->bufs[n])) |
| 338 | break; | 333 | break; |
| 339 | } | 334 | } |
| 340 | fio->nbufs = n; | 335 | fio->nbufs = n; |
| 341 | 336 | ||
| 342 | if (!fio->output) { | 337 | if (!fio->output) |
| 343 | fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO); | 338 | fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO); |
| 344 | 339 | ||
| 345 | if (!fio->output) { | ||
| 346 | DMERR("failed to allocate FEC page"); | ||
| 347 | return -ENOMEM; | ||
| 348 | } | ||
| 349 | } | ||
| 350 | |||
| 351 | return 0; | 340 | return 0; |
| 352 | } | 341 | } |
| 353 | 342 | ||
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index 884ff7c170a0..a4fa2ada6883 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c | |||
| @@ -624,7 +624,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set) | |||
| 624 | 624 | ||
| 625 | ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page); | 625 | ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page); |
| 626 | if (ret == 0) | 626 | if (ret == 0) |
| 627 | ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); | 627 | ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); |
| 628 | 628 | ||
| 629 | return ret; | 629 | return ret; |
| 630 | } | 630 | } |
| @@ -658,7 +658,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd, | |||
| 658 | 658 | ||
| 659 | /* Flush drive cache (this will also sync data) */ | 659 | /* Flush drive cache (this will also sync data) */ |
| 660 | if (ret == 0) | 660 | if (ret == 0) |
| 661 | ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); | 661 | ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); |
| 662 | 662 | ||
| 663 | return ret; | 663 | return ret; |
| 664 | } | 664 | } |
| @@ -722,7 +722,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) | |||
| 722 | 722 | ||
| 723 | /* If there are no dirty metadata blocks, just flush the device cache */ | 723 | /* If there are no dirty metadata blocks, just flush the device cache */ |
| 724 | if (list_empty(&write_list)) { | 724 | if (list_empty(&write_list)) { |
| 725 | ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); | 725 | ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL); |
| 726 | goto out; | 726 | goto out; |
| 727 | } | 727 | } |
| 728 | 728 | ||
| @@ -927,7 +927,7 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set) | |||
| 927 | (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift); | 927 | (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift); |
| 928 | } | 928 | } |
| 929 | 929 | ||
| 930 | page = alloc_page(GFP_KERNEL); | 930 | page = alloc_page(GFP_NOIO); |
| 931 | if (!page) | 931 | if (!page) |
| 932 | return -ENOMEM; | 932 | return -ENOMEM; |
| 933 | 933 | ||
| @@ -1183,7 +1183,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone) | |||
| 1183 | 1183 | ||
| 1184 | /* Get zone information from disk */ | 1184 | /* Get zone information from disk */ |
| 1185 | ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), | 1185 | ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), |
| 1186 | &blkz, &nr_blkz, GFP_KERNEL); | 1186 | &blkz, &nr_blkz, GFP_NOIO); |
| 1187 | if (ret) { | 1187 | if (ret) { |
| 1188 | dmz_dev_err(zmd->dev, "Get zone %u report failed", | 1188 | dmz_dev_err(zmd->dev, "Get zone %u report failed", |
| 1189 | dmz_id(zmd, zone)); | 1189 | dmz_id(zmd, zone)); |
| @@ -1257,7 +1257,7 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone) | |||
| 1257 | 1257 | ||
| 1258 | ret = blkdev_reset_zones(dev->bdev, | 1258 | ret = blkdev_reset_zones(dev->bdev, |
| 1259 | dmz_start_sect(zmd, zone), | 1259 | dmz_start_sect(zmd, zone), |
| 1260 | dev->zone_nr_sectors, GFP_KERNEL); | 1260 | dev->zone_nr_sectors, GFP_NOIO); |
| 1261 | if (ret) { | 1261 | if (ret) { |
| 1262 | dmz_dev_err(dev, "Reset zone %u failed %d", | 1262 | dmz_dev_err(dev, "Reset zone %u failed %d", |
| 1263 | dmz_id(zmd, zone), ret); | 1263 | dmz_id(zmd, zone), ret); |
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c index 05c0a126f5c8..44a119e12f1a 100644 --- a/drivers/md/dm-zoned-reclaim.c +++ b/drivers/md/dm-zoned-reclaim.c | |||
| @@ -75,7 +75,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone, | |||
| 75 | nr_blocks = block - wp_block; | 75 | nr_blocks = block - wp_block; |
| 76 | ret = blkdev_issue_zeroout(zrc->dev->bdev, | 76 | ret = blkdev_issue_zeroout(zrc->dev->bdev, |
| 77 | dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block), | 77 | dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block), |
| 78 | dmz_blk2sect(nr_blocks), GFP_NOFS, false); | 78 | dmz_blk2sect(nr_blocks), GFP_NOIO, 0); |
| 79 | if (ret) { | 79 | if (ret) { |
| 80 | dmz_dev_err(zrc->dev, | 80 | dmz_dev_err(zrc->dev, |
| 81 | "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d", | 81 | "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d", |
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 2b538fa817f4..b08bbbd4d902 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c | |||
| @@ -541,7 +541,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) | |||
| 541 | int ret; | 541 | int ret; |
| 542 | 542 | ||
| 543 | /* Create a new chunk work */ | 543 | /* Create a new chunk work */ |
| 544 | cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOFS); | 544 | cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); |
| 545 | if (!cw) | 545 | if (!cw) |
| 546 | goto out; | 546 | goto out; |
| 547 | 547 | ||
| @@ -588,7 +588,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) | |||
| 588 | 588 | ||
| 589 | bio->bi_bdev = dev->bdev; | 589 | bio->bi_bdev = dev->bdev; |
| 590 | 590 | ||
| 591 | if (!nr_sectors && (bio_op(bio) != REQ_OP_FLUSH) && (bio_op(bio) != REQ_OP_WRITE)) | 591 | if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE) |
| 592 | return DM_MAPIO_REMAPPED; | 592 | return DM_MAPIO_REMAPPED; |
| 593 | 593 | ||
| 594 | /* The BIO should be block aligned */ | 594 | /* The BIO should be block aligned */ |
| @@ -603,7 +603,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio) | |||
| 603 | bioctx->status = BLK_STS_OK; | 603 | bioctx->status = BLK_STS_OK; |
| 604 | 604 | ||
| 605 | /* Set the BIO pending in the flush list */ | 605 | /* Set the BIO pending in the flush list */ |
| 606 | if (bio_op(bio) == REQ_OP_FLUSH || (!nr_sectors && bio_op(bio) == REQ_OP_WRITE)) { | 606 | if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) { |
| 607 | spin_lock(&dmz->flush_lock); | 607 | spin_lock(&dmz->flush_lock); |
| 608 | bio_list_add(&dmz->flush_list, bio); | 608 | bio_list_add(&dmz->flush_list, bio); |
| 609 | spin_unlock(&dmz->flush_lock); | 609 | spin_unlock(&dmz->flush_lock); |
| @@ -785,7 +785,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
| 785 | 785 | ||
| 786 | /* Chunk BIO work */ | 786 | /* Chunk BIO work */ |
| 787 | mutex_init(&dmz->chunk_lock); | 787 | mutex_init(&dmz->chunk_lock); |
| 788 | INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOFS); | 788 | INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL); |
| 789 | dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND, | 789 | dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND, |
| 790 | 0, dev->name); | 790 | 0, dev->name); |
| 791 | if (!dmz->chunk_wq) { | 791 | if (!dmz->chunk_wq) { |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2edbcc2d7d3f..d669fddd9290 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -27,16 +27,6 @@ | |||
| 27 | 27 | ||
| 28 | #define DM_MSG_PREFIX "core" | 28 | #define DM_MSG_PREFIX "core" |
| 29 | 29 | ||
| 30 | #ifdef CONFIG_PRINTK | ||
| 31 | /* | ||
| 32 | * ratelimit state to be used in DMXXX_LIMIT(). | ||
| 33 | */ | ||
| 34 | DEFINE_RATELIMIT_STATE(dm_ratelimit_state, | ||
| 35 | DEFAULT_RATELIMIT_INTERVAL, | ||
| 36 | DEFAULT_RATELIMIT_BURST); | ||
| 37 | EXPORT_SYMBOL(dm_ratelimit_state); | ||
| 38 | #endif | ||
| 39 | |||
| 40 | /* | 30 | /* |
| 41 | * Cookies are numeric values sent with CHANGE and REMOVE | 31 | * Cookies are numeric values sent with CHANGE and REMOVE |
| 42 | * uevents while resuming, removing or renaming the device. | 32 | * uevents while resuming, removing or renaming the device. |
| @@ -1523,7 +1513,7 @@ static void __split_and_process_bio(struct mapped_device *md, | |||
| 1523 | } | 1513 | } |
| 1524 | 1514 | ||
| 1525 | /* drop the extra reference count */ | 1515 | /* drop the extra reference count */ |
| 1526 | dec_pending(ci.io, error); | 1516 | dec_pending(ci.io, errno_to_blk_status(error)); |
| 1527 | } | 1517 | } |
| 1528 | /*----------------------------------------------------------------- | 1518 | /*----------------------------------------------------------------- |
| 1529 | * CRUD END | 1519 | * CRUD END |
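The dm.c change above sits on the errno/blk_status_t boundary introduced by the blk_status_t conversion: kernel-internal integer error codes must be translated before being handed to bio completion. A minimal illustrative helper (not the dm core path itself):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void finish_bio(struct bio *bio, int error)
{
	/* translate a kernel errno into the on-bio status encoding */
	bio->bi_status = errno_to_blk_status(error);
	bio_endio(bio);
}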
diff --git a/drivers/md/md.c b/drivers/md/md.c index 8cdca0296749..b01e458d31e9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -2287,7 +2287,7 @@ static void export_array(struct mddev *mddev) | |||
| 2287 | 2287 | ||
| 2288 | static bool set_in_sync(struct mddev *mddev) | 2288 | static bool set_in_sync(struct mddev *mddev) |
| 2289 | { | 2289 | { |
| 2290 | WARN_ON_ONCE(!spin_is_locked(&mddev->lock)); | 2290 | WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock)); |
| 2291 | if (!mddev->in_sync) { | 2291 | if (!mddev->in_sync) { |
| 2292 | mddev->sync_checkers++; | 2292 | mddev->sync_checkers++; |
| 2293 | spin_unlock(&mddev->lock); | 2293 | spin_unlock(&mddev->lock); |
| @@ -7996,7 +7996,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) | |||
| 7996 | if (mddev->safemode == 1) | 7996 | if (mddev->safemode == 1) |
| 7997 | mddev->safemode = 0; | 7997 | mddev->safemode = 0; |
| 7998 | /* sync_checkers is always 0 when writes_pending is in per-cpu mode */ | 7998 | /* sync_checkers is always 0 when writes_pending is in per-cpu mode */ |
| 7999 | if (mddev->in_sync || !mddev->sync_checkers) { | 7999 | if (mddev->in_sync || mddev->sync_checkers) { |
| 8000 | spin_lock(&mddev->lock); | 8000 | spin_lock(&mddev->lock); |
| 8001 | if (mddev->in_sync) { | 8001 | if (mddev->in_sync) { |
| 8002 | mddev->in_sync = 0; | 8002 | mddev->in_sync = 0; |
| @@ -8656,6 +8656,9 @@ void md_check_recovery(struct mddev *mddev) | |||
| 8656 | if (mddev_trylock(mddev)) { | 8656 | if (mddev_trylock(mddev)) { |
| 8657 | int spares = 0; | 8657 | int spares = 0; |
| 8658 | 8658 | ||
| 8659 | if (!mddev->external && mddev->safemode == 1) | ||
| 8660 | mddev->safemode = 0; | ||
| 8661 | |||
| 8659 | if (mddev->ro) { | 8662 | if (mddev->ro) { |
| 8660 | struct md_rdev *rdev; | 8663 | struct md_rdev *rdev; |
| 8661 | if (!mddev->external && mddev->in_sync) | 8664 | if (!mddev->external && mddev->in_sync) |
diff --git a/drivers/md/md.h b/drivers/md/md.h index b50eb4ac1b82..09db03455801 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
| @@ -731,58 +731,4 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio | |||
| 731 | !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) | 731 | !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) |
| 732 | mddev->queue->limits.max_write_zeroes_sectors = 0; | 732 | mddev->queue->limits.max_write_zeroes_sectors = 0; |
| 733 | } | 733 | } |
| 734 | |||
| 735 | /* Maximum size of each resync request */ | ||
| 736 | #define RESYNC_BLOCK_SIZE (64*1024) | ||
| 737 | #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) | ||
| 738 | |||
| 739 | /* for managing resync I/O pages */ | ||
| 740 | struct resync_pages { | ||
| 741 | unsigned idx; /* for get/put page from the pool */ | ||
| 742 | void *raid_bio; | ||
| 743 | struct page *pages[RESYNC_PAGES]; | ||
| 744 | }; | ||
| 745 | |||
| 746 | static inline int resync_alloc_pages(struct resync_pages *rp, | ||
| 747 | gfp_t gfp_flags) | ||
| 748 | { | ||
| 749 | int i; | ||
| 750 | |||
| 751 | for (i = 0; i < RESYNC_PAGES; i++) { | ||
| 752 | rp->pages[i] = alloc_page(gfp_flags); | ||
| 753 | if (!rp->pages[i]) | ||
| 754 | goto out_free; | ||
| 755 | } | ||
| 756 | |||
| 757 | return 0; | ||
| 758 | |||
| 759 | out_free: | ||
| 760 | while (--i >= 0) | ||
| 761 | put_page(rp->pages[i]); | ||
| 762 | return -ENOMEM; | ||
| 763 | } | ||
| 764 | |||
| 765 | static inline void resync_free_pages(struct resync_pages *rp) | ||
| 766 | { | ||
| 767 | int i; | ||
| 768 | |||
| 769 | for (i = 0; i < RESYNC_PAGES; i++) | ||
| 770 | put_page(rp->pages[i]); | ||
| 771 | } | ||
| 772 | |||
| 773 | static inline void resync_get_all_pages(struct resync_pages *rp) | ||
| 774 | { | ||
| 775 | int i; | ||
| 776 | |||
| 777 | for (i = 0; i < RESYNC_PAGES; i++) | ||
| 778 | get_page(rp->pages[i]); | ||
| 779 | } | ||
| 780 | |||
| 781 | static inline struct page *resync_fetch_page(struct resync_pages *rp, | ||
| 782 | unsigned idx) | ||
| 783 | { | ||
| 784 | if (WARN_ON_ONCE(idx >= RESYNC_PAGES)) | ||
| 785 | return NULL; | ||
| 786 | return rp->pages[idx]; | ||
| 787 | } | ||
| 788 | #endif /* _MD_MD_H */ | 734 | #endif /* _MD_MD_H */ |
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c new file mode 100644 index 000000000000..9f2670b45f31 --- /dev/null +++ b/drivers/md/raid1-10.c | |||
| @@ -0,0 +1,81 @@ | |||
| 1 | /* Maximum size of each resync request */ | ||
| 2 | #define RESYNC_BLOCK_SIZE (64*1024) | ||
| 3 | #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) | ||
| 4 | |||
| 5 | /* for managing resync I/O pages */ | ||
| 6 | struct resync_pages { | ||
| 7 | void *raid_bio; | ||
| 8 | struct page *pages[RESYNC_PAGES]; | ||
| 9 | }; | ||
| 10 | |||
| 11 | static inline int resync_alloc_pages(struct resync_pages *rp, | ||
| 12 | gfp_t gfp_flags) | ||
| 13 | { | ||
| 14 | int i; | ||
| 15 | |||
| 16 | for (i = 0; i < RESYNC_PAGES; i++) { | ||
| 17 | rp->pages[i] = alloc_page(gfp_flags); | ||
| 18 | if (!rp->pages[i]) | ||
| 19 | goto out_free; | ||
| 20 | } | ||
| 21 | |||
| 22 | return 0; | ||
| 23 | |||
| 24 | out_free: | ||
| 25 | while (--i >= 0) | ||
| 26 | put_page(rp->pages[i]); | ||
| 27 | return -ENOMEM; | ||
| 28 | } | ||
| 29 | |||
| 30 | static inline void resync_free_pages(struct resync_pages *rp) | ||
| 31 | { | ||
| 32 | int i; | ||
| 33 | |||
| 34 | for (i = 0; i < RESYNC_PAGES; i++) | ||
| 35 | put_page(rp->pages[i]); | ||
| 36 | } | ||
| 37 | |||
| 38 | static inline void resync_get_all_pages(struct resync_pages *rp) | ||
| 39 | { | ||
| 40 | int i; | ||
| 41 | |||
| 42 | for (i = 0; i < RESYNC_PAGES; i++) | ||
| 43 | get_page(rp->pages[i]); | ||
| 44 | } | ||
| 45 | |||
| 46 | static inline struct page *resync_fetch_page(struct resync_pages *rp, | ||
| 47 | unsigned idx) | ||
| 48 | { | ||
| 49 | if (WARN_ON_ONCE(idx >= RESYNC_PAGES)) | ||
| 50 | return NULL; | ||
| 51 | return rp->pages[idx]; | ||
| 52 | } | ||
| 53 | |||
| 54 | /* | ||
| 55 | * 'struct resync_pages' stores actual pages used for doing the resync | ||
| 56 | * IO, and it is per-bio, so make .bi_private point to it. | ||
| 57 | */ | ||
| 58 | static inline struct resync_pages *get_resync_pages(struct bio *bio) | ||
| 59 | { | ||
| 60 | return bio->bi_private; | ||
| 61 | } | ||
| 62 | |||
| 63 | /* generally called after bio_reset() for resetting bvec */ | ||
| 64 | static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp, | ||
| 65 | int size) | ||
| 66 | { | ||
| 67 | int idx = 0; | ||
| 68 | |||
| 69 | /* initialize bvec table again */ | ||
| 70 | do { | ||
| 71 | struct page *page = resync_fetch_page(rp, idx); | ||
| 72 | int len = min_t(int, size, PAGE_SIZE); | ||
| 73 | |||
| 74 | /* | ||
| 75 | * won't fail because the vec table is big | ||
| 76 | * enough to hold all these pages | ||
| 77 | */ | ||
| 78 | bio_add_page(bio, page, len, 0); | ||
| 79 | size -= len; | ||
| 80 | } while (idx++ < RESYNC_PAGES && size > 0); | ||
| 81 | } | ||
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 3febfc8391fb..f50958ded9f0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -81,14 +81,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr); | |||
| 81 | #define raid1_log(md, fmt, args...) \ | 81 | #define raid1_log(md, fmt, args...) \ |
| 82 | do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0) | 82 | do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0) |
| 83 | 83 | ||
| 84 | /* | 84 | #include "raid1-10.c" |
| 85 | * 'strct resync_pages' stores actual pages used for doing the resync | ||
| 86 | * IO, and it is per-bio, so make .bi_private points to it. | ||
| 87 | */ | ||
| 88 | static inline struct resync_pages *get_resync_pages(struct bio *bio) | ||
| 89 | { | ||
| 90 | return bio->bi_private; | ||
| 91 | } | ||
| 92 | 85 | ||
| 93 | /* | 86 | /* |
| 94 | * for resync bio, r1bio pointer can be retrieved from the per-bio | 87 | * for resync bio, r1bio pointer can be retrieved from the per-bio |
| @@ -170,7 +163,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) | |||
| 170 | resync_get_all_pages(rp); | 163 | resync_get_all_pages(rp); |
| 171 | } | 164 | } |
| 172 | 165 | ||
| 173 | rp->idx = 0; | ||
| 174 | rp->raid_bio = r1_bio; | 166 | rp->raid_bio = r1_bio; |
| 175 | bio->bi_private = rp; | 167 | bio->bi_private = rp; |
| 176 | } | 168 | } |
| @@ -492,10 +484,6 @@ static void raid1_end_write_request(struct bio *bio) | |||
| 492 | } | 484 | } |
| 493 | 485 | ||
| 494 | if (behind) { | 486 | if (behind) { |
| 495 | /* we release behind master bio when all write are done */ | ||
| 496 | if (r1_bio->behind_master_bio == bio) | ||
| 497 | to_put = NULL; | ||
| 498 | |||
| 499 | if (test_bit(WriteMostly, &rdev->flags)) | 487 | if (test_bit(WriteMostly, &rdev->flags)) |
| 500 | atomic_dec(&r1_bio->behind_remaining); | 488 | atomic_dec(&r1_bio->behind_remaining); |
| 501 | 489 | ||
| @@ -802,8 +790,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio) | |||
| 802 | bio->bi_next = NULL; | 790 | bio->bi_next = NULL; |
| 803 | bio->bi_bdev = rdev->bdev; | 791 | bio->bi_bdev = rdev->bdev; |
| 804 | if (test_bit(Faulty, &rdev->flags)) { | 792 | if (test_bit(Faulty, &rdev->flags)) { |
| 805 | bio->bi_status = BLK_STS_IOERR; | 793 | bio_io_error(bio); |
| 806 | bio_endio(bio); | ||
| 807 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && | 794 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
| 808 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 795 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
| 809 | /* Just ignore it */ | 796 | /* Just ignore it */ |
| @@ -1088,7 +1075,7 @@ static void unfreeze_array(struct r1conf *conf) | |||
| 1088 | wake_up(&conf->wait_barrier); | 1075 | wake_up(&conf->wait_barrier); |
| 1089 | } | 1076 | } |
| 1090 | 1077 | ||
| 1091 | static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio, | 1078 | static void alloc_behind_master_bio(struct r1bio *r1_bio, |
| 1092 | struct bio *bio) | 1079 | struct bio *bio) |
| 1093 | { | 1080 | { |
| 1094 | int size = bio->bi_iter.bi_size; | 1081 | int size = bio->bi_iter.bi_size; |
| @@ -1098,11 +1085,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio, | |||
| 1098 | 1085 | ||
| 1099 | behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); | 1086 | behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); |
| 1100 | if (!behind_bio) | 1087 | if (!behind_bio) |
| 1101 | goto fail; | 1088 | return; |
| 1102 | 1089 | ||
| 1103 | /* discard op, we don't support writezero/writesame yet */ | 1090 | /* discard op, we don't support writezero/writesame yet */ |
| 1104 | if (!bio_has_data(bio)) | 1091 | if (!bio_has_data(bio)) { |
| 1092 | behind_bio->bi_iter.bi_size = size; | ||
| 1105 | goto skip_copy; | 1093 | goto skip_copy; |
| 1094 | } | ||
| 1106 | 1095 | ||
| 1107 | while (i < vcnt && size) { | 1096 | while (i < vcnt && size) { |
| 1108 | struct page *page; | 1097 | struct page *page; |
| @@ -1123,14 +1112,13 @@ skip_copy: | |||
| 1123 | r1_bio->behind_master_bio = behind_bio;; | 1112 | r1_bio->behind_master_bio = behind_bio;; |
| 1124 | set_bit(R1BIO_BehindIO, &r1_bio->state); | 1113 | set_bit(R1BIO_BehindIO, &r1_bio->state); |
| 1125 | 1114 | ||
| 1126 | return behind_bio; | 1115 | return; |
| 1127 | 1116 | ||
| 1128 | free_pages: | 1117 | free_pages: |
| 1129 | pr_debug("%dB behind alloc failed, doing sync I/O\n", | 1118 | pr_debug("%dB behind alloc failed, doing sync I/O\n", |
| 1130 | bio->bi_iter.bi_size); | 1119 | bio->bi_iter.bi_size); |
| 1131 | bio_free_pages(behind_bio); | 1120 | bio_free_pages(behind_bio); |
| 1132 | fail: | 1121 | bio_put(behind_bio); |
| 1133 | return behind_bio; | ||
| 1134 | } | 1122 | } |
| 1135 | 1123 | ||
| 1136 | struct raid1_plug_cb { | 1124 | struct raid1_plug_cb { |
| @@ -1483,7 +1471,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, | |||
| 1483 | (atomic_read(&bitmap->behind_writes) | 1471 | (atomic_read(&bitmap->behind_writes) |
| 1484 | < mddev->bitmap_info.max_write_behind) && | 1472 | < mddev->bitmap_info.max_write_behind) && |
| 1485 | !waitqueue_active(&bitmap->behind_wait)) { | 1473 | !waitqueue_active(&bitmap->behind_wait)) { |
| 1486 | mbio = alloc_behind_master_bio(r1_bio, bio); | 1474 | alloc_behind_master_bio(r1_bio, bio); |
| 1487 | } | 1475 | } |
| 1488 | 1476 | ||
| 1489 | bitmap_startwrite(bitmap, r1_bio->sector, | 1477 | bitmap_startwrite(bitmap, r1_bio->sector, |
| @@ -1493,14 +1481,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, | |||
| 1493 | first_clone = 0; | 1481 | first_clone = 0; |
| 1494 | } | 1482 | } |
| 1495 | 1483 | ||
| 1496 | if (!mbio) { | 1484 | if (r1_bio->behind_master_bio) |
| 1497 | if (r1_bio->behind_master_bio) | 1485 | mbio = bio_clone_fast(r1_bio->behind_master_bio, |
| 1498 | mbio = bio_clone_fast(r1_bio->behind_master_bio, | 1486 | GFP_NOIO, mddev->bio_set); |
| 1499 | GFP_NOIO, | 1487 | else |
| 1500 | mddev->bio_set); | 1488 | mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); |
| 1501 | else | ||
| 1502 | mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); | ||
| 1503 | } | ||
| 1504 | 1489 | ||
| 1505 | if (r1_bio->behind_master_bio) { | 1490 | if (r1_bio->behind_master_bio) { |
| 1506 | if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) | 1491 | if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) |
| @@ -2086,10 +2071,7 @@ static void process_checks(struct r1bio *r1_bio) | |||
| 2086 | /* Fix variable parts of all bios */ | 2071 | /* Fix variable parts of all bios */ |
| 2087 | vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); | 2072 | vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); |
| 2088 | for (i = 0; i < conf->raid_disks * 2; i++) { | 2073 | for (i = 0; i < conf->raid_disks * 2; i++) { |
| 2089 | int j; | ||
| 2090 | int size; | ||
| 2091 | blk_status_t status; | 2074 | blk_status_t status; |
| 2092 | struct bio_vec *bi; | ||
| 2093 | struct bio *b = r1_bio->bios[i]; | 2075 | struct bio *b = r1_bio->bios[i]; |
| 2094 | struct resync_pages *rp = get_resync_pages(b); | 2076 | struct resync_pages *rp = get_resync_pages(b); |
| 2095 | if (b->bi_end_io != end_sync_read) | 2077 | if (b->bi_end_io != end_sync_read) |
| @@ -2098,8 +2080,6 @@ static void process_checks(struct r1bio *r1_bio) | |||
| 2098 | status = b->bi_status; | 2080 | status = b->bi_status; |
| 2099 | bio_reset(b); | 2081 | bio_reset(b); |
| 2100 | b->bi_status = status; | 2082 | b->bi_status = status; |
| 2101 | b->bi_vcnt = vcnt; | ||
| 2102 | b->bi_iter.bi_size = r1_bio->sectors << 9; | ||
| 2103 | b->bi_iter.bi_sector = r1_bio->sector + | 2083 | b->bi_iter.bi_sector = r1_bio->sector + |
| 2104 | conf->mirrors[i].rdev->data_offset; | 2084 | conf->mirrors[i].rdev->data_offset; |
| 2105 | b->bi_bdev = conf->mirrors[i].rdev->bdev; | 2085 | b->bi_bdev = conf->mirrors[i].rdev->bdev; |
| @@ -2107,15 +2087,8 @@ static void process_checks(struct r1bio *r1_bio) | |||
| 2107 | rp->raid_bio = r1_bio; | 2087 | rp->raid_bio = r1_bio; |
| 2108 | b->bi_private = rp; | 2088 | b->bi_private = rp; |
| 2109 | 2089 | ||
| 2110 | size = b->bi_iter.bi_size; | 2090 | /* initialize bvec table again */ |
| 2111 | bio_for_each_segment_all(bi, b, j) { | 2091 | md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9); |
| 2112 | bi->bv_offset = 0; | ||
| 2113 | if (size > PAGE_SIZE) | ||
| 2114 | bi->bv_len = PAGE_SIZE; | ||
| 2115 | else | ||
| 2116 | bi->bv_len = size; | ||
| 2117 | size -= PAGE_SIZE; | ||
| 2118 | } | ||
| 2119 | } | 2092 | } |
| 2120 | for (primary = 0; primary < conf->raid_disks * 2; primary++) | 2093 | for (primary = 0; primary < conf->raid_disks * 2; primary++) |
| 2121 | if (r1_bio->bios[primary]->bi_end_io == end_sync_read && | 2094 | if (r1_bio->bios[primary]->bi_end_io == end_sync_read && |
| @@ -2366,8 +2339,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) | |||
| 2366 | wbio = bio_clone_fast(r1_bio->behind_master_bio, | 2339 | wbio = bio_clone_fast(r1_bio->behind_master_bio, |
| 2367 | GFP_NOIO, | 2340 | GFP_NOIO, |
| 2368 | mddev->bio_set); | 2341 | mddev->bio_set); |
| 2369 | /* We really need a _all clone */ | ||
| 2370 | wbio->bi_iter = (struct bvec_iter){ 0 }; | ||
| 2371 | } else { | 2342 | } else { |
| 2372 | wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, | 2343 | wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, |
| 2373 | mddev->bio_set); | 2344 | mddev->bio_set); |
| @@ -2619,6 +2590,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
| 2619 | int good_sectors = RESYNC_SECTORS; | 2590 | int good_sectors = RESYNC_SECTORS; |
| 2620 | int min_bad = 0; /* number of sectors that are bad in all devices */ | 2591 | int min_bad = 0; /* number of sectors that are bad in all devices */ |
| 2621 | int idx = sector_to_idx(sector_nr); | 2592 | int idx = sector_to_idx(sector_nr); |
| 2593 | int page_idx = 0; | ||
| 2622 | 2594 | ||
| 2623 | if (!conf->r1buf_pool) | 2595 | if (!conf->r1buf_pool) |
| 2624 | if (init_resync(conf)) | 2596 | if (init_resync(conf)) |
| @@ -2846,7 +2818,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
| 2846 | bio = r1_bio->bios[i]; | 2818 | bio = r1_bio->bios[i]; |
| 2847 | rp = get_resync_pages(bio); | 2819 | rp = get_resync_pages(bio); |
| 2848 | if (bio->bi_end_io) { | 2820 | if (bio->bi_end_io) { |
| 2849 | page = resync_fetch_page(rp, rp->idx++); | 2821 | page = resync_fetch_page(rp, page_idx); |
| 2850 | 2822 | ||
| 2851 | /* | 2823 | /* |
| 2852 | * won't fail because the vec table is big | 2824 | * won't fail because the vec table is big |
| @@ -2858,7 +2830,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
| 2858 | nr_sectors += len>>9; | 2830 | nr_sectors += len>>9; |
| 2859 | sector_nr += len>>9; | 2831 | sector_nr += len>>9; |
| 2860 | sync_blocks -= (len>>9); | 2832 | sync_blocks -= (len>>9); |
| 2861 | } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES); | 2833 | } while (++page_idx < RESYNC_PAGES); |
| 2862 | 2834 | ||
| 2863 | r1_bio->sectors = nr_sectors; | 2835 | r1_bio->sectors = nr_sectors; |
| 2864 | 2836 | ||
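Two behavioural notes on the raid1.c side: alloc_behind_master_bio() now returns void and records its result in r1_bio->behind_master_bio, and raid1_sync_request() walks the resync pages with a local page_idx instead of the removed rp->idx. The write path therefore degrades to a plain clone when the behind allocation fails, roughly (condensed from raid1_write_request() above):

	alloc_behind_master_bio(r1_bio, bio);	/* may silently fail */

	if (r1_bio->behind_master_bio)
		mbio = bio_clone_fast(r1_bio->behind_master_bio,
				      GFP_NOIO, mddev->bio_set);
	else	/* fall back to an ordinary (non-write-behind) clone */
		mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);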
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 5026e7ad51d3..f55d4cc085f6 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -110,14 +110,7 @@ static void end_reshape(struct r10conf *conf); | |||
| 110 | #define raid10_log(md, fmt, args...) \ | 110 | #define raid10_log(md, fmt, args...) \ |
| 111 | do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0) | 111 | do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0) |
| 112 | 112 | ||
| 113 | /* | 113 | #include "raid1-10.c" |
| 114 | * 'strct resync_pages' stores actual pages used for doing the resync | ||
| 115 | * IO, and it is per-bio, so make .bi_private points to it. | ||
| 116 | */ | ||
| 117 | static inline struct resync_pages *get_resync_pages(struct bio *bio) | ||
| 118 | { | ||
| 119 | return bio->bi_private; | ||
| 120 | } | ||
| 121 | 114 | ||
| 122 | /* | 115 | /* |
| 123 | * for resync bio, r10bio pointer can be retrieved from the per-bio | 116 | * for resync bio, r10bio pointer can be retrieved from the per-bio |
| @@ -221,7 +214,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) | |||
| 221 | resync_get_all_pages(rp); | 214 | resync_get_all_pages(rp); |
| 222 | } | 215 | } |
| 223 | 216 | ||
| 224 | rp->idx = 0; | ||
| 225 | rp->raid_bio = r10_bio; | 217 | rp->raid_bio = r10_bio; |
| 226 | bio->bi_private = rp; | 218 | bio->bi_private = rp; |
| 227 | if (rbio) { | 219 | if (rbio) { |
| @@ -913,8 +905,7 @@ static void flush_pending_writes(struct r10conf *conf) | |||
| 913 | bio->bi_next = NULL; | 905 | bio->bi_next = NULL; |
| 914 | bio->bi_bdev = rdev->bdev; | 906 | bio->bi_bdev = rdev->bdev; |
| 915 | if (test_bit(Faulty, &rdev->flags)) { | 907 | if (test_bit(Faulty, &rdev->flags)) { |
| 916 | bio->bi_status = BLK_STS_IOERR; | 908 | bio_io_error(bio); |
| 917 | bio_endio(bio); | ||
| 918 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && | 909 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
| 919 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 910 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
| 920 | /* Just ignore it */ | 911 | /* Just ignore it */ |
| @@ -1098,8 +1089,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) | |||
| 1098 | bio->bi_next = NULL; | 1089 | bio->bi_next = NULL; |
| 1099 | bio->bi_bdev = rdev->bdev; | 1090 | bio->bi_bdev = rdev->bdev; |
| 1100 | if (test_bit(Faulty, &rdev->flags)) { | 1091 | if (test_bit(Faulty, &rdev->flags)) { |
| 1101 | bio->bi_status = BLK_STS_IOERR; | 1092 | bio_io_error(bio); |
| 1102 | bio_endio(bio); | ||
| 1103 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && | 1093 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
| 1104 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 1094 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
| 1105 | /* Just ignore it */ | 1095 | /* Just ignore it */ |
| @@ -2087,8 +2077,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
| 2087 | rp = get_resync_pages(tbio); | 2077 | rp = get_resync_pages(tbio); |
| 2088 | bio_reset(tbio); | 2078 | bio_reset(tbio); |
| 2089 | 2079 | ||
| 2090 | tbio->bi_vcnt = vcnt; | 2080 | md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size); |
| 2091 | tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; | 2081 | |
| 2092 | rp->raid_bio = r10_bio; | 2082 | rp->raid_bio = r10_bio; |
| 2093 | tbio->bi_private = rp; | 2083 | tbio->bi_private = rp; |
| 2094 | tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; | 2084 | tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; |
| @@ -2853,6 +2843,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
| 2853 | sector_t sectors_skipped = 0; | 2843 | sector_t sectors_skipped = 0; |
| 2854 | int chunks_skipped = 0; | 2844 | int chunks_skipped = 0; |
| 2855 | sector_t chunk_mask = conf->geo.chunk_mask; | 2845 | sector_t chunk_mask = conf->geo.chunk_mask; |
| 2846 | int page_idx = 0; | ||
| 2856 | 2847 | ||
| 2857 | if (!conf->r10buf_pool) | 2848 | if (!conf->r10buf_pool) |
| 2858 | if (init_resync(conf)) | 2849 | if (init_resync(conf)) |
| @@ -3355,7 +3346,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
| 3355 | break; | 3346 | break; |
| 3356 | for (bio= biolist ; bio ; bio=bio->bi_next) { | 3347 | for (bio= biolist ; bio ; bio=bio->bi_next) { |
| 3357 | struct resync_pages *rp = get_resync_pages(bio); | 3348 | struct resync_pages *rp = get_resync_pages(bio); |
| 3358 | page = resync_fetch_page(rp, rp->idx++); | 3349 | page = resync_fetch_page(rp, page_idx); |
| 3359 | /* | 3350 | /* |
| 3360 | * won't fail because the vec table is big enough | 3351 | * won't fail because the vec table is big enough |
| 3361 | * to hold all these pages | 3352 | * to hold all these pages |
| @@ -3364,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
| 3364 | } | 3355 | } |
| 3365 | nr_sectors += len>>9; | 3356 | nr_sectors += len>>9; |
| 3366 | sector_nr += len>>9; | 3357 | sector_nr += len>>9; |
| 3367 | } while (get_resync_pages(biolist)->idx < RESYNC_PAGES); | 3358 | } while (++page_idx < RESYNC_PAGES); |
| 3368 | r10_bio->sectors = nr_sectors; | 3359 | r10_bio->sectors = nr_sectors; |
| 3369 | 3360 | ||
| 3370 | while (biolist) { | 3361 | while (biolist) { |
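In raid10's sync_request_write() the open-coded fix-up after bio_reset() is replaced by the shared helper, which re-adds every preallocated resync page instead of only patching the sizes. The before/after shape, for illustration:

	/* before: sizes patched by hand, bvec entries left untouched */
	tbio->bi_vcnt = vcnt;
	tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;

	/* after: bvec table rebuilt from the resync_pages attached to tbio */
	md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);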
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index bfa1e907c472..2dcbafa8e66c 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
| @@ -236,9 +236,10 @@ struct r5l_io_unit { | |||
| 236 | bool need_split_bio; | 236 | bool need_split_bio; |
| 237 | struct bio *split_bio; | 237 | struct bio *split_bio; |
| 238 | 238 | ||
| 239 | unsigned int has_flush:1; /* include flush request */ | 239 | unsigned int has_flush:1; /* include flush request */ |
| 240 | unsigned int has_fua:1; /* include fua request */ | 240 | unsigned int has_fua:1; /* include fua request */ |
| 241 | unsigned int has_null_flush:1; /* include empty flush request */ | 241 | unsigned int has_null_flush:1; /* include null flush request */ |
| 242 | unsigned int has_flush_payload:1; /* include flush payload */ | ||
| 242 | /* | 243 | /* |
| 243 | * io isn't sent yet, flush/fua request can only be submitted till it's | 244 | * io isn't sent yet, flush/fua request can only be submitted till it's |
| 244 | * the first IO in running_ios list | 245 | * the first IO in running_ios list |
| @@ -571,6 +572,8 @@ static void r5l_log_endio(struct bio *bio) | |||
| 571 | struct r5l_io_unit *io_deferred; | 572 | struct r5l_io_unit *io_deferred; |
| 572 | struct r5l_log *log = io->log; | 573 | struct r5l_log *log = io->log; |
| 573 | unsigned long flags; | 574 | unsigned long flags; |
| 575 | bool has_null_flush; | ||
| 576 | bool has_flush_payload; | ||
| 574 | 577 | ||
| 575 | if (bio->bi_status) | 578 | if (bio->bi_status) |
| 576 | md_error(log->rdev->mddev, log->rdev); | 579 | md_error(log->rdev->mddev, log->rdev); |
| @@ -580,6 +583,16 @@ static void r5l_log_endio(struct bio *bio) | |||
| 580 | 583 | ||
| 581 | spin_lock_irqsave(&log->io_list_lock, flags); | 584 | spin_lock_irqsave(&log->io_list_lock, flags); |
| 582 | __r5l_set_io_unit_state(io, IO_UNIT_IO_END); | 585 | __r5l_set_io_unit_state(io, IO_UNIT_IO_END); |
| 586 | |||
| 587 | /* | ||
| 588 | * if the io doesn't have null_flush or flush payload, | ||
| 589 | * it is not safe to access it after releasing io_list_lock. | ||
| 590 | * Therefore, it is necessary to check the condition with | ||
| 591 | * the lock held. | ||
| 592 | */ | ||
| 593 | has_null_flush = io->has_null_flush; | ||
| 594 | has_flush_payload = io->has_flush_payload; | ||
| 595 | |||
| 583 | if (log->need_cache_flush && !list_empty(&io->stripe_list)) | 596 | if (log->need_cache_flush && !list_empty(&io->stripe_list)) |
| 584 | r5l_move_to_end_ios(log); | 597 | r5l_move_to_end_ios(log); |
| 585 | else | 598 | else |
| @@ -600,19 +613,23 @@ static void r5l_log_endio(struct bio *bio) | |||
| 600 | if (log->need_cache_flush) | 613 | if (log->need_cache_flush) |
| 601 | md_wakeup_thread(log->rdev->mddev->thread); | 614 | md_wakeup_thread(log->rdev->mddev->thread); |
| 602 | 615 | ||
| 603 | if (io->has_null_flush) { | 616 | /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */ |
| 617 | if (has_null_flush) { | ||
| 604 | struct bio *bi; | 618 | struct bio *bi; |
| 605 | 619 | ||
| 606 | WARN_ON(bio_list_empty(&io->flush_barriers)); | 620 | WARN_ON(bio_list_empty(&io->flush_barriers)); |
| 607 | while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) { | 621 | while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) { |
| 608 | bio_endio(bi); | 622 | bio_endio(bi); |
| 609 | atomic_dec(&io->pending_stripe); | 623 | if (atomic_dec_and_test(&io->pending_stripe)) { |
| 624 | __r5l_stripe_write_finished(io); | ||
| 625 | return; | ||
| 626 | } | ||
| 610 | } | 627 | } |
| 611 | } | 628 | } |
| 612 | 629 | /* decrease pending_stripe for flush payload */ | |
| 613 | /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */ | 630 | if (has_flush_payload) |
| 614 | if (atomic_read(&io->pending_stripe) == 0) | 631 | if (atomic_dec_and_test(&io->pending_stripe)) |
| 615 | __r5l_stripe_write_finished(io); | 632 | __r5l_stripe_write_finished(io); |
| 616 | } | 633 | } |
| 617 | 634 | ||
| 618 | static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io) | 635 | static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io) |
| @@ -881,6 +898,11 @@ static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect) | |||
| 881 | payload->size = cpu_to_le32(sizeof(__le64)); | 898 | payload->size = cpu_to_le32(sizeof(__le64)); |
| 882 | payload->flush_stripes[0] = cpu_to_le64(sect); | 899 | payload->flush_stripes[0] = cpu_to_le64(sect); |
| 883 | io->meta_offset += meta_size; | 900 | io->meta_offset += meta_size; |
| 901 | /* multiple flush payloads count as one pending_stripe */ | ||
| 902 | if (!io->has_flush_payload) { | ||
| 903 | io->has_flush_payload = 1; | ||
| 904 | atomic_inc(&io->pending_stripe); | ||
| 905 | } | ||
| 884 | mutex_unlock(&log->io_mutex); | 906 | mutex_unlock(&log->io_mutex); |
| 885 | } | 907 | } |
| 886 | 908 | ||
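The accounting rule introduced here is that any number of flush payloads in one io_unit holds exactly one pending_stripe reference, taken on the first payload and dropped once at completion. Condensed from the hunks above and from r5l_log_endio():

	/* append side: the first flush payload takes the single reference */
	if (!io->has_flush_payload) {
		io->has_flush_payload = 1;
		atomic_inc(&io->pending_stripe);
	}

	/* completion side: drop it exactly once */
	if (has_flush_payload)
		if (atomic_dec_and_test(&io->pending_stripe))
			__r5l_stripe_write_finished(io);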
| @@ -2540,23 +2562,32 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) | |||
| 2540 | */ | 2562 | */ |
| 2541 | int r5c_journal_mode_set(struct mddev *mddev, int mode) | 2563 | int r5c_journal_mode_set(struct mddev *mddev, int mode) |
| 2542 | { | 2564 | { |
| 2543 | struct r5conf *conf = mddev->private; | 2565 | struct r5conf *conf; |
| 2544 | struct r5l_log *log = conf->log; | 2566 | int err; |
| 2545 | |||
| 2546 | if (!log) | ||
| 2547 | return -ENODEV; | ||
| 2548 | 2567 | ||
| 2549 | if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || | 2568 | if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || |
| 2550 | mode > R5C_JOURNAL_MODE_WRITE_BACK) | 2569 | mode > R5C_JOURNAL_MODE_WRITE_BACK) |
| 2551 | return -EINVAL; | 2570 | return -EINVAL; |
| 2552 | 2571 | ||
| 2572 | err = mddev_lock(mddev); | ||
| 2573 | if (err) | ||
| 2574 | return err; | ||
| 2575 | conf = mddev->private; | ||
| 2576 | if (!conf || !conf->log) { | ||
| 2577 | mddev_unlock(mddev); | ||
| 2578 | return -ENODEV; | ||
| 2579 | } | ||
| 2580 | |||
| 2553 | if (raid5_calc_degraded(conf) > 0 && | 2581 | if (raid5_calc_degraded(conf) > 0 && |
| 2554 | mode == R5C_JOURNAL_MODE_WRITE_BACK) | 2582 | mode == R5C_JOURNAL_MODE_WRITE_BACK) { |
| 2583 | mddev_unlock(mddev); | ||
| 2555 | return -EINVAL; | 2584 | return -EINVAL; |
| 2585 | } | ||
| 2556 | 2586 | ||
| 2557 | mddev_suspend(mddev); | 2587 | mddev_suspend(mddev); |
| 2558 | conf->log->r5c_journal_mode = mode; | 2588 | conf->log->r5c_journal_mode = mode; |
| 2559 | mddev_resume(mddev); | 2589 | mddev_resume(mddev); |
| 2590 | mddev_unlock(mddev); | ||
| 2560 | 2591 | ||
| 2561 | pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", | 2592 | pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", |
| 2562 | mdname(mddev), mode, r5c_journal_mode_str[mode]); | 2593 | mdname(mddev), mode, r5c_journal_mode_str[mode]); |
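Pulled together, the new r5c_journal_mode_set() locking order reads as below; the degraded-array check that rejects a switch to write-back is omitted here for brevity:

	err = mddev_lock(mddev);		/* serialise with reconfiguration */
	if (err)
		return err;

	conf = mddev->private;			/* only valid while locked */
	if (!conf || !conf->log) {
		mddev_unlock(mddev);
		return -ENODEV;
	}

	mddev_suspend(mddev);			/* quiesce I/O while switching */
	conf->log->r5c_journal_mode = mode;
	mddev_resume(mddev);
	mddev_unlock(mddev);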
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index aeeb8d6854e2..0fc2748aaf95 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -3381,9 +3381,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
| 3381 | sh->dev[i].sector + STRIPE_SECTORS) { | 3381 | sh->dev[i].sector + STRIPE_SECTORS) { |
| 3382 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); | 3382 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); |
| 3383 | 3383 | ||
| 3384 | bi->bi_status = BLK_STS_IOERR; | ||
| 3385 | md_write_end(conf->mddev); | 3384 | md_write_end(conf->mddev); |
| 3386 | bio_endio(bi); | 3385 | bio_io_error(bi); |
| 3387 | bi = nextbi; | 3386 | bi = nextbi; |
| 3388 | } | 3387 | } |
| 3389 | if (bitmap_end) | 3388 | if (bitmap_end) |
| @@ -3403,9 +3402,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
| 3403 | sh->dev[i].sector + STRIPE_SECTORS) { | 3402 | sh->dev[i].sector + STRIPE_SECTORS) { |
| 3404 | struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); | 3403 | struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); |
| 3405 | 3404 | ||
| 3406 | bi->bi_status = BLK_STS_IOERR; | ||
| 3407 | md_write_end(conf->mddev); | 3405 | md_write_end(conf->mddev); |
| 3408 | bio_endio(bi); | 3406 | bio_io_error(bi); |
| 3409 | bi = bi2; | 3407 | bi = bi2; |
| 3410 | } | 3408 | } |
| 3411 | 3409 | ||
| @@ -3429,8 +3427,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
| 3429 | struct bio *nextbi = | 3427 | struct bio *nextbi = |
| 3430 | r5_next_bio(bi, sh->dev[i].sector); | 3428 | r5_next_bio(bi, sh->dev[i].sector); |
| 3431 | 3429 | ||
| 3432 | bi->bi_status = BLK_STS_IOERR; | 3430 | bio_io_error(bi); |
| 3433 | bio_endio(bi); | ||
| 3434 | bi = nextbi; | 3431 | bi = nextbi; |
| 3435 | } | 3432 | } |
| 3436 | } | 3433 | } |
| @@ -6237,6 +6234,8 @@ static void raid5_do_work(struct work_struct *work) | |||
| 6237 | pr_debug("%d stripes handled\n", handled); | 6234 | pr_debug("%d stripes handled\n", handled); |
| 6238 | 6235 | ||
| 6239 | spin_unlock_irq(&conf->device_lock); | 6236 | spin_unlock_irq(&conf->device_lock); |
| 6237 | |||
| 6238 | async_tx_issue_pending_all(); | ||
| 6240 | blk_finish_plug(&plug); | 6239 | blk_finish_plug(&plug); |
| 6241 | 6240 | ||
| 6242 | pr_debug("--- raid5worker inactive\n"); | 6241 | pr_debug("--- raid5worker inactive\n"); |
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index bf45977b2823..d596b601ff42 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c | |||
| @@ -559,7 +559,7 @@ EXPORT_SYMBOL_GPL(cec_transmit_done); | |||
| 559 | 559 | ||
| 560 | void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status) | 560 | void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status) |
| 561 | { | 561 | { |
| 562 | switch (status) { | 562 | switch (status & ~CEC_TX_STATUS_MAX_RETRIES) { |
| 563 | case CEC_TX_STATUS_OK: | 563 | case CEC_TX_STATUS_OK: |
| 564 | cec_transmit_done(adap, status, 0, 0, 0, 0); | 564 | cec_transmit_done(adap, status, 0, 0, 0, 0); |
| 565 | return; | 565 | return; |
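Masking CEC_TX_STATUS_MAX_RETRIES lets a driver report the combined status word it gets from hardware without stripping the retries bit first. A hypothetical caller (my_hw_tx_done() is invented for illustration):

	static void my_hw_tx_done(struct cec_adapter *adap, bool acked)
	{
		if (acked)
			cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
		else	/* NACK after the adapter exhausted its retries */
			cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK |
						  CEC_TX_STATUS_MAX_RETRIES);
	}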
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c index 74dc1c32080e..08b619d0ea1e 100644 --- a/drivers/media/cec/cec-notifier.c +++ b/drivers/media/cec/cec-notifier.c | |||
| @@ -87,6 +87,9 @@ EXPORT_SYMBOL_GPL(cec_notifier_put); | |||
| 87 | 87 | ||
| 88 | void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) | 88 | void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) |
| 89 | { | 89 | { |
| 90 | if (n == NULL) | ||
| 91 | return; | ||
| 92 | |||
| 90 | mutex_lock(&n->lock); | 93 | mutex_lock(&n->lock); |
| 91 | n->phys_addr = pa; | 94 | n->phys_addr = pa; |
| 92 | if (n->callback) | 95 | if (n->callback) |
| @@ -100,6 +103,9 @@ void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, | |||
| 100 | { | 103 | { |
| 101 | u16 pa = CEC_PHYS_ADDR_INVALID; | 104 | u16 pa = CEC_PHYS_ADDR_INVALID; |
| 102 | 105 | ||
| 106 | if (n == NULL) | ||
| 107 | return; | ||
| 108 | |||
| 103 | if (edid && edid->extensions) | 109 | if (edid && edid->extensions) |
| 104 | pa = cec_get_edid_phys_addr((const u8 *)edid, | 110 | pa = cec_get_edid_phys_addr((const u8 *)edid, |
| 105 | EDID_LENGTH * (edid->extensions + 1), NULL); | 111 | EDID_LENGTH * (edid->extensions + 1), NULL); |
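With the NULL checks the notifier calls degrade to no-ops when no notifier was created, so an HDMI driver can invoke them unconditionally. Hypothetical usage (my_hdmi and its fields are invented for the example):

	static void my_hdmi_hpd_changed(struct my_hdmi *hdmi,
					const struct edid *edid)
	{
		if (edid)
			cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier,
							     edid);
		else	/* safe even when hdmi->cec_notifier is NULL */
			cec_notifier_set_phys_addr(hdmi->cec_notifier,
						   CEC_PHYS_ADDR_INVALID);
	}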
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index af694f2066a2..17970cdd55fa 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c | |||
| @@ -349,7 +349,8 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot) | |||
| 349 | /* read the buffer size from the CAM */ | 349 | /* read the buffer size from the CAM */ |
| 350 | if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SR)) != 0) | 350 | if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SR)) != 0) |
| 351 | return ret; | 351 | return ret; |
| 352 | if ((ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ / 10)) != 0) | 352 | ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ); |
| 353 | if (ret != 0) | ||
| 353 | return ret; | 354 | return ret; |
| 354 | if ((ret = dvb_ca_en50221_read_data(ca, slot, buf, 2)) != 2) | 355 | if ((ret = dvb_ca_en50221_read_data(ca, slot, buf, 2)) != 2) |
| 355 | return -EIO; | 356 | return -EIO; |
| @@ -644,72 +645,101 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, | |||
| 644 | } | 645 | } |
| 645 | buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer); | 646 | buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer); |
| 646 | 647 | ||
| 647 | if (buf_free < (ca->slot_info[slot].link_buf_size + DVB_RINGBUFFER_PKTHDRSIZE)) { | 648 | if (buf_free < (ca->slot_info[slot].link_buf_size + |
| 649 | DVB_RINGBUFFER_PKTHDRSIZE)) { | ||
| 648 | status = -EAGAIN; | 650 | status = -EAGAIN; |
| 649 | goto exit; | 651 | goto exit; |
| 650 | } | 652 | } |
| 651 | } | 653 | } |
| 652 | 654 | ||
| 653 | /* check if there is data available */ | 655 | if (ca->pub->read_data && |
| 654 | if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) | 656 | (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT)) { |
| 655 | goto exit; | 657 | if (ebuf == NULL) |
| 656 | if (!(status & STATUSREG_DA)) { | 658 | status = ca->pub->read_data(ca->pub, slot, buf, |
| 657 | /* no data */ | 659 | sizeof(buf)); |
| 658 | status = 0; | 660 | else |
| 659 | goto exit; | 661 | status = ca->pub->read_data(ca->pub, slot, buf, ecount); |
| 660 | } | 662 | if (status < 0) |
| 661 | 663 | return status; | |
| 662 | /* read the amount of data */ | 664 | bytes_read = status; |
| 663 | if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH)) < 0) | 665 | if (status == 0) |
| 664 | goto exit; | 666 | goto exit; |
| 665 | bytes_read = status << 8; | 667 | } else { |
| 666 | if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW)) < 0) | ||
| 667 | goto exit; | ||
| 668 | bytes_read |= status; | ||
| 669 | 668 | ||
| 670 | /* check it will fit */ | 669 | /* check if there is data available */ |
| 671 | if (ebuf == NULL) { | 670 | status = ca->pub->read_cam_control(ca->pub, slot, |
| 672 | if (bytes_read > ca->slot_info[slot].link_buf_size) { | 671 | CTRLIF_STATUS); |
| 673 | pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n", | 672 | if (status < 0) |
| 674 | ca->dvbdev->adapter->num, bytes_read, | ||
| 675 | ca->slot_info[slot].link_buf_size); | ||
| 676 | ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; | ||
| 677 | status = -EIO; | ||
| 678 | goto exit; | 673 | goto exit; |
| 679 | } | 674 | if (!(status & STATUSREG_DA)) { |
| 680 | if (bytes_read < 2) { | 675 | /* no data */ |
| 681 | pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n", | 676 | status = 0; |
| 682 | ca->dvbdev->adapter->num); | ||
| 683 | ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; | ||
| 684 | status = -EIO; | ||
| 685 | goto exit; | 677 | goto exit; |
| 686 | } | 678 | } |
| 687 | } else { | 679 | |
| 688 | if (bytes_read > ecount) { | 680 | /* read the amount of data */ |
| 689 | pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n", | 681 | status = ca->pub->read_cam_control(ca->pub, slot, |
| 690 | ca->dvbdev->adapter->num); | 682 | CTRLIF_SIZE_HIGH); |
| 691 | status = -EIO; | 683 | if (status < 0) |
| 684 | goto exit; | ||
| 685 | bytes_read = status << 8; | ||
| 686 | status = ca->pub->read_cam_control(ca->pub, slot, | ||
| 687 | CTRLIF_SIZE_LOW); | ||
| 688 | if (status < 0) | ||
| 692 | goto exit; | 689 | goto exit; |
| 690 | bytes_read |= status; | ||
| 691 | |||
| 692 | /* check it will fit */ | ||
| 693 | if (ebuf == NULL) { | ||
| 694 | if (bytes_read > ca->slot_info[slot].link_buf_size) { | ||
| 695 | pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n", | ||
| 696 | ca->dvbdev->adapter->num, bytes_read, | ||
| 697 | ca->slot_info[slot].link_buf_size); | ||
| 698 | ca->slot_info[slot].slot_state = | ||
| 699 | DVB_CA_SLOTSTATE_LINKINIT; | ||
| 700 | status = -EIO; | ||
| 701 | goto exit; | ||
| 702 | } | ||
| 703 | if (bytes_read < 2) { | ||
| 704 | pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n", | ||
| 705 | ca->dvbdev->adapter->num); | ||
| 706 | ca->slot_info[slot].slot_state = | ||
| 707 | DVB_CA_SLOTSTATE_LINKINIT; | ||
| 708 | status = -EIO; | ||
| 709 | goto exit; | ||
| 710 | } | ||
| 711 | } else { | ||
| 712 | if (bytes_read > ecount) { | ||
| 713 | pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n", | ||
| 714 | ca->dvbdev->adapter->num); | ||
| 715 | status = -EIO; | ||
| 716 | goto exit; | ||
| 717 | } | ||
| 693 | } | 718 | } |
| 694 | } | ||
| 695 | 719 | ||
| 696 | /* fill the buffer */ | 720 | /* fill the buffer */ |
| 697 | for (i = 0; i < bytes_read; i++) { | 721 | for (i = 0; i < bytes_read; i++) { |
| 698 | /* read byte and check */ | 722 | /* read byte and check */ |
| 699 | if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_DATA)) < 0) | 723 | status = ca->pub->read_cam_control(ca->pub, slot, |
| 700 | goto exit; | 724 | CTRLIF_DATA); |
| 725 | if (status < 0) | ||
| 726 | goto exit; | ||
| 701 | 727 | ||
| 702 | /* OK, store it in the buffer */ | 728 | /* OK, store it in the buffer */ |
| 703 | buf[i] = status; | 729 | buf[i] = status; |
| 704 | } | 730 | } |
| 705 | 731 | ||
| 706 | /* check for read error (RE should now be 0) */ | 732 | /* check for read error (RE should now be 0) */ |
| 707 | if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) | 733 | status = ca->pub->read_cam_control(ca->pub, slot, |
| 708 | goto exit; | 734 | CTRLIF_STATUS); |
| 709 | if (status & STATUSREG_RE) { | 735 | if (status < 0) |
| 710 | ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; | 736 | goto exit; |
| 711 | status = -EIO; | 737 | if (status & STATUSREG_RE) { |
| 712 | goto exit; | 738 | ca->slot_info[slot].slot_state = |
| 739 | DVB_CA_SLOTSTATE_LINKINIT; | ||
| 740 | status = -EIO; | ||
| 741 | goto exit; | ||
| 742 | } | ||
| 713 | } | 743 | } |
| 714 | 744 | ||
| 715 | /* OK, add it to the receive buffer, or copy into external buffer if supplied */ | 745 | /* OK, add it to the receive buffer, or copy into external buffer if supplied */ |
| @@ -762,6 +792,10 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, | |||
| 762 | if (bytes_write > ca->slot_info[slot].link_buf_size) | 792 | if (bytes_write > ca->slot_info[slot].link_buf_size) |
| 763 | return -EINVAL; | 793 | return -EINVAL; |
| 764 | 794 | ||
| 795 | if (ca->pub->write_data && | ||
| 796 | (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT)) | ||
| 797 | return ca->pub->write_data(ca->pub, slot, buf, bytes_write); | ||
| 798 | |||
| 765 | /* it is possible we are dealing with a single buffer implementation, | 799 | /* it is possible we are dealing with a single buffer implementation, |
| 766 | thus if there is data available for read or if there is even a read | 800 | thus if there is data available for read or if there is even a read |
| 767 | already in progress, we do nothing but awake the kernel thread to | 801 | already in progress, we do nothing but awake the kernel thread to |
| @@ -1176,7 +1210,8 @@ static int dvb_ca_en50221_thread(void *data) | |||
| 1176 | 1210 | ||
| 1177 | pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n", | 1211 | pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n", |
| 1178 | ca->dvbdev->adapter->num); | 1212 | ca->dvbdev->adapter->num); |
| 1179 | ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; | 1213 | ca->slot_info[slot].slot_state = |
| 1214 | DVB_CA_SLOTSTATE_UNINITIALISED; | ||
| 1180 | dvb_ca_en50221_thread_update_delay(ca); | 1215 | dvb_ca_en50221_thread_update_delay(ca); |
| 1181 | break; | 1216 | break; |
| 1182 | } | 1217 | } |
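The reshuffled read path boils down to one decision: use the new block-mode hook when it exists and the slot is past link initialisation, otherwise fall back to the old byte-at-a-time register polling. Condensed from dvb_ca_en50221_read_data() above:

	if (ca->pub->read_data &&
	    ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT) {
		/* block mode: one call transfers the whole packet */
		status = ca->pub->read_data(ca->pub, slot, buf,
					    ebuf ? ecount : sizeof(buf));
		if (status < 0)
			return status;
		bytes_read = status;
		if (status == 0)
			goto exit;	/* nothing pending */
	} else {
		/* byte mode: poll STATUSREG_DA, read the size registers,
		 * then fetch CTRLIF_DATA one byte at a time as before */
	}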
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.h b/drivers/media/dvb-core/dvb_ca_en50221.h index 1e4bbbd34d91..82617bac0875 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.h +++ b/drivers/media/dvb-core/dvb_ca_en50221.h | |||
| @@ -41,6 +41,8 @@ | |||
| 41 | * @write_attribute_mem: function for writing attribute memory on the CAM | 41 | * @write_attribute_mem: function for writing attribute memory on the CAM |
| 42 | * @read_cam_control: function for reading the control interface on the CAM | 42 | * @read_cam_control: function for reading the control interface on the CAM |
| 43 | * @write_cam_control: function for reading the control interface on the CAM | 43 | * @write_cam_control: function for reading the control interface on the CAM |
| 44 | * @read_data: function for reading data (block mode) | ||
| 45 | * @write_data: function for writing data (block mode) | ||
| 44 | * @slot_reset: function to reset the CAM slot | 46 | * @slot_reset: function to reset the CAM slot |
| 45 | * @slot_shutdown: function to shutdown a CAM slot | 47 | * @slot_shutdown: function to shutdown a CAM slot |
| 46 | * @slot_ts_enable: function to enable the Transport Stream on a CAM slot | 48 | * @slot_ts_enable: function to enable the Transport Stream on a CAM slot |
| @@ -66,6 +68,11 @@ struct dvb_ca_en50221 { | |||
| 66 | int (*write_cam_control)(struct dvb_ca_en50221 *ca, | 68 | int (*write_cam_control)(struct dvb_ca_en50221 *ca, |
| 67 | int slot, u8 address, u8 value); | 69 | int slot, u8 address, u8 value); |
| 68 | 70 | ||
| 71 | int (*read_data)(struct dvb_ca_en50221 *ca, | ||
| 72 | int slot, u8 *ebuf, int ecount); | ||
| 73 | int (*write_data)(struct dvb_ca_en50221 *ca, | ||
| 74 | int slot, u8 *ebuf, int ecount); | ||
| 75 | |||
| 69 | int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot); | 76 | int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot); |
| 70 | int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot); | 77 | int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot); |
| 71 | int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot); | 78 | int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot); |
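A bridge driver that can move whole link-layer packets (for instance over USB or a mailbox interface) would populate the new hooks next to the existing register ops; both stay optional, and the core keeps using byte access when they are NULL. Hypothetical skeleton, with every mydrv_* name invented for the example:

	static int mydrv_read_data(struct dvb_ca_en50221 *ca, int slot,
				   u8 *ebuf, int ecount)
	{
		/* return bytes stored in ebuf, 0 if nothing is pending,
		 * or a negative errno */
		return mydrv_hw_read_packet(ca->data, slot, ebuf, ecount);
	}

	static int mydrv_write_data(struct dvb_ca_en50221 *ca, int slot,
				    u8 *ebuf, int ecount)
	{
		return mydrv_hw_write_packet(ca->data, slot, ebuf, ecount);
	}

	static struct dvb_ca_en50221 mydrv_ca = {
		.read_cam_control  = mydrv_read_cam_control,
		.write_cam_control = mydrv_write_cam_control,
		.read_data         = mydrv_read_data,	/* new block-mode hooks */
		.write_data        = mydrv_write_data,
		.slot_reset        = mydrv_slot_reset,
		.slot_shutdown     = mydrv_slot_shutdown,
		.slot_ts_enable    = mydrv_slot_ts_enable,
	};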
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c index 08f67d60a7d9..12bff778c97f 100644 --- a/drivers/media/dvb-frontends/cxd2841er.c +++ b/drivers/media/dvb-frontends/cxd2841er.c | |||
| @@ -3279,7 +3279,10 @@ static int cxd2841er_get_frontend(struct dvb_frontend *fe, | |||
| 3279 | else if (priv->state == STATE_ACTIVE_TC) | 3279 | else if (priv->state == STATE_ACTIVE_TC) |
| 3280 | cxd2841er_read_status_tc(fe, &status); | 3280 | cxd2841er_read_status_tc(fe, &status); |
| 3281 | 3281 | ||
| 3282 | cxd2841er_read_signal_strength(fe); | 3282 | if (priv->state == STATE_ACTIVE_TC || priv->state == STATE_ACTIVE_S) |
| 3283 | cxd2841er_read_signal_strength(fe); | ||
| 3284 | else | ||
| 3285 | p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; | ||
| 3283 | 3286 | ||
| 3284 | if (status & FE_HAS_LOCK) { | 3287 | if (status & FE_HAS_LOCK) { |
| 3285 | cxd2841er_read_snr(fe); | 3288 | cxd2841er_read_snr(fe); |
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h index 4442e478db72..cd69e187ba7a 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h +++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h | |||
| @@ -307,7 +307,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner, | |||
| 307 | * \def DRX_UNKNOWN | 307 | * \def DRX_UNKNOWN |
| 308 | * \brief Generic UNKNOWN value for DRX enumerated types. | 308 | * \brief Generic UNKNOWN value for DRX enumerated types. |
| 309 | * | 309 | * |
| 310 | * Used to indicate that the parameter value is unknown or not yet initalized. | 310 | * Used to indicate that the parameter value is unknown or not yet initialized. |
| 311 | */ | 311 | */ |
| 312 | #ifndef DRX_UNKNOWN | 312 | #ifndef DRX_UNKNOWN |
| 313 | #define DRX_UNKNOWN (254) | 313 | #define DRX_UNKNOWN (254) |
| @@ -450,19 +450,6 @@ MACROS | |||
| 450 | ((u8)((((u16)x)>>8)&0xFF)) | 450 | ((u8)((((u16)x)>>8)&0xFF)) |
| 451 | 451 | ||
| 452 | /** | 452 | /** |
| 453 | * \brief Macro to sign extend signed 9 bit value to signed 16 bit value | ||
| 454 | */ | ||
| 455 | #define DRX_S9TOS16(x) ((((u16)x)&0x100) ? ((s16)((u16)(x)|0xFF00)) : (x)) | ||
| 456 | |||
| 457 | /** | ||
| 458 | * \brief Macro to sign extend signed 9 bit value to signed 16 bit value | ||
| 459 | */ | ||
| 460 | #define DRX_S24TODRXFREQ(x) ((((u32) x) & 0x00800000UL) ? \ | ||
| 461 | ((s32) \ | ||
| 462 | (((u32) x) | 0xFF000000)) : \ | ||
| 463 | ((s32) x)) | ||
| 464 | |||
| 465 | /** | ||
| 466 | * \brief Macro to convert 16 bit register value to a s32 | 453 | * \brief Macro to convert 16 bit register value to a s32 |
| 467 | */ | 454 | */ |
| 468 | #define DRX_U16TODRXFREQ(x) ((x & 0x8000) ? \ | 455 | #define DRX_U16TODRXFREQ(x) ((x & 0x8000) ? \ |
diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c index ef3021e964be..cb486e879fdd 100644 --- a/drivers/media/dvb-frontends/lnbh25.c +++ b/drivers/media/dvb-frontends/lnbh25.c | |||
| @@ -76,8 +76,8 @@ static int lnbh25_read_vmon(struct lnbh25_priv *priv) | |||
| 76 | return ret; | 76 | return ret; |
| 77 | } | 77 | } |
| 78 | } | 78 | } |
| 79 | print_hex_dump_bytes("lnbh25_read_vmon: ", | 79 | dev_dbg(&priv->i2c->dev, "%s(): %*ph\n", |
| 80 | DUMP_PREFIX_OFFSET, status, sizeof(status)); | 80 | __func__, (int) sizeof(status), status); |
| 81 | if ((status[0] & (LNBH25_STATUS_OFL | LNBH25_STATUS_VMON)) != 0) { | 81 | if ((status[0] & (LNBH25_STATUS_OFL | LNBH25_STATUS_VMON)) != 0) { |
| 82 | dev_err(&priv->i2c->dev, | 82 | dev_err(&priv->i2c->dev, |
| 83 | "%s(): voltage in failure state, status reg 0x%x\n", | 83 | "%s(): voltage in failure state, status reg 0x%x\n", |
| @@ -178,7 +178,7 @@ struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe, | |||
| 178 | fe->ops.release_sec = lnbh25_release; | 178 | fe->ops.release_sec = lnbh25_release; |
| 179 | fe->ops.set_voltage = lnbh25_set_voltage; | 179 | fe->ops.set_voltage = lnbh25_set_voltage; |
| 180 | 180 | ||
| 181 | dev_err(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n", | 181 | dev_info(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n", |
| 182 | __func__, priv->i2c_address); | 182 | __func__, priv->i2c_address); |
| 183 | return fe; | 183 | return fe; |
| 184 | } | 184 | } |
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c index e726c2e00460..8ac0f598978d 100644 --- a/drivers/media/dvb-frontends/stv0367.c +++ b/drivers/media/dvb-frontends/stv0367.c | |||
| @@ -25,6 +25,8 @@ | |||
| 25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 26 | #include <linux/i2c.h> | 26 | #include <linux/i2c.h> |
| 27 | 27 | ||
| 28 | #include "dvb_math.h" | ||
| 29 | |||
| 28 | #include "stv0367.h" | 30 | #include "stv0367.h" |
| 29 | #include "stv0367_defs.h" | 31 | #include "stv0367_defs.h" |
| 30 | #include "stv0367_regs.h" | 32 | #include "stv0367_regs.h" |
| @@ -1437,7 +1439,7 @@ static int stv0367ter_get_frontend(struct dvb_frontend *fe, | |||
| 1437 | return 0; | 1439 | return 0; |
| 1438 | } | 1440 | } |
| 1439 | 1441 | ||
| 1440 | static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr) | 1442 | static u32 stv0367ter_snr_readreg(struct dvb_frontend *fe) |
| 1441 | { | 1443 | { |
| 1442 | struct stv0367_state *state = fe->demodulator_priv; | 1444 | struct stv0367_state *state = fe->demodulator_priv; |
| 1443 | u32 snru32 = 0; | 1445 | u32 snru32 = 0; |
| @@ -1453,10 +1455,16 @@ static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr) | |||
| 1453 | 1455 | ||
| 1454 | cpt++; | 1456 | cpt++; |
| 1455 | } | 1457 | } |
| 1456 | |||
| 1457 | snru32 /= 10;/*average on 10 values*/ | 1458 | snru32 /= 10;/*average on 10 values*/ |
| 1458 | 1459 | ||
| 1459 | *snr = snru32 / 1000; | 1460 | return snru32; |
| 1461 | } | ||
| 1462 | |||
| 1463 | static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr) | ||
| 1464 | { | ||
| 1465 | u32 snrval = stv0367ter_snr_readreg(fe); | ||
| 1466 | |||
| 1467 | *snr = snrval / 1000; | ||
| 1460 | 1468 | ||
| 1461 | return 0; | 1469 | return 0; |
| 1462 | } | 1470 | } |
| @@ -1501,7 +1509,8 @@ static int stv0367ter_read_status(struct dvb_frontend *fe, | |||
| 1501 | *status = 0; | 1509 | *status = 0; |
| 1502 | 1510 | ||
| 1503 | if (stv0367_readbits(state, F367TER_LK)) { | 1511 | if (stv0367_readbits(state, F367TER_LK)) { |
| 1504 | *status |= FE_HAS_LOCK; | 1512 | *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
| 1513 | | FE_HAS_SYNC | FE_HAS_LOCK; | ||
| 1505 | dprintk("%s: stv0367 has locked\n", __func__); | 1514 | dprintk("%s: stv0367 has locked\n", __func__); |
| 1506 | } | 1515 | } |
| 1507 | 1516 | ||
| @@ -2149,6 +2158,18 @@ static int stv0367cab_read_status(struct dvb_frontend *fe, | |||
| 2149 | 2158 | ||
| 2150 | *status = 0; | 2159 | *status = 0; |
| 2151 | 2160 | ||
| 2161 | if (state->cab_state->state > FE_CAB_NOSIGNAL) | ||
| 2162 | *status |= FE_HAS_SIGNAL; | ||
| 2163 | |||
| 2164 | if (state->cab_state->state > FE_CAB_NOCARRIER) | ||
| 2165 | *status |= FE_HAS_CARRIER; | ||
| 2166 | |||
| 2167 | if (state->cab_state->state >= FE_CAB_DEMODOK) | ||
| 2168 | *status |= FE_HAS_VITERBI; | ||
| 2169 | |||
| 2170 | if (state->cab_state->state >= FE_CAB_DATAOK) | ||
| 2171 | *status |= FE_HAS_SYNC; | ||
| 2172 | |||
| 2152 | if (stv0367_readbits(state, (state->cab_state->qamfec_status_reg ? | 2173 | if (stv0367_readbits(state, (state->cab_state->qamfec_status_reg ? |
| 2153 | state->cab_state->qamfec_status_reg : F367CAB_QAMFEC_LOCK))) { | 2174 | state->cab_state->qamfec_status_reg : F367CAB_QAMFEC_LOCK))) { |
| 2154 | *status |= FE_HAS_LOCK; | 2175 | *status |= FE_HAS_LOCK; |
| @@ -2702,51 +2723,61 @@ static int stv0367cab_read_strength(struct dvb_frontend *fe, u16 *strength) | |||
| 2702 | return 0; | 2723 | return 0; |
| 2703 | } | 2724 | } |
| 2704 | 2725 | ||
| 2705 | static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr) | 2726 | static int stv0367cab_snr_power(struct dvb_frontend *fe) |
| 2706 | { | 2727 | { |
| 2707 | struct stv0367_state *state = fe->demodulator_priv; | 2728 | struct stv0367_state *state = fe->demodulator_priv; |
| 2708 | u32 noisepercentage; | ||
| 2709 | enum stv0367cab_mod QAMSize; | 2729 | enum stv0367cab_mod QAMSize; |
| 2710 | u32 regval = 0, temp = 0; | ||
| 2711 | int power, i; | ||
| 2712 | 2730 | ||
| 2713 | QAMSize = stv0367_readbits(state, F367CAB_QAM_MODE); | 2731 | QAMSize = stv0367_readbits(state, F367CAB_QAM_MODE); |
| 2714 | switch (QAMSize) { | 2732 | switch (QAMSize) { |
| 2715 | case FE_CAB_MOD_QAM4: | 2733 | case FE_CAB_MOD_QAM4: |
| 2716 | power = 21904; | 2734 | return 21904; |
| 2717 | break; | ||
| 2718 | case FE_CAB_MOD_QAM16: | 2735 | case FE_CAB_MOD_QAM16: |
| 2719 | power = 20480; | 2736 | return 20480; |
| 2720 | break; | ||
| 2721 | case FE_CAB_MOD_QAM32: | 2737 | case FE_CAB_MOD_QAM32: |
| 2722 | power = 23040; | 2738 | return 23040; |
| 2723 | break; | ||
| 2724 | case FE_CAB_MOD_QAM64: | 2739 | case FE_CAB_MOD_QAM64: |
| 2725 | power = 21504; | 2740 | return 21504; |
| 2726 | break; | ||
| 2727 | case FE_CAB_MOD_QAM128: | 2741 | case FE_CAB_MOD_QAM128: |
| 2728 | power = 23616; | 2742 | return 23616; |
| 2729 | break; | ||
| 2730 | case FE_CAB_MOD_QAM256: | 2743 | case FE_CAB_MOD_QAM256: |
| 2731 | power = 21760; | 2744 | return 21760; |
| 2732 | break; | ||
| 2733 | case FE_CAB_MOD_QAM512: | ||
| 2734 | power = 1; | ||
| 2735 | break; | ||
| 2736 | case FE_CAB_MOD_QAM1024: | 2745 | case FE_CAB_MOD_QAM1024: |
| 2737 | power = 21280; | 2746 | return 21280; |
| 2738 | break; | ||
| 2739 | default: | 2747 | default: |
| 2740 | power = 1; | ||
| 2741 | break; | 2748 | break; |
| 2742 | } | 2749 | } |
| 2743 | 2750 | ||
| 2751 | return 1; | ||
| 2752 | } | ||
| 2753 | |||
| 2754 | static int stv0367cab_snr_readreg(struct dvb_frontend *fe, int avgdiv) | ||
| 2755 | { | ||
| 2756 | struct stv0367_state *state = fe->demodulator_priv; | ||
| 2757 | u32 regval = 0; | ||
| 2758 | int i; | ||
| 2759 | |||
| 2744 | for (i = 0; i < 10; i++) { | 2760 | for (i = 0; i < 10; i++) { |
| 2745 | regval += (stv0367_readbits(state, F367CAB_SNR_LO) | 2761 | regval += (stv0367_readbits(state, F367CAB_SNR_LO) |
| 2746 | + 256 * stv0367_readbits(state, F367CAB_SNR_HI)); | 2762 | + 256 * stv0367_readbits(state, F367CAB_SNR_HI)); |
| 2747 | } | 2763 | } |
| 2748 | 2764 | ||
| 2749 | regval /= 10; /*for average over 10 times in for loop above*/ | 2765 | if (avgdiv) |
| 2766 | regval /= 10; | ||
| 2767 | |||
| 2768 | return regval; | ||
| 2769 | } | ||
| 2770 | |||
| 2771 | static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr) | ||
| 2772 | { | ||
| 2773 | struct stv0367_state *state = fe->demodulator_priv; | ||
| 2774 | u32 noisepercentage; | ||
| 2775 | u32 regval = 0, temp = 0; | ||
| 2776 | int power; | ||
| 2777 | |||
| 2778 | power = stv0367cab_snr_power(fe); | ||
| 2779 | regval = stv0367cab_snr_readreg(fe, 1); | ||
| 2780 | |||
| 2750 | if (regval != 0) { | 2781 | if (regval != 0) { |
| 2751 | temp = power | 2782 | temp = power |
| 2752 | * (1 << (3 + stv0367_readbits(state, F367CAB_SNR_PER))); | 2783 | * (1 << (3 + stv0367_readbits(state, F367CAB_SNR_PER))); |
| @@ -2980,21 +3011,117 @@ static int stv0367ddb_set_frontend(struct dvb_frontend *fe) | |||
| 2980 | return -EINVAL; | 3011 | return -EINVAL; |
| 2981 | } | 3012 | } |
| 2982 | 3013 | ||
| 3014 | static void stv0367ddb_read_signal_strength(struct dvb_frontend *fe) | ||
| 3015 | { | ||
| 3016 | struct stv0367_state *state = fe->demodulator_priv; | ||
| 3017 | struct dtv_frontend_properties *p = &fe->dtv_property_cache; | ||
| 3018 | s32 signalstrength; | ||
| 3019 | |||
| 3020 | switch (state->activedemod) { | ||
| 3021 | case demod_cab: | ||
| 3022 | signalstrength = stv0367cab_get_rf_lvl(state) * 1000; | ||
| 3023 | break; | ||
| 3024 | default: | ||
| 3025 | p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; | ||
| 3026 | return; | ||
| 3027 | } | ||
| 3028 | |||
| 3029 | p->strength.stat[0].scale = FE_SCALE_DECIBEL; | ||
| 3030 | p->strength.stat[0].uvalue = signalstrength; | ||
| 3031 | } | ||
| 3032 | |||
| 3033 | static void stv0367ddb_read_snr(struct dvb_frontend *fe) | ||
| 3034 | { | ||
| 3035 | struct stv0367_state *state = fe->demodulator_priv; | ||
| 3036 | struct dtv_frontend_properties *p = &fe->dtv_property_cache; | ||
| 3037 | int cab_pwr; | ||
| 3038 | u32 regval, tmpval, snrval = 0; | ||
| 3039 | |||
| 3040 | switch (state->activedemod) { | ||
| 3041 | case demod_ter: | ||
| 3042 | snrval = stv0367ter_snr_readreg(fe); | ||
| 3043 | break; | ||
| 3044 | case demod_cab: | ||
| 3045 | cab_pwr = stv0367cab_snr_power(fe); | ||
| 3046 | regval = stv0367cab_snr_readreg(fe, 0); | ||
| 3047 | |||
| 3048 | /* prevent division by zero */ | ||
| 3049 | if (!regval) { | ||
| 3050 | snrval = 0; | ||
| 3051 | break; | ||
| 3052 | } | ||
| 3053 | |||
| 3054 | tmpval = (cab_pwr * 320) / regval; | ||
| 3055 | snrval = ((tmpval != 0) ? (intlog2(tmpval) / 5581) : 0); | ||
| 3056 | break; | ||
| 3057 | default: | ||
| 3058 | p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; | ||
| 3059 | return; | ||
| 3060 | } | ||
| 3061 | |||
| 3062 | p->cnr.stat[0].scale = FE_SCALE_DECIBEL; | ||
| 3063 | p->cnr.stat[0].uvalue = snrval; | ||
| 3064 | } | ||
| 3065 | |||
| 3066 | static void stv0367ddb_read_ucblocks(struct dvb_frontend *fe) | ||
| 3067 | { | ||
| 3068 | struct stv0367_state *state = fe->demodulator_priv; | ||
| 3069 | struct dtv_frontend_properties *p = &fe->dtv_property_cache; | ||
| 3070 | u32 ucblocks = 0; | ||
| 3071 | |||
| 3072 | switch (state->activedemod) { | ||
| 3073 | case demod_ter: | ||
| 3074 | stv0367ter_read_ucblocks(fe, &ucblocks); | ||
| 3075 | break; | ||
| 3076 | case demod_cab: | ||
| 3077 | stv0367cab_read_ucblcks(fe, &ucblocks); | ||
| 3078 | break; | ||
| 3079 | default: | ||
| 3080 | p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; | ||
| 3081 | return; | ||
| 3082 | } | ||
| 3083 | |||
| 3084 | p->block_error.stat[0].scale = FE_SCALE_COUNTER; | ||
| 3085 | p->block_error.stat[0].uvalue = ucblocks; | ||
| 3086 | } | ||
| 3087 | |||
| 2983 | static int stv0367ddb_read_status(struct dvb_frontend *fe, | 3088 | static int stv0367ddb_read_status(struct dvb_frontend *fe, |
| 2984 | enum fe_status *status) | 3089 | enum fe_status *status) |
| 2985 | { | 3090 | { |
| 2986 | struct stv0367_state *state = fe->demodulator_priv; | 3091 | struct stv0367_state *state = fe->demodulator_priv; |
| 3092 | struct dtv_frontend_properties *p = &fe->dtv_property_cache; | ||
| 3093 | int ret; | ||
| 2987 | 3094 | ||
| 2988 | switch (state->activedemod) { | 3095 | switch (state->activedemod) { |
| 2989 | case demod_ter: | 3096 | case demod_ter: |
| 2990 | return stv0367ter_read_status(fe, status); | 3097 | ret = stv0367ter_read_status(fe, status); |
| 3098 | break; | ||
| 2991 | case demod_cab: | 3099 | case demod_cab: |
| 2992 | return stv0367cab_read_status(fe, status); | 3100 | ret = stv0367cab_read_status(fe, status); |
| 2993 | default: | ||
| 2994 | break; | 3101 | break; |
| 3102 | default: | ||
| 3103 | return 0; | ||
| 2995 | } | 3104 | } |
| 2996 | 3105 | ||
| 2997 | return -EINVAL; | 3106 | /* stop and report on *_read_status failure */ |
| 3107 | if (ret) | ||
| 3108 | return ret; | ||
| 3109 | |||
| 3110 | stv0367ddb_read_signal_strength(fe); | ||
| 3111 | |||
| 3112 | /* read carrier/noise when a carrier is detected */ | ||
| 3113 | if (*status & FE_HAS_CARRIER) | ||
| 3114 | stv0367ddb_read_snr(fe); | ||
| 3115 | else | ||
| 3116 | p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; | ||
| 3117 | |||
| 3118 | /* read uncorrected blocks on FE_HAS_LOCK */ | ||
| 3119 | if (*status & FE_HAS_LOCK) | ||
| 3120 | stv0367ddb_read_ucblocks(fe); | ||
| 3121 | else | ||
| 3122 | p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; | ||
| 3123 | |||
| 3124 | return 0; | ||
| 2998 | } | 3125 | } |
| 2999 | 3126 | ||
| 3000 | static int stv0367ddb_get_frontend(struct dvb_frontend *fe, | 3127 | static int stv0367ddb_get_frontend(struct dvb_frontend *fe, |
| @@ -3035,6 +3162,7 @@ static int stv0367ddb_sleep(struct dvb_frontend *fe) | |||
| 3035 | static int stv0367ddb_init(struct stv0367_state *state) | 3162 | static int stv0367ddb_init(struct stv0367_state *state) |
| 3036 | { | 3163 | { |
| 3037 | struct stv0367ter_state *ter_state = state->ter_state; | 3164 | struct stv0367ter_state *ter_state = state->ter_state; |
| 3165 | struct dtv_frontend_properties *p = &state->fe.dtv_property_cache; | ||
| 3038 | 3166 | ||
| 3039 | stv0367_writereg(state, R367TER_TOPCTRL, 0x10); | 3167 | stv0367_writereg(state, R367TER_TOPCTRL, 0x10); |
| 3040 | 3168 | ||
| @@ -3109,6 +3237,13 @@ static int stv0367ddb_init(struct stv0367_state *state) | |||
| 3109 | ter_state->first_lock = 0; | 3237 | ter_state->first_lock = 0; |
| 3110 | ter_state->unlock_counter = 2; | 3238 | ter_state->unlock_counter = 2; |
| 3111 | 3239 | ||
| 3240 | p->strength.len = 1; | ||
| 3241 | p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE; | ||
| 3242 | p->cnr.len = 1; | ||
| 3243 | p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; | ||
| 3244 | p->block_error.len = 1; | ||
| 3245 | p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; | ||
| 3246 | |||
| 3112 | return 0; | 3247 | return 0; |
| 3113 | } | 3248 | } |
| 3114 | 3249 | ||
| @@ -3126,15 +3261,12 @@ static const struct dvb_frontend_ops stv0367ddb_ops = { | |||
| 3126 | 0x400 |/* FE_CAN_QAM_4 */ | 3261 | 0x400 |/* FE_CAN_QAM_4 */ |
| 3127 | FE_CAN_QAM_16 | FE_CAN_QAM_32 | | 3262 | FE_CAN_QAM_16 | FE_CAN_QAM_32 | |
| 3128 | FE_CAN_QAM_64 | FE_CAN_QAM_128 | | 3263 | FE_CAN_QAM_64 | FE_CAN_QAM_128 | |
| 3129 | FE_CAN_QAM_256 | FE_CAN_FEC_AUTO | | 3264 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO | |
| 3130 | /* DVB-T */ | 3265 | /* DVB-T */ |
| 3131 | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | | 3266 | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | |
| 3132 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | | 3267 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | |
| 3133 | FE_CAN_FEC_AUTO | | 3268 | FE_CAN_QPSK | FE_CAN_TRANSMISSION_MODE_AUTO | |
| 3134 | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | | 3269 | FE_CAN_RECOVER | FE_CAN_INVERSION_AUTO | |
| 3135 | FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO | | ||
| 3136 | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER | | ||
| 3137 | FE_CAN_INVERSION_AUTO | | ||
| 3138 | FE_CAN_MUTE_TS | 3270 | FE_CAN_MUTE_TS |
| 3139 | }, | 3271 | }, |
| 3140 | .release = stv0367_release, | 3272 | .release = stv0367_release, |
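The ddb variant now fills DVBv5 statistics: every property starts as FE_SCALE_NOT_AVAILABLE in init and is only promoted in read_status() once the matching lock bit is seen, so readers must check the scale before trusting a value. A small illustrative helper in the same pattern (fill_cnr() is not from the patch):

	static void fill_cnr(struct dtv_frontend_properties *p,
			     bool has_carrier, u64 cnr)
	{
		if (!has_carrier) {
			p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
			return;
		}
		p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
		p->cnr.stat[0].uvalue = cnr;	/* 0.001 dB steps */
	}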
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c index 6e313d5243a0..f39f5179dd95 100644 --- a/drivers/media/i2c/et8ek8/et8ek8_driver.c +++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c | |||
| @@ -1496,7 +1496,6 @@ MODULE_DEVICE_TABLE(i2c, et8ek8_id_table); | |||
| 1496 | static const struct dev_pm_ops et8ek8_pm_ops = { | 1496 | static const struct dev_pm_ops et8ek8_pm_ops = { |
| 1497 | SET_SYSTEM_SLEEP_PM_OPS(et8ek8_suspend, et8ek8_resume) | 1497 | SET_SYSTEM_SLEEP_PM_OPS(et8ek8_suspend, et8ek8_resume) |
| 1498 | }; | 1498 | }; |
| 1499 | MODULE_DEVICE_TABLE(of, et8ek8_of_table); | ||
| 1500 | 1499 | ||
| 1501 | static struct i2c_driver et8ek8_i2c_driver = { | 1500 | static struct i2c_driver et8ek8_i2c_driver = { |
| 1502 | .driver = { | 1501 | .driver = { |
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 9da4bf4f2c7a..7b79a7498751 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c | |||
| @@ -659,7 +659,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, | |||
| 659 | struct tvp5150 *decoder = to_tvp5150(sd); | 659 | struct tvp5150 *decoder = to_tvp5150(sd); |
| 660 | v4l2_std_id std = decoder->norm; | 660 | v4l2_std_id std = decoder->norm; |
| 661 | u8 reg; | 661 | u8 reg; |
| 662 | int pos=0; | 662 | int pos = 0; |
| 663 | 663 | ||
| 664 | if (std == V4L2_STD_ALL) { | 664 | if (std == V4L2_STD_ALL) { |
| 665 | dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); | 665 | dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); |
| @@ -669,33 +669,30 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, | |||
| 669 | line += 3; | 669 | line += 3; |
| 670 | } | 670 | } |
| 671 | 671 | ||
| 672 | if (line<6||line>27) | 672 | if (line < 6 || line > 27) |
| 673 | return 0; | 673 | return 0; |
| 674 | 674 | ||
| 675 | while (regs->reg != (u16)-1 ) { | 675 | while (regs->reg != (u16)-1) { |
| 676 | if ((type & regs->type.vbi_type) && | 676 | if ((type & regs->type.vbi_type) && |
| 677 | (line>=regs->type.ini_line) && | 677 | (line >= regs->type.ini_line) && |
| 678 | (line<=regs->type.end_line)) { | 678 | (line <= regs->type.end_line)) |
| 679 | type=regs->type.vbi_type; | ||
| 680 | break; | 679 | break; |
| 681 | } | ||
| 682 | 680 | ||
| 683 | regs++; | 681 | regs++; |
| 684 | pos++; | 682 | pos++; |
| 685 | } | 683 | } |
| 684 | |||
| 686 | if (regs->reg == (u16)-1) | 685 | if (regs->reg == (u16)-1) |
| 687 | return 0; | 686 | return 0; |
| 688 | 687 | ||
| 689 | type=pos | (flags & 0xf0); | 688 | type = pos | (flags & 0xf0); |
| 690 | reg=((line-6)<<1)+TVP5150_LINE_MODE_INI; | 689 | reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI; |
| 691 | 690 | ||
| 692 | if (fields&1) { | 691 | if (fields & 1) |
| 693 | tvp5150_write(sd, reg, type); | 692 | tvp5150_write(sd, reg, type); |
| 694 | } | ||
| 695 | 693 | ||
| 696 | if (fields&2) { | 694 | if (fields & 2) |
| 697 | tvp5150_write(sd, reg+1, type); | 695 | tvp5150_write(sd, reg + 1, type); |
| 698 | } | ||
| 699 | 696 | ||
| 700 | return type; | 697 | return type; |
| 701 | } | 698 | } |
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c index 9420479bee9a..cd1723e79a07 100644 --- a/drivers/media/pci/ddbridge/ddbridge-core.c +++ b/drivers/media/pci/ddbridge/ddbridge-core.c | |||
| @@ -17,6 +17,8 @@ | |||
| 17 | * http://www.gnu.org/copyleft/gpl.html | 17 | * http://www.gnu.org/copyleft/gpl.html |
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 21 | |||
| 20 | #include <linux/module.h> | 22 | #include <linux/module.h> |
| 21 | #include <linux/init.h> | 23 | #include <linux/init.h> |
| 22 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
| @@ -114,6 +116,19 @@ static int i2c_write_reg(struct i2c_adapter *adap, u8 adr, | |||
| 114 | return i2c_write(adap, adr, msg, 2); | 116 | return i2c_write(adap, adr, msg, 2); |
| 115 | } | 117 | } |
| 116 | 118 | ||
| 119 | static inline u32 safe_ddbreadl(struct ddb *dev, u32 adr) | ||
| 120 | { | ||
| 121 | u32 val = ddbreadl(adr); | ||
| 122 | |||
| 123 | /* (ddb)readl returns (uint)-1 (all bits set) on failure, catch that */ | ||
| 124 | if (val == ~0) { | ||
| 125 | dev_err(&dev->pdev->dev, "ddbreadl failure, adr=%08x\n", adr); | ||
| 126 | return 0; | ||
| 127 | } | ||
| 128 | |||
| 129 | return val; | ||
| 130 | } | ||
| 131 | |||
| 117 | static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd) | 132 | static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd) |
| 118 | { | 133 | { |
| 119 | struct ddb *dev = i2c->dev; | 134 | struct ddb *dev = i2c->dev; |
| @@ -124,10 +139,10 @@ static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd) | |||
| 124 | ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND); | 139 | ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND); |
| 125 | stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ); | 140 | stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ); |
| 126 | if (stat == 0) { | 141 | if (stat == 0) { |
| 127 | printk(KERN_ERR "I2C timeout\n"); | 142 | dev_err(&dev->pdev->dev, "I2C timeout\n"); |
| 128 | { /* MSI debugging*/ | 143 | { /* MSI debugging*/ |
| 129 | u32 istat = ddbreadl(INTERRUPT_STATUS); | 144 | u32 istat = ddbreadl(INTERRUPT_STATUS); |
| 130 | printk(KERN_ERR "IRS %08x\n", istat); | 145 | dev_err(&dev->pdev->dev, "IRS %08x\n", istat); |
| 131 | ddbwritel(istat, INTERRUPT_ACK); | 146 | ddbwritel(istat, INTERRUPT_ACK); |
| 132 | } | 147 | } |
| 133 | return -EIO; | 148 | return -EIO; |
| @@ -533,7 +548,7 @@ static u32 ddb_input_avail(struct ddb_input *input) | |||
| 533 | off = (stat & 0x7ff) << 7; | 548 | off = (stat & 0x7ff) << 7; |
| 534 | 549 | ||
| 535 | if (ctrl & 4) { | 550 | if (ctrl & 4) { |
| 536 | printk(KERN_ERR "IA %d %d %08x\n", idx, off, ctrl); | 551 | dev_err(&dev->pdev->dev, "IA %d %d %08x\n", idx, off, ctrl); |
| 537 | ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr)); | 552 | ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr)); |
| 538 | return 0; | 553 | return 0; |
| 539 | } | 554 | } |
| @@ -611,6 +626,7 @@ static int demod_attach_drxk(struct ddb_input *input) | |||
| 611 | struct i2c_adapter *i2c = &input->port->i2c->adap; | 626 | struct i2c_adapter *i2c = &input->port->i2c->adap; |
| 612 | struct dvb_frontend *fe; | 627 | struct dvb_frontend *fe; |
| 613 | struct drxk_config config; | 628 | struct drxk_config config; |
| 629 | struct device *dev = &input->port->dev->pdev->dev; | ||
| 614 | 630 | ||
| 615 | memset(&config, 0, sizeof(config)); | 631 | memset(&config, 0, sizeof(config)); |
| 616 | config.microcode_name = "drxk_a3.mc"; | 632 | config.microcode_name = "drxk_a3.mc"; |
| @@ -619,7 +635,7 @@ static int demod_attach_drxk(struct ddb_input *input) | |||
| 619 | 635 | ||
| 620 | fe = input->fe = dvb_attach(drxk_attach, &config, i2c); | 636 | fe = input->fe = dvb_attach(drxk_attach, &config, i2c); |
| 621 | if (!input->fe) { | 637 | if (!input->fe) { |
| 622 | printk(KERN_ERR "No DRXK found!\n"); | 638 | dev_err(dev, "No DRXK found!\n"); |
| 623 | return -ENODEV; | 639 | return -ENODEV; |
| 624 | } | 640 | } |
| 625 | fe->sec_priv = input; | 641 | fe->sec_priv = input; |
| @@ -632,12 +648,13 @@ static int tuner_attach_tda18271(struct ddb_input *input) | |||
| 632 | { | 648 | { |
| 633 | struct i2c_adapter *i2c = &input->port->i2c->adap; | 649 | struct i2c_adapter *i2c = &input->port->i2c->adap; |
| 634 | struct dvb_frontend *fe; | 650 | struct dvb_frontend *fe; |
| 651 | struct device *dev = &input->port->dev->pdev->dev; | ||
| 635 | 652 | ||
| 636 | if (input->fe->ops.i2c_gate_ctrl) | 653 | if (input->fe->ops.i2c_gate_ctrl) |
| 637 | input->fe->ops.i2c_gate_ctrl(input->fe, 1); | 654 | input->fe->ops.i2c_gate_ctrl(input->fe, 1); |
| 638 | fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60); | 655 | fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60); |
| 639 | if (!fe) { | 656 | if (!fe) { |
| 640 | printk(KERN_ERR "No TDA18271 found!\n"); | 657 | dev_err(dev, "No TDA18271 found!\n"); |
| 641 | return -ENODEV; | 658 | return -ENODEV; |
| 642 | } | 659 | } |
| 643 | if (input->fe->ops.i2c_gate_ctrl) | 660 | if (input->fe->ops.i2c_gate_ctrl) |
| @@ -670,13 +687,14 @@ static struct stv0367_config ddb_stv0367_config[] = { | |||
| 670 | static int demod_attach_stv0367(struct ddb_input *input) | 687 | static int demod_attach_stv0367(struct ddb_input *input) |
| 671 | { | 688 | { |
| 672 | struct i2c_adapter *i2c = &input->port->i2c->adap; | 689 | struct i2c_adapter *i2c = &input->port->i2c->adap; |
| 690 | struct device *dev = &input->port->dev->pdev->dev; | ||
| 673 | 691 | ||
| 674 | /* attach frontend */ | 692 | /* attach frontend */ |
| 675 | input->fe = dvb_attach(stv0367ddb_attach, | 693 | input->fe = dvb_attach(stv0367ddb_attach, |
| 676 | &ddb_stv0367_config[(input->nr & 1)], i2c); | 694 | &ddb_stv0367_config[(input->nr & 1)], i2c); |
| 677 | 695 | ||
| 678 | if (!input->fe) { | 696 | if (!input->fe) { |
| 679 | printk(KERN_ERR "stv0367ddb_attach failed (not found?)\n"); | 697 | dev_err(dev, "stv0367ddb_attach failed (not found?)\n"); |
| 680 | return -ENODEV; | 698 | return -ENODEV; |
| 681 | } | 699 | } |
| 682 | 700 | ||
| @@ -690,17 +708,19 @@ static int demod_attach_stv0367(struct ddb_input *input) | |||
| 690 | static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr) | 708 | static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr) |
| 691 | { | 709 | { |
| 692 | struct i2c_adapter *adapter = &input->port->i2c->adap; | 710 | struct i2c_adapter *adapter = &input->port->i2c->adap; |
| 711 | struct device *dev = &input->port->dev->pdev->dev; | ||
| 712 | |||
| 693 | u8 tda_id[2]; | 713 | u8 tda_id[2]; |
| 694 | u8 subaddr = 0x00; | 714 | u8 subaddr = 0x00; |
| 695 | 715 | ||
| 696 | printk(KERN_DEBUG "stv0367-tda18212 tuner ping\n"); | 716 | dev_dbg(dev, "stv0367-tda18212 tuner ping\n"); |
| 697 | if (input->fe->ops.i2c_gate_ctrl) | 717 | if (input->fe->ops.i2c_gate_ctrl) |
| 698 | input->fe->ops.i2c_gate_ctrl(input->fe, 1); | 718 | input->fe->ops.i2c_gate_ctrl(input->fe, 1); |
| 699 | 719 | ||
| 700 | if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0) | 720 | if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0) |
| 701 | printk(KERN_DEBUG "tda18212 ping 1 fail\n"); | 721 | dev_dbg(dev, "tda18212 ping 1 fail\n"); |
| 702 | if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0) | 722 | if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0) |
| 703 | printk(KERN_DEBUG "tda18212 ping 2 fail\n"); | 723 | dev_warn(dev, "tda18212 ping failed, expect problems\n"); |
| 704 | 724 | ||
| 705 | if (input->fe->ops.i2c_gate_ctrl) | 725 | if (input->fe->ops.i2c_gate_ctrl) |
| 706 | input->fe->ops.i2c_gate_ctrl(input->fe, 0); | 726 | input->fe->ops.i2c_gate_ctrl(input->fe, 0); |
| @@ -711,6 +731,7 @@ static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr) | |||
| 711 | static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) | 731 | static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) |
| 712 | { | 732 | { |
| 713 | struct i2c_adapter *i2c = &input->port->i2c->adap; | 733 | struct i2c_adapter *i2c = &input->port->i2c->adap; |
| 734 | struct device *dev = &input->port->dev->pdev->dev; | ||
| 714 | struct cxd2841er_config cfg; | 735 | struct cxd2841er_config cfg; |
| 715 | 736 | ||
| 716 | /* the cxd2841er driver expects 8bit/shifted I2C addresses */ | 737 | /* the cxd2841er driver expects 8bit/shifted I2C addresses */ |
| @@ -728,7 +749,7 @@ static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) | |||
| 728 | input->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c); | 749 | input->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c); |
| 729 | 750 | ||
| 730 | if (!input->fe) { | 751 | if (!input->fe) { |
| 731 | printk(KERN_ERR "No Sony CXD28xx found!\n"); | 752 | dev_err(dev, "No Sony CXD28xx found!\n"); |
| 732 | return -ENODEV; | 753 | return -ENODEV; |
| 733 | } | 754 | } |
| 734 | 755 | ||
| @@ -742,6 +763,7 @@ static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) | |||
| 742 | static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype) | 763 | static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype) |
| 743 | { | 764 | { |
| 744 | struct i2c_adapter *adapter = &input->port->i2c->adap; | 765 | struct i2c_adapter *adapter = &input->port->i2c->adap; |
| 766 | struct device *dev = &input->port->dev->pdev->dev; | ||
| 745 | struct i2c_client *client; | 767 | struct i2c_client *client; |
| 746 | struct tda18212_config config = { | 768 | struct tda18212_config config = { |
| 747 | .fe = input->fe, | 769 | .fe = input->fe, |
| @@ -786,7 +808,7 @@ static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype) | |||
| 786 | 808 | ||
| 787 | return 0; | 809 | return 0; |
| 788 | err: | 810 | err: |
| 789 | printk(KERN_INFO "TDA18212 tuner not found. Device is not fully operational.\n"); | 811 | dev_warn(dev, "TDA18212 tuner not found. Device is not fully operational.\n"); |
| 790 | return -ENODEV; | 812 | return -ENODEV; |
| 791 | } | 813 | } |
| 792 | 814 | ||
| @@ -847,19 +869,20 @@ static struct stv6110x_config stv6110b = { | |||
| 847 | static int demod_attach_stv0900(struct ddb_input *input, int type) | 869 | static int demod_attach_stv0900(struct ddb_input *input, int type) |
| 848 | { | 870 | { |
| 849 | struct i2c_adapter *i2c = &input->port->i2c->adap; | 871 | struct i2c_adapter *i2c = &input->port->i2c->adap; |
| 872 | struct device *dev = &input->port->dev->pdev->dev; | ||
| 850 | struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; | 873 | struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; |
| 851 | 874 | ||
| 852 | input->fe = dvb_attach(stv090x_attach, feconf, i2c, | 875 | input->fe = dvb_attach(stv090x_attach, feconf, i2c, |
| 853 | (input->nr & 1) ? STV090x_DEMODULATOR_1 | 876 | (input->nr & 1) ? STV090x_DEMODULATOR_1 |
| 854 | : STV090x_DEMODULATOR_0); | 877 | : STV090x_DEMODULATOR_0); |
| 855 | if (!input->fe) { | 878 | if (!input->fe) { |
| 856 | printk(KERN_ERR "No STV0900 found!\n"); | 879 | dev_err(dev, "No STV0900 found!\n"); |
| 857 | return -ENODEV; | 880 | return -ENODEV; |
| 858 | } | 881 | } |
| 859 | if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0, | 882 | if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0, |
| 860 | 0, (input->nr & 1) ? | 883 | 0, (input->nr & 1) ? |
| 861 | (0x09 - type) : (0x0b - type))) { | 884 | (0x09 - type) : (0x0b - type))) { |
| 862 | printk(KERN_ERR "No LNBH24 found!\n"); | 885 | dev_err(dev, "No LNBH24 found!\n"); |
| 863 | return -ENODEV; | 886 | return -ENODEV; |
| 864 | } | 887 | } |
| 865 | return 0; | 888 | return 0; |
| @@ -868,6 +891,7 @@ static int demod_attach_stv0900(struct ddb_input *input, int type) | |||
| 868 | static int tuner_attach_stv6110(struct ddb_input *input, int type) | 891 | static int tuner_attach_stv6110(struct ddb_input *input, int type) |
| 869 | { | 892 | { |
| 870 | struct i2c_adapter *i2c = &input->port->i2c->adap; | 893 | struct i2c_adapter *i2c = &input->port->i2c->adap; |
| 894 | struct device *dev = &input->port->dev->pdev->dev; | ||
| 871 | struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; | 895 | struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; |
| 872 | struct stv6110x_config *tunerconf = (input->nr & 1) ? | 896 | struct stv6110x_config *tunerconf = (input->nr & 1) ? |
| 873 | &stv6110b : &stv6110a; | 897 | &stv6110b : &stv6110a; |
| @@ -875,10 +899,10 @@ static int tuner_attach_stv6110(struct ddb_input *input, int type) | |||
| 875 | 899 | ||
| 876 | ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c); | 900 | ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c); |
| 877 | if (!ctl) { | 901 | if (!ctl) { |
| 878 | printk(KERN_ERR "No STV6110X found!\n"); | 902 | dev_err(dev, "No STV6110X found!\n"); |
| 879 | return -ENODEV; | 903 | return -ENODEV; |
| 880 | } | 904 | } |
| 881 | printk(KERN_INFO "attach tuner input %d adr %02x\n", | 905 | dev_info(dev, "attach tuner input %d adr %02x\n", |
| 882 | input->nr, tunerconf->addr); | 906 | input->nr, tunerconf->addr); |
| 883 | 907 | ||
| 884 | feconf->tuner_init = ctl->tuner_init; | 908 | feconf->tuner_init = ctl->tuner_init; |
| @@ -1009,13 +1033,14 @@ static int dvb_input_attach(struct ddb_input *input) | |||
| 1009 | struct ddb_port *port = input->port; | 1033 | struct ddb_port *port = input->port; |
| 1010 | struct dvb_adapter *adap = &input->adap; | 1034 | struct dvb_adapter *adap = &input->adap; |
| 1011 | struct dvb_demux *dvbdemux = &input->demux; | 1035 | struct dvb_demux *dvbdemux = &input->demux; |
| 1036 | struct device *dev = &input->port->dev->pdev->dev; | ||
| 1012 | int sony_osc24 = 0, sony_tspar = 0; | 1037 | int sony_osc24 = 0, sony_tspar = 0; |
| 1013 | 1038 | ||
| 1014 | ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE, | 1039 | ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE, |
| 1015 | &input->port->dev->pdev->dev, | 1040 | &input->port->dev->pdev->dev, |
| 1016 | adapter_nr); | 1041 | adapter_nr); |
| 1017 | if (ret < 0) { | 1042 | if (ret < 0) { |
| 1018 | printk(KERN_ERR "ddbridge: Could not register adapter.Check if you enabled enough adapters in dvb-core!\n"); | 1043 | dev_err(dev, "Could not register adapter. Check if you enabled enough adapters in dvb-core!\n"); |
| 1019 | return ret; | 1044 | return ret; |
| 1020 | } | 1045 | } |
| 1021 | input->attached = 1; | 1046 | input->attached = 1; |
| @@ -1241,9 +1266,9 @@ static void input_tasklet(unsigned long data) | |||
| 1241 | 1266 | ||
| 1242 | if (input->port->class == DDB_PORT_TUNER) { | 1267 | if (input->port->class == DDB_PORT_TUNER) { |
| 1243 | if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr))) | 1268 | if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr))) |
| 1244 | printk(KERN_ERR "Overflow input %d\n", input->nr); | 1269 | dev_err(&dev->pdev->dev, "Overflow input %d\n", input->nr); |
| 1245 | while (input->cbuf != ((input->stat >> 11) & 0x1f) | 1270 | while (input->cbuf != ((input->stat >> 11) & 0x1f) |
| 1246 | || (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))) { | 1271 | || (4 & safe_ddbreadl(dev, DMA_BUFFER_CONTROL(input->nr)))) { |
| 1247 | dvb_dmx_swfilter_packets(&input->demux, | 1272 | dvb_dmx_swfilter_packets(&input->demux, |
| 1248 | input->vbuf[input->cbuf], | 1273 | input->vbuf[input->cbuf], |
| 1249 | input->dma_buf_size / 188); | 1274 | input->dma_buf_size / 188); |
| @@ -1280,6 +1305,7 @@ static struct cxd2099_cfg cxd_cfg = { | |||
| 1280 | .adr = 0x40, | 1305 | .adr = 0x40, |
| 1281 | .polarity = 1, | 1306 | .polarity = 1, |
| 1282 | .clock_mode = 1, | 1307 | .clock_mode = 1, |
| 1308 | .max_i2c = 512, | ||
| 1283 | }; | 1309 | }; |
| 1284 | 1310 | ||
| 1285 | static int ddb_ci_attach(struct ddb_port *port) | 1311 | static int ddb_ci_attach(struct ddb_port *port) |
| @@ -1310,6 +1336,7 @@ static int ddb_ci_attach(struct ddb_port *port) | |||
| 1310 | 1336 | ||
| 1311 | static int ddb_port_attach(struct ddb_port *port) | 1337 | static int ddb_port_attach(struct ddb_port *port) |
| 1312 | { | 1338 | { |
| 1339 | struct device *dev = &port->dev->pdev->dev; | ||
| 1313 | int ret = 0; | 1340 | int ret = 0; |
| 1314 | 1341 | ||
| 1315 | switch (port->class) { | 1342 | switch (port->class) { |
| @@ -1326,7 +1353,7 @@ static int ddb_port_attach(struct ddb_port *port) | |||
| 1326 | break; | 1353 | break; |
| 1327 | } | 1354 | } |
| 1328 | if (ret < 0) | 1355 | if (ret < 0) |
| 1329 | printk(KERN_ERR "port_attach on port %d failed\n", port->nr); | 1356 | dev_err(dev, "port_attach on port %d failed\n", port->nr); |
| 1330 | return ret; | 1357 | return ret; |
| 1331 | } | 1358 | } |
| 1332 | 1359 | ||
| @@ -1377,6 +1404,7 @@ static void ddb_ports_detach(struct ddb *dev) | |||
| 1377 | static int init_xo2(struct ddb_port *port) | 1404 | static int init_xo2(struct ddb_port *port) |
| 1378 | { | 1405 | { |
| 1379 | struct i2c_adapter *i2c = &port->i2c->adap; | 1406 | struct i2c_adapter *i2c = &port->i2c->adap; |
| 1407 | struct device *dev = &port->dev->pdev->dev; | ||
| 1380 | u8 val, data[2]; | 1408 | u8 val, data[2]; |
| 1381 | int res; | 1409 | int res; |
| 1382 | 1410 | ||
| @@ -1385,7 +1413,7 @@ static int init_xo2(struct ddb_port *port) | |||
| 1385 | return res; | 1413 | return res; |
| 1386 | 1414 | ||
| 1387 | if (data[0] != 0x01) { | 1415 | if (data[0] != 0x01) { |
| 1388 | pr_info("Port %d: invalid XO2\n", port->nr); | 1416 | dev_info(dev, "Port %d: invalid XO2\n", port->nr); |
| 1389 | return -1; | 1417 | return -1; |
| 1390 | } | 1418 | } |
| 1391 | 1419 | ||
| @@ -1511,7 +1539,7 @@ static void ddb_port_probe(struct ddb_port *port) | |||
| 1511 | port->class = DDB_PORT_CI; | 1539 | port->class = DDB_PORT_CI; |
| 1512 | ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); | 1540 | ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); |
| 1513 | } else if (port_has_xo2(port, &xo2_type, &xo2_id)) { | 1541 | } else if (port_has_xo2(port, &xo2_type, &xo2_id)) { |
| 1514 | printk(KERN_INFO "Port %d (TAB %d): XO2 type: %d, id: %d\n", | 1542 | dev_dbg(&dev->pdev->dev, "Port %d (TAB %d): XO2 type: %d, id: %d\n", |
| 1515 | port->nr, port->nr+1, xo2_type, xo2_id); | 1543 | port->nr, port->nr+1, xo2_type, xo2_id); |
| 1516 | 1544 | ||
| 1517 | ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); | 1545 | ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); |
| @@ -1556,10 +1584,10 @@ static void ddb_port_probe(struct ddb_port *port) | |||
| 1556 | } | 1584 | } |
| 1557 | break; | 1585 | break; |
| 1558 | case DDB_XO2_TYPE_CI: | 1586 | case DDB_XO2_TYPE_CI: |
| 1559 | printk(KERN_INFO "DuoFlex CI modules not supported\n"); | 1587 | dev_info(&dev->pdev->dev, "DuoFlex CI modules not supported\n"); |
| 1560 | break; | 1588 | break; |
| 1561 | default: | 1589 | default: |
| 1562 | printk(KERN_INFO "Unknown XO2 DuoFlex module\n"); | 1590 | dev_info(&dev->pdev->dev, "Unknown XO2 DuoFlex module\n"); |
| 1563 | break; | 1591 | break; |
| 1564 | } | 1592 | } |
| 1565 | } else if (port_has_cxd28xx(port, &cxd_id)) { | 1593 | } else if (port_has_cxd28xx(port, &cxd_id)) { |
| @@ -1611,7 +1639,7 @@ static void ddb_port_probe(struct ddb_port *port) | |||
| 1611 | ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING); | 1639 | ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING); |
| 1612 | } | 1640 | } |
| 1613 | 1641 | ||
| 1614 | printk(KERN_INFO "Port %d (TAB %d): %s\n", | 1642 | dev_info(&dev->pdev->dev, "Port %d (TAB %d): %s\n", |
| 1615 | port->nr, port->nr+1, modname); | 1643 | port->nr, port->nr+1, modname); |
| 1616 | } | 1644 | } |
| 1617 | 1645 | ||
| @@ -1765,7 +1793,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) | |||
| 1765 | wbuf += 4; | 1793 | wbuf += 4; |
| 1766 | wlen -= 4; | 1794 | wlen -= 4; |
| 1767 | ddbwritel(data, SPI_DATA); | 1795 | ddbwritel(data, SPI_DATA); |
| 1768 | while (ddbreadl(SPI_CONTROL) & 0x0004) | 1796 | while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) |
| 1769 | ; | 1797 | ; |
| 1770 | } | 1798 | } |
| 1771 | 1799 | ||
| @@ -1785,7 +1813,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) | |||
| 1785 | if (shift) | 1813 | if (shift) |
| 1786 | data <<= shift; | 1814 | data <<= shift; |
| 1787 | ddbwritel(data, SPI_DATA); | 1815 | ddbwritel(data, SPI_DATA); |
| 1788 | while (ddbreadl(SPI_CONTROL) & 0x0004) | 1816 | while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) |
| 1789 | ; | 1817 | ; |
| 1790 | 1818 | ||
| 1791 | if (!rlen) { | 1819 | if (!rlen) { |
| @@ -1797,7 +1825,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) | |||
| 1797 | 1825 | ||
| 1798 | while (rlen > 4) { | 1826 | while (rlen > 4) { |
| 1799 | ddbwritel(0xffffffff, SPI_DATA); | 1827 | ddbwritel(0xffffffff, SPI_DATA); |
| 1800 | while (ddbreadl(SPI_CONTROL) & 0x0004) | 1828 | while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) |
| 1801 | ; | 1829 | ; |
| 1802 | data = ddbreadl(SPI_DATA); | 1830 | data = ddbreadl(SPI_DATA); |
| 1803 | *(u32 *) rbuf = swab32(data); | 1831 | *(u32 *) rbuf = swab32(data); |
| @@ -1806,7 +1834,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen) | |||
| 1806 | } | 1834 | } |
| 1807 | ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL); | 1835 | ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL); |
| 1808 | ddbwritel(0xffffffff, SPI_DATA); | 1836 | ddbwritel(0xffffffff, SPI_DATA); |
| 1809 | while (ddbreadl(SPI_CONTROL) & 0x0004) | 1837 | while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004) |
| 1810 | ; | 1838 | ; |
| 1811 | 1839 | ||
| 1812 | data = ddbreadl(SPI_DATA); | 1840 | data = ddbreadl(SPI_DATA); |
| @@ -1993,7 +2021,7 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1993 | dev->pdev = pdev; | 2021 | dev->pdev = pdev; |
| 1994 | pci_set_drvdata(pdev, dev); | 2022 | pci_set_drvdata(pdev, dev); |
| 1995 | dev->info = (struct ddb_info *) id->driver_data; | 2023 | dev->info = (struct ddb_info *) id->driver_data; |
| 1996 | printk(KERN_INFO "DDBridge driver detected: %s\n", dev->info->name); | 2024 | dev_info(&pdev->dev, "Detected %s\n", dev->info->name); |
| 1997 | 2025 | ||
| 1998 | dev->regs = ioremap(pci_resource_start(dev->pdev, 0), | 2026 | dev->regs = ioremap(pci_resource_start(dev->pdev, 0), |
| 1999 | pci_resource_len(dev->pdev, 0)); | 2027 | pci_resource_len(dev->pdev, 0)); |
| @@ -2001,13 +2029,13 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2001 | stat = -ENOMEM; | 2029 | stat = -ENOMEM; |
| 2002 | goto fail; | 2030 | goto fail; |
| 2003 | } | 2031 | } |
| 2004 | printk(KERN_INFO "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4)); | 2032 | dev_info(&pdev->dev, "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4)); |
| 2005 | 2033 | ||
| 2006 | #ifdef CONFIG_PCI_MSI | 2034 | #ifdef CONFIG_PCI_MSI |
| 2007 | if (pci_msi_enabled()) | 2035 | if (pci_msi_enabled()) |
| 2008 | stat = pci_enable_msi(dev->pdev); | 2036 | stat = pci_enable_msi(dev->pdev); |
| 2009 | if (stat) { | 2037 | if (stat) { |
| 2010 | printk(KERN_INFO ": MSI not available.\n"); | 2038 | dev_info(&pdev->dev, "MSI not available.\n"); |
| 2011 | } else { | 2039 | } else { |
| 2012 | irq_flag = 0; | 2040 | irq_flag = 0; |
| 2013 | dev->msi = 1; | 2041 | dev->msi = 1; |
| @@ -2040,7 +2068,7 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2040 | goto fail1; | 2068 | goto fail1; |
| 2041 | ddb_ports_init(dev); | 2069 | ddb_ports_init(dev); |
| 2042 | if (ddb_buffers_alloc(dev) < 0) { | 2070 | if (ddb_buffers_alloc(dev) < 0) { |
| 2043 | printk(KERN_INFO ": Could not allocate buffer memory\n"); | 2071 | dev_err(&pdev->dev, "Could not allocate buffer memory\n"); |
| 2044 | goto fail2; | 2072 | goto fail2; |
| 2045 | } | 2073 | } |
| 2046 | if (ddb_ports_attach(dev) < 0) | 2074 | if (ddb_ports_attach(dev) < 0) |
| @@ -2050,19 +2078,19 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 2050 | 2078 | ||
| 2051 | fail3: | 2079 | fail3: |
| 2052 | ddb_ports_detach(dev); | 2080 | ddb_ports_detach(dev); |
| 2053 | printk(KERN_ERR "fail3\n"); | 2081 | dev_err(&pdev->dev, "fail3\n"); |
| 2054 | ddb_ports_release(dev); | 2082 | ddb_ports_release(dev); |
| 2055 | fail2: | 2083 | fail2: |
| 2056 | printk(KERN_ERR "fail2\n"); | 2084 | dev_err(&pdev->dev, "fail2\n"); |
| 2057 | ddb_buffers_free(dev); | 2085 | ddb_buffers_free(dev); |
| 2058 | fail1: | 2086 | fail1: |
| 2059 | printk(KERN_ERR "fail1\n"); | 2087 | dev_err(&pdev->dev, "fail1\n"); |
| 2060 | if (dev->msi) | 2088 | if (dev->msi) |
| 2061 | pci_disable_msi(dev->pdev); | 2089 | pci_disable_msi(dev->pdev); |
| 2062 | if (stat == 0) | 2090 | if (stat == 0) |
| 2063 | free_irq(dev->pdev->irq, dev); | 2091 | free_irq(dev->pdev->irq, dev); |
| 2064 | fail: | 2092 | fail: |
| 2065 | printk(KERN_ERR "fail\n"); | 2093 | dev_err(&pdev->dev, "fail\n"); |
| 2066 | ddb_unmap(dev); | 2094 | ddb_unmap(dev); |
| 2067 | pci_set_drvdata(pdev, NULL); | 2095 | pci_set_drvdata(pdev, NULL); |
| 2068 | pci_disable_device(pdev); | 2096 | pci_disable_device(pdev); |
| @@ -2242,7 +2270,7 @@ static __init int module_init_ddbridge(void) | |||
| 2242 | { | 2270 | { |
| 2243 | int ret; | 2271 | int ret; |
| 2244 | 2272 | ||
| 2245 | printk(KERN_INFO "Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n"); | 2273 | pr_info("Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n"); |
| 2246 | 2274 | ||
| 2247 | ret = ddb_class_create(); | 2275 | ret = ddb_class_create(); |
| 2248 | if (ret < 0) | 2276 | if (ret < 0) |
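Two patterns run through the ddbridge-core.c changes above: raw printk() calls become dev_err()/dev_info()/dev_dbg() against &dev->pdev->dev, and register reads that feed loops or arithmetic go through safe_ddbreadl(), which treats an all-ones read (what a PCI read returns when the device has dropped off the bus or faulted) as a failure. A generic sketch of the latter idea follows; safe_readl_or_zero() is an assumed name, not a kernel API.

#include <linux/io.h>
#include <linux/device.h>

static inline u32 safe_readl_or_zero(struct device *dev, void __iomem *addr)
{
        u32 val = readl(addr);

        /* A surprise-removed or faulted PCI device reads back as all ones. */
        if (val == ~0u) {
                dev_err(dev, "MMIO read returned all ones\n");
                return 0;
        }

        return val;
}

Returning 0 on failure also lets busy-wait loops such as the SPI_CONTROL polling above fall through instead of spinning forever against a dead device.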
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c index ce69e648b663..8c92cb7f7e72 100644 --- a/drivers/media/pci/ngene/ngene-core.c +++ b/drivers/media/pci/ngene/ngene-core.c | |||
| @@ -336,9 +336,9 @@ int ngene_command(struct ngene *dev, struct ngene_command *com) | |||
| 336 | { | 336 | { |
| 337 | int result; | 337 | int result; |
| 338 | 338 | ||
| 339 | down(&dev->cmd_mutex); | 339 | mutex_lock(&dev->cmd_mutex); |
| 340 | result = ngene_command_mutex(dev, com); | 340 | result = ngene_command_mutex(dev, com); |
| 341 | up(&dev->cmd_mutex); | 341 | mutex_unlock(&dev->cmd_mutex); |
| 342 | return result; | 342 | return result; |
| 343 | } | 343 | } |
| 344 | 344 | ||
| @@ -560,7 +560,6 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream, | |||
| 560 | u16 BsSPI = ((stream & 1) ? 0x9800 : 0x9700); | 560 | u16 BsSPI = ((stream & 1) ? 0x9800 : 0x9700); |
| 561 | u16 BsSDO = 0x9B00; | 561 | u16 BsSDO = 0x9B00; |
| 562 | 562 | ||
| 563 | down(&dev->stream_mutex); | ||
| 564 | memset(&com, 0, sizeof(com)); | 563 | memset(&com, 0, sizeof(com)); |
| 565 | com.cmd.hdr.Opcode = CMD_CONTROL; | 564 | com.cmd.hdr.Opcode = CMD_CONTROL; |
| 566 | com.cmd.hdr.Length = sizeof(struct FW_STREAM_CONTROL) - 2; | 565 | com.cmd.hdr.Length = sizeof(struct FW_STREAM_CONTROL) - 2; |
| @@ -586,17 +585,13 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream, | |||
| 586 | chan->State = KSSTATE_ACQUIRE; | 585 | chan->State = KSSTATE_ACQUIRE; |
| 587 | chan->HWState = HWSTATE_STOP; | 586 | chan->HWState = HWSTATE_STOP; |
| 588 | spin_unlock_irq(&chan->state_lock); | 587 | spin_unlock_irq(&chan->state_lock); |
| 589 | if (ngene_command(dev, &com) < 0) { | 588 | if (ngene_command(dev, &com) < 0) |
| 590 | up(&dev->stream_mutex); | ||
| 591 | return -1; | 589 | return -1; |
| 592 | } | ||
| 593 | /* clear_buffers(chan); */ | 590 | /* clear_buffers(chan); */ |
| 594 | flush_buffers(chan); | 591 | flush_buffers(chan); |
| 595 | up(&dev->stream_mutex); | ||
| 596 | return 0; | 592 | return 0; |
| 597 | } | 593 | } |
| 598 | spin_unlock_irq(&chan->state_lock); | 594 | spin_unlock_irq(&chan->state_lock); |
| 599 | up(&dev->stream_mutex); | ||
| 600 | return 0; | 595 | return 0; |
| 601 | } | 596 | } |
| 602 | 597 | ||
| @@ -692,11 +687,9 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream, | |||
| 692 | chan->HWState = HWSTATE_STARTUP; | 687 | chan->HWState = HWSTATE_STARTUP; |
| 693 | spin_unlock_irq(&chan->state_lock); | 688 | spin_unlock_irq(&chan->state_lock); |
| 694 | 689 | ||
| 695 | if (ngene_command(dev, &com) < 0) { | 690 | if (ngene_command(dev, &com) < 0) |
| 696 | up(&dev->stream_mutex); | ||
| 697 | return -1; | 691 | return -1; |
| 698 | } | 692 | |
| 699 | up(&dev->stream_mutex); | ||
| 700 | return 0; | 693 | return 0; |
| 701 | } | 694 | } |
| 702 | 695 | ||
| @@ -750,8 +743,11 @@ void set_transfer(struct ngene_channel *chan, int state) | |||
| 750 | /* else printk(KERN_INFO DEVICE_NAME ": lock=%08x\n", | 743 | /* else printk(KERN_INFO DEVICE_NAME ": lock=%08x\n", |
| 751 | ngreadl(0x9310)); */ | 744 | ngreadl(0x9310)); */ |
| 752 | 745 | ||
| 746 | mutex_lock(&dev->stream_mutex); | ||
| 753 | ret = ngene_command_stream_control(dev, chan->number, | 747 | ret = ngene_command_stream_control(dev, chan->number, |
| 754 | control, mode, flags); | 748 | control, mode, flags); |
| 749 | mutex_unlock(&dev->stream_mutex); | ||
| 750 | |||
| 755 | if (!ret) | 751 | if (!ret) |
| 756 | chan->running = state; | 752 | chan->running = state; |
| 757 | else | 753 | else |
| @@ -1283,7 +1279,7 @@ static int ngene_load_firm(struct ngene *dev) | |||
| 1283 | 1279 | ||
| 1284 | static void ngene_stop(struct ngene *dev) | 1280 | static void ngene_stop(struct ngene *dev) |
| 1285 | { | 1281 | { |
| 1286 | down(&dev->cmd_mutex); | 1282 | mutex_destroy(&dev->cmd_mutex); |
| 1287 | i2c_del_adapter(&(dev->channel[0].i2c_adapter)); | 1283 | i2c_del_adapter(&(dev->channel[0].i2c_adapter)); |
| 1288 | i2c_del_adapter(&(dev->channel[1].i2c_adapter)); | 1284 | i2c_del_adapter(&(dev->channel[1].i2c_adapter)); |
| 1289 | ngwritel(0, NGENE_INT_ENABLE); | 1285 | ngwritel(0, NGENE_INT_ENABLE); |
| @@ -1346,10 +1342,10 @@ static int ngene_start(struct ngene *dev) | |||
| 1346 | init_waitqueue_head(&dev->cmd_wq); | 1342 | init_waitqueue_head(&dev->cmd_wq); |
| 1347 | init_waitqueue_head(&dev->tx_wq); | 1343 | init_waitqueue_head(&dev->tx_wq); |
| 1348 | init_waitqueue_head(&dev->rx_wq); | 1344 | init_waitqueue_head(&dev->rx_wq); |
| 1349 | sema_init(&dev->cmd_mutex, 1); | 1345 | mutex_init(&dev->cmd_mutex); |
| 1350 | sema_init(&dev->stream_mutex, 1); | 1346 | mutex_init(&dev->stream_mutex); |
| 1351 | sema_init(&dev->pll_mutex, 1); | 1347 | sema_init(&dev->pll_mutex, 1); |
| 1352 | sema_init(&dev->i2c_switch_mutex, 1); | 1348 | mutex_init(&dev->i2c_switch_mutex); |
| 1353 | spin_lock_init(&dev->cmd_lock); | 1349 | spin_lock_init(&dev->cmd_lock); |
| 1354 | for (i = 0; i < MAX_STREAM; i++) | 1350 | for (i = 0; i < MAX_STREAM; i++) |
| 1355 | spin_lock_init(&dev->channel[i].state_lock); | 1351 | spin_lock_init(&dev->channel[i].state_lock); |
| @@ -1606,10 +1602,10 @@ static void ngene_unlink(struct ngene *dev) | |||
| 1606 | com.in_len = 3; | 1602 | com.in_len = 3; |
| 1607 | com.out_len = 1; | 1603 | com.out_len = 1; |
| 1608 | 1604 | ||
| 1609 | down(&dev->cmd_mutex); | 1605 | mutex_lock(&dev->cmd_mutex); |
| 1610 | ngwritel(0, NGENE_INT_ENABLE); | 1606 | ngwritel(0, NGENE_INT_ENABLE); |
| 1611 | ngene_command_mutex(dev, &com); | 1607 | ngene_command_mutex(dev, &com); |
| 1612 | up(&dev->cmd_mutex); | 1608 | mutex_unlock(&dev->cmd_mutex); |
| 1613 | } | 1609 | } |
| 1614 | 1610 | ||
| 1615 | void ngene_shutdown(struct pci_dev *pdev) | 1611 | void ngene_shutdown(struct pci_dev *pdev) |
diff --git a/drivers/media/pci/ngene/ngene-i2c.c b/drivers/media/pci/ngene/ngene-i2c.c index cf39fcf54adf..fbf36353c701 100644 --- a/drivers/media/pci/ngene/ngene-i2c.c +++ b/drivers/media/pci/ngene/ngene-i2c.c | |||
| @@ -118,7 +118,7 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter, | |||
| 118 | (struct ngene_channel *)i2c_get_adapdata(adapter); | 118 | (struct ngene_channel *)i2c_get_adapdata(adapter); |
| 119 | struct ngene *dev = chan->dev; | 119 | struct ngene *dev = chan->dev; |
| 120 | 120 | ||
| 121 | down(&dev->i2c_switch_mutex); | 121 | mutex_lock(&dev->i2c_switch_mutex); |
| 122 | ngene_i2c_set_bus(dev, chan->number); | 122 | ngene_i2c_set_bus(dev, chan->number); |
| 123 | 123 | ||
| 124 | if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD)) | 124 | if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD)) |
| @@ -136,11 +136,11 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter, | |||
| 136 | msg[0].buf, msg[0].len, 0)) | 136 | msg[0].buf, msg[0].len, 0)) |
| 137 | goto done; | 137 | goto done; |
| 138 | 138 | ||
| 139 | up(&dev->i2c_switch_mutex); | 139 | mutex_unlock(&dev->i2c_switch_mutex); |
| 140 | return -EIO; | 140 | return -EIO; |
| 141 | 141 | ||
| 142 | done: | 142 | done: |
| 143 | up(&dev->i2c_switch_mutex); | 143 | mutex_unlock(&dev->i2c_switch_mutex); |
| 144 | return num; | 144 | return num; |
| 145 | } | 145 | } |
| 146 | 146 | ||
diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h index 10d8f74c4f0a..7c7cd217333d 100644 --- a/drivers/media/pci/ngene/ngene.h +++ b/drivers/media/pci/ngene/ngene.h | |||
| @@ -762,10 +762,10 @@ struct ngene { | |||
| 762 | 762 | ||
| 763 | wait_queue_head_t cmd_wq; | 763 | wait_queue_head_t cmd_wq; |
| 764 | int cmd_done; | 764 | int cmd_done; |
| 765 | struct semaphore cmd_mutex; | 765 | struct mutex cmd_mutex; |
| 766 | struct semaphore stream_mutex; | 766 | struct mutex stream_mutex; |
| 767 | struct semaphore pll_mutex; | 767 | struct semaphore pll_mutex; |
| 768 | struct semaphore i2c_switch_mutex; | 768 | struct mutex i2c_switch_mutex; |
| 769 | int i2c_current_channel; | 769 | int i2c_current_channel; |
| 770 | int i2c_current_bus; | 770 | int i2c_current_bus; |
| 771 | spinlock_t cmd_lock; | 771 | spinlock_t cmd_lock; |
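The ngene changes above convert cmd_mutex, stream_mutex and i2c_switch_mutex from binary semaphores to struct mutex, and move the stream_mutex locking out of ngene_command_stream_control() into its caller set_transfer(), so the helper no longer has to unlock on every early return. A minimal sketch of the resulting shape, with hypothetical demo_* names:

#include <linux/mutex.h>

struct demo_dev {
        struct mutex cmd_mutex;         /* serializes firmware commands */
};

static void demo_dev_init(struct demo_dev *dev)
{
        mutex_init(&dev->cmd_mutex);
}

static int demo_send_command(struct demo_dev *dev)
{
        int ret;

        mutex_lock(&dev->cmd_mutex);
        ret = 0;                        /* issue the command while holding the lock */
        mutex_unlock(&dev->cmd_mutex);

        return ret;
}

static void demo_dev_exit(struct demo_dev *dev)
{
        mutex_destroy(&dev->cmd_mutex);
}

A mutex fits here because each lock has a clear owner, which buys lockdep owner checking; pll_mutex is left as a semaphore in this series.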
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c index 2a044be729da..e7bd2b8484e3 100644 --- a/drivers/media/pci/tw5864/tw5864-video.c +++ b/drivers/media/pci/tw5864/tw5864-video.c | |||
| @@ -545,6 +545,7 @@ static int tw5864_fmt_vid_cap(struct file *file, void *priv, | |||
| 545 | switch (input->std) { | 545 | switch (input->std) { |
| 546 | default: | 546 | default: |
| 547 | WARN_ON_ONCE(1); | 547 | WARN_ON_ONCE(1); |
| 548 | return -EINVAL; | ||
| 548 | case STD_NTSC: | 549 | case STD_NTSC: |
| 549 | f->fmt.pix.height = 480; | 550 | f->fmt.pix.height = 480; |
| 550 | break; | 551 | break; |
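The tw5864 hunk adds an explicit error return to a switch default that previously only warned, so an unexpected input->std can no longer fall through and report a bogus frame height. A small illustrative sketch of the same defensive pattern (names are made up):

#include <linux/bug.h>
#include <linux/errno.h>

static int demo_frame_height(unsigned int std)
{
        switch (std) {
        case 0:                         /* NTSC-like timing */
                return 480;
        case 1:                         /* PAL/SECAM-like timing */
                return 576;
        default:
                /* Should be unreachable; warn once and fail cleanly. */
                WARN_ON_ONCE(1);
                return -EINVAL;
        }
}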
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 1313cd533436..fb1fa0b82077 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
| @@ -475,8 +475,8 @@ config VIDEO_QCOM_VENUS | |||
| 475 | tristate "Qualcomm Venus V4L2 encoder/decoder driver" | 475 | tristate "Qualcomm Venus V4L2 encoder/decoder driver" |
| 476 | depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA | 476 | depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA |
| 477 | depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST | 477 | depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST |
| 478 | select QCOM_MDT_LOADER if (ARM || ARM64) | 478 | select QCOM_MDT_LOADER if ARCH_QCOM |
| 479 | select QCOM_SCM if (ARM || ARM64) | 479 | select QCOM_SCM if ARCH_QCOM |
| 480 | select VIDEOBUF2_DMA_SG | 480 | select VIDEOBUF2_DMA_SG |
| 481 | select V4L2_MEM2MEM_DEV | 481 | select V4L2_MEM2MEM_DEV |
| 482 | ---help--- | 482 | ---help--- |
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c index 25cbf9e5ac5a..bba1eb43b5d8 100644 --- a/drivers/media/platform/coda/coda-bit.c +++ b/drivers/media/platform/coda/coda-bit.c | |||
| @@ -393,8 +393,8 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx, | |||
| 393 | int ret; | 393 | int ret; |
| 394 | int i; | 394 | int i; |
| 395 | 395 | ||
| 396 | if (ctx->codec && (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 || | 396 | if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 || |
| 397 | ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264)) { | 397 | ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264) { |
| 398 | width = round_up(q_data->width, 16); | 398 | width = round_up(q_data->width, 16); |
| 399 | height = round_up(q_data->height, 16); | 399 | height = round_up(q_data->height, 16); |
| 400 | } else { | 400 | } else { |
| @@ -2198,7 +2198,7 @@ static void coda_finish_decode(struct coda_ctx *ctx) | |||
| 2198 | ctx->display_idx = display_idx; | 2198 | ctx->display_idx = display_idx; |
| 2199 | } | 2199 | } |
| 2200 | 2200 | ||
| 2201 | static void coda_error_decode(struct coda_ctx *ctx) | 2201 | static void coda_decode_timeout(struct coda_ctx *ctx) |
| 2202 | { | 2202 | { |
| 2203 | struct vb2_v4l2_buffer *dst_buf; | 2203 | struct vb2_v4l2_buffer *dst_buf; |
| 2204 | 2204 | ||
| @@ -2223,7 +2223,7 @@ const struct coda_context_ops coda_bit_decode_ops = { | |||
| 2223 | .start_streaming = coda_start_decoding, | 2223 | .start_streaming = coda_start_decoding, |
| 2224 | .prepare_run = coda_prepare_decode, | 2224 | .prepare_run = coda_prepare_decode, |
| 2225 | .finish_run = coda_finish_decode, | 2225 | .finish_run = coda_finish_decode, |
| 2226 | .error_run = coda_error_decode, | 2226 | .run_timeout = coda_decode_timeout, |
| 2227 | .seq_end_work = coda_seq_end_work, | 2227 | .seq_end_work = coda_seq_end_work, |
| 2228 | .release = coda_bit_release, | 2228 | .release = coda_bit_release, |
| 2229 | }; | 2229 | }; |
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index f92cc7df58fb..829c7895a98a 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c | |||
| @@ -1164,8 +1164,8 @@ static void coda_pic_run_work(struct work_struct *work) | |||
| 1164 | 1164 | ||
| 1165 | coda_hw_reset(ctx); | 1165 | coda_hw_reset(ctx); |
| 1166 | 1166 | ||
| 1167 | if (ctx->ops->error_run) | 1167 | if (ctx->ops->run_timeout) |
| 1168 | ctx->ops->error_run(ctx); | 1168 | ctx->ops->run_timeout(ctx); |
| 1169 | } else if (!ctx->aborting) { | 1169 | } else if (!ctx->aborting) { |
| 1170 | ctx->ops->finish_run(ctx); | 1170 | ctx->ops->finish_run(ctx); |
| 1171 | } | 1171 | } |
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h index 40fe22f0d757..c5f504d8cf67 100644 --- a/drivers/media/platform/coda/coda.h +++ b/drivers/media/platform/coda/coda.h | |||
| @@ -183,7 +183,7 @@ struct coda_context_ops { | |||
| 183 | int (*start_streaming)(struct coda_ctx *ctx); | 183 | int (*start_streaming)(struct coda_ctx *ctx); |
| 184 | int (*prepare_run)(struct coda_ctx *ctx); | 184 | int (*prepare_run)(struct coda_ctx *ctx); |
| 185 | void (*finish_run)(struct coda_ctx *ctx); | 185 | void (*finish_run)(struct coda_ctx *ctx); |
| 186 | void (*error_run)(struct coda_ctx *ctx); | 186 | void (*run_timeout)(struct coda_ctx *ctx); |
| 187 | void (*seq_end_work)(struct work_struct *work); | 187 | void (*seq_end_work)(struct work_struct *work); |
| 188 | void (*release)(struct coda_ctx *ctx); | 188 | void (*release)(struct coda_ctx *ctx); |
| 189 | }; | 189 | }; |
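The coda rename from error_run to run_timeout matches what the hook is actually used for: coda_pic_run_work() calls it only after a picture-run timeout and hardware reset, and only if the context ops provide it. A reduced sketch of that optional-callback dispatch, with hypothetical demo_* types:

#include <stdbool.h>
#include <stddef.h>

struct demo_ctx;

struct demo_ctx_ops {
        void (*finish_run)(struct demo_ctx *ctx);
        void (*run_timeout)(struct demo_ctx *ctx);      /* optional hook */
};

struct demo_ctx {
        const struct demo_ctx_ops *ops;
        bool aborting;
};

static void demo_pic_run_done(struct demo_ctx *ctx, bool timed_out)
{
        if (timed_out) {
                /* Only call the hook when the backend implements it. */
                if (ctx->ops->run_timeout)
                        ctx->ops->run_timeout(ctx);
        } else if (!ctx->aborting) {
                ctx->ops->finish_run(ctx);
        }
}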
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h index 8f6688a7a111..f1b521045d64 100644 --- a/drivers/media/platform/davinci/ccdc_hw_device.h +++ b/drivers/media/platform/davinci/ccdc_hw_device.h | |||
| @@ -42,16 +42,6 @@ struct ccdc_hw_ops { | |||
| 42 | int (*set_hw_if_params) (struct vpfe_hw_if_param *param); | 42 | int (*set_hw_if_params) (struct vpfe_hw_if_param *param); |
| 43 | /* get interface parameters */ | 43 | /* get interface parameters */ |
| 44 | int (*get_hw_if_params) (struct vpfe_hw_if_param *param); | 44 | int (*get_hw_if_params) (struct vpfe_hw_if_param *param); |
| 45 | /* | ||
| 46 | * Pointer to function to set parameters. Used | ||
| 47 | * for implementing VPFE_S_CCDC_PARAMS | ||
| 48 | */ | ||
| 49 | int (*set_params) (void *params); | ||
| 50 | /* | ||
| 51 | * Pointer to function to get parameter. Used | ||
| 52 | * for implementing VPFE_G_CCDC_PARAMS | ||
| 53 | */ | ||
| 54 | int (*get_params) (void *params); | ||
| 55 | /* Pointer to function to configure ccdc */ | 45 | /* Pointer to function to configure ccdc */ |
| 56 | int (*configure) (void); | 46 | int (*configure) (void); |
| 57 | 47 | ||
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c index 73db166dc338..6d492dc4c3a9 100644 --- a/drivers/media/platform/davinci/dm355_ccdc.c +++ b/drivers/media/platform/davinci/dm355_ccdc.c | |||
| @@ -17,12 +17,7 @@ | |||
| 17 | * This module is for configuring DM355 CCD controller of VPFE to capture | 17 | * This module is for configuring DM355 CCD controller of VPFE to capture |
| 18 | * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules | 18 | * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules |
| 19 | * such as Defect Pixel Correction, Color Space Conversion etc to | 19 | * such as Defect Pixel Correction, Color Space Conversion etc to |
| 20 | * pre-process the Bayer RGB data, before writing it to SDRAM. This | 20 | * pre-process the Bayer RGB data, before writing it to SDRAM. |
| 21 | * module also allows application to configure individual | ||
| 22 | * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL. | ||
| 23 | * To do so, application include dm355_ccdc.h and vpfe_capture.h header | ||
| 24 | * files. The setparams() API is called by vpfe_capture driver | ||
| 25 | * to configure module parameters | ||
| 26 | * | 21 | * |
| 27 | * TODO: 1) Raw bayer parameter settings and bayer capture | 22 | * TODO: 1) Raw bayer parameter settings and bayer capture |
| 28 | * 2) Split module parameter structure to module specific ioctl structs | 23 | * 2) Split module parameter structure to module specific ioctl structs |
| @@ -260,90 +255,6 @@ static void ccdc_setwin(struct v4l2_rect *image_win, | |||
| 260 | dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin..."); | 255 | dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin..."); |
| 261 | } | 256 | } |
| 262 | 257 | ||
| 263 | static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam) | ||
| 264 | { | ||
| 265 | if (ccdcparam->datasft < CCDC_DATA_NO_SHIFT || | ||
| 266 | ccdcparam->datasft > CCDC_DATA_SHIFT_6BIT) { | ||
| 267 | dev_dbg(ccdc_cfg.dev, "Invalid value of data shift\n"); | ||
| 268 | return -EINVAL; | ||
| 269 | } | ||
| 270 | |||
| 271 | if (ccdcparam->mfilt1 < CCDC_NO_MEDIAN_FILTER1 || | ||
| 272 | ccdcparam->mfilt1 > CCDC_MEDIAN_FILTER1) { | ||
| 273 | dev_dbg(ccdc_cfg.dev, "Invalid value of median filter1\n"); | ||
| 274 | return -EINVAL; | ||
| 275 | } | ||
| 276 | |||
| 277 | if (ccdcparam->mfilt2 < CCDC_NO_MEDIAN_FILTER2 || | ||
| 278 | ccdcparam->mfilt2 > CCDC_MEDIAN_FILTER2) { | ||
| 279 | dev_dbg(ccdc_cfg.dev, "Invalid value of median filter2\n"); | ||
| 280 | return -EINVAL; | ||
| 281 | } | ||
| 282 | |||
| 283 | if ((ccdcparam->med_filt_thres < 0) || | ||
| 284 | (ccdcparam->med_filt_thres > CCDC_MED_FILT_THRESH)) { | ||
| 285 | dev_dbg(ccdc_cfg.dev, | ||
| 286 | "Invalid value of median filter threshold\n"); | ||
| 287 | return -EINVAL; | ||
| 288 | } | ||
| 289 | |||
| 290 | if (ccdcparam->data_sz < CCDC_DATA_16BITS || | ||
| 291 | ccdcparam->data_sz > CCDC_DATA_8BITS) { | ||
| 292 | dev_dbg(ccdc_cfg.dev, "Invalid value of data size\n"); | ||
| 293 | return -EINVAL; | ||
| 294 | } | ||
| 295 | |||
| 296 | if (ccdcparam->alaw.enable) { | ||
| 297 | if (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_13_4 || | ||
| 298 | ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) { | ||
| 299 | dev_dbg(ccdc_cfg.dev, "Invalid value of ALAW\n"); | ||
| 300 | return -EINVAL; | ||
| 301 | } | ||
| 302 | } | ||
| 303 | |||
| 304 | if (ccdcparam->blk_clamp.b_clamp_enable) { | ||
| 305 | if (ccdcparam->blk_clamp.sample_pixel < CCDC_SAMPLE_1PIXELS || | ||
| 306 | ccdcparam->blk_clamp.sample_pixel > CCDC_SAMPLE_16PIXELS) { | ||
| 307 | dev_dbg(ccdc_cfg.dev, | ||
| 308 | "Invalid value of sample pixel\n"); | ||
| 309 | return -EINVAL; | ||
| 310 | } | ||
| 311 | if (ccdcparam->blk_clamp.sample_ln < CCDC_SAMPLE_1LINES || | ||
| 312 | ccdcparam->blk_clamp.sample_ln > CCDC_SAMPLE_16LINES) { | ||
| 313 | dev_dbg(ccdc_cfg.dev, | ||
| 314 | "Invalid value of sample lines\n"); | ||
| 315 | return -EINVAL; | ||
| 316 | } | ||
| 317 | } | ||
| 318 | return 0; | ||
| 319 | } | ||
| 320 | |||
| 321 | /* Parameter operations */ | ||
| 322 | static int ccdc_set_params(void __user *params) | ||
| 323 | { | ||
| 324 | struct ccdc_config_params_raw ccdc_raw_params; | ||
| 325 | int x; | ||
| 326 | |||
| 327 | /* only raw module parameters can be set through the IOCTL */ | ||
| 328 | if (ccdc_cfg.if_type != VPFE_RAW_BAYER) | ||
| 329 | return -EINVAL; | ||
| 330 | |||
| 331 | x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params)); | ||
| 332 | if (x) { | ||
| 333 | dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdcparams, %d\n", | ||
| 334 | x); | ||
| 335 | return -EFAULT; | ||
| 336 | } | ||
| 337 | |||
| 338 | if (!validate_ccdc_param(&ccdc_raw_params)) { | ||
| 339 | memcpy(&ccdc_cfg.bayer.config_params, | ||
| 340 | &ccdc_raw_params, | ||
| 341 | sizeof(ccdc_raw_params)); | ||
| 342 | return 0; | ||
| 343 | } | ||
| 344 | return -EINVAL; | ||
| 345 | } | ||
| 346 | |||
| 347 | /* This function will configure CCDC for YCbCr video capture */ | 258 | /* This function will configure CCDC for YCbCr video capture */ |
| 348 | static void ccdc_config_ycbcr(void) | 259 | static void ccdc_config_ycbcr(void) |
| 349 | { | 260 | { |
| @@ -939,7 +850,6 @@ static struct ccdc_hw_device ccdc_hw_dev = { | |||
| 939 | .enable = ccdc_enable, | 850 | .enable = ccdc_enable, |
| 940 | .enable_out_to_sdram = ccdc_enable_output_to_sdram, | 851 | .enable_out_to_sdram = ccdc_enable_output_to_sdram, |
| 941 | .set_hw_if_params = ccdc_set_hw_if_params, | 852 | .set_hw_if_params = ccdc_set_hw_if_params, |
| 942 | .set_params = ccdc_set_params, | ||
| 943 | .configure = ccdc_configure, | 853 | .configure = ccdc_configure, |
| 944 | .set_buftype = ccdc_set_buftype, | 854 | .set_buftype = ccdc_set_buftype, |
| 945 | .get_buftype = ccdc_get_buftype, | 855 | .get_buftype = ccdc_get_buftype, |
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c index 740fbc7a8c14..3b2d8a9317b8 100644 --- a/drivers/media/platform/davinci/dm644x_ccdc.c +++ b/drivers/media/platform/davinci/dm644x_ccdc.c | |||
| @@ -17,13 +17,9 @@ | |||
| 17 | * This module is for configuring CCD controller of DM6446 VPFE to capture | 17 | * This module is for configuring CCD controller of DM6446 VPFE to capture |
| 18 | * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules | 18 | * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules |
| 19 | * such as Defect Pixel Correction, Color Space Conversion etc to | 19 | * such as Defect Pixel Correction, Color Space Conversion etc to |
| 20 | * pre-process the Raw Bayer RGB data, before writing it to SDRAM. This | 20 | * pre-process the Raw Bayer RGB data, before writing it to SDRAM. |
| 21 | * module also allows application to configure individual | 21 | * This file is named DM644x so that other variants such DM6443 |
| 22 | * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL. | 22 | * may be supported using the same module. |
| 23 | * To do so, application includes dm644x_ccdc.h and vpfe_capture.h header | ||
| 24 | * files. The setparams() API is called by vpfe_capture driver | ||
| 25 | * to configure module parameters. This file is named DM644x so that other | ||
| 26 | * variants such DM6443 may be supported using the same module. | ||
| 27 | * | 23 | * |
| 28 | * TODO: Test Raw bayer parameter settings and bayer capture | 24 | * TODO: Test Raw bayer parameter settings and bayer capture |
| 29 | * Split module parameter structure to module specific ioctl structs | 25 | * Split module parameter structure to module specific ioctl structs |
| @@ -216,96 +212,8 @@ static void ccdc_readregs(void) | |||
| 216 | dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val); | 212 | dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val); |
| 217 | } | 213 | } |
| 218 | 214 | ||
| 219 | static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam) | ||
| 220 | { | ||
| 221 | if (ccdcparam->alaw.enable) { | ||
| 222 | u8 max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd); | ||
| 223 | u8 max_data = ccdc_data_size_max_bit(ccdcparam->data_sz); | ||
| 224 | |||
| 225 | if ((ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) || | ||
| 226 | (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_15_6) || | ||
| 227 | (max_gamma > max_data)) { | ||
| 228 | dev_dbg(ccdc_cfg.dev, "\nInvalid data line select"); | ||
| 229 | return -1; | ||
| 230 | } | ||
| 231 | } | ||
| 232 | return 0; | ||
| 233 | } | ||
| 234 | |||
| 235 | static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params) | ||
| 236 | { | ||
| 237 | struct ccdc_config_params_raw *config_params = | ||
| 238 | &ccdc_cfg.bayer.config_params; | ||
| 239 | unsigned int *fpc_virtaddr = NULL; | ||
| 240 | unsigned int *fpc_physaddr = NULL; | ||
| 241 | |||
| 242 | memcpy(config_params, raw_params, sizeof(*raw_params)); | ||
| 243 | /* | ||
| 244 | * allocate memory for fault pixel table and copy the user | ||
| 245 | * values to the table | ||
| 246 | */ | ||
| 247 | if (!config_params->fault_pxl.enable) | ||
| 248 | return 0; | ||
| 249 | |||
| 250 | fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr; | ||
| 251 | fpc_virtaddr = (unsigned int *)phys_to_virt( | ||
| 252 | (unsigned long)fpc_physaddr); | ||
| 253 | /* | ||
| 254 | * Allocate memory for FPC table if current | ||
| 255 | * FPC table buffer is not big enough to | ||
| 256 | * accommodate FPC Number requested | ||
| 257 | */ | ||
| 258 | if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) { | ||
| 259 | if (fpc_physaddr != NULL) { | ||
| 260 | free_pages((unsigned long)fpc_virtaddr, | ||
| 261 | get_order | ||
| 262 | (config_params->fault_pxl.fp_num * | ||
| 263 | FP_NUM_BYTES)); | ||
| 264 | } | ||
| 265 | |||
| 266 | /* Allocate memory for FPC table */ | ||
| 267 | fpc_virtaddr = | ||
| 268 | (unsigned int *)__get_free_pages(GFP_KERNEL | GFP_DMA, | ||
| 269 | get_order(raw_params-> | ||
| 270 | fault_pxl.fp_num * | ||
| 271 | FP_NUM_BYTES)); | ||
| 272 | |||
| 273 | if (fpc_virtaddr == NULL) { | ||
| 274 | dev_dbg(ccdc_cfg.dev, | ||
| 275 | "\nUnable to allocate memory for FPC"); | ||
| 276 | return -EFAULT; | ||
| 277 | } | ||
| 278 | fpc_physaddr = | ||
| 279 | (unsigned int *)virt_to_phys((void *)fpc_virtaddr); | ||
| 280 | } | ||
| 281 | |||
| 282 | /* Copy number of fault pixels and FPC table */ | ||
| 283 | config_params->fault_pxl.fp_num = raw_params->fault_pxl.fp_num; | ||
| 284 | if (copy_from_user(fpc_virtaddr, | ||
| 285 | (void __user *)raw_params->fault_pxl.fpc_table_addr, | ||
| 286 | config_params->fault_pxl.fp_num * FP_NUM_BYTES)) { | ||
| 287 | dev_dbg(ccdc_cfg.dev, "\n copy_from_user failed"); | ||
| 288 | return -EFAULT; | ||
| 289 | } | ||
| 290 | config_params->fault_pxl.fpc_table_addr = (unsigned long)fpc_physaddr; | ||
| 291 | return 0; | ||
| 292 | } | ||
| 293 | |||
| 294 | static int ccdc_close(struct device *dev) | 215 | static int ccdc_close(struct device *dev) |
| 295 | { | 216 | { |
| 296 | struct ccdc_config_params_raw *config_params = | ||
| 297 | &ccdc_cfg.bayer.config_params; | ||
| 298 | unsigned int *fpc_physaddr = NULL, *fpc_virtaddr = NULL; | ||
| 299 | |||
| 300 | fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr; | ||
| 301 | |||
| 302 | if (fpc_physaddr != NULL) { | ||
| 303 | fpc_virtaddr = (unsigned int *) | ||
| 304 | phys_to_virt((unsigned long)fpc_physaddr); | ||
| 305 | free_pages((unsigned long)fpc_virtaddr, | ||
| 306 | get_order(config_params->fault_pxl.fp_num * | ||
| 307 | FP_NUM_BYTES)); | ||
| 308 | } | ||
| 309 | return 0; | 217 | return 0; |
| 310 | } | 218 | } |
| 311 | 219 | ||
| @@ -339,29 +247,6 @@ static void ccdc_sbl_reset(void) | |||
| 339 | vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O); | 247 | vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O); |
| 340 | } | 248 | } |
| 341 | 249 | ||
| 342 | /* Parameter operations */ | ||
| 343 | static int ccdc_set_params(void __user *params) | ||
| 344 | { | ||
| 345 | struct ccdc_config_params_raw ccdc_raw_params; | ||
| 346 | int x; | ||
| 347 | |||
| 348 | if (ccdc_cfg.if_type != VPFE_RAW_BAYER) | ||
| 349 | return -EINVAL; | ||
| 350 | |||
| 351 | x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params)); | ||
| 352 | if (x) { | ||
| 353 | dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copyingccdc params, %d\n", | ||
| 354 | x); | ||
| 355 | return -EFAULT; | ||
| 356 | } | ||
| 357 | |||
| 358 | if (!validate_ccdc_param(&ccdc_raw_params)) { | ||
| 359 | if (!ccdc_update_raw_params(&ccdc_raw_params)) | ||
| 360 | return 0; | ||
| 361 | } | ||
| 362 | return -EINVAL; | ||
| 363 | } | ||
| 364 | |||
| 365 | /* | 250 | /* |
| 366 | * ccdc_config_ycbcr() | 251 | * ccdc_config_ycbcr() |
| 367 | * This function will configure CCDC for YCbCr video capture | 252 | * This function will configure CCDC for YCbCr video capture |
| @@ -489,32 +374,6 @@ static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp) | |||
| 489 | regw(val, CCDC_BLKCMP); | 374 | regw(val, CCDC_BLKCMP); |
| 490 | } | 375 | } |
| 491 | 376 | ||
| 492 | static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc) | ||
| 493 | { | ||
| 494 | u32 val; | ||
| 495 | |||
| 496 | /* Initially disable FPC */ | ||
| 497 | val = CCDC_FPC_DISABLE; | ||
| 498 | regw(val, CCDC_FPC); | ||
| 499 | |||
| 500 | if (!fpc->enable) | ||
| 501 | return; | ||
| 502 | |||
| 503 | /* Configure Fault pixel if needed */ | ||
| 504 | regw(fpc->fpc_table_addr, CCDC_FPC_ADDR); | ||
| 505 | dev_dbg(ccdc_cfg.dev, "\nWriting 0x%lx to FPC_ADDR...\n", | ||
| 506 | (fpc->fpc_table_addr)); | ||
| 507 | /* Write the FPC params with FPC disable */ | ||
| 508 | val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK; | ||
| 509 | regw(val, CCDC_FPC); | ||
| 510 | |||
| 511 | dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val); | ||
| 512 | /* read the FPC register */ | ||
| 513 | val = regr(CCDC_FPC) | CCDC_FPC_ENABLE; | ||
| 514 | regw(val, CCDC_FPC); | ||
| 515 | dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val); | ||
| 516 | } | ||
| 517 | |||
| 518 | /* | 377 | /* |
| 519 | * ccdc_config_raw() | 378 | * ccdc_config_raw() |
| 520 | * This function will configure CCDC for Raw capture mode | 379 | * This function will configure CCDC for Raw capture mode |
| @@ -569,9 +428,6 @@ static void ccdc_config_raw(void) | |||
| 569 | /* Configure Black level compensation */ | 428 | /* Configure Black level compensation */ |
| 570 | ccdc_config_black_compense(&config_params->blk_comp); | 429 | ccdc_config_black_compense(&config_params->blk_comp); |
| 571 | 430 | ||
| 572 | /* Configure Fault Pixel Correction */ | ||
| 573 | ccdc_config_fpc(&config_params->fault_pxl); | ||
| 574 | |||
| 575 | /* If data size is 8 bit then pack the data */ | 431 | /* If data size is 8 bit then pack the data */ |
| 576 | if ((config_params->data_sz == CCDC_DATA_8BITS) || | 432 | if ((config_params->data_sz == CCDC_DATA_8BITS) || |
| 577 | config_params->alaw.enable) | 433 | config_params->alaw.enable) |
| @@ -929,7 +785,6 @@ static struct ccdc_hw_device ccdc_hw_dev = { | |||
| 929 | .reset = ccdc_sbl_reset, | 785 | .reset = ccdc_sbl_reset, |
| 930 | .enable = ccdc_enable, | 786 | .enable = ccdc_enable, |
| 931 | .set_hw_if_params = ccdc_set_hw_if_params, | 787 | .set_hw_if_params = ccdc_set_hw_if_params, |
| 932 | .set_params = ccdc_set_params, | ||
| 933 | .configure = ccdc_configure, | 788 | .configure = ccdc_configure, |
| 934 | .set_buftype = ccdc_set_buftype, | 789 | .set_buftype = ccdc_set_buftype, |
| 935 | .get_buftype = ccdc_get_buftype, | 790 | .get_buftype = ccdc_get_buftype, |
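The dm644x_ccdc.c hunks above drop the whole raw-Bayer parameter path (ccdc_update_raw_params, ccdc_set_params, ccdc_config_fpc and the set_params hook): the fault-pixel copy takes its length straight from a user-supplied fp_num with no upper bound, and the ioctl that fed it is removed from vpfe_capture.c further down. Purely as an illustration, a bounded version of that copy would look something like the sketch below; FPC_MAX_ENTRIES is an invented cap and struct ccdc_fault_pixel / FP_NUM_BYTES are the driver's own definitions, so this is a hypothetical alternative for a path the patch deletes, not anything the patch adds.

#include <linux/errno.h>
#include <linux/uaccess.h>

/* struct ccdc_fault_pixel and FP_NUM_BYTES come from the driver's headers. */
#define FPC_MAX_ENTRIES	1024		/* invented cap, for illustration only */

static int ccdc_copy_fpc_table(const struct ccdc_fault_pixel *fp,
			       void *dst, size_t dst_size)
{
	size_t len;

	/* Never trust the user-supplied entry count. */
	if (fp->fp_num == 0 || fp->fp_num > FPC_MAX_ENTRIES)
		return -EINVAL;

	len = (size_t)fp->fp_num * FP_NUM_BYTES;
	if (len > dst_size)
		return -EINVAL;

	if (copy_from_user(dst, (void __user *)fp->fpc_table_addr, len))
		return -EFAULT;

	return 0;
}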
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index e3fe3e0635aa..b1bf4a7e8eb7 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c | |||
| @@ -281,45 +281,6 @@ void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev) | |||
| 281 | EXPORT_SYMBOL(vpfe_unregister_ccdc_device); | 281 | EXPORT_SYMBOL(vpfe_unregister_ccdc_device); |
| 282 | 282 | ||
| 283 | /* | 283 | /* |
| 284 | * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings | ||
| 285 | */ | ||
| 286 | static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe_dev, | ||
| 287 | struct v4l2_format *f) | ||
| 288 | { | ||
| 289 | struct v4l2_rect image_win; | ||
| 290 | enum ccdc_buftype buf_type; | ||
| 291 | enum ccdc_frmfmt frm_fmt; | ||
| 292 | |||
| 293 | memset(f, 0, sizeof(*f)); | ||
| 294 | f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; | ||
| 295 | ccdc_dev->hw_ops.get_image_window(&image_win); | ||
| 296 | f->fmt.pix.width = image_win.width; | ||
| 297 | f->fmt.pix.height = image_win.height; | ||
| 298 | f->fmt.pix.bytesperline = ccdc_dev->hw_ops.get_line_length(); | ||
| 299 | f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * | ||
| 300 | f->fmt.pix.height; | ||
| 301 | buf_type = ccdc_dev->hw_ops.get_buftype(); | ||
| 302 | f->fmt.pix.pixelformat = ccdc_dev->hw_ops.get_pixel_format(); | ||
| 303 | frm_fmt = ccdc_dev->hw_ops.get_frame_format(); | ||
| 304 | if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) | ||
| 305 | f->fmt.pix.field = V4L2_FIELD_NONE; | ||
| 306 | else if (frm_fmt == CCDC_FRMFMT_INTERLACED) { | ||
| 307 | if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) | ||
| 308 | f->fmt.pix.field = V4L2_FIELD_INTERLACED; | ||
| 309 | else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) | ||
| 310 | f->fmt.pix.field = V4L2_FIELD_SEQ_TB; | ||
| 311 | else { | ||
| 312 | v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf_type\n"); | ||
| 313 | return -EINVAL; | ||
| 314 | } | ||
| 315 | } else { | ||
| 316 | v4l2_err(&vpfe_dev->v4l2_dev, "Invalid frm_fmt\n"); | ||
| 317 | return -EINVAL; | ||
| 318 | } | ||
| 319 | return 0; | ||
| 320 | } | ||
| 321 | |||
| 322 | /* | ||
| 323 | * vpfe_config_ccdc_image_format() | 284 | * vpfe_config_ccdc_image_format() |
| 324 | * For a pix format, configure ccdc to setup the capture | 285 | * For a pix format, configure ccdc to setup the capture |
| 325 | */ | 286 | */ |
| @@ -1697,59 +1658,6 @@ unlock_out: | |||
| 1697 | return ret; | 1658 | return ret; |
| 1698 | } | 1659 | } |
| 1699 | 1660 | ||
| 1700 | |||
| 1701 | static long vpfe_param_handler(struct file *file, void *priv, | ||
| 1702 | bool valid_prio, unsigned int cmd, void *param) | ||
| 1703 | { | ||
| 1704 | struct vpfe_device *vpfe_dev = video_drvdata(file); | ||
| 1705 | int ret; | ||
| 1706 | |||
| 1707 | v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n"); | ||
| 1708 | |||
| 1709 | if (vpfe_dev->started) { | ||
| 1710 | /* only allowed if streaming is not started */ | ||
| 1711 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, | ||
| 1712 | "device already started\n"); | ||
| 1713 | return -EBUSY; | ||
| 1714 | } | ||
| 1715 | |||
| 1716 | ret = mutex_lock_interruptible(&vpfe_dev->lock); | ||
| 1717 | if (ret) | ||
| 1718 | return ret; | ||
| 1719 | |||
| 1720 | switch (cmd) { | ||
| 1721 | case VPFE_CMD_S_CCDC_RAW_PARAMS: | ||
| 1722 | v4l2_warn(&vpfe_dev->v4l2_dev, | ||
| 1723 | "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n"); | ||
| 1724 | if (ccdc_dev->hw_ops.set_params) { | ||
| 1725 | ret = ccdc_dev->hw_ops.set_params(param); | ||
| 1726 | if (ret) { | ||
| 1727 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, | ||
| 1728 | "Error setting parameters in CCDC\n"); | ||
| 1729 | goto unlock_out; | ||
| 1730 | } | ||
| 1731 | ret = vpfe_get_ccdc_image_format(vpfe_dev, | ||
| 1732 | &vpfe_dev->fmt); | ||
| 1733 | if (ret < 0) { | ||
| 1734 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, | ||
| 1735 | "Invalid image format at CCDC\n"); | ||
| 1736 | goto unlock_out; | ||
| 1737 | } | ||
| 1738 | } else { | ||
| 1739 | ret = -EINVAL; | ||
| 1740 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, | ||
| 1741 | "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n"); | ||
| 1742 | } | ||
| 1743 | break; | ||
| 1744 | default: | ||
| 1745 | ret = -ENOTTY; | ||
| 1746 | } | ||
| 1747 | unlock_out: | ||
| 1748 | mutex_unlock(&vpfe_dev->lock); | ||
| 1749 | return ret; | ||
| 1750 | } | ||
| 1751 | |||
| 1752 | |||
| 1753 | /* vpfe capture ioctl operations */ | 1661 | /* vpfe capture ioctl operations */ |
| 1754 | static const struct v4l2_ioctl_ops vpfe_ioctl_ops = { | 1662 | static const struct v4l2_ioctl_ops vpfe_ioctl_ops = { |
| 1755 | .vidioc_querycap = vpfe_querycap, | 1663 | .vidioc_querycap = vpfe_querycap, |
| @@ -1772,7 +1680,6 @@ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = { | |||
| 1772 | .vidioc_cropcap = vpfe_cropcap, | 1680 | .vidioc_cropcap = vpfe_cropcap, |
| 1773 | .vidioc_g_selection = vpfe_g_selection, | 1681 | .vidioc_g_selection = vpfe_g_selection, |
| 1774 | .vidioc_s_selection = vpfe_s_selection, | 1682 | .vidioc_s_selection = vpfe_s_selection, |
| 1775 | .vidioc_default = vpfe_param_handler, | ||
| 1776 | }; | 1683 | }; |
| 1777 | 1684 | ||
| 1778 | static struct vpfe_device *vpfe_initialize(void) | 1685 | static struct vpfe_device *vpfe_initialize(void) |
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index d78580f9e431..4be6554c56c5 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c | |||
| @@ -1719,7 +1719,6 @@ vpif_unregister: | |||
| 1719 | */ | 1719 | */ |
| 1720 | static int vpif_remove(struct platform_device *device) | 1720 | static int vpif_remove(struct platform_device *device) |
| 1721 | { | 1721 | { |
| 1722 | struct common_obj *common; | ||
| 1723 | struct channel_obj *ch; | 1722 | struct channel_obj *ch; |
| 1724 | int i; | 1723 | int i; |
| 1725 | 1724 | ||
| @@ -1730,7 +1729,6 @@ static int vpif_remove(struct platform_device *device) | |||
| 1730 | for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { | 1729 | for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { |
| 1731 | /* Get the pointer to the channel object */ | 1730 | /* Get the pointer to the channel object */ |
| 1732 | ch = vpif_obj.dev[i]; | 1731 | ch = vpif_obj.dev[i]; |
| 1733 | common = &ch->common[VPIF_VIDEO_INDEX]; | ||
| 1734 | /* Unregister video device */ | 1732 | /* Unregister video device */ |
| 1735 | video_unregister_device(&ch->video_dev); | 1733 | video_unregister_device(&ch->video_dev); |
| 1736 | kfree(vpif_obj.dev[i]); | 1734 | kfree(vpif_obj.dev[i]); |
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index b5ac6ce626b3..bf982bf86542 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c | |||
| @@ -1339,7 +1339,6 @@ vpif_unregister: | |||
| 1339 | */ | 1339 | */ |
| 1340 | static int vpif_remove(struct platform_device *device) | 1340 | static int vpif_remove(struct platform_device *device) |
| 1341 | { | 1341 | { |
| 1342 | struct common_obj *common; | ||
| 1343 | struct channel_obj *ch; | 1342 | struct channel_obj *ch; |
| 1344 | int i; | 1343 | int i; |
| 1345 | 1344 | ||
| @@ -1350,7 +1349,6 @@ static int vpif_remove(struct platform_device *device) | |||
| 1350 | for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { | 1349 | for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { |
| 1351 | /* Get the pointer to the channel object */ | 1350 | /* Get the pointer to the channel object */ |
| 1352 | ch = vpif_obj.dev[i]; | 1351 | ch = vpif_obj.dev[i]; |
| 1353 | common = &ch->common[VPIF_VIDEO_INDEX]; | ||
| 1354 | /* Unregister video device */ | 1352 | /* Unregister video device */ |
| 1355 | video_unregister_device(&ch->video_dev); | 1353 | video_unregister_device(&ch->video_dev); |
| 1356 | kfree(vpif_obj.dev[i]); | 1354 | kfree(vpif_obj.dev[i]); |
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c index 92c4e1826356..45a553d4f5b2 100644 --- a/drivers/media/platform/omap/omap_vout_vrfb.c +++ b/drivers/media/platform/omap/omap_vout_vrfb.c | |||
| @@ -16,7 +16,6 @@ | |||
| 16 | #include <media/videobuf-dma-contig.h> | 16 | #include <media/videobuf-dma-contig.h> |
| 17 | #include <media/v4l2-device.h> | 17 | #include <media/v4l2-device.h> |
| 18 | 18 | ||
| 19 | #include <linux/omap-dma.h> | ||
| 20 | #include <video/omapvrfb.h> | 19 | #include <video/omapvrfb.h> |
| 21 | 20 | ||
| 22 | #include "omap_voutdef.h" | 21 | #include "omap_voutdef.h" |
| @@ -63,7 +62,7 @@ static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout, | |||
| 63 | /* | 62 | /* |
| 64 | * Wakes up the application once the DMA transfer to VRFB space is completed. | 63 | * Wakes up the application once the DMA transfer to VRFB space is completed. |
| 65 | */ | 64 | */ |
| 66 | static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data) | 65 | static void omap_vout_vrfb_dma_tx_callback(void *data) |
| 67 | { | 66 | { |
| 68 | struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data; | 67 | struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data; |
| 69 | 68 | ||
| @@ -94,6 +93,7 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num, | |||
| 94 | int ret = 0, i, j; | 93 | int ret = 0, i, j; |
| 95 | struct omap_vout_device *vout; | 94 | struct omap_vout_device *vout; |
| 96 | struct video_device *vfd; | 95 | struct video_device *vfd; |
| 96 | dma_cap_mask_t mask; | ||
| 97 | int image_width, image_height; | 97 | int image_width, image_height; |
| 98 | int vrfb_num_bufs = VRFB_NUM_BUFS; | 98 | int vrfb_num_bufs = VRFB_NUM_BUFS; |
| 99 | struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev); | 99 | struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev); |
| @@ -131,18 +131,27 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num, | |||
| 131 | /* | 131 | /* |
| 132 | * Request and Initialize DMA, for DMA based VRFB transfer | 132 | * Request and Initialize DMA, for DMA based VRFB transfer |
| 133 | */ | 133 | */ |
| 134 | vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE; | 134 | dma_cap_zero(mask); |
| 135 | vout->vrfb_dma_tx.dma_ch = -1; | 135 | dma_cap_set(DMA_INTERLEAVE, mask); |
| 136 | vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED; | 136 | vout->vrfb_dma_tx.chan = dma_request_chan_by_mask(&mask); |
| 137 | ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX", | 137 | if (IS_ERR(vout->vrfb_dma_tx.chan)) { |
| 138 | omap_vout_vrfb_dma_tx_callback, | ||
| 139 | (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch); | ||
| 140 | if (ret < 0) { | ||
| 141 | vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED; | 138 | vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED; |
| 139 | } else { | ||
| 140 | size_t xt_size = sizeof(struct dma_interleaved_template) + | ||
| 141 | sizeof(struct data_chunk); | ||
| 142 | |||
| 143 | vout->vrfb_dma_tx.xt = kzalloc(xt_size, GFP_KERNEL); | ||
| 144 | if (!vout->vrfb_dma_tx.xt) { | ||
| 145 | dma_release_channel(vout->vrfb_dma_tx.chan); | ||
| 146 | vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED; | ||
| 147 | } | ||
| 148 | } | ||
| 149 | |||
| 150 | if (vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED) | ||
| 142 | dev_info(&pdev->dev, | 151 | dev_info(&pdev->dev, |
| 143 | ": failed to allocate DMA Channel for video%d\n", | 152 | ": failed to allocate DMA Channel for video%d\n", |
| 144 | vfd->minor); | 153 | vfd->minor); |
| 145 | } | 154 | |
| 146 | init_waitqueue_head(&vout->vrfb_dma_tx.wait); | 155 | init_waitqueue_head(&vout->vrfb_dma_tx.wait); |
| 147 | 156 | ||
| 148 | /* statically allocated the VRFB buffer is done through | 157 | /* statically allocated the VRFB buffer is done through |
| @@ -177,7 +186,9 @@ void omap_vout_release_vrfb(struct omap_vout_device *vout) | |||
| 177 | 186 | ||
| 178 | if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) { | 187 | if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) { |
| 179 | vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED; | 188 | vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED; |
| 180 | omap_free_dma(vout->vrfb_dma_tx.dma_ch); | 189 | kfree(vout->vrfb_dma_tx.xt); |
| 190 | dmaengine_terminate_sync(vout->vrfb_dma_tx.chan); | ||
| 191 | dma_release_channel(vout->vrfb_dma_tx.chan); | ||
| 181 | } | 192 | } |
| 182 | } | 193 | } |
| 183 | 194 | ||
| @@ -219,70 +230,84 @@ int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout, | |||
| 219 | } | 230 | } |
| 220 | 231 | ||
| 221 | int omap_vout_prepare_vrfb(struct omap_vout_device *vout, | 232 | int omap_vout_prepare_vrfb(struct omap_vout_device *vout, |
| 222 | struct videobuf_buffer *vb) | 233 | struct videobuf_buffer *vb) |
| 223 | { | 234 | { |
| 224 | dma_addr_t dmabuf; | 235 | struct dma_async_tx_descriptor *tx; |
| 225 | struct vid_vrfb_dma *tx; | 236 | enum dma_ctrl_flags flags; |
| 237 | struct dma_chan *chan = vout->vrfb_dma_tx.chan; | ||
| 238 | struct dma_device *dmadev = chan->device; | ||
| 239 | struct dma_interleaved_template *xt = vout->vrfb_dma_tx.xt; | ||
| 240 | dma_cookie_t cookie; | ||
| 241 | enum dma_status status; | ||
| 226 | enum dss_rotation rotation; | 242 | enum dss_rotation rotation; |
| 227 | u32 dest_frame_index = 0, src_element_index = 0; | 243 | size_t dst_icg; |
| 228 | u32 dest_element_index = 0, src_frame_index = 0; | 244 | u32 pixsize; |
| 229 | u32 elem_count = 0, frame_count = 0, pixsize = 2; | ||
| 230 | 245 | ||
| 231 | if (!is_rotation_enabled(vout)) | 246 | if (!is_rotation_enabled(vout)) |
| 232 | return 0; | 247 | return 0; |
| 233 | 248 | ||
| 234 | dmabuf = vout->buf_phy_addr[vb->i]; | ||
| 235 | /* If rotation is enabled, copy input buffer into VRFB | 249 | /* If rotation is enabled, copy input buffer into VRFB |
| 236 | * memory space using DMA. We are copying input buffer | 250 | * memory space using DMA. We are copying input buffer |
| 237 | * into VRFB memory space of desired angle and DSS will | 251 | * into VRFB memory space of desired angle and DSS will |
| 238 | * read image VRFB memory for 0 degree angle | 252 | * read image VRFB memory for 0 degree angle |
| 239 | */ | 253 | */ |
| 254 | |||
| 240 | pixsize = vout->bpp * vout->vrfb_bpp; | 255 | pixsize = vout->bpp * vout->vrfb_bpp; |
| 241 | /* | 256 | dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) - |
| 242 | * DMA transfer in double index mode | 257 | (vout->pix.width * vout->bpp)) + 1; |
| 243 | */ | 258 | |
| 259 | xt->src_start = vout->buf_phy_addr[vb->i]; | ||
| 260 | xt->dst_start = vout->vrfb_context[vb->i].paddr[0]; | ||
| 261 | |||
| 262 | xt->numf = vout->pix.height; | ||
| 263 | xt->frame_size = 1; | ||
| 264 | xt->sgl[0].size = vout->pix.width * vout->bpp; | ||
| 265 | xt->sgl[0].icg = dst_icg; | ||
| 266 | |||
| 267 | xt->dir = DMA_MEM_TO_MEM; | ||
| 268 | xt->src_sgl = false; | ||
| 269 | xt->src_inc = true; | ||
| 270 | xt->dst_sgl = true; | ||
| 271 | xt->dst_inc = true; | ||
| 272 | |||
| 273 | tx = dmadev->device_prep_interleaved_dma(chan, xt, flags); | ||
| 274 | if (tx == NULL) { | ||
| 275 | pr_err("%s: DMA interleaved prep error\n", __func__); | ||
| 276 | return -EINVAL; | ||
| 277 | } | ||
| 244 | 278 | ||
| 245 | /* Frame index */ | 279 | tx->callback = omap_vout_vrfb_dma_tx_callback; |
| 246 | dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) - | 280 | tx->callback_param = &vout->vrfb_dma_tx; |
| 247 | (vout->pix.width * vout->bpp)) + 1; | 281 | |
| 248 | 282 | cookie = dmaengine_submit(tx); | |
| 249 | /* Source and destination parameters */ | 283 | if (dma_submit_error(cookie)) { |
| 250 | src_element_index = 0; | 284 | pr_err("%s: dmaengine_submit failed (%d)\n", __func__, cookie); |
| 251 | src_frame_index = 0; | 285 | return -EINVAL; |
| 252 | dest_element_index = 1; | 286 | } |
| 253 | /* Number of elements per frame */ | ||
| 254 | elem_count = vout->pix.width * vout->bpp; | ||
| 255 | frame_count = vout->pix.height; | ||
| 256 | tx = &vout->vrfb_dma_tx; | ||
| 257 | tx->tx_status = 0; | ||
| 258 | omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32, | ||
| 259 | (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT, | ||
| 260 | tx->dev_id, 0x0); | ||
| 261 | /* src_port required only for OMAP1 */ | ||
| 262 | omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, | ||
| 263 | dmabuf, src_element_index, src_frame_index); | ||
| 264 | /*set dma source burst mode for VRFB */ | ||
| 265 | omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16); | ||
| 266 | rotation = calc_rotation(vout); | ||
| 267 | 287 | ||
| 268 | /* dest_port required only for OMAP1 */ | 288 | vout->vrfb_dma_tx.tx_status = 0; |
| 269 | omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX, | 289 | dma_async_issue_pending(chan); |
| 270 | vout->vrfb_context[vb->i].paddr[0], dest_element_index, | ||
| 271 | dest_frame_index); | ||
| 272 | /*set dma dest burst mode for VRFB */ | ||
| 273 | omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16); | ||
| 274 | omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0); | ||
| 275 | 290 | ||
| 276 | omap_start_dma(tx->dma_ch); | 291 | wait_event_interruptible_timeout(vout->vrfb_dma_tx.wait, |
| 277 | wait_event_interruptible_timeout(tx->wait, tx->tx_status == 1, | 292 | vout->vrfb_dma_tx.tx_status == 1, |
| 278 | VRFB_TX_TIMEOUT); | 293 | VRFB_TX_TIMEOUT); |
| 279 | 294 | ||
| 280 | if (tx->tx_status == 0) { | 295 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
| 281 | omap_stop_dma(tx->dma_ch); | 296 | |
| 297 | if (vout->vrfb_dma_tx.tx_status == 0) { | ||
| 298 | pr_err("%s: Timeout while waiting for DMA\n", __func__); | ||
| 299 | dmaengine_terminate_sync(chan); | ||
| 300 | return -EINVAL; | ||
| 301 | } else if (status != DMA_COMPLETE) { | ||
| 302 | pr_err("%s: DMA completion %s status\n", __func__, | ||
| 303 | status == DMA_ERROR ? "error" : "busy"); | ||
| 304 | dmaengine_terminate_sync(chan); | ||
| 282 | return -EINVAL; | 305 | return -EINVAL; |
| 283 | } | 306 | } |
| 307 | |||
| 284 | /* Store buffers physical address into an array. Addresses | 308 | /* Store buffers physical address into an array. Addresses |
| 285 | * from this array will be used to configure DSS */ | 309 | * from this array will be used to configure DSS */ |
| 310 | rotation = calc_rotation(vout); | ||
| 286 | vout->queued_buf_addr[vb->i] = (u8 *) | 311 | vout->queued_buf_addr[vb->i] = (u8 *) |
| 287 | vout->vrfb_context[vb->i].paddr[rotation]; | 312 | vout->vrfb_context[vb->i].paddr[rotation]; |
| 288 | return 0; | 313 | return 0; |
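The omap_vout_vrfb.c hunks replace the legacy omap-dma double-index transfer with the generic dmaengine interleaved API: one descriptor of numf rows, each sgl[0].size bytes long, with an inter-chunk gap (icg) applied only on the destination so every row lands on the VRFB line stride. A standalone sketch of the same call sequence follows. Two caveats: in the hunk above, flags is handed to device_prep_interleaved_dma without any visible initialization, so the sketch sets it explicitly to DMA_PREP_INTERRUPT | DMA_CTRL_ACK (an assumption, not something the patch does), and it waits with dma_sync_wait() instead of the driver's completion callback and wait queue to keep the example short.

#include <linux/dmaengine.h>
#include <linux/slab.h>

/*
 * Copy 'height' rows of 'row_bytes' each from src to dst, inserting
 * 'dst_icg' bytes of gap after every destination row.
 */
static int copy_rows_interleaved(struct dma_chan *chan,
				 dma_addr_t src, dma_addr_t dst,
				 size_t row_bytes, size_t dst_icg,
				 unsigned int height)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int ret = 0;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->src_start	= src;
	xt->dst_start	= dst;
	xt->dir		= DMA_MEM_TO_MEM;
	xt->numf	= height;	/* number of rows */
	xt->frame_size	= 1;		/* one chunk per row */
	xt->sgl[0].size	= row_bytes;
	xt->sgl[0].icg	= dst_icg;	/* gap applied on the dst side only */
	xt->src_inc	= true;
	xt->dst_inc	= true;
	xt->src_sgl	= false;	/* contiguous source */
	xt->dst_sgl	= true;		/* strided destination */

	tx = dmaengine_prep_interleaved_dma(chan, xt,
					    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -EINVAL;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		ret = -EINVAL;
		goto out;
	}

	dma_async_issue_pending(chan);
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	kfree(xt);
	return ret;
}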
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h index 80c79fabdf95..56b630b1c8b4 100644 --- a/drivers/media/platform/omap/omap_voutdef.h +++ b/drivers/media/platform/omap/omap_voutdef.h | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <media/v4l2-ctrls.h> | 14 | #include <media/v4l2-ctrls.h> |
| 15 | #include <video/omapfb_dss.h> | 15 | #include <video/omapfb_dss.h> |
| 16 | #include <video/omapvrfb.h> | 16 | #include <video/omapvrfb.h> |
| 17 | #include <linux/dmaengine.h> | ||
| 17 | 18 | ||
| 18 | #define YUYV_BPP 2 | 19 | #define YUYV_BPP 2 |
| 19 | #define RGB565_BPP 2 | 20 | #define RGB565_BPP 2 |
| @@ -81,8 +82,9 @@ enum vout_rotaion_type { | |||
| 81 | * for VRFB hidden buffer | 82 | * for VRFB hidden buffer |
| 82 | */ | 83 | */ |
| 83 | struct vid_vrfb_dma { | 84 | struct vid_vrfb_dma { |
| 84 | int dev_id; | 85 | struct dma_chan *chan; |
| 85 | int dma_ch; | 86 | struct dma_interleaved_template *xt; |
| 87 | |||
| 86 | int req_status; | 88 | int req_status; |
| 87 | int tx_status; | 89 | int tx_status; |
| 88 | wait_queue_head_t wait; | 90 | wait_queue_head_t wait; |
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index 776d2bae6979..41eef376eb2d 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c | |||
| @@ -76,7 +76,7 @@ static void venus_sys_error_handler(struct work_struct *work) | |||
| 76 | hfi_core_deinit(core, true); | 76 | hfi_core_deinit(core, true); |
| 77 | hfi_destroy(core); | 77 | hfi_destroy(core); |
| 78 | mutex_lock(&core->lock); | 78 | mutex_lock(&core->lock); |
| 79 | venus_shutdown(&core->dev_fw); | 79 | venus_shutdown(core->dev); |
| 80 | 80 | ||
| 81 | pm_runtime_put_sync(core->dev); | 81 | pm_runtime_put_sync(core->dev); |
| 82 | 82 | ||
| @@ -84,7 +84,7 @@ static void venus_sys_error_handler(struct work_struct *work) | |||
| 84 | 84 | ||
| 85 | pm_runtime_get_sync(core->dev); | 85 | pm_runtime_get_sync(core->dev); |
| 86 | 86 | ||
| 87 | ret |= venus_boot(core->dev, &core->dev_fw, core->res->fwname); | 87 | ret |= venus_boot(core->dev, core->res->fwname); |
| 88 | 88 | ||
| 89 | ret |= hfi_core_resume(core, true); | 89 | ret |= hfi_core_resume(core, true); |
| 90 | 90 | ||
| @@ -137,7 +137,7 @@ static int venus_clks_enable(struct venus_core *core) | |||
| 137 | 137 | ||
| 138 | return 0; | 138 | return 0; |
| 139 | err: | 139 | err: |
| 140 | while (--i) | 140 | while (i--) |
| 141 | clk_disable_unprepare(core->clks[i]); | 141 | clk_disable_unprepare(core->clks[i]); |
| 142 | 142 | ||
| 143 | return ret; | 143 | return ret; |
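The venus_clks_enable() fix above is the usual unwind off-by-one. With the old while (--i), a failure at clks[i] disabled clks[i-1] down to clks[1] but never clks[0], and a failure at i == 0 pre-decremented to -1 and walked off the front of the array. while (i--) tests first and decrements afterwards, so it disables exactly the clocks that were already enabled and does nothing when the very first clk_prepare_enable() fails. The generic pattern, detached from this driver:

#include <linux/clk.h>

/*
 * Enable clks[0..n-1]; on failure at index i, disable exactly the
 * ones already enabled (i-1 down to 0).
 */
static int enable_all(struct clk **clks, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = clk_prepare_enable(clks[i]);
		if (ret)
			goto err;
	}
	return 0;

err:
	while (i--)		/* i-1, i-2, ..., 0; nothing if i == 0 */
		clk_disable_unprepare(clks[i]);
	return ret;
}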
| @@ -207,7 +207,7 @@ static int venus_probe(struct platform_device *pdev) | |||
| 207 | if (ret < 0) | 207 | if (ret < 0) |
| 208 | goto err_runtime_disable; | 208 | goto err_runtime_disable; |
| 209 | 209 | ||
| 210 | ret = venus_boot(dev, &core->dev_fw, core->res->fwname); | 210 | ret = venus_boot(dev, core->res->fwname); |
| 211 | if (ret) | 211 | if (ret) |
| 212 | goto err_runtime_disable; | 212 | goto err_runtime_disable; |
| 213 | 213 | ||
| @@ -238,7 +238,7 @@ err_dev_unregister: | |||
| 238 | err_core_deinit: | 238 | err_core_deinit: |
| 239 | hfi_core_deinit(core, false); | 239 | hfi_core_deinit(core, false); |
| 240 | err_venus_shutdown: | 240 | err_venus_shutdown: |
| 241 | venus_shutdown(&core->dev_fw); | 241 | venus_shutdown(dev); |
| 242 | err_runtime_disable: | 242 | err_runtime_disable: |
| 243 | pm_runtime_set_suspended(dev); | 243 | pm_runtime_set_suspended(dev); |
| 244 | pm_runtime_disable(dev); | 244 | pm_runtime_disable(dev); |
| @@ -259,7 +259,7 @@ static int venus_remove(struct platform_device *pdev) | |||
| 259 | WARN_ON(ret); | 259 | WARN_ON(ret); |
| 260 | 260 | ||
| 261 | hfi_destroy(core); | 261 | hfi_destroy(core); |
| 262 | venus_shutdown(&core->dev_fw); | 262 | venus_shutdown(dev); |
| 263 | of_platform_depopulate(dev); | 263 | of_platform_depopulate(dev); |
| 264 | 264 | ||
| 265 | pm_runtime_put_sync(dev); | 265 | pm_runtime_put_sync(dev); |
| @@ -270,8 +270,7 @@ static int venus_remove(struct platform_device *pdev) | |||
| 270 | return ret; | 270 | return ret; |
| 271 | } | 271 | } |
| 272 | 272 | ||
| 273 | #ifdef CONFIG_PM | 273 | static __maybe_unused int venus_runtime_suspend(struct device *dev) |
| 274 | static int venus_runtime_suspend(struct device *dev) | ||
| 275 | { | 274 | { |
| 276 | struct venus_core *core = dev_get_drvdata(dev); | 275 | struct venus_core *core = dev_get_drvdata(dev); |
| 277 | int ret; | 276 | int ret; |
| @@ -283,7 +282,7 @@ static int venus_runtime_suspend(struct device *dev) | |||
| 283 | return ret; | 282 | return ret; |
| 284 | } | 283 | } |
| 285 | 284 | ||
| 286 | static int venus_runtime_resume(struct device *dev) | 285 | static __maybe_unused int venus_runtime_resume(struct device *dev) |
| 287 | { | 286 | { |
| 288 | struct venus_core *core = dev_get_drvdata(dev); | 287 | struct venus_core *core = dev_get_drvdata(dev); |
| 289 | int ret; | 288 | int ret; |
| @@ -302,7 +301,6 @@ err_clks_disable: | |||
| 302 | venus_clks_disable(core); | 301 | venus_clks_disable(core); |
| 303 | return ret; | 302 | return ret; |
| 304 | } | 303 | } |
| 305 | #endif | ||
| 306 | 304 | ||
| 307 | static const struct dev_pm_ops venus_pm_ops = { | 305 | static const struct dev_pm_ops venus_pm_ops = { |
| 308 | SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, | 306 | SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
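Dropping the #ifdef CONFIG_PM block around the runtime-PM callbacks works because SET_RUNTIME_PM_OPS() already expands to nothing when CONFIG_PM is disabled; the only purpose the #ifdef served was silencing "defined but not used" warnings, which __maybe_unused handles without the preprocessor. A minimal sketch of the resulting shape, with placeholder names:

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static __maybe_unused int foo_runtime_suspend(struct device *dev)
{
	/* power down clocks/regulators here */
	return 0;
}

static __maybe_unused int foo_runtime_resume(struct device *dev)
{
	/* power things back up here */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};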
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index e542700eee32..cba092bcb76d 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h | |||
| @@ -101,7 +101,6 @@ struct venus_core { | |||
| 101 | struct device *dev; | 101 | struct device *dev; |
| 102 | struct device *dev_dec; | 102 | struct device *dev_dec; |
| 103 | struct device *dev_enc; | 103 | struct device *dev_enc; |
| 104 | struct device dev_fw; | ||
| 105 | struct mutex lock; | 104 | struct mutex lock; |
| 106 | struct list_head instances; | 105 | struct list_head instances; |
| 107 | atomic_t insts_count; | 106 | atomic_t insts_count; |
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c index 1b1a4f355918..521d4b36c090 100644 --- a/drivers/media/platform/qcom/venus/firmware.c +++ b/drivers/media/platform/qcom/venus/firmware.c | |||
| @@ -12,97 +12,87 @@ | |||
| 12 | * | 12 | * |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #include <linux/dma-mapping.h> | 15 | #include <linux/device.h> |
| 16 | #include <linux/firmware.h> | 16 | #include <linux/firmware.h> |
| 17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
| 18 | #include <linux/io.h> | ||
| 18 | #include <linux/of.h> | 19 | #include <linux/of.h> |
| 19 | #include <linux/of_reserved_mem.h> | 20 | #include <linux/of_address.h> |
| 20 | #include <linux/slab.h> | ||
| 21 | #include <linux/qcom_scm.h> | 21 | #include <linux/qcom_scm.h> |
| 22 | #include <linux/sizes.h> | ||
| 22 | #include <linux/soc/qcom/mdt_loader.h> | 23 | #include <linux/soc/qcom/mdt_loader.h> |
| 23 | 24 | ||
| 24 | #include "firmware.h" | 25 | #include "firmware.h" |
| 25 | 26 | ||
| 26 | #define VENUS_PAS_ID 9 | 27 | #define VENUS_PAS_ID 9 |
| 27 | #define VENUS_FW_MEM_SIZE SZ_8M | 28 | #define VENUS_FW_MEM_SIZE (6 * SZ_1M) |
| 28 | 29 | ||
| 29 | static void device_release_dummy(struct device *dev) | 30 | int venus_boot(struct device *dev, const char *fwname) |
| 30 | { | ||
| 31 | of_reserved_mem_device_release(dev); | ||
| 32 | } | ||
| 33 | |||
| 34 | int venus_boot(struct device *parent, struct device *fw_dev, const char *fwname) | ||
| 35 | { | 31 | { |
| 36 | const struct firmware *mdt; | 32 | const struct firmware *mdt; |
| 33 | struct device_node *node; | ||
| 37 | phys_addr_t mem_phys; | 34 | phys_addr_t mem_phys; |
| 35 | struct resource r; | ||
| 38 | ssize_t fw_size; | 36 | ssize_t fw_size; |
| 39 | size_t mem_size; | 37 | size_t mem_size; |
| 40 | void *mem_va; | 38 | void *mem_va; |
| 41 | int ret; | 39 | int ret; |
| 42 | 40 | ||
| 43 | if (!qcom_scm_is_available()) | 41 | if (!IS_ENABLED(CONFIG_QCOM_MDT_LOADER) || !qcom_scm_is_available()) |
| 44 | return -EPROBE_DEFER; | 42 | return -EPROBE_DEFER; |
| 45 | 43 | ||
| 46 | fw_dev->parent = parent; | 44 | node = of_parse_phandle(dev->of_node, "memory-region", 0); |
| 47 | fw_dev->release = device_release_dummy; | 45 | if (!node) { |
| 46 | dev_err(dev, "no memory-region specified\n"); | ||
| 47 | return -EINVAL; | ||
| 48 | } | ||
| 48 | 49 | ||
| 49 | ret = dev_set_name(fw_dev, "%s:%s", dev_name(parent), "firmware"); | 50 | ret = of_address_to_resource(node, 0, &r); |
| 50 | if (ret) | 51 | if (ret) |
| 51 | return ret; | 52 | return ret; |
| 52 | 53 | ||
| 53 | ret = device_register(fw_dev); | 54 | mem_phys = r.start; |
| 54 | if (ret < 0) | 55 | mem_size = resource_size(&r); |
| 55 | return ret; | ||
| 56 | 56 | ||
| 57 | ret = of_reserved_mem_device_init_by_idx(fw_dev, parent->of_node, 0); | 57 | if (mem_size < VENUS_FW_MEM_SIZE) |
| 58 | if (ret) | 58 | return -EINVAL; |
| 59 | goto err_unreg_device; | ||
| 60 | 59 | ||
| 61 | mem_size = VENUS_FW_MEM_SIZE; | 60 | mem_va = memremap(r.start, mem_size, MEMREMAP_WC); |
| 62 | |||
| 63 | mem_va = dmam_alloc_coherent(fw_dev, mem_size, &mem_phys, GFP_KERNEL); | ||
| 64 | if (!mem_va) { | 61 | if (!mem_va) { |
| 65 | ret = -ENOMEM; | 62 | dev_err(dev, "unable to map memory region: %pa+%zx\n", |
| 66 | goto err_unreg_device; | 63 | &r.start, mem_size); |
| 64 | return -ENOMEM; | ||
| 67 | } | 65 | } |
| 68 | 66 | ||
| 69 | ret = request_firmware(&mdt, fwname, fw_dev); | 67 | ret = request_firmware(&mdt, fwname, dev); |
| 70 | if (ret < 0) | 68 | if (ret < 0) |
| 71 | goto err_unreg_device; | 69 | goto err_unmap; |
| 72 | 70 | ||
| 73 | fw_size = qcom_mdt_get_size(mdt); | 71 | fw_size = qcom_mdt_get_size(mdt); |
| 74 | if (fw_size < 0) { | 72 | if (fw_size < 0) { |
| 75 | ret = fw_size; | 73 | ret = fw_size; |
| 76 | release_firmware(mdt); | 74 | release_firmware(mdt); |
| 77 | goto err_unreg_device; | 75 | goto err_unmap; |
| 78 | } | 76 | } |
| 79 | 77 | ||
| 80 | ret = qcom_mdt_load(fw_dev, mdt, fwname, VENUS_PAS_ID, mem_va, mem_phys, | 78 | ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID, mem_va, mem_phys, |
| 81 | mem_size); | 79 | mem_size); |
| 82 | 80 | ||
| 83 | release_firmware(mdt); | 81 | release_firmware(mdt); |
| 84 | 82 | ||
| 85 | if (ret) | 83 | if (ret) |
| 86 | goto err_unreg_device; | 84 | goto err_unmap; |
| 87 | 85 | ||
| 88 | ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID); | 86 | ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID); |
| 89 | if (ret) | 87 | if (ret) |
| 90 | goto err_unreg_device; | 88 | goto err_unmap; |
| 91 | |||
| 92 | return 0; | ||
| 93 | 89 | ||
| 94 | err_unreg_device: | 90 | err_unmap: |
| 95 | device_unregister(fw_dev); | 91 | memunmap(mem_va); |
| 96 | return ret; | 92 | return ret; |
| 97 | } | 93 | } |
| 98 | 94 | ||
| 99 | int venus_shutdown(struct device *fw_dev) | 95 | int venus_shutdown(struct device *dev) |
| 100 | { | 96 | { |
| 101 | int ret; | 97 | return qcom_scm_pas_shutdown(VENUS_PAS_ID); |
| 102 | |||
| 103 | ret = qcom_scm_pas_shutdown(VENUS_PAS_ID); | ||
| 104 | device_unregister(fw_dev); | ||
| 105 | memset(fw_dev, 0, sizeof(*fw_dev)); | ||
| 106 | |||
| 107 | return ret; | ||
| 108 | } | 98 | } |
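venus_boot() no longer registers a child "firmware" device backed by dmam_alloc_coherent(); it resolves the memory-region phandle itself, checks the region is at least VENUS_FW_MEM_SIZE, maps it write-combined with memremap(), and hands it to qcom_mdt_load(). A condensed sketch of that lookup sequence follows, with error handling trimmed; the of_node_put() is added here for completeness even though the hunk above does not call it.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

static void *map_reserved_region(struct device *dev, phys_addr_t *phys,
				 size_t *size)
{
	struct device_node *node;
	struct resource r;
	void *va;
	int ret;

	/* "memory-region" points at the reserved-memory node in DT */
	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node)
		return ERR_PTR(-EINVAL);

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret)
		return ERR_PTR(ret);

	*phys = r.start;
	*size = resource_size(&r);

	/* Write-combined mapping; undo with memunmap() when finished */
	va = memremap(r.start, *size, MEMREMAP_WC);
	return va ?: ERR_PTR(-ENOMEM);
}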
diff --git a/drivers/media/platform/qcom/venus/firmware.h b/drivers/media/platform/qcom/venus/firmware.h index f81a98979798..428efb56d339 100644 --- a/drivers/media/platform/qcom/venus/firmware.h +++ b/drivers/media/platform/qcom/venus/firmware.h | |||
| @@ -16,8 +16,7 @@ | |||
| 16 | 16 | ||
| 17 | struct device; | 17 | struct device; |
| 18 | 18 | ||
| 19 | int venus_boot(struct device *parent, struct device *fw_dev, | 19 | int venus_boot(struct device *dev, const char *fwname); |
| 20 | const char *fwname); | 20 | int venus_shutdown(struct device *dev); |
| 21 | int venus_shutdown(struct device *fw_dev); | ||
| 22 | 21 | ||
| 23 | #endif | 22 | #endif |
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c index f8841713e417..a681ae5381d6 100644 --- a/drivers/media/platform/qcom/venus/hfi_msgs.c +++ b/drivers/media/platform/qcom/venus/hfi_msgs.c | |||
| @@ -239,11 +239,12 @@ static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst, | |||
| 239 | break; | 239 | break; |
| 240 | } | 240 | } |
| 241 | 241 | ||
| 242 | if (!error) { | 242 | if (error) |
| 243 | rem_bytes -= read_bytes; | 243 | break; |
| 244 | data += read_bytes; | 244 | |
| 245 | num_properties--; | 245 | rem_bytes -= read_bytes; |
| 246 | } | 246 | data += read_bytes; |
| 247 | num_properties--; | ||
| 247 | } | 248 | } |
| 248 | 249 | ||
| 249 | err_no_prop: | 250 | err_no_prop: |
diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c index 7af66860d624..2cc289e4dea1 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-debug.c +++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c | |||
| @@ -104,7 +104,7 @@ static void bdisp_dbg_dump_ins(struct seq_file *s, u32 val) | |||
| 104 | if (val & BLT_INS_IRQ) | 104 | if (val & BLT_INS_IRQ) |
| 105 | seq_puts(s, "IRQ - "); | 105 | seq_puts(s, "IRQ - "); |
| 106 | 106 | ||
| 107 | seq_puts(s, "\n"); | 107 | seq_putc(s, '\n'); |
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val) | 110 | static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val) |
| @@ -153,7 +153,7 @@ static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val) | |||
| 153 | if (val & BLT_TTY_BIG_END) | 153 | if (val & BLT_TTY_BIG_END) |
| 154 | seq_puts(s, "BigEndian - "); | 154 | seq_puts(s, "BigEndian - "); |
| 155 | 155 | ||
| 156 | seq_puts(s, "\n"); | 156 | seq_putc(s, '\n'); |
| 157 | } | 157 | } |
| 158 | 158 | ||
| 159 | static void bdisp_dbg_dump_xy(struct seq_file *s, u32 val, char *name) | 159 | static void bdisp_dbg_dump_xy(struct seq_file *s, u32 val, char *name) |
| @@ -230,7 +230,7 @@ static void bdisp_dbg_dump_sty(struct seq_file *s, | |||
| 230 | seq_puts(s, "BigEndian - "); | 230 | seq_puts(s, "BigEndian - "); |
| 231 | 231 | ||
| 232 | done: | 232 | done: |
| 233 | seq_puts(s, "\n"); | 233 | seq_putc(s, '\n'); |
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | static void bdisp_dbg_dump_fctl(struct seq_file *s, u32 val) | 236 | static void bdisp_dbg_dump_fctl(struct seq_file *s, u32 val) |
| @@ -247,7 +247,7 @@ static void bdisp_dbg_dump_fctl(struct seq_file *s, u32 val) | |||
| 247 | else if ((val & BLT_FCTL_HV_SCALE) == BLT_FCTL_HV_SAMPLE) | 247 | else if ((val & BLT_FCTL_HV_SCALE) == BLT_FCTL_HV_SAMPLE) |
| 248 | seq_puts(s, "Sample Chroma"); | 248 | seq_puts(s, "Sample Chroma"); |
| 249 | 249 | ||
| 250 | seq_puts(s, "\n"); | 250 | seq_putc(s, '\n'); |
| 251 | } | 251 | } |
| 252 | 252 | ||
| 253 | static void bdisp_dbg_dump_rsf(struct seq_file *s, u32 val, char *name) | 253 | static void bdisp_dbg_dump_rsf(struct seq_file *s, u32 val, char *name) |
| @@ -266,7 +266,7 @@ static void bdisp_dbg_dump_rsf(struct seq_file *s, u32 val, char *name) | |||
| 266 | seq_printf(s, "V: %d(6.10) / scale~%dx0.1", inc, 1024 * 10 / inc); | 266 | seq_printf(s, "V: %d(6.10) / scale~%dx0.1", inc, 1024 * 10 / inc); |
| 267 | 267 | ||
| 268 | done: | 268 | done: |
| 269 | seq_puts(s, "\n"); | 269 | seq_putc(s, '\n'); |
| 270 | } | 270 | } |
| 271 | 271 | ||
| 272 | static void bdisp_dbg_dump_rzi(struct seq_file *s, u32 val, char *name) | 272 | static void bdisp_dbg_dump_rzi(struct seq_file *s, u32 val, char *name) |
| @@ -281,7 +281,7 @@ static void bdisp_dbg_dump_rzi(struct seq_file *s, u32 val, char *name) | |||
| 281 | seq_printf(s, "V: init=%d repeat=%d", val & 0x3FF, (val >> 12) & 7); | 281 | seq_printf(s, "V: init=%d repeat=%d", val & 0x3FF, (val >> 12) & 7); |
| 282 | 282 | ||
| 283 | done: | 283 | done: |
| 284 | seq_puts(s, "\n"); | 284 | seq_putc(s, '\n'); |
| 285 | } | 285 | } |
| 286 | 286 | ||
| 287 | static void bdisp_dbg_dump_ivmx(struct seq_file *s, | 287 | static void bdisp_dbg_dump_ivmx(struct seq_file *s, |
| @@ -293,7 +293,7 @@ static void bdisp_dbg_dump_ivmx(struct seq_file *s, | |||
| 293 | seq_printf(s, "IVMX3\t0x%08X\t", c3); | 293 | seq_printf(s, "IVMX3\t0x%08X\t", c3); |
| 294 | 294 | ||
| 295 | if (!c0 && !c1 && !c2 && !c3) { | 295 | if (!c0 && !c1 && !c2 && !c3) { |
| 296 | seq_puts(s, "\n"); | 296 | seq_putc(s, '\n'); |
| 297 | return; | 297 | return; |
| 298 | } | 298 | } |
| 299 | 299 | ||
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c index 14cb32e21130..88a1e5670c72 100644 --- a/drivers/media/platform/vimc/vimc-capture.c +++ b/drivers/media/platform/vimc/vimc-capture.c | |||
| @@ -517,21 +517,22 @@ static int vimc_cap_remove(struct platform_device *pdev) | |||
| 517 | return 0; | 517 | return 0; |
| 518 | } | 518 | } |
| 519 | 519 | ||
| 520 | static const struct platform_device_id vimc_cap_driver_ids[] = { | ||
| 521 | { | ||
| 522 | .name = VIMC_CAP_DRV_NAME, | ||
| 523 | }, | ||
| 524 | { } | ||
| 525 | }; | ||
| 526 | |||
| 520 | static struct platform_driver vimc_cap_pdrv = { | 527 | static struct platform_driver vimc_cap_pdrv = { |
| 521 | .probe = vimc_cap_probe, | 528 | .probe = vimc_cap_probe, |
| 522 | .remove = vimc_cap_remove, | 529 | .remove = vimc_cap_remove, |
| 530 | .id_table = vimc_cap_driver_ids, | ||
| 523 | .driver = { | 531 | .driver = { |
| 524 | .name = VIMC_CAP_DRV_NAME, | 532 | .name = VIMC_CAP_DRV_NAME, |
| 525 | }, | 533 | }, |
| 526 | }; | 534 | }; |
| 527 | 535 | ||
| 528 | static const struct platform_device_id vimc_cap_driver_ids[] = { | ||
| 529 | { | ||
| 530 | .name = VIMC_CAP_DRV_NAME, | ||
| 531 | }, | ||
| 532 | { } | ||
| 533 | }; | ||
| 534 | |||
| 535 | module_platform_driver(vimc_cap_pdrv); | 536 | module_platform_driver(vimc_cap_pdrv); |
| 536 | 537 | ||
| 537 | MODULE_DEVICE_TABLE(platform, vimc_cap_driver_ids); | 538 | MODULE_DEVICE_TABLE(platform, vimc_cap_driver_ids); |
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c index 35b15bd4d61d..033a131f67af 100644 --- a/drivers/media/platform/vimc/vimc-debayer.c +++ b/drivers/media/platform/vimc/vimc-debayer.c | |||
| @@ -577,21 +577,22 @@ static int vimc_deb_remove(struct platform_device *pdev) | |||
| 577 | return 0; | 577 | return 0; |
| 578 | } | 578 | } |
| 579 | 579 | ||
| 580 | static const struct platform_device_id vimc_deb_driver_ids[] = { | ||
| 581 | { | ||
| 582 | .name = VIMC_DEB_DRV_NAME, | ||
| 583 | }, | ||
| 584 | { } | ||
| 585 | }; | ||
| 586 | |||
| 580 | static struct platform_driver vimc_deb_pdrv = { | 587 | static struct platform_driver vimc_deb_pdrv = { |
| 581 | .probe = vimc_deb_probe, | 588 | .probe = vimc_deb_probe, |
| 582 | .remove = vimc_deb_remove, | 589 | .remove = vimc_deb_remove, |
| 590 | .id_table = vimc_deb_driver_ids, | ||
| 583 | .driver = { | 591 | .driver = { |
| 584 | .name = VIMC_DEB_DRV_NAME, | 592 | .name = VIMC_DEB_DRV_NAME, |
| 585 | }, | 593 | }, |
| 586 | }; | 594 | }; |
| 587 | 595 | ||
| 588 | static const struct platform_device_id vimc_deb_driver_ids[] = { | ||
| 589 | { | ||
| 590 | .name = VIMC_DEB_DRV_NAME, | ||
| 591 | }, | ||
| 592 | { } | ||
| 593 | }; | ||
| 594 | |||
| 595 | module_platform_driver(vimc_deb_pdrv); | 596 | module_platform_driver(vimc_deb_pdrv); |
| 596 | 597 | ||
| 597 | MODULE_DEVICE_TABLE(platform, vimc_deb_driver_ids); | 598 | MODULE_DEVICE_TABLE(platform, vimc_deb_driver_ids); |
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c index fe77505d2679..0a3e086e12f3 100644 --- a/drivers/media/platform/vimc/vimc-scaler.c +++ b/drivers/media/platform/vimc/vimc-scaler.c | |||
| @@ -431,21 +431,22 @@ static int vimc_sca_remove(struct platform_device *pdev) | |||
| 431 | return 0; | 431 | return 0; |
| 432 | } | 432 | } |
| 433 | 433 | ||
| 434 | static const struct platform_device_id vimc_sca_driver_ids[] = { | ||
| 435 | { | ||
| 436 | .name = VIMC_SCA_DRV_NAME, | ||
| 437 | }, | ||
| 438 | { } | ||
| 439 | }; | ||
| 440 | |||
| 434 | static struct platform_driver vimc_sca_pdrv = { | 441 | static struct platform_driver vimc_sca_pdrv = { |
| 435 | .probe = vimc_sca_probe, | 442 | .probe = vimc_sca_probe, |
| 436 | .remove = vimc_sca_remove, | 443 | .remove = vimc_sca_remove, |
| 444 | .id_table = vimc_sca_driver_ids, | ||
| 437 | .driver = { | 445 | .driver = { |
| 438 | .name = VIMC_SCA_DRV_NAME, | 446 | .name = VIMC_SCA_DRV_NAME, |
| 439 | }, | 447 | }, |
| 440 | }; | 448 | }; |
| 441 | 449 | ||
| 442 | static const struct platform_device_id vimc_sca_driver_ids[] = { | ||
| 443 | { | ||
| 444 | .name = VIMC_SCA_DRV_NAME, | ||
| 445 | }, | ||
| 446 | { } | ||
| 447 | }; | ||
| 448 | |||
| 449 | module_platform_driver(vimc_sca_pdrv); | 450 | module_platform_driver(vimc_sca_pdrv); |
| 450 | 451 | ||
| 451 | MODULE_DEVICE_TABLE(platform, vimc_sca_driver_ids); | 452 | MODULE_DEVICE_TABLE(platform, vimc_sca_driver_ids); |
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c index ebdbbe8c05ed..615c2b18dcfc 100644 --- a/drivers/media/platform/vimc/vimc-sensor.c +++ b/drivers/media/platform/vimc/vimc-sensor.c | |||
| @@ -365,21 +365,22 @@ static int vimc_sen_remove(struct platform_device *pdev) | |||
| 365 | return 0; | 365 | return 0; |
| 366 | } | 366 | } |
| 367 | 367 | ||
| 368 | static const struct platform_device_id vimc_sen_driver_ids[] = { | ||
| 369 | { | ||
| 370 | .name = VIMC_SEN_DRV_NAME, | ||
| 371 | }, | ||
| 372 | { } | ||
| 373 | }; | ||
| 374 | |||
| 368 | static struct platform_driver vimc_sen_pdrv = { | 375 | static struct platform_driver vimc_sen_pdrv = { |
| 369 | .probe = vimc_sen_probe, | 376 | .probe = vimc_sen_probe, |
| 370 | .remove = vimc_sen_remove, | 377 | .remove = vimc_sen_remove, |
| 378 | .id_table = vimc_sen_driver_ids, | ||
| 371 | .driver = { | 379 | .driver = { |
| 372 | .name = VIMC_SEN_DRV_NAME, | 380 | .name = VIMC_SEN_DRV_NAME, |
| 373 | }, | 381 | }, |
| 374 | }; | 382 | }; |
| 375 | 383 | ||
| 376 | static const struct platform_device_id vimc_sen_driver_ids[] = { | ||
| 377 | { | ||
| 378 | .name = VIMC_SEN_DRV_NAME, | ||
| 379 | }, | ||
| 380 | { } | ||
| 381 | }; | ||
| 382 | |||
| 383 | module_platform_driver(vimc_sen_pdrv); | 384 | module_platform_driver(vimc_sen_pdrv); |
| 384 | 385 | ||
| 385 | MODULE_DEVICE_TABLE(platform, vimc_sen_driver_ids); | 386 | MODULE_DEVICE_TABLE(platform, vimc_sen_driver_ids); |
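All four vimc hunks make the same mechanical move: once the platform_driver initializer references the ID table through .id_table, the platform_device_id array has to be defined above it, because C requires an identifier to be in scope at its point of use. Reduced to a skeleton with placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)  { return 0; }
static int foo_remove(struct platform_device *pdev) { return 0; }

/* Must be defined before foo_pdrv, which refers to it below. */
static const struct platform_device_id foo_driver_ids[] = {
	{ .name = "foo" },
	{ }					/* sentinel */
};
MODULE_DEVICE_TABLE(platform, foo_driver_ids);

static struct platform_driver foo_pdrv = {
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_driver_ids,	/* already in scope here */
	.driver		= {
		.name	= "foo",
	},
};
module_platform_driver(foo_pdrv);

MODULE_LICENSE("GPL");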
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c index 7240223dc15a..17e82a9a0109 100644 --- a/drivers/media/radio/radio-wl1273.c +++ b/drivers/media/radio/radio-wl1273.c | |||
| @@ -610,10 +610,21 @@ static int wl1273_fm_start(struct wl1273_device *radio, int new_mode) | |||
| 610 | } | 610 | } |
| 611 | } | 611 | } |
| 612 | 612 | ||
| 613 | if (radio->rds_on) | 613 | if (radio->rds_on) { |
| 614 | r = core->write(core, WL1273_RDS_DATA_ENB, 1); | 614 | r = core->write(core, WL1273_RDS_DATA_ENB, 1); |
| 615 | else | 615 | if (r) { |
| 616 | dev_err(dev, "%s: RDS_DATA_ENB ON fails\n", | ||
| 617 | __func__); | ||
| 618 | goto fail; | ||
| 619 | } | ||
| 620 | } else { | ||
| 616 | r = core->write(core, WL1273_RDS_DATA_ENB, 0); | 621 | r = core->write(core, WL1273_RDS_DATA_ENB, 0); |
| 622 | if (r) { | ||
| 623 | dev_err(dev, "%s: RDS_DATA_ENB OFF fails\n", | ||
| 624 | __func__); | ||
| 625 | goto fail; | ||
| 626 | } | ||
| 627 | } | ||
| 617 | } else { | 628 | } else { |
| 618 | dev_warn(dev, "%s: Illegal mode.\n", __func__); | 629 | dev_warn(dev, "%s: Illegal mode.\n", __func__); |
| 619 | } | 630 | } |
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c index a30af91710fe..d2223c04e9ad 100644 --- a/drivers/media/rc/ir-lirc-codec.c +++ b/drivers/media/rc/ir-lirc-codec.c | |||
| @@ -266,7 +266,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd, | |||
| 266 | if (!dev->rx_resolution) | 266 | if (!dev->rx_resolution) |
| 267 | return -ENOTTY; | 267 | return -ENOTTY; |
| 268 | 268 | ||
| 269 | val = dev->rx_resolution; | 269 | val = dev->rx_resolution / 1000; |
| 270 | break; | 270 | break; |
| 271 | 271 | ||
| 272 | case LIRC_SET_WIDEBAND_RECEIVER: | 272 | case LIRC_SET_WIDEBAND_RECEIVER: |
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c index 192b1c7740df..145407dee3db 100644 --- a/drivers/media/tuners/fc0011.c +++ b/drivers/media/tuners/fc0011.c | |||
| @@ -342,6 +342,7 @@ static int fc0011_set_params(struct dvb_frontend *fe) | |||
| 342 | switch (vco_sel) { | 342 | switch (vco_sel) { |
| 343 | default: | 343 | default: |
| 344 | WARN_ON(1); | 344 | WARN_ON(1); |
| 345 | return -EINVAL; | ||
| 345 | case 0: | 346 | case 0: |
| 346 | if (vco_cal < 8) { | 347 | if (vco_cal < 8) { |
| 347 | regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2); | 348 | regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2); |
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c index 353744fee053..dd59c2c0e4a5 100644 --- a/drivers/media/tuners/mxl5005s.c +++ b/drivers/media/tuners/mxl5005s.c | |||
| @@ -2737,8 +2737,6 @@ static u16 MXL_TuneRF(struct dvb_frontend *fe, u32 RF_Freq) | |||
| 2737 | status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x0); | 2737 | status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x0); |
| 2738 | status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x7); | 2738 | status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x7); |
| 2739 | divider_val = 2 ; | 2739 | divider_val = 2 ; |
| 2740 | Fmax = FmaxBin ; | ||
| 2741 | Fmin = FminBin ; | ||
| 2742 | } | 2740 | } |
| 2743 | 2741 | ||
| 2744 | /* TG_DIV_VAL */ | 2742 | /* TG_DIV_VAL */ |
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c index 9ec919c68482..9d82ec0a4b64 100644 --- a/drivers/media/usb/au0828/au0828-input.c +++ b/drivers/media/usb/au0828/au0828-input.c | |||
| @@ -351,7 +351,7 @@ int au0828_rc_register(struct au0828_dev *dev) | |||
| 351 | if (err) | 351 | if (err) |
| 352 | goto error; | 352 | goto error; |
| 353 | 353 | ||
| 354 | pr_info("Remote controller %s initalized\n", ir->name); | 354 | pr_info("Remote controller %s initialized\n", ir->name); |
| 355 | 355 | ||
| 356 | return 0; | 356 | return 0; |
| 357 | 357 | ||
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index 594360a63c18..a91fdad8f8d4 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c | |||
| @@ -207,15 +207,13 @@ static int lme2510_stream_restart(struct dvb_usb_device *d) | |||
| 207 | struct lme2510_state *st = d->priv; | 207 | struct lme2510_state *st = d->priv; |
| 208 | u8 all_pids[] = LME_ALL_PIDS; | 208 | u8 all_pids[] = LME_ALL_PIDS; |
| 209 | u8 stream_on[] = LME_ST_ON_W; | 209 | u8 stream_on[] = LME_ST_ON_W; |
| 210 | int ret; | ||
| 211 | u8 rbuff[1]; | 210 | u8 rbuff[1]; |
| 212 | if (st->pid_off) | 211 | if (st->pid_off) |
| 213 | ret = lme2510_usb_talk(d, all_pids, sizeof(all_pids), | 212 | lme2510_usb_talk(d, all_pids, sizeof(all_pids), |
| 214 | rbuff, sizeof(rbuff)); | 213 | rbuff, sizeof(rbuff)); |
| 215 | /*Restart Stream Command*/ | 214 | /*Restart Stream Command*/ |
| 216 | ret = lme2510_usb_talk(d, stream_on, sizeof(stream_on), | 215 | return lme2510_usb_talk(d, stream_on, sizeof(stream_on), |
| 217 | rbuff, sizeof(rbuff)); | 216 | rbuff, sizeof(rbuff)); |
| 218 | return ret; | ||
| 219 | } | 217 | } |
| 220 | 218 | ||
| 221 | static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out) | 219 | static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out) |
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index 08acdd32e412..bea1b4764a66 100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c | |||
| @@ -215,13 +215,14 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, | |||
| 215 | USB_CTRL_GET_TIMEOUT); | 215 | USB_CTRL_GET_TIMEOUT); |
| 216 | if (result < 0) { | 216 | if (result < 0) { |
| 217 | deb_info("i2c read error (status = %d)\n", result); | 217 | deb_info("i2c read error (status = %d)\n", result); |
| 218 | break; | 218 | goto unlock; |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | if (msg[i].len > sizeof(st->buf)) { | 221 | if (msg[i].len > sizeof(st->buf)) { |
| 222 | deb_info("buffer too small to fit %d bytes\n", | 222 | deb_info("buffer too small to fit %d bytes\n", |
| 223 | msg[i].len); | 223 | msg[i].len); |
| 224 | return -EIO; | 224 | result = -EIO; |
| 225 | goto unlock; | ||
| 225 | } | 226 | } |
| 226 | 227 | ||
| 227 | memcpy(msg[i].buf, st->buf, msg[i].len); | 228 | memcpy(msg[i].buf, st->buf, msg[i].len); |
| @@ -233,8 +234,8 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, | |||
| 233 | /* Write request */ | 234 | /* Write request */ |
| 234 | if (mutex_lock_interruptible(&d->usb_mutex) < 0) { | 235 | if (mutex_lock_interruptible(&d->usb_mutex) < 0) { |
| 235 | err("could not acquire lock"); | 236 | err("could not acquire lock"); |
| 236 | mutex_unlock(&d->i2c_mutex); | 237 | result = -EINTR; |
| 237 | return -EINTR; | 238 | goto unlock; |
| 238 | } | 239 | } |
| 239 | st->buf[0] = REQUEST_NEW_I2C_WRITE; | 240 | st->buf[0] = REQUEST_NEW_I2C_WRITE; |
| 240 | st->buf[1] = msg[i].addr << 1; | 241 | st->buf[1] = msg[i].addr << 1; |
| @@ -247,7 +248,9 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, | |||
| 247 | if (msg[i].len > sizeof(st->buf) - 4) { | 248 | if (msg[i].len > sizeof(st->buf) - 4) { |
| 248 | deb_info("i2c message to big: %d\n", | 249 | deb_info("i2c message to big: %d\n", |
| 249 | msg[i].len); | 250 | msg[i].len); |
| 250 | return -EIO; | 251 | mutex_unlock(&d->usb_mutex); |
| 252 | result = -EIO; | ||
| 253 | goto unlock; | ||
| 251 | } | 254 | } |
| 252 | 255 | ||
| 253 | /* The Actual i2c payload */ | 256 | /* The Actual i2c payload */ |
| @@ -269,8 +272,11 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg, | |||
| 269 | } | 272 | } |
| 270 | } | 273 | } |
| 271 | } | 274 | } |
| 275 | result = i; | ||
| 276 | |||
| 277 | unlock: | ||
| 272 | mutex_unlock(&d->i2c_mutex); | 278 | mutex_unlock(&d->i2c_mutex); |
| 273 | return i; | 279 | return result; |
| 274 | } | 280 | } |
| 275 | 281 | ||
| 276 | /* | 282 | /* |
| @@ -281,7 +287,7 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap, | |||
| 281 | { | 287 | { |
| 282 | struct dvb_usb_device *d = i2c_get_adapdata(adap); | 288 | struct dvb_usb_device *d = i2c_get_adapdata(adap); |
| 283 | struct dib0700_state *st = d->priv; | 289 | struct dib0700_state *st = d->priv; |
| 284 | int i,len; | 290 | int i, len, result; |
| 285 | 291 | ||
| 286 | if (mutex_lock_interruptible(&d->i2c_mutex) < 0) | 292 | if (mutex_lock_interruptible(&d->i2c_mutex) < 0) |
| 287 | return -EINTR; | 293 | return -EINTR; |
| @@ -298,7 +304,8 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap, | |||
| 298 | if (msg[i].len > sizeof(st->buf) - 2) { | 304 | if (msg[i].len > sizeof(st->buf) - 2) { |
| 299 | deb_info("i2c xfer to big: %d\n", | 305 | deb_info("i2c xfer to big: %d\n", |
| 300 | msg[i].len); | 306 | msg[i].len); |
| 301 | return -EIO; | 307 | result = -EIO; |
| 308 | goto unlock; | ||
| 302 | } | 309 | } |
| 303 | memcpy(&st->buf[2], msg[i].buf, msg[i].len); | 310 | memcpy(&st->buf[2], msg[i].buf, msg[i].len); |
| 304 | 311 | ||
| @@ -313,13 +320,15 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap, | |||
| 313 | if (len <= 0) { | 320 | if (len <= 0) { |
| 314 | deb_info("I2C read failed on address 0x%02x\n", | 321 | deb_info("I2C read failed on address 0x%02x\n", |
| 315 | msg[i].addr); | 322 | msg[i].addr); |
| 316 | break; | 323 | result = -EIO; |
| 324 | goto unlock; | ||
| 317 | } | 325 | } |
| 318 | 326 | ||
| 319 | if (msg[i + 1].len > sizeof(st->buf)) { | 327 | if (msg[i + 1].len > sizeof(st->buf)) { |
| 320 | deb_info("i2c xfer buffer to small for %d\n", | 328 | deb_info("i2c xfer buffer to small for %d\n", |
| 321 | msg[i].len); | 329 | msg[i].len); |
| 322 | return -EIO; | 330 | result = -EIO; |
| 331 | goto unlock; | ||
| 323 | } | 332 | } |
| 324 | memcpy(msg[i + 1].buf, st->buf, msg[i + 1].len); | 333 | memcpy(msg[i + 1].buf, st->buf, msg[i + 1].len); |
| 325 | 334 | ||
| @@ -328,14 +337,17 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap, | |||
| 328 | i++; | 337 | i++; |
| 329 | } else { | 338 | } else { |
| 330 | st->buf[0] = REQUEST_I2C_WRITE; | 339 | st->buf[0] = REQUEST_I2C_WRITE; |
| 331 | if (dib0700_ctrl_wr(d, st->buf, msg[i].len + 2) < 0) | 340 | result = dib0700_ctrl_wr(d, st->buf, msg[i].len + 2); |
| 332 | break; | 341 | if (result < 0) |
| 342 | goto unlock; | ||
| 333 | } | 343 | } |
| 334 | } | 344 | } |
| 345 | result = i; | ||
| 346 | unlock: | ||
| 335 | mutex_unlock(&d->usb_mutex); | 347 | mutex_unlock(&d->usb_mutex); |
| 336 | mutex_unlock(&d->i2c_mutex); | 348 | mutex_unlock(&d->i2c_mutex); |
| 337 | 349 | ||
| 338 | return i; | 350 | return result; |
| 339 | } | 351 | } |
| 340 | 352 | ||
| 341 | static int dib0700_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, | 353 | static int dib0700_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, |
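Both dib0700 transfer routines used to return straight out of the message loop on several error paths, leaving d->i2c_mutex (and in one spot d->usb_mutex) held; the rework funnels every exit through a single unlock label and returns either a negative error or the number of messages handled, which is what the I2C core expects from a master_xfer implementation. The locking skeleton, with hypothetical helper names:

#include <linux/i2c.h>
#include <linux/mutex.h>

struct foo_dev {				/* stand-in for dvb_usb_device */
	struct mutex i2c_mutex;
};

static int foo_do_one_msg(struct foo_dev *d, struct i2c_msg *msg)
{
	return 0;				/* stub for the USB round trip */
}

static int foo_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct foo_dev *d = i2c_get_adapdata(adap);
	int i, result;

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
		return -EINTR;

	for (i = 0; i < num; i++) {
		result = foo_do_one_msg(d, &msg[i]);
		if (result < 0)
			goto unlock;		/* never return with the lock held */
	}
	result = num;				/* success: messages transferred */

unlock:
	mutex_unlock(&d->i2c_mutex);
	return result;
}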
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index 146341aeb782..4c57fd7929cb 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c | |||
| @@ -1193,6 +1193,22 @@ struct em28xx_board em28xx_boards[] = { | |||
| 1193 | .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | | 1193 | .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | |
| 1194 | EM28XX_I2C_FREQ_400_KHZ, | 1194 | EM28XX_I2C_FREQ_400_KHZ, |
| 1195 | }, | 1195 | }, |
| 1196 | [EM2884_BOARD_TERRATEC_H6] = { | ||
| 1197 | .name = "Terratec Cinergy H6 rev. 2", | ||
| 1198 | .has_dvb = 1, | ||
| 1199 | .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS, | ||
| 1200 | #if 0 | ||
| 1201 | .tuner_type = TUNER_PHILIPS_TDA8290, | ||
| 1202 | .tuner_addr = 0x41, | ||
| 1203 | .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */ | ||
| 1204 | .tuner_gpio = terratec_h5_gpio, | ||
| 1205 | #else | ||
| 1206 | .tuner_type = TUNER_ABSENT, | ||
| 1207 | #endif | ||
| 1208 | .def_i2c_bus = 1, | ||
| 1209 | .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE | | ||
| 1210 | EM28XX_I2C_FREQ_400_KHZ, | ||
| 1211 | }, | ||
| 1196 | [EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C] = { | 1212 | [EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C] = { |
| 1197 | .name = "Hauppauge WinTV HVR 930C", | 1213 | .name = "Hauppauge WinTV HVR 930C", |
| 1198 | .has_dvb = 1, | 1214 | .has_dvb = 1, |
| @@ -2496,6 +2512,8 @@ struct usb_device_id em28xx_id_table[] = { | |||
| 2496 | .driver_info = EM2884_BOARD_TERRATEC_H5 }, | 2512 | .driver_info = EM2884_BOARD_TERRATEC_H5 }, |
| 2497 | { USB_DEVICE(0x0ccd, 0x10b6), /* H5 Rev. 3 */ | 2513 | { USB_DEVICE(0x0ccd, 0x10b6), /* H5 Rev. 3 */ |
| 2498 | .driver_info = EM2884_BOARD_TERRATEC_H5 }, | 2514 | .driver_info = EM2884_BOARD_TERRATEC_H5 }, |
| 2515 | { USB_DEVICE(0x0ccd, 0x10b2), /* H6 */ | ||
| 2516 | .driver_info = EM2884_BOARD_TERRATEC_H6 }, | ||
| 2499 | { USB_DEVICE(0x0ccd, 0x0084), | 2517 | { USB_DEVICE(0x0ccd, 0x0084), |
| 2500 | .driver_info = EM2860_BOARD_TERRATEC_AV350 }, | 2518 | .driver_info = EM2860_BOARD_TERRATEC_AV350 }, |
| 2501 | { USB_DEVICE(0x0ccd, 0x0096), | 2519 | { USB_DEVICE(0x0ccd, 0x0096), |
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c index 82edd37f0d73..4a7db623fe29 100644 --- a/drivers/media/usb/em28xx/em28xx-dvb.c +++ b/drivers/media/usb/em28xx/em28xx-dvb.c | |||
| @@ -1522,6 +1522,7 @@ static int em28xx_dvb_init(struct em28xx *dev) | |||
| 1522 | break; | 1522 | break; |
| 1523 | case EM2884_BOARD_ELGATO_EYETV_HYBRID_2008: | 1523 | case EM2884_BOARD_ELGATO_EYETV_HYBRID_2008: |
| 1524 | case EM2884_BOARD_CINERGY_HTC_STICK: | 1524 | case EM2884_BOARD_CINERGY_HTC_STICK: |
| 1525 | case EM2884_BOARD_TERRATEC_H6: | ||
| 1525 | terratec_htc_stick_init(dev); | 1526 | terratec_htc_stick_init(dev); |
| 1526 | 1527 | ||
| 1527 | /* attach demodulator */ | 1528 | /* attach demodulator */ |
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c index 8c472d5adb50..60b195c157b8 100644 --- a/drivers/media/usb/em28xx/em28xx-i2c.c +++ b/drivers/media/usb/em28xx/em28xx-i2c.c | |||
| @@ -982,8 +982,6 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus, | |||
| 982 | dev_err(&dev->intf->dev, | 982 | dev_err(&dev->intf->dev, |
| 983 | "%s: em28xx_i2_eeprom failed! retval [%d]\n", | 983 | "%s: em28xx_i2_eeprom failed! retval [%d]\n", |
| 984 | __func__, retval); | 984 | __func__, retval); |
| 985 | |||
| 986 | return retval; | ||
| 987 | } | 985 | } |
| 988 | } | 986 | } |
| 989 | 987 | ||
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c index eba75736e654..ca9673917ad5 100644 --- a/drivers/media/usb/em28xx/em28xx-input.c +++ b/drivers/media/usb/em28xx/em28xx-input.c | |||
| @@ -821,7 +821,7 @@ static int em28xx_ir_init(struct em28xx *dev) | |||
| 821 | if (err) | 821 | if (err) |
| 822 | goto error; | 822 | goto error; |
| 823 | 823 | ||
| 824 | dev_info(&dev->intf->dev, "Input extension successfully initalized\n"); | 824 | dev_info(&dev->intf->dev, "Input extension successfully initialized\n"); |
| 825 | 825 | ||
| 826 | return 0; | 826 | return 0; |
| 827 | 827 | ||
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h index e8d97d5ec161..88084f24f033 100644 --- a/drivers/media/usb/em28xx/em28xx.h +++ b/drivers/media/usb/em28xx/em28xx.h | |||
| @@ -148,6 +148,7 @@ | |||
| 148 | #define EM28178_BOARD_PLEX_PX_BCUD 98 | 148 | #define EM28178_BOARD_PLEX_PX_BCUD 98 |
| 149 | #define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB 99 | 149 | #define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB 99 |
| 150 | #define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 100 | 150 | #define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 100 |
| 151 | #define EM2884_BOARD_TERRATEC_H6 101 | ||
| 151 | 152 | ||
| 152 | /* Limits minimum and default number of buffers */ | 153 | /* Limits minimum and default number of buffers */ |
| 153 | #define EM28XX_MIN_BUF 4 | 154 | #define EM28XX_MIN_BUF 4 |
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c index c843070f24c1..f9ed9c950247 100644 --- a/drivers/media/usb/pulse8-cec/pulse8-cec.c +++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c | |||
| @@ -51,7 +51,7 @@ MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver"); | |||
| 51 | MODULE_LICENSE("GPL"); | 51 | MODULE_LICENSE("GPL"); |
| 52 | 52 | ||
| 53 | static int debug; | 53 | static int debug; |
| 54 | static int persistent_config = 1; | 54 | static int persistent_config; |
| 55 | module_param(debug, int, 0644); | 55 | module_param(debug, int, 0644); |
| 56 | module_param(persistent_config, int, 0644); | 56 | module_param(persistent_config, int, 0644); |
| 57 | MODULE_PARM_DESC(debug, "debug level (0-1)"); | 57 | MODULE_PARM_DESC(debug, "debug level (0-1)"); |
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c index f203699e9c1b..65692576690f 100644 --- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c +++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c | |||
| @@ -116,21 +116,19 @@ static void rain_irq_work_handler(struct work_struct *work) | |||
| 116 | 116 | ||
| 117 | while (true) { | 117 | while (true) { |
| 118 | unsigned long flags; | 118 | unsigned long flags; |
| 119 | bool exit_loop = false; | ||
| 120 | char data; | 119 | char data; |
| 121 | 120 | ||
| 122 | spin_lock_irqsave(&rain->buf_lock, flags); | 121 | spin_lock_irqsave(&rain->buf_lock, flags); |
| 123 | if (rain->buf_len) { | 122 | if (!rain->buf_len) { |
| 124 | data = rain->buf[rain->buf_rd_idx]; | 123 | spin_unlock_irqrestore(&rain->buf_lock, flags); |
| 125 | rain->buf_len--; | 124 | break; |
| 126 | rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff; | ||
| 127 | } else { | ||
| 128 | exit_loop = true; | ||
| 129 | } | 125 | } |
| 130 | spin_unlock_irqrestore(&rain->buf_lock, flags); | ||
| 131 | 126 | ||
| 132 | if (exit_loop) | 127 | data = rain->buf[rain->buf_rd_idx]; |
| 133 | break; | 128 | rain->buf_len--; |
| 129 | rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff; | ||
| 130 | |||
| 131 | spin_unlock_irqrestore(&rain->buf_lock, flags); | ||
| 134 | 132 | ||
| 135 | if (!rain->cmd_started && data != '?') | 133 | if (!rain->cmd_started && data != '?') |
| 136 | continue; | 134 | continue; |
diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c index 985af9933c7e..c1d4505f84ea 100644 --- a/drivers/media/usb/stkwebcam/stk-sensor.c +++ b/drivers/media/usb/stkwebcam/stk-sensor.c | |||
| @@ -41,6 +41,8 @@ | |||
| 41 | 41 | ||
| 42 | /* It seems the i2c bus is controlled with these registers */ | 42 | /* It seems the i2c bus is controlled with these registers */ |
| 43 | 43 | ||
| 44 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 45 | |||
| 44 | #include "stk-webcam.h" | 46 | #include "stk-webcam.h" |
| 45 | 47 | ||
| 46 | #define STK_IIC_BASE (0x0200) | 48 | #define STK_IIC_BASE (0x0200) |
| @@ -239,8 +241,8 @@ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val) | |||
| 239 | } while (tmpval == 0 && i < MAX_RETRIES); | 241 | } while (tmpval == 0 && i < MAX_RETRIES); |
| 240 | if (tmpval != STK_IIC_STAT_TX_OK) { | 242 | if (tmpval != STK_IIC_STAT_TX_OK) { |
| 241 | if (tmpval) | 243 | if (tmpval) |
| 242 | STK_ERROR("stk_sensor_outb failed, status=0x%02x\n", | 244 | pr_err("stk_sensor_outb failed, status=0x%02x\n", |
| 243 | tmpval); | 245 | tmpval); |
| 244 | return 1; | 246 | return 1; |
| 245 | } else | 247 | } else |
| 246 | return 0; | 248 | return 0; |
| @@ -262,8 +264,8 @@ static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val) | |||
| 262 | } while (tmpval == 0 && i < MAX_RETRIES); | 264 | } while (tmpval == 0 && i < MAX_RETRIES); |
| 263 | if (tmpval != STK_IIC_STAT_RX_OK) { | 265 | if (tmpval != STK_IIC_STAT_RX_OK) { |
| 264 | if (tmpval) | 266 | if (tmpval) |
| 265 | STK_ERROR("stk_sensor_inb failed, status=0x%02x\n", | 267 | pr_err("stk_sensor_inb failed, status=0x%02x\n", |
| 266 | tmpval); | 268 | tmpval); |
| 267 | return 1; | 269 | return 1; |
| 268 | } | 270 | } |
| 269 | 271 | ||
| @@ -366,29 +368,29 @@ int stk_sensor_init(struct stk_camera *dev) | |||
| 366 | if (stk_camera_write_reg(dev, STK_IIC_ENABLE, STK_IIC_ENABLE_YES) | 368 | if (stk_camera_write_reg(dev, STK_IIC_ENABLE, STK_IIC_ENABLE_YES) |
| 367 | || stk_camera_write_reg(dev, STK_IIC_ADDR, SENSOR_ADDRESS) | 369 | || stk_camera_write_reg(dev, STK_IIC_ADDR, SENSOR_ADDRESS) |
| 368 | || stk_sensor_outb(dev, REG_COM7, COM7_RESET)) { | 370 | || stk_sensor_outb(dev, REG_COM7, COM7_RESET)) { |
| 369 | STK_ERROR("Sensor resetting failed\n"); | 371 | pr_err("Sensor resetting failed\n"); |
| 370 | return -ENODEV; | 372 | return -ENODEV; |
| 371 | } | 373 | } |
| 372 | msleep(10); | 374 | msleep(10); |
| 373 | /* Read the manufacturer ID: ov = 0x7FA2 */ | 375 | /* Read the manufacturer ID: ov = 0x7FA2 */ |
| 374 | if (stk_sensor_inb(dev, REG_MIDH, &idh) | 376 | if (stk_sensor_inb(dev, REG_MIDH, &idh) |
| 375 | || stk_sensor_inb(dev, REG_MIDL, &idl)) { | 377 | || stk_sensor_inb(dev, REG_MIDL, &idl)) { |
| 376 | STK_ERROR("Strange error reading sensor ID\n"); | 378 | pr_err("Strange error reading sensor ID\n"); |
| 377 | return -ENODEV; | 379 | return -ENODEV; |
| 378 | } | 380 | } |
| 379 | if (idh != 0x7f || idl != 0xa2) { | 381 | if (idh != 0x7f || idl != 0xa2) { |
| 380 | STK_ERROR("Huh? you don't have a sensor from ovt\n"); | 382 | pr_err("Huh? you don't have a sensor from ovt\n"); |
| 381 | return -ENODEV; | 383 | return -ENODEV; |
| 382 | } | 384 | } |
| 383 | if (stk_sensor_inb(dev, REG_PID, &idh) | 385 | if (stk_sensor_inb(dev, REG_PID, &idh) |
| 384 | || stk_sensor_inb(dev, REG_VER, &idl)) { | 386 | || stk_sensor_inb(dev, REG_VER, &idl)) { |
| 385 | STK_ERROR("Could not read sensor model\n"); | 387 | pr_err("Could not read sensor model\n"); |
| 386 | return -ENODEV; | 388 | return -ENODEV; |
| 387 | } | 389 | } |
| 388 | stk_sensor_write_regvals(dev, ov_initvals); | 390 | stk_sensor_write_regvals(dev, ov_initvals); |
| 389 | msleep(10); | 391 | msleep(10); |
| 390 | STK_INFO("OmniVision sensor detected, id %02X%02X at address %x\n", | 392 | pr_info("OmniVision sensor detected, id %02X%02X at address %x\n", |
| 391 | idh, idl, SENSOR_ADDRESS); | 393 | idh, idl, SENSOR_ADDRESS); |
| 392 | return 0; | 394 | return 0; |
| 393 | } | 395 | } |
| 394 | 396 | ||
| @@ -520,7 +522,8 @@ int stk_sensor_configure(struct stk_camera *dev) | |||
| 520 | case MODE_SXGA: com7 = COM7_FMT_SXGA; | 522 | case MODE_SXGA: com7 = COM7_FMT_SXGA; |
| 521 | dummylines = 0; | 523 | dummylines = 0; |
| 522 | break; | 524 | break; |
| 523 | default: STK_ERROR("Unsupported mode %d\n", dev->vsettings.mode); | 525 | default: |
| 526 | pr_err("Unsupported mode %d\n", dev->vsettings.mode); | ||
| 524 | return -EFAULT; | 527 | return -EFAULT; |
| 525 | } | 528 | } |
| 526 | switch (dev->vsettings.palette) { | 529 | switch (dev->vsettings.palette) { |
| @@ -544,7 +547,8 @@ int stk_sensor_configure(struct stk_camera *dev) | |||
| 544 | com7 |= COM7_PBAYER; | 547 | com7 |= COM7_PBAYER; |
| 545 | rv = ov_fmt_bayer; | 548 | rv = ov_fmt_bayer; |
| 546 | break; | 549 | break; |
| 547 | default: STK_ERROR("Unsupported colorspace\n"); | 550 | default: |
| 551 | pr_err("Unsupported colorspace\n"); | ||
| 548 | return -EFAULT; | 552 | return -EFAULT; |
| 549 | } | 553 | } |
| 550 | /*FIXME sometimes the sensor go to a bad state | 554 | /*FIXME sometimes the sensor go to a bad state |
| @@ -564,7 +568,7 @@ int stk_sensor_configure(struct stk_camera *dev) | |||
| 564 | switch (dev->vsettings.mode) { | 568 | switch (dev->vsettings.mode) { |
| 565 | case MODE_VGA: | 569 | case MODE_VGA: |
| 566 | if (stk_sensor_set_hw(dev, 302, 1582, 6, 486)) | 570 | if (stk_sensor_set_hw(dev, 302, 1582, 6, 486)) |
| 567 | STK_ERROR("stk_sensor_set_hw failed (VGA)\n"); | 571 | pr_err("stk_sensor_set_hw failed (VGA)\n"); |
| 568 | break; | 572 | break; |
| 569 | case MODE_SXGA: | 573 | case MODE_SXGA: |
| 570 | case MODE_CIF: | 574 | case MODE_CIF: |
| @@ -572,7 +576,7 @@ int stk_sensor_configure(struct stk_camera *dev) | |||
| 572 | case MODE_QCIF: | 576 | case MODE_QCIF: |
| 573 | /*FIXME These settings seem ignored by the sensor | 577 | /*FIXME These settings seem ignored by the sensor |
| 574 | if (stk_sensor_set_hw(dev, 220, 1500, 10, 1034)) | 578 | if (stk_sensor_set_hw(dev, 220, 1500, 10, 1034)) |
| 575 | STK_ERROR("stk_sensor_set_hw failed (SXGA)\n"); | 579 | pr_err("stk_sensor_set_hw failed (SXGA)\n"); |
| 576 | */ | 580 | */ |
| 577 | break; | 581 | break; |
| 578 | } | 582 | } |
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c index 6e7fc36b658f..90d4a08cda31 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.c +++ b/drivers/media/usb/stkwebcam/stk-webcam.c | |||
| @@ -18,6 +18,8 @@ | |||
| 18 | * GNU General Public License for more details. | 18 | * GNU General Public License for more details. |
| 19 | */ | 19 | */ |
| 20 | 20 | ||
| 21 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 22 | |||
| 21 | #include <linux/module.h> | 23 | #include <linux/module.h> |
| 22 | #include <linux/init.h> | 24 | #include <linux/init.h> |
| 23 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
| @@ -175,15 +177,15 @@ static int stk_start_stream(struct stk_camera *dev) | |||
| 175 | if (!is_present(dev)) | 177 | if (!is_present(dev)) |
| 176 | return -ENODEV; | 178 | return -ENODEV; |
| 177 | if (!is_memallocd(dev) || !is_initialised(dev)) { | 179 | if (!is_memallocd(dev) || !is_initialised(dev)) { |
| 178 | STK_ERROR("FIXME: Buffers are not allocated\n"); | 180 | pr_err("FIXME: Buffers are not allocated\n"); |
| 179 | return -EFAULT; | 181 | return -EFAULT; |
| 180 | } | 182 | } |
| 181 | ret = usb_set_interface(dev->udev, 0, 5); | 183 | ret = usb_set_interface(dev->udev, 0, 5); |
| 182 | 184 | ||
| 183 | if (ret < 0) | 185 | if (ret < 0) |
| 184 | STK_ERROR("usb_set_interface failed !\n"); | 186 | pr_err("usb_set_interface failed !\n"); |
| 185 | if (stk_sensor_wakeup(dev)) | 187 | if (stk_sensor_wakeup(dev)) |
| 186 | STK_ERROR("error awaking the sensor\n"); | 188 | pr_err("error awaking the sensor\n"); |
| 187 | 189 | ||
| 188 | stk_camera_read_reg(dev, 0x0116, &value_116); | 190 | stk_camera_read_reg(dev, 0x0116, &value_116); |
| 189 | stk_camera_read_reg(dev, 0x0117, &value_117); | 191 | stk_camera_read_reg(dev, 0x0117, &value_117); |
| @@ -224,9 +226,9 @@ static int stk_stop_stream(struct stk_camera *dev) | |||
| 224 | unset_streaming(dev); | 226 | unset_streaming(dev); |
| 225 | 227 | ||
| 226 | if (usb_set_interface(dev->udev, 0, 0)) | 228 | if (usb_set_interface(dev->udev, 0, 0)) |
| 227 | STK_ERROR("usb_set_interface failed !\n"); | 229 | pr_err("usb_set_interface failed !\n"); |
| 228 | if (stk_sensor_sleep(dev)) | 230 | if (stk_sensor_sleep(dev)) |
| 229 | STK_ERROR("error suspending the sensor\n"); | 231 | pr_err("error suspending the sensor\n"); |
| 230 | } | 232 | } |
| 231 | return 0; | 233 | return 0; |
| 232 | } | 234 | } |
| @@ -313,7 +315,7 @@ static void stk_isoc_handler(struct urb *urb) | |||
| 313 | dev = (struct stk_camera *) urb->context; | 315 | dev = (struct stk_camera *) urb->context; |
| 314 | 316 | ||
| 315 | if (dev == NULL) { | 317 | if (dev == NULL) { |
| 316 | STK_ERROR("isoc_handler called with NULL device !\n"); | 318 | pr_err("isoc_handler called with NULL device !\n"); |
| 317 | return; | 319 | return; |
| 318 | } | 320 | } |
| 319 | 321 | ||
| @@ -326,14 +328,13 @@ static void stk_isoc_handler(struct urb *urb) | |||
| 326 | spin_lock_irqsave(&dev->spinlock, flags); | 328 | spin_lock_irqsave(&dev->spinlock, flags); |
| 327 | 329 | ||
| 328 | if (urb->status != -EINPROGRESS && urb->status != 0) { | 330 | if (urb->status != -EINPROGRESS && urb->status != 0) { |
| 329 | STK_ERROR("isoc_handler: urb->status == %d\n", urb->status); | 331 | pr_err("isoc_handler: urb->status == %d\n", urb->status); |
| 330 | goto resubmit; | 332 | goto resubmit; |
| 331 | } | 333 | } |
| 332 | 334 | ||
| 333 | if (list_empty(&dev->sio_avail)) { | 335 | if (list_empty(&dev->sio_avail)) { |
| 334 | /*FIXME Stop streaming after a while */ | 336 | /*FIXME Stop streaming after a while */ |
| 335 | (void) (printk_ratelimit() && | 337 | pr_err_ratelimited("isoc_handler without available buffer!\n"); |
| 336 | STK_ERROR("isoc_handler without available buffer!\n")); | ||
| 337 | goto resubmit; | 338 | goto resubmit; |
| 338 | } | 339 | } |
| 339 | fb = list_first_entry(&dev->sio_avail, | 340 | fb = list_first_entry(&dev->sio_avail, |
| @@ -343,8 +344,8 @@ static void stk_isoc_handler(struct urb *urb) | |||
| 343 | for (i = 0; i < urb->number_of_packets; i++) { | 344 | for (i = 0; i < urb->number_of_packets; i++) { |
| 344 | if (urb->iso_frame_desc[i].status != 0) { | 345 | if (urb->iso_frame_desc[i].status != 0) { |
| 345 | if (urb->iso_frame_desc[i].status != -EXDEV) | 346 | if (urb->iso_frame_desc[i].status != -EXDEV) |
| 346 | STK_ERROR("Frame %d has error %d\n", i, | 347 | pr_err("Frame %d has error %d\n", |
| 347 | urb->iso_frame_desc[i].status); | 348 | i, urb->iso_frame_desc[i].status); |
| 348 | continue; | 349 | continue; |
| 349 | } | 350 | } |
| 350 | framelen = urb->iso_frame_desc[i].actual_length; | 351 | framelen = urb->iso_frame_desc[i].actual_length; |
| @@ -368,9 +369,8 @@ static void stk_isoc_handler(struct urb *urb) | |||
| 368 | /* This marks a new frame */ | 369 | /* This marks a new frame */ |
| 369 | if (fb->v4lbuf.bytesused != 0 | 370 | if (fb->v4lbuf.bytesused != 0 |
| 370 | && fb->v4lbuf.bytesused != dev->frame_size) { | 371 | && fb->v4lbuf.bytesused != dev->frame_size) { |
| 371 | (void) (printk_ratelimit() && | 372 | pr_err_ratelimited("frame %d, bytesused=%d, skipping\n", |
| 372 | STK_ERROR("frame %d, bytesused=%d, skipping\n", | 373 | i, fb->v4lbuf.bytesused); |
| 373 | i, fb->v4lbuf.bytesused)); | ||
| 374 | fb->v4lbuf.bytesused = 0; | 374 | fb->v4lbuf.bytesused = 0; |
| 375 | fill = fb->buffer; | 375 | fill = fb->buffer; |
| 376 | } else if (fb->v4lbuf.bytesused == dev->frame_size) { | 376 | } else if (fb->v4lbuf.bytesused == dev->frame_size) { |
| @@ -395,8 +395,7 @@ static void stk_isoc_handler(struct urb *urb) | |||
| 395 | 395 | ||
| 396 | /* Our buffer is full !!! */ | 396 | /* Our buffer is full !!! */ |
| 397 | if (framelen + fb->v4lbuf.bytesused > dev->frame_size) { | 397 | if (framelen + fb->v4lbuf.bytesused > dev->frame_size) { |
| 398 | (void) (printk_ratelimit() && | 398 | pr_err_ratelimited("Frame buffer overflow, lost sync\n"); |
| 399 | STK_ERROR("Frame buffer overflow, lost sync\n")); | ||
| 400 | /*FIXME Do something here? */ | 399 | /*FIXME Do something here? */ |
| 401 | continue; | 400 | continue; |
| 402 | } | 401 | } |
| @@ -414,8 +413,8 @@ resubmit: | |||
| 414 | urb->dev = dev->udev; | 413 | urb->dev = dev->udev; |
| 415 | ret = usb_submit_urb(urb, GFP_ATOMIC); | 414 | ret = usb_submit_urb(urb, GFP_ATOMIC); |
| 416 | if (ret != 0) { | 415 | if (ret != 0) { |
| 417 | STK_ERROR("Error (%d) re-submitting urb in stk_isoc_handler.\n", | 416 | pr_err("Error (%d) re-submitting urb in stk_isoc_handler\n", |
| 418 | ret); | 417 | ret); |
| 419 | } | 418 | } |
| 420 | } | 419 | } |
| 421 | 420 | ||
| @@ -433,32 +432,31 @@ static int stk_prepare_iso(struct stk_camera *dev) | |||
| 433 | udev = dev->udev; | 432 | udev = dev->udev; |
| 434 | 433 | ||
| 435 | if (dev->isobufs) | 434 | if (dev->isobufs) |
| 436 | STK_ERROR("isobufs already allocated. Bad\n"); | 435 | pr_err("isobufs already allocated. Bad\n"); |
| 437 | else | 436 | else |
| 438 | dev->isobufs = kcalloc(MAX_ISO_BUFS, sizeof(*dev->isobufs), | 437 | dev->isobufs = kcalloc(MAX_ISO_BUFS, sizeof(*dev->isobufs), |
| 439 | GFP_KERNEL); | 438 | GFP_KERNEL); |
| 440 | if (dev->isobufs == NULL) { | 439 | if (dev->isobufs == NULL) { |
| 441 | STK_ERROR("Unable to allocate iso buffers\n"); | 440 | pr_err("Unable to allocate iso buffers\n"); |
| 442 | return -ENOMEM; | 441 | return -ENOMEM; |
| 443 | } | 442 | } |
| 444 | for (i = 0; i < MAX_ISO_BUFS; i++) { | 443 | for (i = 0; i < MAX_ISO_BUFS; i++) { |
| 445 | if (dev->isobufs[i].data == NULL) { | 444 | if (dev->isobufs[i].data == NULL) { |
| 446 | kbuf = kzalloc(ISO_BUFFER_SIZE, GFP_KERNEL); | 445 | kbuf = kzalloc(ISO_BUFFER_SIZE, GFP_KERNEL); |
| 447 | if (kbuf == NULL) { | 446 | if (kbuf == NULL) { |
| 448 | STK_ERROR("Failed to allocate iso buffer %d\n", | 447 | pr_err("Failed to allocate iso buffer %d\n", i); |
| 449 | i); | ||
| 450 | goto isobufs_out; | 448 | goto isobufs_out; |
| 451 | } | 449 | } |
| 452 | dev->isobufs[i].data = kbuf; | 450 | dev->isobufs[i].data = kbuf; |
| 453 | } else | 451 | } else |
| 454 | STK_ERROR("isobuf data already allocated\n"); | 452 | pr_err("isobuf data already allocated\n"); |
| 455 | if (dev->isobufs[i].urb == NULL) { | 453 | if (dev->isobufs[i].urb == NULL) { |
| 456 | urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL); | 454 | urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL); |
| 457 | if (urb == NULL) | 455 | if (urb == NULL) |
| 458 | goto isobufs_out; | 456 | goto isobufs_out; |
| 459 | dev->isobufs[i].urb = urb; | 457 | dev->isobufs[i].urb = urb; |
| 460 | } else { | 458 | } else { |
| 461 | STK_ERROR("Killing URB\n"); | 459 | pr_err("Killing URB\n"); |
| 462 | usb_kill_urb(dev->isobufs[i].urb); | 460 | usb_kill_urb(dev->isobufs[i].urb); |
| 463 | urb = dev->isobufs[i].urb; | 461 | urb = dev->isobufs[i].urb; |
| 464 | } | 462 | } |
| @@ -567,7 +565,7 @@ static int stk_prepare_sio_buffers(struct stk_camera *dev, unsigned n_sbufs) | |||
| 567 | { | 565 | { |
| 568 | int i; | 566 | int i; |
| 569 | if (dev->sio_bufs != NULL) | 567 | if (dev->sio_bufs != NULL) |
| 570 | STK_ERROR("sio_bufs already allocated\n"); | 568 | pr_err("sio_bufs already allocated\n"); |
| 571 | else { | 569 | else { |
| 572 | dev->sio_bufs = kzalloc(n_sbufs * sizeof(struct stk_sio_buffer), | 570 | dev->sio_bufs = kzalloc(n_sbufs * sizeof(struct stk_sio_buffer), |
| 573 | GFP_KERNEL); | 571 | GFP_KERNEL); |
| @@ -690,7 +688,7 @@ static ssize_t stk_read(struct file *fp, char __user *buf, | |||
| 690 | spin_lock_irqsave(&dev->spinlock, flags); | 688 | spin_lock_irqsave(&dev->spinlock, flags); |
| 691 | if (list_empty(&dev->sio_full)) { | 689 | if (list_empty(&dev->sio_full)) { |
| 692 | spin_unlock_irqrestore(&dev->spinlock, flags); | 690 | spin_unlock_irqrestore(&dev->spinlock, flags); |
| 693 | STK_ERROR("BUG: No siobufs ready\n"); | 691 | pr_err("BUG: No siobufs ready\n"); |
| 694 | return 0; | 692 | return 0; |
| 695 | } | 693 | } |
| 696 | sbuf = list_first_entry(&dev->sio_full, struct stk_sio_buffer, list); | 694 | sbuf = list_first_entry(&dev->sio_full, struct stk_sio_buffer, list); |
| @@ -907,7 +905,7 @@ static int stk_vidioc_g_fmt_vid_cap(struct file *filp, | |||
| 907 | stk_sizes[i].m != dev->vsettings.mode; i++) | 905 | stk_sizes[i].m != dev->vsettings.mode; i++) |
| 908 | ; | 906 | ; |
| 909 | if (i == ARRAY_SIZE(stk_sizes)) { | 907 | if (i == ARRAY_SIZE(stk_sizes)) { |
| 910 | STK_ERROR("ERROR: mode invalid\n"); | 908 | pr_err("ERROR: mode invalid\n"); |
| 911 | return -EINVAL; | 909 | return -EINVAL; |
| 912 | } | 910 | } |
| 913 | pix_format->width = stk_sizes[i].w; | 911 | pix_format->width = stk_sizes[i].w; |
| @@ -985,7 +983,7 @@ static int stk_setup_format(struct stk_camera *dev) | |||
| 985 | stk_sizes[i].m != dev->vsettings.mode) | 983 | stk_sizes[i].m != dev->vsettings.mode) |
| 986 | i++; | 984 | i++; |
| 987 | if (i == ARRAY_SIZE(stk_sizes)) { | 985 | if (i == ARRAY_SIZE(stk_sizes)) { |
| 988 | STK_ERROR("Something is broken in %s\n", __func__); | 986 | pr_err("Something is broken in %s\n", __func__); |
| 989 | return -EFAULT; | 987 | return -EFAULT; |
| 990 | } | 988 | } |
| 991 | /* This registers controls some timings, not sure of what. */ | 989 | /* This registers controls some timings, not sure of what. */ |
| @@ -1241,7 +1239,7 @@ static void stk_v4l_dev_release(struct video_device *vd) | |||
| 1241 | struct stk_camera *dev = vdev_to_camera(vd); | 1239 | struct stk_camera *dev = vdev_to_camera(vd); |
| 1242 | 1240 | ||
| 1243 | if (dev->sio_bufs != NULL || dev->isobufs != NULL) | 1241 | if (dev->sio_bufs != NULL || dev->isobufs != NULL) |
| 1244 | STK_ERROR("We are leaking memory\n"); | 1242 | pr_err("We are leaking memory\n"); |
| 1245 | usb_put_intf(dev->interface); | 1243 | usb_put_intf(dev->interface); |
| 1246 | kfree(dev); | 1244 | kfree(dev); |
| 1247 | } | 1245 | } |
| @@ -1264,10 +1262,10 @@ static int stk_register_video_device(struct stk_camera *dev) | |||
| 1264 | video_set_drvdata(&dev->vdev, dev); | 1262 | video_set_drvdata(&dev->vdev, dev); |
| 1265 | err = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1); | 1263 | err = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1); |
| 1266 | if (err) | 1264 | if (err) |
| 1267 | STK_ERROR("v4l registration failed\n"); | 1265 | pr_err("v4l registration failed\n"); |
| 1268 | else | 1266 | else |
| 1269 | STK_INFO("Syntek USB2.0 Camera is now controlling device %s\n", | 1267 | pr_info("Syntek USB2.0 Camera is now controlling device %s\n", |
| 1270 | video_device_node_name(&dev->vdev)); | 1268 | video_device_node_name(&dev->vdev)); |
| 1271 | return err; | 1269 | return err; |
| 1272 | } | 1270 | } |
| 1273 | 1271 | ||
| @@ -1288,7 +1286,7 @@ static int stk_camera_probe(struct usb_interface *interface, | |||
| 1288 | 1286 | ||
| 1289 | dev = kzalloc(sizeof(struct stk_camera), GFP_KERNEL); | 1287 | dev = kzalloc(sizeof(struct stk_camera), GFP_KERNEL); |
| 1290 | if (dev == NULL) { | 1288 | if (dev == NULL) { |
| 1291 | STK_ERROR("Out of memory !\n"); | 1289 | pr_err("Out of memory !\n"); |
| 1292 | return -ENOMEM; | 1290 | return -ENOMEM; |
| 1293 | } | 1291 | } |
| 1294 | err = v4l2_device_register(&interface->dev, &dev->v4l2_dev); | 1292 | err = v4l2_device_register(&interface->dev, &dev->v4l2_dev); |
| @@ -1352,7 +1350,7 @@ static int stk_camera_probe(struct usb_interface *interface, | |||
| 1352 | } | 1350 | } |
| 1353 | } | 1351 | } |
| 1354 | if (!dev->isoc_ep) { | 1352 | if (!dev->isoc_ep) { |
| 1355 | STK_ERROR("Could not find isoc-in endpoint"); | 1353 | pr_err("Could not find isoc-in endpoint\n"); |
| 1356 | err = -ENODEV; | 1354 | err = -ENODEV; |
| 1357 | goto error; | 1355 | goto error; |
| 1358 | } | 1356 | } |
| @@ -1387,8 +1385,8 @@ static void stk_camera_disconnect(struct usb_interface *interface) | |||
| 1387 | 1385 | ||
| 1388 | wake_up_interruptible(&dev->wait_frame); | 1386 | wake_up_interruptible(&dev->wait_frame); |
| 1389 | 1387 | ||
| 1390 | STK_INFO("Syntek USB2.0 Camera release resources device %s\n", | 1388 | pr_info("Syntek USB2.0 Camera release resources device %s\n", |
| 1391 | video_device_node_name(&dev->vdev)); | 1389 | video_device_node_name(&dev->vdev)); |
| 1392 | 1390 | ||
| 1393 | video_unregister_device(&dev->vdev); | 1391 | video_unregister_device(&dev->vdev); |
| 1394 | v4l2_ctrl_handler_free(&dev->hdl); | 1392 | v4l2_ctrl_handler_free(&dev->hdl); |
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h index 0284120ce246..5cecbdc97573 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.h +++ b/drivers/media/usb/stkwebcam/stk-webcam.h | |||
| @@ -31,12 +31,6 @@ | |||
| 31 | #define ISO_MAX_FRAME_SIZE 3 * 1024 | 31 | #define ISO_MAX_FRAME_SIZE 3 * 1024 |
| 32 | #define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE) | 32 | #define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE) |
| 33 | 33 | ||
| 34 | |||
| 35 | #define PREFIX "stkwebcam: " | ||
| 36 | #define STK_INFO(str, args...) printk(KERN_INFO PREFIX str, ##args) | ||
| 37 | #define STK_ERROR(str, args...) printk(KERN_ERR PREFIX str, ##args) | ||
| 38 | #define STK_WARNING(str, args...) printk(KERN_WARNING PREFIX str, ##args) | ||
| 39 | |||
| 40 | struct stk_iso_buf { | 34 | struct stk_iso_buf { |
| 41 | void *data; | 35 | void *data; |
| 42 | int length; | 36 | int length; |
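The stkwebcam conversion above replaces the driver-private STK_ERROR/STK_INFO macros with plain pr_err()/pr_info()/pr_err_ratelimited() and gets the "module:" prefix from a pr_fmt() definition placed before the first include. A hedged, minimal module showing the same mechanism (the demo_* names are made up):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	pr_info("probe ok\n");                        /* logs "<modname>: probe ok" */
	pr_err("request failed: %d\n", -EIO);         /* same prefix, KERN_ERR level */
	pr_err_ratelimited("busy, dropping frame\n"); /* prefixed and rate-limited */
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");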
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c index e48b7c032c95..8db45dfc271b 100644 --- a/drivers/media/v4l2-core/tuner-core.c +++ b/drivers/media/v4l2-core/tuner-core.c | |||
| @@ -43,8 +43,6 @@ | |||
| 43 | 43 | ||
| 44 | #define UNSET (-1U) | 44 | #define UNSET (-1U) |
| 45 | 45 | ||
| 46 | #define PREFIX (t->i2c->dev.driver->name) | ||
| 47 | |||
| 48 | /* | 46 | /* |
| 49 | * Driver modprobe parameters | 47 | * Driver modprobe parameters |
| 50 | */ | 48 | */ |
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c index 99e644cda4d1..ebf69ff48ae2 100644 --- a/drivers/memory/atmel-ebi.c +++ b/drivers/memory/atmel-ebi.c | |||
| @@ -72,7 +72,7 @@ struct atmel_smc_timing_xlate { | |||
| 72 | { .name = nm, .converter = atmel_smc_cs_conf_set_pulse, .shift = pos} | 72 | { .name = nm, .converter = atmel_smc_cs_conf_set_pulse, .shift = pos} |
| 73 | 73 | ||
| 74 | #define ATMEL_SMC_CYCLE_XLATE(nm, pos) \ | 74 | #define ATMEL_SMC_CYCLE_XLATE(nm, pos) \ |
| 75 | { .name = nm, .converter = atmel_smc_cs_conf_set_setup, .shift = pos} | 75 | { .name = nm, .converter = atmel_smc_cs_conf_set_cycle, .shift = pos} |
| 76 | 76 | ||
| 77 | static void at91sam9_ebi_get_config(struct atmel_ebi_dev *ebid, | 77 | static void at91sam9_ebi_get_config(struct atmel_ebi_dev *ebid, |
| 78 | struct atmel_ebi_dev_config *conf) | 78 | struct atmel_ebi_dev_config *conf) |
| @@ -120,12 +120,14 @@ static int atmel_ebi_xslate_smc_timings(struct atmel_ebi_dev *ebid, | |||
| 120 | if (!ret) { | 120 | if (!ret) { |
| 121 | required = true; | 121 | required = true; |
| 122 | ncycles = DIV_ROUND_UP(val, clk_period_ns); | 122 | ncycles = DIV_ROUND_UP(val, clk_period_ns); |
| 123 | if (ncycles > ATMEL_SMC_MODE_TDF_MAX || | 123 | if (ncycles > ATMEL_SMC_MODE_TDF_MAX) { |
| 124 | ncycles < ATMEL_SMC_MODE_TDF_MIN) { | ||
| 125 | ret = -EINVAL; | 124 | ret = -EINVAL; |
| 126 | goto out; | 125 | goto out; |
| 127 | } | 126 | } |
| 128 | 127 | ||
| 128 | if (ncycles < ATMEL_SMC_MODE_TDF_MIN) | ||
| 129 | ncycles = ATMEL_SMC_MODE_TDF_MIN; | ||
| 130 | |||
| 129 | smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles); | 131 | smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles); |
| 130 | } | 132 | } |
| 131 | 133 | ||
| @@ -263,7 +265,7 @@ static int atmel_ebi_xslate_smc_config(struct atmel_ebi_dev *ebid, | |||
| 263 | } | 265 | } |
| 264 | 266 | ||
| 265 | ret = atmel_ebi_xslate_smc_timings(ebid, np, &conf->smcconf); | 267 | ret = atmel_ebi_xslate_smc_timings(ebid, np, &conf->smcconf); |
| 266 | if (ret) | 268 | if (ret < 0) |
| 267 | return -EINVAL; | 269 | return -EINVAL; |
| 268 | 270 | ||
| 269 | if ((ret > 0 && !required) || (!ret && required)) { | 271 | if ((ret > 0 && !required) || (!ret && required)) { |
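The atmel-ebi change above stops treating a too-small TDF value as an error and clamps it to the minimum instead, while still rejecting values above the hardware maximum. A user-space sketch of that rounding and clamping logic, with placeholder limits rather than the real ATMEL_SMC_MODE_TDF_MIN/MAX values:

#include <stdio.h>

#define DEMO_TDF_MIN 4
#define DEMO_TDF_MAX 16
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Convert a requested delay in ns to controller cycles: round up, fail only
 * above the maximum, and silently raise too-small results to the minimum. */
static long demo_tdf_cycles(unsigned long val_ns, unsigned long clk_period_ns)
{
	unsigned long ncycles = DIV_ROUND_UP(val_ns, clk_period_ns);

	if (ncycles > DEMO_TDF_MAX)
		return -1;              /* caller turns this into -EINVAL */
	if (ncycles < DEMO_TDF_MIN)
		ncycles = DEMO_TDF_MIN; /* clamp instead of failing */
	return (long)ncycles;
}

int main(void)
{
	printf("%ld %ld %ld\n",
	       demo_tdf_cycles(5, 10),     /* rounds to 1, clamped up to 4 */
	       demo_tdf_cycles(100, 10),   /* 10 cycles */
	       demo_tdf_cycles(1000, 10)); /* 100 cycles > max -> -1 */
	return 0;
}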
diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c index 954cf0f66a31..20cc0ea470fa 100644 --- a/drivers/mfd/atmel-smc.c +++ b/drivers/mfd/atmel-smc.c | |||
| @@ -206,7 +206,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_set_pulse); | |||
| 206 | * parameter | 206 | * parameter |
| 207 | * | 207 | * |
| 208 | * This function encodes the @ncycles value as described in the datasheet | 208 | * This function encodes the @ncycles value as described in the datasheet |
| 209 | * (section "SMC Pulse Register"), and then stores the result in the | 209 | * (section "SMC Cycle Register"), and then stores the result in the |
| 210 | * @conf->setup field at @shift position. | 210 | * @conf->setup field at @shift position. |
| 211 | * | 211 | * |
| 212 | * Returns -EINVAL if @shift is invalid, -ERANGE if @ncycles does not fit in | 212 | * Returns -EINVAL if @shift is invalid, -ERANGE if @ncycles does not fit in |
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c index fbe0f245ce8e..fe1811523e4a 100644 --- a/drivers/mfd/da9062-core.c +++ b/drivers/mfd/da9062-core.c | |||
| @@ -645,6 +645,9 @@ static const struct regmap_range da9062_aa_readable_ranges[] = { | |||
| 645 | .range_min = DA9062AA_VLDO1_B, | 645 | .range_min = DA9062AA_VLDO1_B, |
| 646 | .range_max = DA9062AA_VLDO4_B, | 646 | .range_max = DA9062AA_VLDO4_B, |
| 647 | }, { | 647 | }, { |
| 648 | .range_min = DA9062AA_BBAT_CONT, | ||
| 649 | .range_max = DA9062AA_BBAT_CONT, | ||
| 650 | }, { | ||
| 648 | .range_min = DA9062AA_INTERFACE, | 651 | .range_min = DA9062AA_INTERFACE, |
| 649 | .range_max = DA9062AA_CONFIG_E, | 652 | .range_max = DA9062AA_CONFIG_E, |
| 650 | }, { | 653 | }, { |
| @@ -721,6 +724,9 @@ static const struct regmap_range da9062_aa_writeable_ranges[] = { | |||
| 721 | .range_min = DA9062AA_VLDO1_B, | 724 | .range_min = DA9062AA_VLDO1_B, |
| 722 | .range_max = DA9062AA_VLDO4_B, | 725 | .range_max = DA9062AA_VLDO4_B, |
| 723 | }, { | 726 | }, { |
| 727 | .range_min = DA9062AA_BBAT_CONT, | ||
| 728 | .range_max = DA9062AA_BBAT_CONT, | ||
| 729 | }, { | ||
| 724 | .range_min = DA9062AA_GP_ID_0, | 730 | .range_min = DA9062AA_GP_ID_0, |
| 725 | .range_max = DA9062AA_GP_ID_19, | 731 | .range_max = DA9062AA_GP_ID_19, |
| 726 | }, | 732 | }, |
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 5c739ac752e8..5970b8def548 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c | |||
| @@ -33,7 +33,6 @@ | |||
| 33 | #include <linux/mfd/abx500/ab8500.h> | 33 | #include <linux/mfd/abx500/ab8500.h> |
| 34 | #include <linux/regulator/db8500-prcmu.h> | 34 | #include <linux/regulator/db8500-prcmu.h> |
| 35 | #include <linux/regulator/machine.h> | 35 | #include <linux/regulator/machine.h> |
| 36 | #include <linux/cpufreq.h> | ||
| 37 | #include <linux/platform_data/ux500_wdt.h> | 36 | #include <linux/platform_data/ux500_wdt.h> |
| 38 | #include <linux/platform_data/db8500_thermal.h> | 37 | #include <linux/platform_data/db8500_thermal.h> |
| 39 | #include "dbx500-prcmu-regs.h" | 38 | #include "dbx500-prcmu-regs.h" |
| @@ -1692,32 +1691,27 @@ static long round_clock_rate(u8 clock, unsigned long rate) | |||
| 1692 | return rounded_rate; | 1691 | return rounded_rate; |
| 1693 | } | 1692 | } |
| 1694 | 1693 | ||
| 1695 | /* CPU FREQ table, may be changed due to if MAX_OPP is supported. */ | 1694 | static const unsigned long armss_freqs[] = { |
| 1696 | static struct cpufreq_frequency_table db8500_cpufreq_table[] = { | 1695 | 200000000, |
| 1697 | { .frequency = 200000, .driver_data = ARM_EXTCLK,}, | 1696 | 400000000, |
| 1698 | { .frequency = 400000, .driver_data = ARM_50_OPP,}, | 1697 | 800000000, |
| 1699 | { .frequency = 800000, .driver_data = ARM_100_OPP,}, | 1698 | 998400000 |
| 1700 | { .frequency = CPUFREQ_TABLE_END,}, /* To be used for MAX_OPP. */ | ||
| 1701 | { .frequency = CPUFREQ_TABLE_END,}, | ||
| 1702 | }; | 1699 | }; |
| 1703 | 1700 | ||
| 1704 | static long round_armss_rate(unsigned long rate) | 1701 | static long round_armss_rate(unsigned long rate) |
| 1705 | { | 1702 | { |
| 1706 | struct cpufreq_frequency_table *pos; | 1703 | unsigned long freq = 0; |
| 1707 | long freq = 0; | 1704 | int i; |
| 1708 | |||
| 1709 | /* cpufreq table frequencies is in KHz. */ | ||
| 1710 | rate = rate / 1000; | ||
| 1711 | 1705 | ||
| 1712 | /* Find the corresponding arm opp from the cpufreq table. */ | 1706 | /* Find the corresponding arm opp from the cpufreq table. */ |
| 1713 | cpufreq_for_each_entry(pos, db8500_cpufreq_table) { | 1707 | for (i = 0; i < ARRAY_SIZE(armss_freqs); i++) { |
| 1714 | freq = pos->frequency; | 1708 | freq = armss_freqs[i]; |
| 1715 | if (freq == rate) | 1709 | if (rate <= freq) |
| 1716 | break; | 1710 | break; |
| 1717 | } | 1711 | } |
| 1718 | 1712 | ||
| 1719 | /* Return the last valid value, even if a match was not found. */ | 1713 | /* Return the last valid value, even if a match was not found. */ |
| 1720 | return freq * 1000; | 1714 | return freq; |
| 1721 | } | 1715 | } |
| 1722 | 1716 | ||
| 1723 | #define MIN_PLL_VCO_RATE 600000000ULL | 1717 | #define MIN_PLL_VCO_RATE 600000000ULL |
| @@ -1854,21 +1848,23 @@ static void set_clock_rate(u8 clock, unsigned long rate) | |||
| 1854 | 1848 | ||
| 1855 | static int set_armss_rate(unsigned long rate) | 1849 | static int set_armss_rate(unsigned long rate) |
| 1856 | { | 1850 | { |
| 1857 | struct cpufreq_frequency_table *pos; | 1851 | unsigned long freq; |
| 1858 | 1852 | u8 opps[] = { ARM_EXTCLK, ARM_50_OPP, ARM_100_OPP, ARM_MAX_OPP }; | |
| 1859 | /* cpufreq table frequencies is in KHz. */ | 1853 | int i; |
| 1860 | rate = rate / 1000; | ||
| 1861 | 1854 | ||
| 1862 | /* Find the corresponding arm opp from the cpufreq table. */ | 1855 | /* Find the corresponding arm opp from the cpufreq table. */ |
| 1863 | cpufreq_for_each_entry(pos, db8500_cpufreq_table) | 1856 | for (i = 0; i < ARRAY_SIZE(armss_freqs); i++) { |
| 1864 | if (pos->frequency == rate) | 1857 | freq = armss_freqs[i]; |
| 1858 | if (rate == freq) | ||
| 1865 | break; | 1859 | break; |
| 1860 | } | ||
| 1866 | 1861 | ||
| 1867 | if (pos->frequency != rate) | 1862 | if (rate != freq) |
| 1868 | return -EINVAL; | 1863 | return -EINVAL; |
| 1869 | 1864 | ||
| 1870 | /* Set the new arm opp. */ | 1865 | /* Set the new arm opp. */ |
| 1871 | return db8500_prcmu_set_arm_opp(pos->driver_data); | 1866 | pr_debug("SET ARM OPP 0x%02x\n", opps[i]); |
| 1867 | return db8500_prcmu_set_arm_opp(opps[i]); | ||
| 1872 | } | 1868 | } |
| 1873 | 1869 | ||
| 1874 | static int set_plldsi_rate(unsigned long rate) | 1870 | static int set_plldsi_rate(unsigned long rate) |
| @@ -3049,12 +3045,6 @@ static const struct mfd_cell db8500_prcmu_devs[] = { | |||
| 3049 | .pdata_size = sizeof(db8500_regulators), | 3045 | .pdata_size = sizeof(db8500_regulators), |
| 3050 | }, | 3046 | }, |
| 3051 | { | 3047 | { |
| 3052 | .name = "cpufreq-ux500", | ||
| 3053 | .of_compatible = "stericsson,cpufreq-ux500", | ||
| 3054 | .platform_data = &db8500_cpufreq_table, | ||
| 3055 | .pdata_size = sizeof(db8500_cpufreq_table), | ||
| 3056 | }, | ||
| 3057 | { | ||
| 3058 | .name = "cpuidle-dbx500", | 3048 | .name = "cpuidle-dbx500", |
| 3059 | .of_compatible = "stericsson,cpuidle-dbx500", | 3049 | .of_compatible = "stericsson,cpuidle-dbx500", |
| 3060 | }, | 3050 | }, |
| @@ -3067,14 +3057,6 @@ static const struct mfd_cell db8500_prcmu_devs[] = { | |||
| 3067 | }, | 3057 | }, |
| 3068 | }; | 3058 | }; |
| 3069 | 3059 | ||
| 3070 | static void db8500_prcmu_update_cpufreq(void) | ||
| 3071 | { | ||
| 3072 | if (prcmu_has_arm_maxopp()) { | ||
| 3073 | db8500_cpufreq_table[3].frequency = 1000000; | ||
| 3074 | db8500_cpufreq_table[3].driver_data = ARM_MAX_OPP; | ||
| 3075 | } | ||
| 3076 | } | ||
| 3077 | |||
| 3078 | static int db8500_prcmu_register_ab8500(struct device *parent) | 3060 | static int db8500_prcmu_register_ab8500(struct device *parent) |
| 3079 | { | 3061 | { |
| 3080 | struct device_node *np; | 3062 | struct device_node *np; |
| @@ -3160,8 +3142,6 @@ static int db8500_prcmu_probe(struct platform_device *pdev) | |||
| 3160 | 3142 | ||
| 3161 | prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); | 3143 | prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); |
| 3162 | 3144 | ||
| 3163 | db8500_prcmu_update_cpufreq(); | ||
| 3164 | |||
| 3165 | err = mfd_add_devices(&pdev->dev, 0, common_prcmu_devs, | 3145 | err = mfd_add_devices(&pdev->dev, 0, common_prcmu_devs, |
| 3166 | ARRAY_SIZE(common_prcmu_devs), NULL, 0, db8500_irq_domain); | 3146 | ARRAY_SIZE(common_prcmu_devs), NULL, 0, db8500_irq_domain); |
| 3167 | if (err) { | 3147 | if (err) { |
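The db8500-prcmu hunk above drops the cpufreq table and keys the ARM OPP off a plain frequency array, so the rounding and the OPP selection become two index-matched lookups. A user-space sketch of that scheme; the demo OPP codes are stand-ins for ARM_EXTCLK/ARM_50_OPP/ARM_100_OPP/ARM_MAX_OPP:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned long demo_freqs[] = {
	200000000, 400000000, 800000000, 998400000
};
static const unsigned char demo_opps[] = { 0x01, 0x02, 0x03, 0x04 };

/* Round up to the first supported rate >= rate; the highest otherwise. */
static unsigned long demo_round_rate(unsigned long rate)
{
	unsigned long freq = 0;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(demo_freqs); i++) {
		freq = demo_freqs[i];
		if (rate <= freq)
			break;
	}
	return freq;
}

/* Only an exact match is programmed; the index selects the OPP code. */
static int demo_set_rate(unsigned long rate)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(demo_freqs); i++)
		if (rate == demo_freqs[i])
			return demo_opps[i]; /* would feed db8500_prcmu_set_arm_opp() */
	return -1;                           /* -EINVAL in the driver */
}

int main(void)
{
	unsigned long rounded = demo_round_rate(300000000); /* -> 400000000 */

	printf("%lu -> opp 0x%02x\n", rounded, demo_set_rate(rounded));
	return 0;
}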
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 8621a198a2ce..bac33311f55a 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
| @@ -216,6 +216,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 216 | pci_set_drvdata(pdev, dev); | 216 | pci_set_drvdata(pdev, dev); |
| 217 | 217 | ||
| 218 | /* | 218 | /* |
| 219 | * MEI needs to resume from runtime suspend mode | ||
| 220 | * in order to perform the link reset flow upon system suspend. | ||
| 221 | */ | ||
| 222 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; | ||
| 223 | |||
| 224 | /* | ||
| 219 | * For not wake-able HW runtime pm framework | 225 | * For not wake-able HW runtime pm framework |
| 220 | * can't be used on pci device level. | 226 | * can't be used on pci device level. |
| 221 | * Use domain runtime pm callbacks instead. | 227 | * Use domain runtime pm callbacks instead. |
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c index f811cd524468..e38a5f144373 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c | |||
| @@ -138,6 +138,12 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 138 | pci_set_drvdata(pdev, dev); | 138 | pci_set_drvdata(pdev, dev); |
| 139 | 139 | ||
| 140 | /* | 140 | /* |
| 141 | * MEI needs to resume from runtime suspend mode | ||
| 142 | * in order to perform the link reset flow upon system suspend. | ||
| 143 | */ | ||
| 144 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; | ||
| 145 | |||
| 146 | /* | ||
| 141 | * For not wake-able HW runtime pm framework | 147 | * For not wake-able HW runtime pm framework |
| 142 | * can't be used on pci device level. | 148 | * can't be used on pci device level. |
| 143 | * Use domain runtime pm callbacks instead. | 149 | * Use domain runtime pm callbacks instead. |
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c index 64d5760d069a..63d6246d6dff 100644 --- a/drivers/misc/mic/scif/scif_dma.c +++ b/drivers/misc/mic/scif/scif_dma.c | |||
| @@ -200,16 +200,6 @@ static void scif_mmu_notifier_release(struct mmu_notifier *mn, | |||
| 200 | schedule_work(&scif_info.misc_work); | 200 | schedule_work(&scif_info.misc_work); |
| 201 | } | 201 | } |
| 202 | 202 | ||
| 203 | static void scif_mmu_notifier_invalidate_page(struct mmu_notifier *mn, | ||
| 204 | struct mm_struct *mm, | ||
| 205 | unsigned long address) | ||
| 206 | { | ||
| 207 | struct scif_mmu_notif *mmn; | ||
| 208 | |||
| 209 | mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier); | ||
| 210 | scif_rma_destroy_tcw(mmn, address, PAGE_SIZE); | ||
| 211 | } | ||
| 212 | |||
| 213 | static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, | 203 | static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, |
| 214 | struct mm_struct *mm, | 204 | struct mm_struct *mm, |
| 215 | unsigned long start, | 205 | unsigned long start, |
| @@ -235,7 +225,6 @@ static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, | |||
| 235 | static const struct mmu_notifier_ops scif_mmu_notifier_ops = { | 225 | static const struct mmu_notifier_ops scif_mmu_notifier_ops = { |
| 236 | .release = scif_mmu_notifier_release, | 226 | .release = scif_mmu_notifier_release, |
| 237 | .clear_flush_young = NULL, | 227 | .clear_flush_young = NULL, |
| 238 | .invalidate_page = scif_mmu_notifier_invalidate_page, | ||
| 239 | .invalidate_range_start = scif_mmu_notifier_invalidate_range_start, | 228 | .invalidate_range_start = scif_mmu_notifier_invalidate_range_start, |
| 240 | .invalidate_range_end = scif_mmu_notifier_invalidate_range_end}; | 229 | .invalidate_range_end = scif_mmu_notifier_invalidate_range_end}; |
| 241 | 230 | ||
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c index e936d43895d2..9918eda0e05f 100644 --- a/drivers/misc/sgi-gru/grutlbpurge.c +++ b/drivers/misc/sgi-gru/grutlbpurge.c | |||
| @@ -247,17 +247,6 @@ static void gru_invalidate_range_end(struct mmu_notifier *mn, | |||
| 247 | gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end); | 247 | gru_dbg(grudev, "gms %p, start 0x%lx, end 0x%lx\n", gms, start, end); |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | static void gru_invalidate_page(struct mmu_notifier *mn, struct mm_struct *mm, | ||
| 251 | unsigned long address) | ||
| 252 | { | ||
| 253 | struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, | ||
| 254 | ms_notifier); | ||
| 255 | |||
| 256 | STAT(mmu_invalidate_page); | ||
| 257 | gru_flush_tlb_range(gms, address, PAGE_SIZE); | ||
| 258 | gru_dbg(grudev, "gms %p, address 0x%lx\n", gms, address); | ||
| 259 | } | ||
| 260 | |||
| 261 | static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm) | 250 | static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm) |
| 262 | { | 251 | { |
| 263 | struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, | 252 | struct gru_mm_struct *gms = container_of(mn, struct gru_mm_struct, |
| @@ -269,7 +258,6 @@ static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm) | |||
| 269 | 258 | ||
| 270 | 259 | ||
| 271 | static const struct mmu_notifier_ops gru_mmuops = { | 260 | static const struct mmu_notifier_ops gru_mmuops = { |
| 272 | .invalidate_page = gru_invalidate_page, | ||
| 273 | .invalidate_range_start = gru_invalidate_range_start, | 261 | .invalidate_range_start = gru_invalidate_range_start, |
| 274 | .invalidate_range_end = gru_invalidate_range_end, | 262 | .invalidate_range_end = gru_invalidate_range_end, |
| 275 | .release = gru_release, | 263 | .release = gru_release, |
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 8ac59dc80f23..8bd7aba811e9 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
| @@ -1213,7 +1213,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) | |||
| 1213 | break; | 1213 | break; |
| 1214 | } | 1214 | } |
| 1215 | mq_rq->drv_op_result = ret; | 1215 | mq_rq->drv_op_result = ret; |
| 1216 | blk_end_request_all(req, ret); | 1216 | blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK); |
| 1217 | } | 1217 | } |
| 1218 | 1218 | ||
| 1219 | static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) | 1219 | static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) |
| @@ -1371,12 +1371,46 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, | |||
| 1371 | R1_CC_ERROR | /* Card controller error */ \ | 1371 | R1_CC_ERROR | /* Card controller error */ \ |
| 1372 | R1_ERROR) /* General/unknown error */ | 1372 | R1_ERROR) /* General/unknown error */ |
| 1373 | 1373 | ||
| 1374 | static bool mmc_blk_has_cmd_err(struct mmc_command *cmd) | 1374 | static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) |
| 1375 | { | 1375 | { |
| 1376 | if (!cmd->error && cmd->resp[0] & CMD_ERRORS) | 1376 | u32 val; |
| 1377 | cmd->error = -EIO; | 1377 | |
| 1378 | /* | ||
| 1379 | * Per the SD specification(physical layer version 4.10)[1], | ||
| 1380 | * section 4.3.3, it explicitly states that "When the last | ||
| 1381 | * block of user area is read using CMD18, the host should | ||
| 1382 | * ignore OUT_OF_RANGE error that may occur even the sequence | ||
| 1383 | * is correct". And JESD84-B51 for eMMC also has a similar | ||
| 1384 | * statement on section 6.8.3. | ||
| 1385 | * | ||
| 1386 | * Multiple block read/write could be done by either predefined | ||
| 1387 | * method, namely CMD23, or open-ending mode. For open-ending mode, | ||
| 1388 | * we should ignore the OUT_OF_RANGE error as it's normal behaviour. | ||
| 1389 | * | ||
| 1390 | * However the spec[1] doesn't tell us whether we should also | ||
| 1391 | * ignore that for the predefined method. But per the spec[1], section | ||
| 1392 | * 4.15 Set Block Count Command, it says "If illegal block count | ||
| 1393 | * is set, out of range error will be indicated during read/write | ||
| 1394 | * operation (For example, data transfer is stopped at user area | ||
| 1395 | * boundary)." In other words, we could expect an out of range error | ||
| 1396 | * in the response for the following CMD18/25. And if the argument of | ||
| 1397 | * CMD23 + the argument of CMD18/25 exceeds the max number of blocks, | ||
| 1398 | * we could also expect to get a -ETIMEDOUT or any error number from | ||
| 1399 | * the host drivers due to a missing data response (for write)/data (for | ||
| 1400 | * read), as the card will stop the data transfer by itself per the | ||
| 1401 | * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode. | ||
| 1402 | */ | ||
| 1378 | 1403 | ||
| 1379 | return cmd->error; | 1404 | if (!brq->stop.error) { |
| 1405 | bool oor_with_open_end; | ||
| 1406 | /* If there is no error yet, check R1 response */ | ||
| 1407 | |||
| 1408 | val = brq->stop.resp[0] & CMD_ERRORS; | ||
| 1409 | oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc; | ||
| 1410 | |||
| 1411 | if (val && !oor_with_open_end) | ||
| 1412 | brq->stop.error = -EIO; | ||
| 1413 | } | ||
| 1380 | } | 1414 | } |
| 1381 | 1415 | ||
| 1382 | static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, | 1416 | static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, |
| @@ -1400,8 +1434,11 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, | |||
| 1400 | * stop.error indicates a problem with the stop command. Data | 1434 | * stop.error indicates a problem with the stop command. Data |
| 1401 | * may have been transferred, or may still be transferring. | 1435 | * may have been transferred, or may still be transferring. |
| 1402 | */ | 1436 | */ |
| 1403 | if (brq->sbc.error || brq->cmd.error || mmc_blk_has_cmd_err(&brq->stop) || | 1437 | |
| 1404 | brq->data.error) { | 1438 | mmc_blk_eval_resp_error(brq); |
| 1439 | |||
| 1440 | if (brq->sbc.error || brq->cmd.error || | ||
| 1441 | brq->stop.error || brq->data.error) { | ||
| 1405 | switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { | 1442 | switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { |
| 1406 | case ERR_RETRY: | 1443 | case ERR_RETRY: |
| 1407 | return MMC_BLK_RETRY; | 1444 | return MMC_BLK_RETRY; |
| @@ -1681,9 +1718,9 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, | |||
| 1681 | if (err) | 1718 | if (err) |
| 1682 | req_pending = old_req_pending; | 1719 | req_pending = old_req_pending; |
| 1683 | else | 1720 | else |
| 1684 | req_pending = blk_end_request(req, 0, blocks << 9); | 1721 | req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9); |
| 1685 | } else { | 1722 | } else { |
| 1686 | req_pending = blk_end_request(req, 0, brq->data.bytes_xfered); | 1723 | req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered); |
| 1687 | } | 1724 | } |
| 1688 | return req_pending; | 1725 | return req_pending; |
| 1689 | } | 1726 | } |
| @@ -2170,6 +2207,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) | |||
| 2170 | * from being accepted. | 2207 | * from being accepted. |
| 2171 | */ | 2208 | */ |
| 2172 | card = md->queue.card; | 2209 | card = md->queue.card; |
| 2210 | spin_lock_irq(md->queue.queue->queue_lock); | ||
| 2211 | queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue); | ||
| 2212 | spin_unlock_irq(md->queue.queue->queue_lock); | ||
| 2173 | blk_set_queue_dying(md->queue.queue); | 2213 | blk_set_queue_dying(md->queue.queue); |
| 2174 | mmc_cleanup_queue(&md->queue); | 2214 | mmc_cleanup_queue(&md->queue); |
| 2175 | if (md->disk->flags & GENHD_FL_UP) { | 2215 | if (md->disk->flags & GENHD_FL_UP) { |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 4ffea14b7eb6..2bae69e39544 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -1289,7 +1289,7 @@ out_err: | |||
| 1289 | static int mmc_select_hs400es(struct mmc_card *card) | 1289 | static int mmc_select_hs400es(struct mmc_card *card) |
| 1290 | { | 1290 | { |
| 1291 | struct mmc_host *host = card->host; | 1291 | struct mmc_host *host = card->host; |
| 1292 | int err = 0; | 1292 | int err = -EINVAL; |
| 1293 | u8 val; | 1293 | u8 val; |
| 1294 | 1294 | ||
| 1295 | if (!(host->caps & MMC_CAP_8_BIT_DATA)) { | 1295 | if (!(host->caps & MMC_CAP_8_BIT_DATA)) { |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index a9dfb26972f2..250dc6ec4c82 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
| @@ -2957,7 +2957,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) | |||
| 2957 | } | 2957 | } |
| 2958 | 2958 | ||
| 2959 | /* find out number of slots supported */ | 2959 | /* find out number of slots supported */ |
| 2960 | if (device_property_read_u32(dev, "num-slots", &pdata->num_slots)) | 2960 | if (!device_property_read_u32(dev, "num-slots", &pdata->num_slots)) |
| 2961 | dev_info(dev, "'num-slots' was deprecated.\n"); | 2961 | dev_info(dev, "'num-slots' was deprecated.\n"); |
| 2962 | 2962 | ||
| 2963 | if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth)) | 2963 | if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth)) |
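The one-line dw_mmc fix above hinges on the return convention of device_property_read_u32(): it returns 0 when the property was found and read, and a negative errno otherwise, so the deprecation notice must be printed on the negated call. A small sketch of the corrected pattern; "demo-prop" is a made-up property name:

#include <linux/device.h>
#include <linux/property.h>
#include <linux/types.h>

static void demo_warn_deprecated(struct device *dev)
{
	u32 val;

	/* 0 means the property exists and val has been filled in. */
	if (!device_property_read_u32(dev, "demo-prop", &val))
		dev_info(dev, "'demo-prop' is deprecated (value %u is ignored)\n",
			 val);
}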
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 7c12f3715676..2ab4788d021f 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
| @@ -356,9 +356,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, | |||
| 356 | struct mmc_host *mmc = host->mmc; | 356 | struct mmc_host *mmc = host->mmc; |
| 357 | int ret = 0; | 357 | int ret = 0; |
| 358 | 358 | ||
| 359 | if (mmc_pdata(host)->set_power) | ||
| 360 | return mmc_pdata(host)->set_power(host->dev, power_on, vdd); | ||
| 361 | |||
| 362 | /* | 359 | /* |
| 363 | * If we don't see a Vcc regulator, assume it's a fixed | 360 | * If we don't see a Vcc regulator, assume it's a fixed |
| 364 | * voltage always-on regulator. | 361 | * voltage always-on regulator. |
| @@ -366,9 +363,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, | |||
| 366 | if (IS_ERR(mmc->supply.vmmc)) | 363 | if (IS_ERR(mmc->supply.vmmc)) |
| 367 | return 0; | 364 | return 0; |
| 368 | 365 | ||
| 369 | if (mmc_pdata(host)->before_set_reg) | ||
| 370 | mmc_pdata(host)->before_set_reg(host->dev, power_on, vdd); | ||
| 371 | |||
| 372 | ret = omap_hsmmc_set_pbias(host, false, 0); | 366 | ret = omap_hsmmc_set_pbias(host, false, 0); |
| 373 | if (ret) | 367 | if (ret) |
| 374 | return ret; | 368 | return ret; |
| @@ -400,9 +394,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, | |||
| 400 | return ret; | 394 | return ret; |
| 401 | } | 395 | } |
| 402 | 396 | ||
| 403 | if (mmc_pdata(host)->after_set_reg) | ||
| 404 | mmc_pdata(host)->after_set_reg(host->dev, power_on, vdd); | ||
| 405 | |||
| 406 | return 0; | 397 | return 0; |
| 407 | 398 | ||
| 408 | err_set_voltage: | 399 | err_set_voltage: |
| @@ -469,8 +460,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) | |||
| 469 | int ret; | 460 | int ret; |
| 470 | struct mmc_host *mmc = host->mmc; | 461 | struct mmc_host *mmc = host->mmc; |
| 471 | 462 | ||
| 472 | if (mmc_pdata(host)->set_power) | ||
| 473 | return 0; | ||
| 474 | 463 | ||
| 475 | ret = mmc_regulator_get_supply(mmc); | 464 | ret = mmc_regulator_get_supply(mmc); |
| 476 | if (ret == -EPROBE_DEFER) | 465 | if (ret == -EPROBE_DEFER) |
| @@ -2097,7 +2086,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev) | |||
| 2097 | mmc->max_seg_size = mmc->max_req_size; | 2086 | mmc->max_seg_size = mmc->max_req_size; |
| 2098 | 2087 | ||
| 2099 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | | 2088 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | |
| 2100 | MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE; | 2089 | MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23; |
| 2101 | 2090 | ||
| 2102 | mmc->caps |= mmc_pdata(host)->caps; | 2091 | mmc->caps |= mmc_pdata(host)->caps; |
| 2103 | if (mmc->caps & MMC_CAP_8_BIT_DATA) | 2092 | if (mmc->caps & MMC_CAP_8_BIT_DATA) |
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 7611fd679f1a..1485530c3592 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | 31 | ||
| 32 | #define SDMMC_MC1R 0x204 | 32 | #define SDMMC_MC1R 0x204 |
| 33 | #define SDMMC_MC1R_DDR BIT(3) | 33 | #define SDMMC_MC1R_DDR BIT(3) |
| 34 | #define SDMMC_MC1R_FCD BIT(7) | ||
| 34 | #define SDMMC_CACR 0x230 | 35 | #define SDMMC_CACR 0x230 |
| 35 | #define SDMMC_CACR_CAPWREN BIT(0) | 36 | #define SDMMC_CACR_CAPWREN BIT(0) |
| 36 | #define SDMMC_CACR_KEY (0x46 << 8) | 37 | #define SDMMC_CACR_KEY (0x46 << 8) |
| @@ -43,6 +44,15 @@ struct sdhci_at91_priv { | |||
| 43 | struct clk *mainck; | 44 | struct clk *mainck; |
| 44 | }; | 45 | }; |
| 45 | 46 | ||
| 47 | static void sdhci_at91_set_force_card_detect(struct sdhci_host *host) | ||
| 48 | { | ||
| 49 | u8 mc1r; | ||
| 50 | |||
| 51 | mc1r = readb(host->ioaddr + SDMMC_MC1R); | ||
| 52 | mc1r |= SDMMC_MC1R_FCD; | ||
| 53 | writeb(mc1r, host->ioaddr + SDMMC_MC1R); | ||
| 54 | } | ||
| 55 | |||
| 46 | static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock) | 56 | static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock) |
| 47 | { | 57 | { |
| 48 | u16 clk; | 58 | u16 clk; |
| @@ -110,10 +120,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) | |||
| 110 | sdhci_set_uhs_signaling(host, timing); | 120 | sdhci_set_uhs_signaling(host, timing); |
| 111 | } | 121 | } |
| 112 | 122 | ||
| 123 | static void sdhci_at91_reset(struct sdhci_host *host, u8 mask) | ||
| 124 | { | ||
| 125 | sdhci_reset(host, mask); | ||
| 126 | |||
| 127 | if (host->mmc->caps & MMC_CAP_NONREMOVABLE) | ||
| 128 | sdhci_at91_set_force_card_detect(host); | ||
| 129 | } | ||
| 130 | |||
| 113 | static const struct sdhci_ops sdhci_at91_sama5d2_ops = { | 131 | static const struct sdhci_ops sdhci_at91_sama5d2_ops = { |
| 114 | .set_clock = sdhci_at91_set_clock, | 132 | .set_clock = sdhci_at91_set_clock, |
| 115 | .set_bus_width = sdhci_set_bus_width, | 133 | .set_bus_width = sdhci_set_bus_width, |
| 116 | .reset = sdhci_reset, | 134 | .reset = sdhci_at91_reset, |
| 117 | .set_uhs_signaling = sdhci_at91_set_uhs_signaling, | 135 | .set_uhs_signaling = sdhci_at91_set_uhs_signaling, |
| 118 | .set_power = sdhci_at91_set_power, | 136 | .set_power = sdhci_at91_set_power, |
| 119 | }; | 137 | }; |
| @@ -324,6 +342,21 @@ static int sdhci_at91_probe(struct platform_device *pdev) | |||
| 324 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | 342 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; |
| 325 | } | 343 | } |
| 326 | 344 | ||
| 345 | /* | ||
| 346 | * If the device attached to the MMC bus is not removable, it is safer | ||
| 347 | * to set the Force Card Detect bit. People often don't connect the | ||
| 348 | * card detect signal and use this pin for another purpose. If the card | ||
| 349 | * detect pin is not muxed to the SDHCI controller, a default value is | ||
| 350 | * used. This value can differ from one SoC revision to another. | ||
| 351 | * Problems arise when that default value does not report a card as | ||
| 352 | * present. To avoid this case, if the device is non-removable then the | ||
| 353 | * card detection procedure using the SDMCC_CD signal is bypassed. | ||
| 354 | * This bit is reset when a software reset for all commands is performed, | ||
| 355 | * so we need to implement our own reset function to set this bit back. | ||
| 356 | */ | ||
| 357 | if (host->mmc->caps & MMC_CAP_NONREMOVABLE) | ||
| 358 | sdhci_at91_set_force_card_detect(host); | ||
| 359 | |||
| 327 | pm_runtime_put_autosuspend(&pdev->dev); | 360 | pm_runtime_put_autosuspend(&pdev->dev); |
| 328 | 361 | ||
| 329 | return 0; | 362 | return 0; |
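The MC1R comment above boils down to a sticky configuration bit that a full software reset wipes, so the reset callback has to reapply it. A minimal userspace sketch of that pattern, assuming nothing about the real register layout beyond the FCD bit position and modelling the register file as a plain array:

#include <stdint.h>
#include <stdio.h>

#define MC1R            0x204
#define MC1R_FCD        (1u << 7)

static uint8_t regs[0x300];

static void base_reset(void)
{
        /* a "reset all" clears the whole register file, FCD included */
        for (unsigned int i = 0; i < sizeof(regs); i++)
                regs[i] = 0;
}

static void set_force_card_detect(void)
{
        regs[MC1R] |= MC1R_FCD;         /* read-modify-write, keep other bits */
}

static void wrapped_reset(int nonremovable)
{
        base_reset();
        if (nonremovable)
                set_force_card_detect();        /* restore the sticky bit */
}

int main(void)
{
        set_force_card_detect();
        base_reset();
        printf("after plain reset:   FCD=%d\n", !!(regs[MC1R] & MC1R_FCD));
        wrapped_reset(1);
        printf("after wrapped reset: FCD=%d\n", !!(regs[MC1R] & MC1R_FCD));
        return 0;
}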
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index bc1781bb070b..c580af05b033 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c | |||
| @@ -210,8 +210,27 @@ static void xenon_set_uhs_signaling(struct sdhci_host *host, | |||
| 210 | sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); | 210 | sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); |
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | static void xenon_set_power(struct sdhci_host *host, unsigned char mode, | ||
| 214 | unsigned short vdd) | ||
| 215 | { | ||
| 216 | struct mmc_host *mmc = host->mmc; | ||
| 217 | u8 pwr = host->pwr; | ||
| 218 | |||
| 219 | sdhci_set_power_noreg(host, mode, vdd); | ||
| 220 | |||
| 221 | if (host->pwr == pwr) | ||
| 222 | return; | ||
| 223 | |||
| 224 | if (host->pwr == 0) | ||
| 225 | vdd = 0; | ||
| 226 | |||
| 227 | if (!IS_ERR(mmc->supply.vmmc)) | ||
| 228 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); | ||
| 229 | } | ||
| 230 | |||
| 213 | static const struct sdhci_ops sdhci_xenon_ops = { | 231 | static const struct sdhci_ops sdhci_xenon_ops = { |
| 214 | .set_clock = sdhci_set_clock, | 232 | .set_clock = sdhci_set_clock, |
| 233 | .set_power = xenon_set_power, | ||
| 215 | .set_bus_width = sdhci_set_bus_width, | 234 | .set_bus_width = sdhci_set_bus_width, |
| 216 | .reset = xenon_reset, | 235 | .reset = xenon_reset, |
| 217 | .set_uhs_signaling = xenon_set_uhs_signaling, | 236 | .set_uhs_signaling = xenon_set_uhs_signaling, |
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index d6fa2214aaae..0fb4e4c119e1 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c | |||
| @@ -793,8 +793,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, | |||
| 793 | } | 793 | } |
| 794 | mmc_writel(host, REG_CLKCR, rval); | 794 | mmc_writel(host, REG_CLKCR, rval); |
| 795 | 795 | ||
| 796 | if (host->cfg->needs_new_timings) | 796 | if (host->cfg->needs_new_timings) { |
| 797 | mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE); | 797 | /* Don't touch the delay bits */ |
| 798 | rval = mmc_readl(host, REG_SD_NTSR); | ||
| 799 | rval |= SDXC_2X_TIMING_MODE; | ||
| 800 | mmc_writel(host, REG_SD_NTSR, rval); | ||
| 801 | } | ||
| 798 | 802 | ||
| 799 | ret = sunxi_mmc_clk_set_phase(host, ios, rate); | 803 | ret = sunxi_mmc_clk_set_phase(host, ios, rate); |
| 800 | if (ret) | 804 | if (ret) |
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index f336a9b85576..9ec8f033ac5f 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
| @@ -113,6 +113,7 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
| 113 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) | 113 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) |
| 114 | if (tr->writesect(dev, block, buf)) | 114 | if (tr->writesect(dev, block, buf)) |
| 115 | return BLK_STS_IOERR; | 115 | return BLK_STS_IOERR; |
| 116 | return BLK_STS_OK; | ||
| 116 | default: | 117 | default: |
| 117 | return BLK_STS_IOERR; | 118 | return BLK_STS_IOERR; |
| 118 | } | 119 | } |
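The added return BLK_STS_OK fixes a classic switch fallthrough: without it, a successful write drops into the default arm and is reported as an I/O error. A compile-and-run sketch of the same shape (status values are stand-ins):

#include <stdio.h>

enum blk_status { BLK_STS_OK = 0, BLK_STS_IOERR = 10 };

static enum blk_status do_request(int is_write, int write_fails)
{
        switch (is_write) {
        case 1:
                if (write_fails)
                        return BLK_STS_IOERR;
                return BLK_STS_OK;      /* the statement the patch adds */
        default:
                return BLK_STS_IOERR;   /* reached on fallthrough otherwise */
        }
}

int main(void)
{
        printf("successful write -> %d\n", do_request(1, 0));
        printf("failed write     -> %d\n", do_request(1, 1));
        return 0;
}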
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c index d922a88e407f..ceec21bd30c4 100644 --- a/drivers/mtd/nand/atmel/nand-controller.c +++ b/drivers/mtd/nand/atmel/nand-controller.c | |||
| @@ -1201,7 +1201,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, | |||
| 1201 | * tRC < 30ns implies EDO mode. This controller does not support this | 1201 | * tRC < 30ns implies EDO mode. This controller does not support this |
| 1202 | * mode. | 1202 | * mode. |
| 1203 | */ | 1203 | */ |
| 1204 | if (conf->timings.sdr.tRC_min < 30) | 1204 | if (conf->timings.sdr.tRC_min < 30000) |
| 1205 | return -ENOTSUPP; | 1205 | return -ENOTSUPP; |
| 1206 | 1206 | ||
| 1207 | atmel_smc_cs_conf_init(smcconf); | 1207 | atmel_smc_cs_conf_init(smcconf); |
| @@ -1364,7 +1364,18 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, | |||
| 1364 | ret = atmel_smc_cs_conf_set_timing(smcconf, | 1364 | ret = atmel_smc_cs_conf_set_timing(smcconf, |
| 1365 | ATMEL_HSMC_TIMINGS_TADL_SHIFT, | 1365 | ATMEL_HSMC_TIMINGS_TADL_SHIFT, |
| 1366 | ncycles); | 1366 | ncycles); |
| 1367 | if (ret) | 1367 | /* |
| 1368 | * Version 4 of the ONFI spec mandates that tADL be at least 400 | ||
| 1369 | * nanoseconds, but, depending on the master clock rate, 400 ns may not | ||
| 1370 | * fit in the tADL field of the SMC reg. We need to relax the check and | ||
| 1371 | * accept the -ERANGE return code. | ||
| 1372 | * | ||
| 1373 | * Note that previous versions of the ONFI spec had a lower tADL_min | ||
| 1374 | * (100 or 200 ns). It's not clear why this timing constraint got | ||
| 1375 | * increased but it seems most NANDs are fine with values lower than | ||
| 1376 | * 400ns, so we should be safe. | ||
| 1377 | */ | ||
| 1378 | if (ret && ret != -ERANGE) | ||
| 1368 | return ret; | 1379 | return ret; |
| 1369 | 1380 | ||
| 1370 | ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps); | 1381 | ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps); |
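Both hunks above are about units and field width: the SDR timings are carried in picoseconds (hence 30000, not 30, for the 30 ns EDO threshold), and a 400 ns tADL can need more master-clock cycles than the SMC register field can hold, which is why -ERANGE is now tolerated. A rough illustration, assuming a ~132 MHz master clock and a hypothetical 4-bit field as a stand-in for the real TADL layout:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define TADL_FIELD_MAX          15      /* hypothetical 4-bit field */

int main(void)
{
        unsigned long tadl_min_ps = 400000;     /* ONFI 4.0: tADL >= 400 ns */
        unsigned long mckperiodps = 7576;       /* ~132 MHz master clock */
        unsigned long ncycles = DIV_ROUND_UP(tadl_min_ps, mckperiodps);

        printf("tADL needs %lu cycles\n", ncycles);
        if (ncycles > TADL_FIELD_MAX)
                /* -ERANGE in the driver, now accepted as best effort */
                printf("does not fit in the field\n");
        return 0;
}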
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c index 55a8ee5306ea..8c210a5776bc 100644 --- a/drivers/mtd/nand/atmel/pmecc.c +++ b/drivers/mtd/nand/atmel/pmecc.c | |||
| @@ -945,6 +945,7 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev) | |||
| 945 | */ | 945 | */ |
| 946 | struct platform_device *pdev = to_platform_device(userdev); | 946 | struct platform_device *pdev = to_platform_device(userdev); |
| 947 | const struct atmel_pmecc_caps *caps; | 947 | const struct atmel_pmecc_caps *caps; |
| 948 | const struct of_device_id *match; | ||
| 948 | 949 | ||
| 949 | /* No PMECC engine available. */ | 950 | /* No PMECC engine available. */ |
| 950 | if (!of_property_read_bool(userdev->of_node, | 951 | if (!of_property_read_bool(userdev->of_node, |
| @@ -953,21 +954,11 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev) | |||
| 953 | 954 | ||
| 954 | caps = &at91sam9g45_caps; | 955 | caps = &at91sam9g45_caps; |
| 955 | 956 | ||
| 956 | /* | 957 | /* Find the caps associated to the NAND dev node. */ |
| 957 | * Try to find the NFC subnode and extract the associated caps | 958 | match = of_match_node(atmel_pmecc_legacy_match, |
| 958 | * from there. | 959 | userdev->of_node); |
| 959 | */ | 960 | if (match && match->data) |
| 960 | np = of_find_compatible_node(userdev->of_node, NULL, | 961 | caps = match->data; |
| 961 | "atmel,sama5d3-nfc"); | ||
| 962 | if (np) { | ||
| 963 | const struct of_device_id *match; | ||
| 964 | |||
| 965 | match = of_match_node(atmel_pmecc_legacy_match, np); | ||
| 966 | if (match && match->data) | ||
| 967 | caps = match->data; | ||
| 968 | |||
| 969 | of_node_put(np); | ||
| 970 | } | ||
| 971 | 962 | ||
| 972 | pmecc = atmel_pmecc_create(pdev, caps, 1, 2); | 963 | pmecc = atmel_pmecc_create(pdev, caps, 1, 2); |
| 973 | } | 964 | } |
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 5fa5ddc94834..c6c18b82f8f4 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
| @@ -65,8 +65,14 @@ static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section, | |||
| 65 | 65 | ||
| 66 | if (!section) { | 66 | if (!section) { |
| 67 | oobregion->offset = 0; | 67 | oobregion->offset = 0; |
| 68 | oobregion->length = 4; | 68 | if (mtd->oobsize == 16) |
| 69 | oobregion->length = 4; | ||
| 70 | else | ||
| 71 | oobregion->length = 3; | ||
| 69 | } else { | 72 | } else { |
| 73 | if (mtd->oobsize == 8) | ||
| 74 | return -ERANGE; | ||
| 75 | |||
| 70 | oobregion->offset = 6; | 76 | oobregion->offset = 6; |
| 71 | oobregion->length = ecc->total - 4; | 77 | oobregion->length = ecc->total - 4; |
| 72 | } | 78 | } |
| @@ -1125,7 +1131,9 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) | |||
| 1125 | * Ensure the timing mode has been changed on the chip side | 1131 | * Ensure the timing mode has been changed on the chip side |
| 1126 | * before changing timings on the controller side. | 1132 | * before changing timings on the controller side. |
| 1127 | */ | 1133 | */ |
| 1128 | if (chip->onfi_version) { | 1134 | if (chip->onfi_version && |
| 1135 | (le16_to_cpu(chip->onfi_params.opt_cmd) & | ||
| 1136 | ONFI_OPT_CMD_SET_GET_FEATURES)) { | ||
| 1129 | u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { | 1137 | u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { |
| 1130 | chip->onfi_timing_mode_default, | 1138 | chip->onfi_timing_mode_default, |
| 1131 | }; | 1139 | }; |
| @@ -2741,7 +2749,6 @@ static int nand_write_page_syndrome(struct mtd_info *mtd, | |||
| 2741 | * @buf: the data to write | 2749 | * @buf: the data to write |
| 2742 | * @oob_required: must write chip->oob_poi to OOB | 2750 | * @oob_required: must write chip->oob_poi to OOB |
| 2743 | * @page: page number to write | 2751 | * @page: page number to write |
| 2744 | * @cached: cached programming | ||
| 2745 | * @raw: use _raw version of write_page | 2752 | * @raw: use _raw version of write_page |
| 2746 | */ | 2753 | */ |
| 2747 | static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, | 2754 | static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, |
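The timing-mode change above is gated on the chip actually advertising the optional SET/GET FEATURES command in its ONFI parameter page; chips that do not implement it may not handle the command gracefully. A small sketch of that capability check, with an assumed bit position for the opt_cmd flag:

#include <stdint.h>
#include <stdio.h>

#define OPT_CMD_SET_GET_FEATURES        (1u << 2)       /* assumed bit */

static int chip_supports(uint16_t opt_cmd, uint16_t cmd_bit)
{
        return (opt_cmd & cmd_bit) != 0;
}

int main(void)
{
        uint16_t with_feature    = 0x003c;      /* sample parameter-page words */
        uint16_t without_feature = 0x0018;

        printf("chip A: %s\n",
               chip_supports(with_feature, OPT_CMD_SET_GET_FEATURES) ?
               "issue SET_FEATURES(timing mode)" : "keep default timings");
        printf("chip B: %s\n",
               chip_supports(without_feature, OPT_CMD_SET_GET_FEATURES) ?
               "issue SET_FEATURES(timing mode)" : "keep default timings");
        return 0;
}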
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c index f06312df3669..7e36d7d13c26 100644 --- a/drivers/mtd/nand/nand_timings.c +++ b/drivers/mtd/nand/nand_timings.c | |||
| @@ -311,9 +311,9 @@ int onfi_init_data_interface(struct nand_chip *chip, | |||
| 311 | struct nand_sdr_timings *timings = &iface->timings.sdr; | 311 | struct nand_sdr_timings *timings = &iface->timings.sdr; |
| 312 | 312 | ||
| 313 | /* microseconds -> picoseconds */ | 313 | /* microseconds -> picoseconds */ |
| 314 | timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog); | 314 | timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog); |
| 315 | timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers); | 315 | timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers); |
| 316 | timings->tR_max = 1000000UL * le16_to_cpu(params->t_r); | 316 | timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r); |
| 317 | 317 | ||
| 318 | /* nanoseconds -> picoseconds */ | 318 | /* nanoseconds -> picoseconds */ |
| 319 | timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs); | 319 | timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs); |
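The UL to ULL switch above matters on 32-bit builds, where unsigned long is 32 bits and 1000000 * t overflows once t passes roughly 4294 us, silently truncating the picosecond values stored in the 64-bit tPROG_max/tBERS_max/tR_max fields. A small demonstration (the truncation is reproduced with an explicit 32-bit cast so it also shows up on 64-bit hosts):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t t_bers = 65535;        /* worst-case ONFI value, in us */
        uint32_t bad  = (uint32_t)(1000000ULL * t_bers); /* what a 32-bit UL keeps */
        uint64_t good = 1000000ULL * t_bers;             /* full 64-bit result */

        printf("32-bit math: %u ps\n", (unsigned)bad);
        printf("64-bit math: %llu ps\n", (unsigned long long)good);
        return 0;
}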
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 03a0d057bf2f..e4211c3cc49b 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c | |||
| @@ -2373,6 +2373,7 @@ static int __init ns_init_module(void) | |||
| 2373 | return 0; | 2373 | return 0; |
| 2374 | 2374 | ||
| 2375 | err_exit: | 2375 | err_exit: |
| 2376 | nandsim_debugfs_remove(nand); | ||
| 2376 | free_nandsim(nand); | 2377 | free_nandsim(nand); |
| 2377 | nand_release(nsmtd); | 2378 | nand_release(nsmtd); |
| 2378 | for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i) | 2379 | for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i) |
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index d0b6f8f9f297..6abd142b1324 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c | |||
| @@ -1728,6 +1728,10 @@ static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline, | |||
| 1728 | */ | 1728 | */ |
| 1729 | chip->clk_rate = NSEC_PER_SEC / min_clk_period; | 1729 | chip->clk_rate = NSEC_PER_SEC / min_clk_period; |
| 1730 | real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); | 1730 | real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); |
| 1731 | if (real_clk_rate <= 0) { | ||
| 1732 | dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate); | ||
| 1733 | return -EINVAL; | ||
| 1734 | } | ||
| 1731 | 1735 | ||
| 1732 | /* | 1736 | /* |
| 1733 | * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data | 1737 | * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 181839d6fbea..fc63992ab0e0 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
| 1569 | new_slave->delay = 0; | 1569 | new_slave->delay = 0; |
| 1570 | new_slave->link_failure_count = 0; | 1570 | new_slave->link_failure_count = 0; |
| 1571 | 1571 | ||
| 1572 | if (bond_update_speed_duplex(new_slave)) | 1572 | if (bond_update_speed_duplex(new_slave) && |
| 1573 | bond_needs_speed_duplex(bond)) | ||
| 1573 | new_slave->link = BOND_LINK_DOWN; | 1574 | new_slave->link = BOND_LINK_DOWN; |
| 1574 | 1575 | ||
| 1575 | new_slave->last_rx = jiffies - | 1576 | new_slave->last_rx = jiffies - |
| @@ -2050,6 +2051,7 @@ static int bond_miimon_inspect(struct bonding *bond) | |||
| 2050 | continue; | 2051 | continue; |
| 2051 | 2052 | ||
| 2052 | bond_propose_link_state(slave, BOND_LINK_FAIL); | 2053 | bond_propose_link_state(slave, BOND_LINK_FAIL); |
| 2054 | commit++; | ||
| 2053 | slave->delay = bond->params.downdelay; | 2055 | slave->delay = bond->params.downdelay; |
| 2054 | if (slave->delay) { | 2056 | if (slave->delay) { |
| 2055 | netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n", | 2057 | netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n", |
| @@ -2088,6 +2090,7 @@ static int bond_miimon_inspect(struct bonding *bond) | |||
| 2088 | continue; | 2090 | continue; |
| 2089 | 2091 | ||
| 2090 | bond_propose_link_state(slave, BOND_LINK_BACK); | 2092 | bond_propose_link_state(slave, BOND_LINK_BACK); |
| 2093 | commit++; | ||
| 2091 | slave->delay = bond->params.updelay; | 2094 | slave->delay = bond->params.updelay; |
| 2092 | 2095 | ||
| 2093 | if (slave->delay) { | 2096 | if (slave->delay) { |
| @@ -2138,11 +2141,13 @@ static void bond_miimon_commit(struct bonding *bond) | |||
| 2138 | continue; | 2141 | continue; |
| 2139 | 2142 | ||
| 2140 | case BOND_LINK_UP: | 2143 | case BOND_LINK_UP: |
| 2141 | if (bond_update_speed_duplex(slave)) { | 2144 | if (bond_update_speed_duplex(slave) && |
| 2145 | bond_needs_speed_duplex(bond)) { | ||
| 2142 | slave->link = BOND_LINK_DOWN; | 2146 | slave->link = BOND_LINK_DOWN; |
| 2143 | netdev_warn(bond->dev, | 2147 | if (net_ratelimit()) |
| 2144 | "failed to get link speed/duplex for %s\n", | 2148 | netdev_warn(bond->dev, |
| 2145 | slave->dev->name); | 2149 | "failed to get link speed/duplex for %s\n", |
| 2150 | slave->dev->name); | ||
| 2146 | continue; | 2151 | continue; |
| 2147 | } | 2152 | } |
| 2148 | bond_set_slave_link_state(slave, BOND_LINK_UP, | 2153 | bond_set_slave_link_state(slave, BOND_LINK_UP, |
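bond_miimon_inspect() only proposes link-state changes; bond_miimon_commit() applies them later under RTNL, and it is only scheduled when the inspect pass returns a non-zero count, which is what the added commit++ lines guarantee. A toy version of that two-phase split:

#include <stdio.h>

enum link_state { LINK_UP, LINK_FAIL, LINK_BACK, LINK_DOWN };

struct slave {
        enum link_state state;
        enum link_state proposed;
};

static int inspect(struct slave *s, int carrier_ok)
{
        int commit = 0;

        if (s->state == LINK_UP && !carrier_ok) {
                s->proposed = LINK_FAIL;
                commit++;               /* without this the proposal is lost */
        }
        return commit;
}

int main(void)
{
        struct slave s = { .state = LINK_UP, .proposed = LINK_UP };

        if (inspect(&s, 0))
                printf("commit phase runs, proposed state = %d\n", s.proposed);
        else
                printf("commit phase skipped\n");
        return 0;
}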
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 648f91b58d1e..9b6ce7c3f6c3 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c | |||
| @@ -1048,6 +1048,7 @@ struct bcm_sf2_of_data { | |||
| 1048 | u32 type; | 1048 | u32 type; |
| 1049 | const u16 *reg_offsets; | 1049 | const u16 *reg_offsets; |
| 1050 | unsigned int core_reg_align; | 1050 | unsigned int core_reg_align; |
| 1051 | unsigned int num_cfp_rules; | ||
| 1051 | }; | 1052 | }; |
| 1052 | 1053 | ||
| 1053 | /* Register offsets for the SWITCH_REG_* block */ | 1054 | /* Register offsets for the SWITCH_REG_* block */ |
| @@ -1071,6 +1072,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7445_data = { | |||
| 1071 | .type = BCM7445_DEVICE_ID, | 1072 | .type = BCM7445_DEVICE_ID, |
| 1072 | .core_reg_align = 0, | 1073 | .core_reg_align = 0, |
| 1073 | .reg_offsets = bcm_sf2_7445_reg_offsets, | 1074 | .reg_offsets = bcm_sf2_7445_reg_offsets, |
| 1075 | .num_cfp_rules = 256, | ||
| 1074 | }; | 1076 | }; |
| 1075 | 1077 | ||
| 1076 | static const u16 bcm_sf2_7278_reg_offsets[] = { | 1078 | static const u16 bcm_sf2_7278_reg_offsets[] = { |
| @@ -1093,6 +1095,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = { | |||
| 1093 | .type = BCM7278_DEVICE_ID, | 1095 | .type = BCM7278_DEVICE_ID, |
| 1094 | .core_reg_align = 1, | 1096 | .core_reg_align = 1, |
| 1095 | .reg_offsets = bcm_sf2_7278_reg_offsets, | 1097 | .reg_offsets = bcm_sf2_7278_reg_offsets, |
| 1098 | .num_cfp_rules = 128, | ||
| 1096 | }; | 1099 | }; |
| 1097 | 1100 | ||
| 1098 | static const struct of_device_id bcm_sf2_of_match[] = { | 1101 | static const struct of_device_id bcm_sf2_of_match[] = { |
| @@ -1149,6 +1152,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) | |||
| 1149 | priv->type = data->type; | 1152 | priv->type = data->type; |
| 1150 | priv->reg_offsets = data->reg_offsets; | 1153 | priv->reg_offsets = data->reg_offsets; |
| 1151 | priv->core_reg_align = data->core_reg_align; | 1154 | priv->core_reg_align = data->core_reg_align; |
| 1155 | priv->num_cfp_rules = data->num_cfp_rules; | ||
| 1152 | 1156 | ||
| 1153 | /* Auto-detection using standard registers will not work, so | 1157 | /* Auto-detection using standard registers will not work, so |
| 1154 | * provide an indication of what kind of device we are for | 1158 | * provide an indication of what kind of device we are for |
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 7d3030e04f11..7f9125eef3df 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h | |||
| @@ -72,6 +72,7 @@ struct bcm_sf2_priv { | |||
| 72 | u32 type; | 72 | u32 type; |
| 73 | const u16 *reg_offsets; | 73 | const u16 *reg_offsets; |
| 74 | unsigned int core_reg_align; | 74 | unsigned int core_reg_align; |
| 75 | unsigned int num_cfp_rules; | ||
| 75 | 76 | ||
| 76 | /* spinlock protecting access to the indirect registers */ | 77 | /* spinlock protecting access to the indirect registers */ |
| 77 | spinlock_t indir_lock; | 78 | spinlock_t indir_lock; |
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index 2fb32d67065f..8a1da7e67707 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c | |||
| @@ -98,7 +98,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv, | |||
| 98 | { | 98 | { |
| 99 | u32 reg; | 99 | u32 reg; |
| 100 | 100 | ||
| 101 | WARN_ON(addr >= CFP_NUM_RULES); | 101 | WARN_ON(addr >= priv->num_cfp_rules); |
| 102 | 102 | ||
| 103 | reg = core_readl(priv, CORE_CFP_ACC); | 103 | reg = core_readl(priv, CORE_CFP_ACC); |
| 104 | reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT); | 104 | reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT); |
| @@ -109,7 +109,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv, | |||
| 109 | static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv) | 109 | static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv) |
| 110 | { | 110 | { |
| 111 | /* Entry #0 is reserved */ | 111 | /* Entry #0 is reserved */ |
| 112 | return CFP_NUM_RULES - 1; | 112 | return priv->num_cfp_rules - 1; |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, | 115 | static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, |
| @@ -523,7 +523,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv, | |||
| 523 | if (!(reg & OP_STR_DONE)) | 523 | if (!(reg & OP_STR_DONE)) |
| 524 | break; | 524 | break; |
| 525 | 525 | ||
| 526 | } while (index < CFP_NUM_RULES); | 526 | } while (index < priv->num_cfp_rules); |
| 527 | 527 | ||
| 528 | /* Put the TCAM size here */ | 528 | /* Put the TCAM size here */ |
| 529 | nfc->data = bcm_sf2_cfp_rule_size(priv); | 529 | nfc->data = bcm_sf2_cfp_rule_size(priv); |
| @@ -544,7 +544,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, | |||
| 544 | case ETHTOOL_GRXCLSRLCNT: | 544 | case ETHTOOL_GRXCLSRLCNT: |
| 545 | /* Subtract the default, unusable rule */ | 545 | /* Subtract the default, unusable rule */ |
| 546 | nfc->rule_cnt = bitmap_weight(priv->cfp.used, | 546 | nfc->rule_cnt = bitmap_weight(priv->cfp.used, |
| 547 | CFP_NUM_RULES) - 1; | 547 | priv->num_cfp_rules) - 1; |
| 548 | /* We support specifying rule locations */ | 548 | /* We support specifying rule locations */ |
| 549 | nfc->data |= RX_CLS_LOC_SPECIAL; | 549 | nfc->data |= RX_CLS_LOC_SPECIAL; |
| 550 | break; | 550 | break; |
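Replacing the compile-time CFP_NUM_RULES with a per-SoC num_cfp_rules lets one driver handle both the 7445 (256 rules) and 7278 (128 rules) TCAMs; the value travels with the OF match data and everything else derives from it. A stripped-down sketch of that pattern (names are illustrative, not the real DSA types):

#include <stdio.h>

struct sf2_of_data {
        const char *name;
        unsigned int num_cfp_rules;
};

static const struct sf2_of_data sf2_variants[] = {
        { .name = "bcm7445", .num_cfp_rules = 256 },
        { .name = "bcm7278", .num_cfp_rules = 128 },
};

static unsigned int rule_size(const struct sf2_of_data *d)
{
        return d->num_cfp_rules - 1;    /* entry #0 is reserved */
}

int main(void)
{
        for (unsigned int i = 0; i < 2; i++)
                printf("%s: %u usable rules\n", sf2_variants[i].name,
                       rule_size(&sf2_variants[i]));
        return 0;
}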
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 1e46418a3b74..264b281eb86b 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c | |||
| @@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port, | |||
| 625 | * all finished. | 625 | * all finished. |
| 626 | */ | 626 | */ |
| 627 | mt7623_pad_clk_setup(ds); | 627 | mt7623_pad_clk_setup(ds); |
| 628 | } else { | ||
| 629 | u16 lcl_adv = 0, rmt_adv = 0; | ||
| 630 | u8 flowctrl; | ||
| 631 | u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE; | ||
| 632 | |||
| 633 | switch (phydev->speed) { | ||
| 634 | case SPEED_1000: | ||
| 635 | mcr |= PMCR_FORCE_SPEED_1000; | ||
| 636 | break; | ||
| 637 | case SPEED_100: | ||
| 638 | mcr |= PMCR_FORCE_SPEED_100; | ||
| 639 | break; | ||
| 640 | }; | ||
| 641 | |||
| 642 | if (phydev->link) | ||
| 643 | mcr |= PMCR_FORCE_LNK; | ||
| 644 | |||
| 645 | if (phydev->duplex) { | ||
| 646 | mcr |= PMCR_FORCE_FDX; | ||
| 647 | |||
| 648 | if (phydev->pause) | ||
| 649 | rmt_adv = LPA_PAUSE_CAP; | ||
| 650 | if (phydev->asym_pause) | ||
| 651 | rmt_adv |= LPA_PAUSE_ASYM; | ||
| 652 | |||
| 653 | if (phydev->advertising & ADVERTISED_Pause) | ||
| 654 | lcl_adv |= ADVERTISE_PAUSE_CAP; | ||
| 655 | if (phydev->advertising & ADVERTISED_Asym_Pause) | ||
| 656 | lcl_adv |= ADVERTISE_PAUSE_ASYM; | ||
| 657 | |||
| 658 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); | ||
| 659 | |||
| 660 | if (flowctrl & FLOW_CTRL_TX) | ||
| 661 | mcr |= PMCR_TX_FC_EN; | ||
| 662 | if (flowctrl & FLOW_CTRL_RX) | ||
| 663 | mcr |= PMCR_RX_FC_EN; | ||
| 664 | } | ||
| 665 | mt7530_write(priv, MT7530_PMCR_P(port), mcr); | ||
| 628 | } | 666 | } |
| 629 | } | 667 | } |
| 630 | 668 | ||
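The new else branch programs PMCR from the resolved PHY state, and the pause bits come from mii_resolve_flowctrl_fdx(), which reduces to a small rule: symmetric pause needs both ends to advertise it, otherwise asymmetric pause decides which direction is allowed. A sketch of roughly that resolution (constants simplified):

#include <stdio.h>

#define ADV_PAUSE_CAP   (1u << 0)
#define ADV_PAUSE_ASYM  (1u << 1)
#define FLOW_CTRL_TX    (1u << 0)
#define FLOW_CTRL_RX    (1u << 1)

static unsigned int resolve_flowctrl_fdx(unsigned int lcl, unsigned int rmt)
{
        unsigned int cap = 0;

        if (lcl & rmt & ADV_PAUSE_CAP) {
                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
        } else if (lcl & rmt & ADV_PAUSE_ASYM) {
                if (lcl & ADV_PAUSE_CAP)
                        cap = FLOW_CTRL_RX;
                else if (rmt & ADV_PAUSE_CAP)
                        cap = FLOW_CTRL_TX;
        }
        return cap;
}

int main(void)
{
        printf("both symmetric           -> %u\n",
               resolve_flowctrl_fdx(ADV_PAUSE_CAP, ADV_PAUSE_CAP));
        printf("local sym+asym, rmt asym -> %u\n",
               resolve_flowctrl_fdx(ADV_PAUSE_CAP | ADV_PAUSE_ASYM,
                                    ADV_PAUSE_ASYM));
        return 0;
}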
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index b83d76b99802..74db9822eb40 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h | |||
| @@ -151,6 +151,7 @@ enum mt7530_stp_state { | |||
| 151 | #define PMCR_TX_FC_EN BIT(5) | 151 | #define PMCR_TX_FC_EN BIT(5) |
| 152 | #define PMCR_RX_FC_EN BIT(4) | 152 | #define PMCR_RX_FC_EN BIT(4) |
| 153 | #define PMCR_FORCE_SPEED_1000 BIT(3) | 153 | #define PMCR_FORCE_SPEED_1000 BIT(3) |
| 154 | #define PMCR_FORCE_SPEED_100 BIT(2) | ||
| 154 | #define PMCR_FORCE_FDX BIT(1) | 155 | #define PMCR_FORCE_FDX BIT(1) |
| 155 | #define PMCR_FORCE_LNK BIT(0) | 156 | #define PMCR_FORCE_LNK BIT(0) |
| 156 | #define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ | 157 | #define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 86058a9f3417..6e253d913fe2 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
| @@ -1661,21 +1661,21 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) | |||
| 1661 | return 0; | 1661 | return 0; |
| 1662 | } | 1662 | } |
| 1663 | 1663 | ||
| 1664 | static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata) | 1664 | static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata) |
| 1665 | { | 1665 | { |
| 1666 | int ret; | 1666 | int ret; |
| 1667 | 1667 | ||
| 1668 | if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) | 1668 | if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) |
| 1669 | return 0; | 1669 | return; |
| 1670 | 1670 | ||
| 1671 | if (!IS_ENABLED(CONFIG_MDIO_XGENE)) | 1671 | if (!IS_ENABLED(CONFIG_MDIO_XGENE)) |
| 1672 | return 0; | 1672 | return; |
| 1673 | 1673 | ||
| 1674 | ret = xgene_enet_phy_connect(pdata->ndev); | 1674 | ret = xgene_enet_phy_connect(pdata->ndev); |
| 1675 | if (!ret) | 1675 | if (!ret) |
| 1676 | pdata->mdio_driver = true; | 1676 | pdata->mdio_driver = true; |
| 1677 | 1677 | ||
| 1678 | return 0; | 1678 | return; |
| 1679 | } | 1679 | } |
| 1680 | 1680 | ||
| 1681 | static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata) | 1681 | static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata) |
| @@ -1779,15 +1779,11 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) | |||
| 1779 | if (ret) | 1779 | if (ret) |
| 1780 | return ret; | 1780 | return ret; |
| 1781 | 1781 | ||
| 1782 | ret = xgene_enet_check_phy_handle(pdata); | ||
| 1783 | if (ret) | ||
| 1784 | return ret; | ||
| 1785 | |||
| 1786 | xgene_enet_gpiod_get(pdata); | 1782 | xgene_enet_gpiod_get(pdata); |
| 1787 | 1783 | ||
| 1788 | if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { | 1784 | pdata->clk = devm_clk_get(&pdev->dev, NULL); |
| 1789 | pdata->clk = devm_clk_get(&pdev->dev, NULL); | 1785 | if (IS_ERR(pdata->clk)) { |
| 1790 | if (IS_ERR(pdata->clk)) { | 1786 | if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { |
| 1791 | /* Abort if the clock is defined but couldn't be | 1787 | /* Abort if the clock is defined but couldn't be |
| 1792 | * retrived. Always abort if the clock is missing on | 1788 | * retrived. Always abort if the clock is missing on |
| 1793 | * DT system as the driver can't cope with this case. | 1789 | * DT system as the driver can't cope with this case. |
| @@ -2097,9 +2093,11 @@ static int xgene_enet_probe(struct platform_device *pdev) | |||
| 2097 | goto err; | 2093 | goto err; |
| 2098 | } | 2094 | } |
| 2099 | 2095 | ||
| 2096 | xgene_enet_check_phy_handle(pdata); | ||
| 2097 | |||
| 2100 | ret = xgene_enet_init_hw(pdata); | 2098 | ret = xgene_enet_init_hw(pdata); |
| 2101 | if (ret) | 2099 | if (ret) |
| 2102 | goto err; | 2100 | goto err2; |
| 2103 | 2101 | ||
| 2104 | link_state = pdata->mac_ops->link_state; | 2102 | link_state = pdata->mac_ops->link_state; |
| 2105 | if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { | 2103 | if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { |
| @@ -2117,29 +2115,30 @@ static int xgene_enet_probe(struct platform_device *pdev) | |||
| 2117 | spin_lock_init(&pdata->stats_lock); | 2115 | spin_lock_init(&pdata->stats_lock); |
| 2118 | ret = xgene_extd_stats_init(pdata); | 2116 | ret = xgene_extd_stats_init(pdata); |
| 2119 | if (ret) | 2117 | if (ret) |
| 2120 | goto err2; | 2118 | goto err1; |
| 2121 | 2119 | ||
| 2122 | xgene_enet_napi_add(pdata); | 2120 | xgene_enet_napi_add(pdata); |
| 2123 | ret = register_netdev(ndev); | 2121 | ret = register_netdev(ndev); |
| 2124 | if (ret) { | 2122 | if (ret) { |
| 2125 | netdev_err(ndev, "Failed to register netdev\n"); | 2123 | netdev_err(ndev, "Failed to register netdev\n"); |
| 2126 | goto err2; | 2124 | goto err1; |
| 2127 | } | 2125 | } |
| 2128 | 2126 | ||
| 2129 | return 0; | 2127 | return 0; |
| 2130 | 2128 | ||
| 2131 | err2: | 2129 | err1: |
| 2132 | /* | 2130 | /* |
| 2133 | * If necessary, free_netdev() will call netif_napi_del() and undo | 2131 | * If necessary, free_netdev() will call netif_napi_del() and undo |
| 2134 | * the effects of xgene_enet_napi_add()'s calls to netif_napi_add(). | 2132 | * the effects of xgene_enet_napi_add()'s calls to netif_napi_add(). |
| 2135 | */ | 2133 | */ |
| 2136 | 2134 | ||
| 2135 | xgene_enet_delete_desc_rings(pdata); | ||
| 2136 | |||
| 2137 | err2: | ||
| 2137 | if (pdata->mdio_driver) | 2138 | if (pdata->mdio_driver) |
| 2138 | xgene_enet_phy_disconnect(pdata); | 2139 | xgene_enet_phy_disconnect(pdata); |
| 2139 | else if (phy_interface_mode_is_rgmii(pdata->phy_mode)) | 2140 | else if (phy_interface_mode_is_rgmii(pdata->phy_mode)) |
| 2140 | xgene_enet_mdio_remove(pdata); | 2141 | xgene_enet_mdio_remove(pdata); |
| 2141 | err1: | ||
| 2142 | xgene_enet_delete_desc_rings(pdata); | ||
| 2143 | err: | 2142 | err: |
| 2144 | free_netdev(ndev); | 2143 | free_netdev(ndev); |
| 2145 | return ret; | 2144 | return ret; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index fce0fd3f23ff..bf9b3f020e10 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h | |||
| @@ -105,8 +105,7 @@ struct aq_hw_ops { | |||
| 105 | 105 | ||
| 106 | int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr); | 106 | int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr); |
| 107 | 107 | ||
| 108 | int (*hw_get_link_status)(struct aq_hw_s *self, | 108 | int (*hw_get_link_status)(struct aq_hw_s *self); |
| 109 | struct aq_hw_link_status_s *link_status); | ||
| 110 | 109 | ||
| 111 | int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed); | 110 | int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed); |
| 112 | 111 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 9ee1c5016784..6ac9e2602d6d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c | |||
| @@ -103,6 +103,8 @@ int aq_nic_cfg_start(struct aq_nic_s *self) | |||
| 103 | else | 103 | else |
| 104 | cfg->vecs = 1U; | 104 | cfg->vecs = 1U; |
| 105 | 105 | ||
| 106 | cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF); | ||
| 107 | |||
| 106 | cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func); | 108 | cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func); |
| 107 | 109 | ||
| 108 | if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) || | 110 | if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) || |
| @@ -123,33 +125,30 @@ static void aq_nic_service_timer_cb(unsigned long param) | |||
| 123 | struct net_device *ndev = aq_nic_get_ndev(self); | 125 | struct net_device *ndev = aq_nic_get_ndev(self); |
| 124 | int err = 0; | 126 | int err = 0; |
| 125 | unsigned int i = 0U; | 127 | unsigned int i = 0U; |
| 126 | struct aq_hw_link_status_s link_status; | ||
| 127 | struct aq_ring_stats_rx_s stats_rx; | 128 | struct aq_ring_stats_rx_s stats_rx; |
| 128 | struct aq_ring_stats_tx_s stats_tx; | 129 | struct aq_ring_stats_tx_s stats_tx; |
| 129 | 130 | ||
| 130 | if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) | 131 | if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) |
| 131 | goto err_exit; | 132 | goto err_exit; |
| 132 | 133 | ||
| 133 | err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status); | 134 | err = self->aq_hw_ops.hw_get_link_status(self->aq_hw); |
| 134 | if (err < 0) | 135 | if (err < 0) |
| 135 | goto err_exit; | 136 | goto err_exit; |
| 136 | 137 | ||
| 137 | self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, | 138 | self->link_status = self->aq_hw->aq_link_status; |
| 138 | self->aq_nic_cfg.is_interrupt_moderation); | ||
| 139 | |||
| 140 | if (memcmp(&link_status, &self->link_status, sizeof(link_status))) { | ||
| 141 | if (link_status.mbps) { | ||
| 142 | aq_utils_obj_set(&self->header.flags, | ||
| 143 | AQ_NIC_FLAG_STARTED); | ||
| 144 | aq_utils_obj_clear(&self->header.flags, | ||
| 145 | AQ_NIC_LINK_DOWN); | ||
| 146 | netif_carrier_on(self->ndev); | ||
| 147 | } else { | ||
| 148 | netif_carrier_off(self->ndev); | ||
| 149 | aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); | ||
| 150 | } | ||
| 151 | 139 | ||
| 152 | self->link_status = link_status; | 140 | self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, |
| 141 | self->aq_nic_cfg.is_interrupt_moderation); | ||
| 142 | |||
| 143 | if (self->link_status.mbps) { | ||
| 144 | aq_utils_obj_set(&self->header.flags, | ||
| 145 | AQ_NIC_FLAG_STARTED); | ||
| 146 | aq_utils_obj_clear(&self->header.flags, | ||
| 147 | AQ_NIC_LINK_DOWN); | ||
| 148 | netif_carrier_on(self->ndev); | ||
| 149 | } else { | ||
| 150 | netif_carrier_off(self->ndev); | ||
| 151 | aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); | ||
| 153 | } | 152 | } |
| 154 | 153 | ||
| 155 | memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); | 154 | memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); |
| @@ -597,14 +596,11 @@ exit: | |||
| 597 | } | 596 | } |
| 598 | 597 | ||
| 599 | int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) | 598 | int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) |
| 600 | __releases(&ring->lock) | ||
| 601 | __acquires(&ring->lock) | ||
| 602 | { | 599 | { |
| 603 | struct aq_ring_s *ring = NULL; | 600 | struct aq_ring_s *ring = NULL; |
| 604 | unsigned int frags = 0U; | 601 | unsigned int frags = 0U; |
| 605 | unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; | 602 | unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; |
| 606 | unsigned int tc = 0U; | 603 | unsigned int tc = 0U; |
| 607 | unsigned int trys = AQ_CFG_LOCK_TRYS; | ||
| 608 | int err = NETDEV_TX_OK; | 604 | int err = NETDEV_TX_OK; |
| 609 | bool is_nic_in_bad_state; | 605 | bool is_nic_in_bad_state; |
| 610 | 606 | ||
| @@ -628,36 +624,21 @@ __acquires(&ring->lock) | |||
| 628 | goto err_exit; | 624 | goto err_exit; |
| 629 | } | 625 | } |
| 630 | 626 | ||
| 631 | do { | 627 | frags = aq_nic_map_skb(self, skb, ring); |
| 632 | if (spin_trylock(&ring->header.lock)) { | ||
| 633 | frags = aq_nic_map_skb(self, skb, ring); | ||
| 634 | |||
| 635 | if (likely(frags)) { | ||
| 636 | err = self->aq_hw_ops.hw_ring_tx_xmit( | ||
| 637 | self->aq_hw, | ||
| 638 | ring, frags); | ||
| 639 | if (err >= 0) { | ||
| 640 | if (aq_ring_avail_dx(ring) < | ||
| 641 | AQ_CFG_SKB_FRAGS_MAX + 1) | ||
| 642 | aq_nic_ndev_queue_stop( | ||
| 643 | self, | ||
| 644 | ring->idx); | ||
| 645 | |||
| 646 | ++ring->stats.tx.packets; | ||
| 647 | ring->stats.tx.bytes += skb->len; | ||
| 648 | } | ||
| 649 | } else { | ||
| 650 | err = NETDEV_TX_BUSY; | ||
| 651 | } | ||
| 652 | 628 | ||
| 653 | spin_unlock(&ring->header.lock); | 629 | if (likely(frags)) { |
| 654 | break; | 630 | err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw, |
| 655 | } | 631 | ring, |
| 656 | } while (--trys); | 632 | frags); |
| 633 | if (err >= 0) { | ||
| 634 | if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1) | ||
| 635 | aq_nic_ndev_queue_stop(self, ring->idx); | ||
| 657 | 636 | ||
| 658 | if (!trys) { | 637 | ++ring->stats.tx.packets; |
| 638 | ring->stats.tx.bytes += skb->len; | ||
| 639 | } | ||
| 640 | } else { | ||
| 659 | err = NETDEV_TX_BUSY; | 641 | err = NETDEV_TX_BUSY; |
| 660 | goto err_exit; | ||
| 661 | } | 642 | } |
| 662 | 643 | ||
| 663 | err_exit: | 644 | err_exit: |
| @@ -688,11 +669,26 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) | |||
| 688 | netdev_for_each_mc_addr(ha, ndev) { | 669 | netdev_for_each_mc_addr(ha, ndev) { |
| 689 | ether_addr_copy(self->mc_list.ar[i++], ha->addr); | 670 | ether_addr_copy(self->mc_list.ar[i++], ha->addr); |
| 690 | ++self->mc_list.count; | 671 | ++self->mc_list.count; |
| 672 | |||
| 673 | if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) | ||
| 674 | break; | ||
| 691 | } | 675 | } |
| 692 | 676 | ||
| 693 | return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, | 677 | if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) { |
| 678 | /* Number of filters is too big: atlantic does not support this. | ||
| 679 | * Force all multi filter to support this. | ||
| 680 | * With this we disable all UC filters and setup "all pass" | ||
| 681 | * multicast mask | ||
| 682 | */ | ||
| 683 | self->packet_filter |= IFF_ALLMULTI; | ||
| 684 | self->aq_hw->aq_nic_cfg->mc_list_count = 0; | ||
| 685 | return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, | ||
| 686 | self->packet_filter); | ||
| 687 | } else { | ||
| 688 | return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw, | ||
| 694 | self->mc_list.ar, | 689 | self->mc_list.ar, |
| 695 | self->mc_list.count); | 690 | self->mc_list.count); |
| 691 | } | ||
| 696 | } | 692 | } |
| 697 | 693 | ||
| 698 | int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) | 694 | int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) |
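The multicast change above caps how many addresses are programmed into the hardware filter: once the list no longer fits AQ_CFG_MULTICAST_ADDRESS_MAX, the driver stops filtering per address and flips to accept-all-multicast instead, the usual fallback when a filter table overflows. A simplified model (table size and flag value are illustrative):

#include <stdio.h>

#define MC_FILTER_MAX   32
#define IFF_ALLMULTI    0x200

static unsigned int program_mc_list(unsigned int requested, unsigned int *flags)
{
        if (requested > MC_FILTER_MAX) {
                *flags |= IFF_ALLMULTI; /* hardware passes all multicast */
                return 0;               /* no per-address filters programmed */
        }
        return requested;               /* program each address individually */
}

int main(void)
{
        unsigned int flags = 0;
        unsigned int programmed = program_mc_list(40, &flags);

        printf("programmed=%u allmulti=%d\n", programmed,
               !!(flags & IFF_ALLMULTI));
        return 0;
}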
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 9a0817938eca..ec5579fb8268 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c | |||
| @@ -101,7 +101,6 @@ int aq_ring_init(struct aq_ring_s *self) | |||
| 101 | self->hw_head = 0; | 101 | self->hw_head = 0; |
| 102 | self->sw_head = 0; | 102 | self->sw_head = 0; |
| 103 | self->sw_tail = 0; | 103 | self->sw_tail = 0; |
| 104 | spin_lock_init(&self->header.lock); | ||
| 105 | return 0; | 104 | return 0; |
| 106 | } | 105 | } |
| 107 | 106 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h index f6012b34abe6..e12bcdfb874a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h | |||
| @@ -17,7 +17,6 @@ | |||
| 17 | #define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_) | 17 | #define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_) |
| 18 | 18 | ||
| 19 | struct aq_obj_s { | 19 | struct aq_obj_s { |
| 20 | spinlock_t lock; /* spinlock for nic/rings processing */ | ||
| 21 | atomic_t flags; | 20 | atomic_t flags; |
| 22 | }; | 21 | }; |
| 23 | 22 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index ad5b4d4dac7f..fee446af748f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c | |||
| @@ -34,8 +34,6 @@ struct aq_vec_s { | |||
| 34 | #define AQ_VEC_RX_ID 1 | 34 | #define AQ_VEC_RX_ID 1 |
| 35 | 35 | ||
| 36 | static int aq_vec_poll(struct napi_struct *napi, int budget) | 36 | static int aq_vec_poll(struct napi_struct *napi, int budget) |
| 37 | __releases(&self->lock) | ||
| 38 | __acquires(&self->lock) | ||
| 39 | { | 37 | { |
| 40 | struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); | 38 | struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); |
| 41 | struct aq_ring_s *ring = NULL; | 39 | struct aq_ring_s *ring = NULL; |
| @@ -47,7 +45,7 @@ __acquires(&self->lock) | |||
| 47 | 45 | ||
| 48 | if (!self) { | 46 | if (!self) { |
| 49 | err = -EINVAL; | 47 | err = -EINVAL; |
| 50 | } else if (spin_trylock(&self->header.lock)) { | 48 | } else { |
| 51 | for (i = 0U, ring = self->ring[0]; | 49 | for (i = 0U, ring = self->ring[0]; |
| 52 | self->tx_rings > i; ++i, ring = self->ring[i]) { | 50 | self->tx_rings > i; ++i, ring = self->ring[i]) { |
| 53 | if (self->aq_hw_ops->hw_ring_tx_head_update) { | 51 | if (self->aq_hw_ops->hw_ring_tx_head_update) { |
| @@ -105,11 +103,8 @@ __acquires(&self->lock) | |||
| 105 | self->aq_hw_ops->hw_irq_enable(self->aq_hw, | 103 | self->aq_hw_ops->hw_irq_enable(self->aq_hw, |
| 106 | 1U << self->aq_ring_param.vec_idx); | 104 | 1U << self->aq_ring_param.vec_idx); |
| 107 | } | 105 | } |
| 108 | |||
| 109 | err_exit: | ||
| 110 | spin_unlock(&self->header.lock); | ||
| 111 | } | 106 | } |
| 112 | 107 | err_exit: | |
| 113 | return work_done; | 108 | return work_done; |
| 114 | } | 109 | } |
| 115 | 110 | ||
| @@ -185,8 +180,6 @@ int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops, | |||
| 185 | self->aq_hw_ops = aq_hw_ops; | 180 | self->aq_hw_ops = aq_hw_ops; |
| 186 | self->aq_hw = aq_hw; | 181 | self->aq_hw = aq_hw; |
| 187 | 182 | ||
| 188 | spin_lock_init(&self->header.lock); | ||
| 189 | |||
| 190 | for (i = 0U, ring = self->ring[0]; | 183 | for (i = 0U, ring = self->ring[0]; |
| 191 | self->tx_rings > i; ++i, ring = self->ring[i]) { | 184 | self->tx_rings > i; ++i, ring = self->ring[i]) { |
| 192 | err = aq_ring_init(&ring[AQ_VEC_TX_ID]); | 185 | err = aq_ring_init(&ring[AQ_VEC_TX_ID]); |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index faeb4935ef3e..c5a02df7a48b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | |||
| @@ -629,6 +629,12 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self, | |||
| 629 | buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1; | 629 | buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1; |
| 630 | else if (0x0U == (pkt_type & 0x1CU)) | 630 | else if (0x0U == (pkt_type & 0x1CU)) |
| 631 | buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1; | 631 | buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1; |
| 632 | |||
| 633 | /* Checksum offload workaround for small packets */ | ||
| 634 | if (rxd_wb->pkt_len <= 60) { | ||
| 635 | buff->is_ip_cso = 0U; | ||
| 636 | buff->is_cso_err = 0U; | ||
| 637 | } | ||
| 632 | } | 638 | } |
| 633 | 639 | ||
| 634 | is_err &= ~0x18U; | 640 | is_err &= ~0x18U; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 1bceb7358e5c..21784cc39dab 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | |||
| @@ -645,6 +645,12 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, | |||
| 645 | buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; | 645 | buff->is_udp_cso = buff->is_cso_err ? 0U : 1U; |
| 646 | else if (0x0U == (pkt_type & 0x1CU)) | 646 | else if (0x0U == (pkt_type & 0x1CU)) |
| 647 | buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U; | 647 | buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U; |
| 648 | |||
| 649 | /* Checksum offload workaround for small packets */ | ||
| 650 | if (rxd_wb->pkt_len <= 60) { | ||
| 651 | buff->is_ip_cso = 0U; | ||
| 652 | buff->is_cso_err = 0U; | ||
| 653 | } | ||
| 648 | } | 654 | } |
| 649 | 655 | ||
| 650 | is_err &= ~0x18U; | 656 | is_err &= ~0x18U; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 8d6d8f5804da..4f5ec9a0fbfb 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | |||
| @@ -141,6 +141,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self, | |||
| 141 | 141 | ||
| 142 | err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected, | 142 | err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected, |
| 143 | aq_hw_read_reg(self, 0x18U)); | 143 | aq_hw_read_reg(self, 0x18U)); |
| 144 | |||
| 145 | if (err < 0) | ||
| 146 | pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n", | ||
| 147 | AQ_CFG_DRV_NAME, | ||
| 148 | aq_hw_caps->fw_ver_expected, | ||
| 149 | aq_hw_read_reg(self, 0x18U)); | ||
| 144 | return err; | 150 | return err; |
| 145 | } | 151 | } |
| 146 | 152 | ||
| @@ -313,11 +319,11 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self, | |||
| 313 | err_exit:; | 319 | err_exit:; |
| 314 | } | 320 | } |
| 315 | 321 | ||
| 316 | int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, | 322 | int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) |
| 317 | struct aq_hw_link_status_s *link_status) | ||
| 318 | { | 323 | { |
| 319 | u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); | 324 | u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); |
| 320 | u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT; | 325 | u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT; |
| 326 | struct aq_hw_link_status_s *link_status = &self->aq_link_status; | ||
| 321 | 327 | ||
| 322 | if (!link_speed_mask) { | 328 | if (!link_speed_mask) { |
| 323 | link_status->mbps = 0U; | 329 | link_status->mbps = 0U; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index a66aee51ab5b..e0360a6b2202 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | |||
| @@ -180,8 +180,7 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self, | |||
| 180 | int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, | 180 | int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed, |
| 181 | enum hal_atl_utils_fw_state_e state); | 181 | enum hal_atl_utils_fw_state_e state); |
| 182 | 182 | ||
| 183 | int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self, | 183 | int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self); |
| 184 | struct aq_hw_link_status_s *link_status); | ||
| 185 | 184 | ||
| 186 | int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, | 185 | int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, |
| 187 | struct aq_hw_caps_s *aq_hw_caps, | 186 | struct aq_hw_caps_s *aq_hw_caps, |
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index 041cfb7952f8..e94159507847 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c | |||
| @@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev) | |||
| 609 | mac_mode |= HALF_DUPLEX; | 609 | mac_mode |= HALF_DUPLEX; |
| 610 | 610 | ||
| 611 | if (gigabit) { | 611 | if (gigabit) { |
| 612 | if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII) | 612 | if (phy_interface_is_rgmii(dev->phydev)) |
| 613 | mac_mode |= RGMII_MODE; | 613 | mac_mode |= RGMII_MODE; |
| 614 | 614 | ||
| 615 | mac_mode |= GMAC_MODE; | 615 | mac_mode |= GMAC_MODE; |
| @@ -1268,11 +1268,10 @@ static int nb8800_tangox_init(struct net_device *dev) | |||
| 1268 | break; | 1268 | break; |
| 1269 | 1269 | ||
| 1270 | case PHY_INTERFACE_MODE_RGMII: | 1270 | case PHY_INTERFACE_MODE_RGMII: |
| 1271 | pad_mode = PAD_MODE_RGMII; | 1271 | case PHY_INTERFACE_MODE_RGMII_ID: |
| 1272 | break; | 1272 | case PHY_INTERFACE_MODE_RGMII_RXID: |
| 1273 | |||
| 1274 | case PHY_INTERFACE_MODE_RGMII_TXID: | 1273 | case PHY_INTERFACE_MODE_RGMII_TXID: |
| 1275 | pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY; | 1274 | pad_mode = PAD_MODE_RGMII; |
| 1276 | break; | 1275 | break; |
| 1277 | 1276 | ||
| 1278 | default: | 1277 | default: |
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index f411936b744c..a1125d10c825 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c | |||
| @@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev, | |||
| 2368 | bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); | 2368 | bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); |
| 2369 | 2369 | ||
| 2370 | spin_lock_init(&bp->lock); | 2370 | spin_lock_init(&bp->lock); |
| 2371 | u64_stats_init(&bp->hw_stats.syncp); | ||
| 2371 | 2372 | ||
| 2372 | bp->rx_pending = B44_DEF_RX_RING_PENDING; | 2373 | bp->rx_pending = B44_DEF_RX_RING_PENDING; |
| 2373 | bp->tx_pending = B44_DEF_TX_RING_PENDING; | 2374 | bp->tx_pending = B44_DEF_TX_RING_PENDING; |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 5333601f855f..c28fa5a8734c 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
| @@ -449,6 +449,10 @@ static void bcm_sysport_get_stats(struct net_device *dev, | |||
| 449 | p = (char *)&dev->stats; | 449 | p = (char *)&dev->stats; |
| 450 | else | 450 | else |
| 451 | p = (char *)priv; | 451 | p = (char *)priv; |
| 452 | |||
| 453 | if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type)) | ||
| 454 | continue; | ||
| 455 | |||
| 452 | p += s->stat_offset; | 456 | p += s->stat_offset; |
| 453 | data[j] = *(unsigned long *)p; | 457 | data[j] = *(unsigned long *)p; |
| 454 | j++; | 458 | j++; |
| @@ -593,7 +597,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev, | |||
| 593 | 597 | ||
| 594 | static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) | 598 | static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb) |
| 595 | { | 599 | { |
| 596 | dev_kfree_skb_any(cb->skb); | 600 | dev_consume_skb_any(cb->skb); |
| 597 | cb->skb = NULL; | 601 | cb->skb = NULL; |
| 598 | dma_unmap_addr_set(cb, dma_addr, 0); | 602 | dma_unmap_addr_set(cb, dma_addr, 0); |
| 599 | } | 603 | } |
| @@ -1342,6 +1346,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, | |||
| 1342 | 1346 | ||
| 1343 | ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL); | 1347 | ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL); |
| 1344 | if (!ring->cbs) { | 1348 | if (!ring->cbs) { |
| 1349 | dma_free_coherent(kdev, sizeof(struct dma_desc), | ||
| 1350 | ring->desc_cpu, ring->desc_dma); | ||
| 1345 | netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); | 1351 | netif_err(priv, hw, priv->netdev, "CB allocation failed\n"); |
| 1346 | return -ENOMEM; | 1352 | return -ENOMEM; |
| 1347 | } | 1353 | } |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e7c8539cbddf..f20b3d2a4c23 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -4647,7 +4647,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) | |||
| 4647 | pf->port_id = le16_to_cpu(resp->port_id); | 4647 | pf->port_id = le16_to_cpu(resp->port_id); |
| 4648 | bp->dev->dev_port = pf->port_id; | 4648 | bp->dev->dev_port = pf->port_id; |
| 4649 | memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); | 4649 | memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); |
| 4650 | memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); | ||
| 4651 | pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); | 4650 | pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); |
| 4652 | pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); | 4651 | pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); |
| 4653 | pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); | 4652 | pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); |
| @@ -4687,16 +4686,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) | |||
| 4687 | vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); | 4686 | vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); |
| 4688 | 4687 | ||
| 4689 | memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); | 4688 | memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); |
| 4690 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
| 4691 | |||
| 4692 | if (is_valid_ether_addr(vf->mac_addr)) { | ||
| 4693 | /* overwrite netdev dev_adr with admin VF MAC */ | ||
| 4694 | memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); | ||
| 4695 | } else { | ||
| 4696 | eth_hw_addr_random(bp->dev); | ||
| 4697 | rc = bnxt_approve_mac(bp, bp->dev->dev_addr); | ||
| 4698 | } | ||
| 4699 | return rc; | ||
| 4700 | #endif | 4689 | #endif |
| 4701 | } | 4690 | } |
| 4702 | 4691 | ||
| @@ -7152,6 +7141,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) | |||
| 7152 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc; | 7141 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc; |
| 7153 | netdev_reset_tc(dev); | 7142 | netdev_reset_tc(dev); |
| 7154 | } | 7143 | } |
| 7144 | bp->tx_nr_rings += bp->tx_nr_rings_xdp; | ||
| 7155 | bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : | 7145 | bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : |
| 7156 | bp->tx_nr_rings + bp->rx_nr_rings; | 7146 | bp->tx_nr_rings + bp->rx_nr_rings; |
| 7157 | bp->num_stat_ctxs = bp->cp_nr_rings; | 7147 | bp->num_stat_ctxs = bp->cp_nr_rings; |
| @@ -7661,6 +7651,28 @@ void bnxt_restore_pf_fw_resources(struct bnxt *bp) | |||
| 7661 | bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP); | 7651 | bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP); |
| 7662 | } | 7652 | } |
| 7663 | 7653 | ||
| 7654 | static int bnxt_init_mac_addr(struct bnxt *bp) | ||
| 7655 | { | ||
| 7656 | int rc = 0; | ||
| 7657 | |||
| 7658 | if (BNXT_PF(bp)) { | ||
| 7659 | memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); | ||
| 7660 | } else { | ||
| 7661 | #ifdef CONFIG_BNXT_SRIOV | ||
| 7662 | struct bnxt_vf_info *vf = &bp->vf; | ||
| 7663 | |||
| 7664 | if (is_valid_ether_addr(vf->mac_addr)) { | ||
| 7665 | /* overwrite netdev dev_adr with admin VF MAC */ | ||
| 7666 | memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); | ||
| 7667 | } else { | ||
| 7668 | eth_hw_addr_random(bp->dev); | ||
| 7669 | rc = bnxt_approve_mac(bp, bp->dev->dev_addr); | ||
| 7670 | } | ||
| 7671 | #endif | ||
| 7672 | } | ||
| 7673 | return rc; | ||
| 7674 | } | ||
| 7675 | |||
| 7664 | static void bnxt_parse_log_pcie_link(struct bnxt *bp) | 7676 | static void bnxt_parse_log_pcie_link(struct bnxt *bp) |
| 7665 | { | 7677 | { |
| 7666 | enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; | 7678 | enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; |
| @@ -7789,7 +7801,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 7789 | rc = -1; | 7801 | rc = -1; |
| 7790 | goto init_err_pci_clean; | 7802 | goto init_err_pci_clean; |
| 7791 | } | 7803 | } |
| 7792 | 7804 | rc = bnxt_init_mac_addr(bp); | |
| 7805 | if (rc) { | ||
| 7806 | dev_err(&pdev->dev, "Unable to initialize mac address.\n"); | ||
| 7807 | rc = -EADDRNOTAVAIL; | ||
| 7808 | goto init_err_pci_clean; | ||
| 7809 | } | ||
| 7793 | rc = bnxt_hwrm_queue_qportcfg(bp); | 7810 | rc = bnxt_hwrm_queue_qportcfg(bp); |
| 7794 | if (rc) { | 7811 | if (rc) { |
| 7795 | netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", | 7812 | netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", |
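bnxt_init_mac_addr() concentrates the MAC policy in one place: a PF uses the address reported by firmware, while a VF uses the admin-assigned MAC if it is a valid unicast address and otherwise generates a random locally administered one (what eth_hw_addr_random() does) and asks the PF to approve it. A userspace sketch of that validity check and fallback:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int valid_ether_addr(const uint8_t *a)
{
        int all_zero = 1;

        for (int i = 0; i < 6; i++)
                if (a[i])
                        all_zero = 0;
        return !all_zero && !(a[0] & 0x01);     /* non-zero and not multicast */
}

static void random_ether_addr(uint8_t *a)
{
        for (int i = 0; i < 6; i++)
                a[i] = rand() & 0xff;
        a[0] &= 0xfe;   /* clear the multicast bit */
        a[0] |= 0x02;   /* mark it locally administered */
}

int main(void)
{
        uint8_t admin_mac[6] = { 0 };   /* no admin MAC assigned to the VF */
        uint8_t dev_addr[6];

        if (valid_ether_addr(admin_mac)) {
                for (int i = 0; i < 6; i++)
                        dev_addr[i] = admin_mac[i];
        } else {
                random_ether_addr(dev_addr);
                /* the real driver then submits this MAC for PF approval */
        }
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n", dev_addr[0], dev_addr[1],
               dev_addr[2], dev_addr[3], dev_addr[4], dev_addr[5]);
        return 0;
}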
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 77da75a55c02..997e10e8b863 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | |||
| @@ -84,6 +84,8 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id) | |||
| 84 | 84 | ||
| 85 | max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); | 85 | max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); |
| 86 | bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1); | 86 | bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1); |
| 87 | if (ulp->msix_requested) | ||
| 88 | edev->en_ops->bnxt_free_msix(edev, ulp_id); | ||
| 87 | } | 89 | } |
| 88 | if (ulp->max_async_event_id) | 90 | if (ulp->max_async_event_id) |
| 89 | bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); | 91 | bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 7b0b399aaedd..fea3f9a5fb2d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
| @@ -1360,7 +1360,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, | |||
| 1360 | if (skb) { | 1360 | if (skb) { |
| 1361 | pkts_compl++; | 1361 | pkts_compl++; |
| 1362 | bytes_compl += GENET_CB(skb)->bytes_sent; | 1362 | bytes_compl += GENET_CB(skb)->bytes_sent; |
| 1363 | dev_kfree_skb_any(skb); | 1363 | dev_consume_skb_any(skb); |
| 1364 | } | 1364 | } |
| 1365 | 1365 | ||
| 1366 | txbds_processed++; | 1366 | txbds_processed++; |
| @@ -1875,7 +1875,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, | |||
| 1875 | cb = ring->cbs + i; | 1875 | cb = ring->cbs + i; |
| 1876 | skb = bcmgenet_rx_refill(priv, cb); | 1876 | skb = bcmgenet_rx_refill(priv, cb); |
| 1877 | if (skb) | 1877 | if (skb) |
| 1878 | dev_kfree_skb_any(skb); | 1878 | dev_consume_skb_any(skb); |
| 1879 | if (!cb->skb) | 1879 | if (!cb->skb) |
| 1880 | return -ENOMEM; | 1880 | return -ENOMEM; |
| 1881 | } | 1881 | } |
| @@ -1894,7 +1894,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) | |||
| 1894 | 1894 | ||
| 1895 | skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); | 1895 | skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); |
| 1896 | if (skb) | 1896 | if (skb) |
| 1897 | dev_kfree_skb_any(skb); | 1897 | dev_consume_skb_any(skb); |
| 1898 | } | 1898 | } |
| 1899 | } | 1899 | } |
| 1900 | 1900 | ||
| @@ -3669,7 +3669,7 @@ static int bcmgenet_resume(struct device *d) | |||
| 3669 | 3669 | ||
| 3670 | phy_init_hw(priv->phydev); | 3670 | phy_init_hw(priv->phydev); |
| 3671 | /* Speed settings must be restored */ | 3671 | /* Speed settings must be restored */ |
| 3672 | bcmgenet_mii_config(priv->dev); | 3672 | bcmgenet_mii_config(priv->dev, false); |
| 3673 | 3673 | ||
| 3674 | /* disable ethernet MAC while updating its registers */ | 3674 | /* disable ethernet MAC while updating its registers */ |
| 3675 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); | 3675 | umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b9344de669f8..3a34fdba5301 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h | |||
| @@ -698,7 +698,7 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF); | |||
| 698 | 698 | ||
| 699 | /* MDIO routines */ | 699 | /* MDIO routines */ |
| 700 | int bcmgenet_mii_init(struct net_device *dev); | 700 | int bcmgenet_mii_init(struct net_device *dev); |
| 701 | int bcmgenet_mii_config(struct net_device *dev); | 701 | int bcmgenet_mii_config(struct net_device *dev, bool init); |
| 702 | int bcmgenet_mii_probe(struct net_device *dev); | 702 | int bcmgenet_mii_probe(struct net_device *dev); |
| 703 | void bcmgenet_mii_exit(struct net_device *dev); | 703 | void bcmgenet_mii_exit(struct net_device *dev); |
| 704 | void bcmgenet_mii_reset(struct net_device *dev); | 704 | void bcmgenet_mii_reset(struct net_device *dev); |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 071fcbd14e6a..30cb97b4a1d7 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c | |||
| @@ -238,7 +238,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) | |||
| 238 | bcmgenet_fixed_phy_link_update); | 238 | bcmgenet_fixed_phy_link_update); |
| 239 | } | 239 | } |
| 240 | 240 | ||
| 241 | int bcmgenet_mii_config(struct net_device *dev) | 241 | int bcmgenet_mii_config(struct net_device *dev, bool init) |
| 242 | { | 242 | { |
| 243 | struct bcmgenet_priv *priv = netdev_priv(dev); | 243 | struct bcmgenet_priv *priv = netdev_priv(dev); |
| 244 | struct phy_device *phydev = priv->phydev; | 244 | struct phy_device *phydev = priv->phydev; |
| @@ -327,7 +327,8 @@ int bcmgenet_mii_config(struct net_device *dev) | |||
| 327 | bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); | 327 | bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); |
| 328 | } | 328 | } |
| 329 | 329 | ||
| 330 | dev_info_once(kdev, "configuring instance for %s\n", phy_name); | 330 | if (init) |
| 331 | dev_info(kdev, "configuring instance for %s\n", phy_name); | ||
| 331 | 332 | ||
| 332 | return 0; | 333 | return 0; |
| 333 | } | 334 | } |
| @@ -375,7 +376,7 @@ int bcmgenet_mii_probe(struct net_device *dev) | |||
| 375 | * PHY speed which is needed for bcmgenet_mii_config() to configure | 376 | * PHY speed which is needed for bcmgenet_mii_config() to configure |
| 376 | * things appropriately. | 377 | * things appropriately. |
| 377 | */ | 378 | */ |
| 378 | ret = bcmgenet_mii_config(dev); | 379 | ret = bcmgenet_mii_config(dev, true); |
| 379 | if (ret) { | 380 | if (ret) { |
| 380 | phy_disconnect(priv->phydev); | 381 | phy_disconnect(priv->phydev); |
| 381 | return ret; | 382 | return ret; |
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 79112563a25a..5e5c4d7796b8 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c | |||
| @@ -292,11 +292,30 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac) | |||
| 292 | u64 cmr_cfg; | 292 | u64 cmr_cfg; |
| 293 | u64 port_cfg = 0; | 293 | u64 port_cfg = 0; |
| 294 | u64 misc_ctl = 0; | 294 | u64 misc_ctl = 0; |
| 295 | bool tx_en, rx_en; | ||
| 295 | 296 | ||
| 296 | cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG); | 297 | cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG); |
| 297 | cmr_cfg &= ~CMR_EN; | 298 | tx_en = cmr_cfg & CMR_PKT_TX_EN; |
| 299 | rx_en = cmr_cfg & CMR_PKT_RX_EN; | ||
| 300 | cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); | ||
| 298 | bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); | 301 | bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); |
| 299 | 302 | ||
| 303 | /* Wait for BGX RX to be idle */ | ||
| 304 | if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, | ||
| 305 | GMI_PORT_CFG_RX_IDLE, false)) { | ||
| 306 | dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n", | ||
| 307 | bgx->bgx_id, lmac->lmacid); | ||
| 308 | return; | ||
| 309 | } | ||
| 310 | |||
| 311 | /* Wait for BGX TX to be idle */ | ||
| 312 | if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, | ||
| 313 | GMI_PORT_CFG_TX_IDLE, false)) { | ||
| 314 | dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n", | ||
| 315 | bgx->bgx_id, lmac->lmacid); | ||
| 316 | return; | ||
| 317 | } | ||
| 318 | |||
| 300 | port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); | 319 | port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); |
| 301 | misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL); | 320 | misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL); |
| 302 | 321 | ||
| @@ -347,10 +366,8 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac) | |||
| 347 | bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl); | 366 | bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl); |
| 348 | bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg); | 367 | bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg); |
| 349 | 368 | ||
| 350 | port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); | 369 | /* Restore CMR config settings */ |
| 351 | 370 | cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0); | |
| 352 | /* Re-enable lmac */ | ||
| 353 | cmr_cfg |= CMR_EN; | ||
| 354 | bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); | 371 | bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); |
| 355 | 372 | ||
| 356 | if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN))) | 373 | if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN))) |
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 6b7fe6fdd13b..23acdc5ab896 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h | |||
| @@ -170,6 +170,8 @@ | |||
| 170 | #define GMI_PORT_CFG_DUPLEX BIT_ULL(2) | 170 | #define GMI_PORT_CFG_DUPLEX BIT_ULL(2) |
| 171 | #define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3) | 171 | #define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3) |
| 172 | #define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8) | 172 | #define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8) |
| 173 | #define GMI_PORT_CFG_RX_IDLE BIT_ULL(12) | ||
| 174 | #define GMI_PORT_CFG_TX_IDLE BIT_ULL(13) | ||
| 173 | #define BGX_GMP_GMI_RXX_JABBER 0x38038 | 175 | #define BGX_GMP_GMI_RXX_JABBER 0x38038 |
| 174 | #define BGX_GMP_GMI_TXX_THRESH 0x38210 | 176 | #define BGX_GMP_GMI_TXX_THRESH 0x38210 |
| 175 | #define BGX_GMP_GMI_TXX_APPEND 0x38218 | 177 | #define BGX_GMP_GMI_TXX_APPEND 0x38218 |
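
The thunder_bgx change above no longer drops CMR_EN outright; it saves CMR_PKT_TX_EN/CMR_PKT_RX_EN, disables packet RX/TX, waits for the GMI datapath to drain via the new GMI_PORT_CFG_RX_IDLE/TX_IDLE bits, reprograms the port, and then restores only the bits that were previously set. The driver performs the wait with its own bgx_poll_reg(); outside that helper the same step is commonly written with the iopoll helpers. A sketch against a hypothetical 32-bit port-config register (the real BGX registers are 64-bit):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* Hypothetical idle bits mirroring GMI_PORT_CFG_RX_IDLE/TX_IDLE. */
#define EX_PORT_CFG_RX_IDLE	BIT(12)
#define EX_PORT_CFG_TX_IDLE	BIT(13)

/* Return 0 once both RX and TX report idle, -ETIMEDOUT otherwise. */
static int example_wait_port_idle(void __iomem *port_cfg)
{
	u32 val;

	return readl_poll_timeout(port_cfg, val,
				  (val & (EX_PORT_CFG_RX_IDLE |
					  EX_PORT_CFG_TX_IDLE)) ==
				  (EX_PORT_CFG_RX_IDLE | EX_PORT_CFG_TX_IDLE),
				  10, 10000);	/* poll every 10us, 10ms timeout */
}
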
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ef4be781fd05..09ea62ee96d3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | |||
| @@ -529,6 +529,7 @@ enum { /* adapter flags */ | |||
| 529 | USING_SOFT_PARAMS = (1 << 6), | 529 | USING_SOFT_PARAMS = (1 << 6), |
| 530 | MASTER_PF = (1 << 7), | 530 | MASTER_PF = (1 << 7), |
| 531 | FW_OFLD_CONN = (1 << 9), | 531 | FW_OFLD_CONN = (1 << 9), |
| 532 | ROOT_NO_RELAXED_ORDERING = (1 << 10), | ||
| 532 | }; | 533 | }; |
| 533 | 534 | ||
| 534 | enum { | 535 | enum { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e403fa18f1b1..33bb8678833a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
| @@ -4654,11 +4654,6 @@ static void print_port_info(const struct net_device *dev) | |||
| 4654 | dev->name, adap->params.vpd.id, adap->name, buf); | 4654 | dev->name, adap->params.vpd.id, adap->name, buf); |
| 4655 | } | 4655 | } |
| 4656 | 4656 | ||
| 4657 | static void enable_pcie_relaxed_ordering(struct pci_dev *dev) | ||
| 4658 | { | ||
| 4659 | pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); | ||
| 4660 | } | ||
| 4661 | |||
| 4662 | /* | 4657 | /* |
| 4663 | * Free the following resources: | 4658 | * Free the following resources: |
| 4664 | * - memory used for tables | 4659 | * - memory used for tables |
| @@ -4908,7 +4903,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 4908 | } | 4903 | } |
| 4909 | 4904 | ||
| 4910 | pci_enable_pcie_error_reporting(pdev); | 4905 | pci_enable_pcie_error_reporting(pdev); |
| 4911 | enable_pcie_relaxed_ordering(pdev); | ||
| 4912 | pci_set_master(pdev); | 4906 | pci_set_master(pdev); |
| 4913 | pci_save_state(pdev); | 4907 | pci_save_state(pdev); |
| 4914 | 4908 | ||
| @@ -4947,6 +4941,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 4947 | adapter->msg_enable = DFLT_MSG_ENABLE; | 4941 | adapter->msg_enable = DFLT_MSG_ENABLE; |
| 4948 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); | 4942 | memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); |
| 4949 | 4943 | ||
| 4944 | /* If possible, we use the PCIe Relaxed Ordering Attribute to deliver | ||
| 4945 | * Ingress Packet Data to Free List Buffers in order to allow for | ||
| 4946 | * chipset performance optimizations between the Root Complex and | ||
| 4947 | * Memory Controllers. (Messages to the associated Ingress Queue | ||
| 4948 | * notifying new Packet Placement in the Free List Buffers are sent | ||
| 4949 | * without the Relaxed Ordering Attribute, thus guaranteeing that all | ||
| 4950 | * preceding PCIe Transaction Layer Packets will have been processed | ||
| 4951 | * first.) But some Root Complexes have issues with Upstream | ||
| 4952 | * Transaction Layer Packets that have the Relaxed Ordering Attribute | ||
| 4953 | * set; PCIe devices below such Root Complexes get the Relaxed | ||
| 4954 | * Ordering bit cleared in their configuration space. So we check our | ||
| 4955 | * own PCIe configuration space to see if it's flagged with advice | ||
| 4956 | * against using Relaxed Ordering. | ||
| 4957 | */ | ||
| 4958 | if (!pcie_relaxed_ordering_enabled(pdev)) | ||
| 4959 | adapter->flags |= ROOT_NO_RELAXED_ORDERING; | ||
| 4960 | |||
| 4950 | spin_lock_init(&adapter->stats_lock); | 4961 | spin_lock_init(&adapter->stats_lock); |
| 4951 | spin_lock_init(&adapter->tid_release_lock); | 4962 | spin_lock_init(&adapter->tid_release_lock); |
| 4952 | spin_lock_init(&adapter->win0_lock); | 4963 | spin_lock_init(&adapter->win0_lock); |
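
For illustration, the same probe-time check in isolation: pcie_relaxed_ordering_enabled() (a real PCI core helper) reports whether any bridge above the function has Relaxed Ordering disabled, and the driver latches that into a flag so queue allocation later programs FETCHRO/DATARO accordingly. The adapter and flag names below are hypothetical, not the driver's:

#include <linux/bits.h>
#include <linux/pci.h>

#define EX_NO_RELAXED_ORDERING	BIT(0)	/* hypothetical adapter flag */

struct example_adapter {
	struct pci_dev *pdev;
	unsigned int flags;
};

static void example_check_relaxed_ordering(struct example_adapter *adap)
{
	/* If the root complex (or any bridge above us) advises against
	 * Relaxed Ordering, remember it; queue setup then requests
	 * strictly ordered free-list writes instead.
	 */
	if (!pcie_relaxed_ordering_enabled(adap->pdev))
		adap->flags |= EX_NO_RELAXED_ORDERING;
}
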
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ede12209f20b..4ef68f69b58c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
| @@ -2719,6 +2719,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, | |||
| 2719 | struct fw_iq_cmd c; | 2719 | struct fw_iq_cmd c; |
| 2720 | struct sge *s = &adap->sge; | 2720 | struct sge *s = &adap->sge; |
| 2721 | struct port_info *pi = netdev_priv(dev); | 2721 | struct port_info *pi = netdev_priv(dev); |
| 2722 | int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING); | ||
| 2722 | 2723 | ||
| 2723 | /* Size needs to be multiple of 16, including status entry. */ | 2724 | /* Size needs to be multiple of 16, including status entry. */ |
| 2724 | iq->size = roundup(iq->size, 16); | 2725 | iq->size = roundup(iq->size, 16); |
| @@ -2772,8 +2773,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, | |||
| 2772 | 2773 | ||
| 2773 | flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); | 2774 | flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); |
| 2774 | c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | | 2775 | c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | |
| 2775 | FW_IQ_CMD_FL0FETCHRO_F | | 2776 | FW_IQ_CMD_FL0FETCHRO_V(relaxed) | |
| 2776 | FW_IQ_CMD_FL0DATARO_F | | 2777 | FW_IQ_CMD_FL0DATARO_V(relaxed) | |
| 2777 | FW_IQ_CMD_FL0PADEN_F); | 2778 | FW_IQ_CMD_FL0PADEN_F); |
| 2778 | if (cong >= 0) | 2779 | if (cong >= 0) |
| 2779 | c.iqns_to_fl0congen |= | 2780 | c.iqns_to_fl0congen |= |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 82bf7aac6cdb..0293b41171a5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
| @@ -369,12 +369,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, | |||
| 369 | list_del(&entry.list); | 369 | list_del(&entry.list); |
| 370 | spin_unlock(&adap->mbox_lock); | 370 | spin_unlock(&adap->mbox_lock); |
| 371 | ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; | 371 | ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; |
| 372 | t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); | 372 | t4_record_mbox(adap, cmd, size, access, ret); |
| 373 | return ret; | 373 | return ret; |
| 374 | } | 374 | } |
| 375 | 375 | ||
| 376 | /* Copy in the new mailbox command and send it on its way ... */ | 376 | /* Copy in the new mailbox command and send it on its way ... */ |
| 377 | t4_record_mbox(adap, cmd, MBOX_LEN, access, 0); | 377 | t4_record_mbox(adap, cmd, size, access, 0); |
| 378 | for (i = 0; i < size; i += 8) | 378 | for (i = 0; i < size; i += 8) |
| 379 | t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); | 379 | t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); |
| 380 | 380 | ||
| @@ -426,7 +426,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, | |||
| 426 | } | 426 | } |
| 427 | 427 | ||
| 428 | ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT; | 428 | ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT; |
| 429 | t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); | 429 | t4_record_mbox(adap, cmd, size, access, ret); |
| 430 | dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", | 430 | dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", |
| 431 | *(const u8 *)cmd, mbox); | 431 | *(const u8 *)cmd, mbox); |
| 432 | t4_report_fw_error(adap); | 432 | t4_report_fw_error(adap); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 109bc630408b..08c6ddb84a04 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | |||
| @@ -408,6 +408,7 @@ enum { /* adapter flags */ | |||
| 408 | USING_MSI = (1UL << 1), | 408 | USING_MSI = (1UL << 1), |
| 409 | USING_MSIX = (1UL << 2), | 409 | USING_MSIX = (1UL << 2), |
| 410 | QUEUES_BOUND = (1UL << 3), | 410 | QUEUES_BOUND = (1UL << 3), |
| 411 | ROOT_NO_RELAXED_ORDERING = (1UL << 4), | ||
| 411 | }; | 412 | }; |
| 412 | 413 | ||
| 413 | /* | 414 | /* |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index ac7a150c54e9..2b85b874fd0d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | |||
| @@ -2888,6 +2888,24 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
| 2888 | */ | 2888 | */ |
| 2889 | adapter->name = pci_name(pdev); | 2889 | adapter->name = pci_name(pdev); |
| 2890 | adapter->msg_enable = DFLT_MSG_ENABLE; | 2890 | adapter->msg_enable = DFLT_MSG_ENABLE; |
| 2891 | |||
| 2892 | /* If possible, we use the PCIe Relaxed Ordering Attribute to deliver | ||
| 2893 | * Ingress Packet Data to Free List Buffers in order to allow for | ||
| 2894 | * chipset performance optimizations between the Root Complex and | ||
| 2895 | * Memory Controllers. (Messages to the associated Ingress Queue | ||
| 2896 | * notifying new Packet Placement in the Free List Buffers are sent | ||
| 2897 | * without the Relaxed Ordering Attribute, thus guaranteeing that all | ||
| 2898 | * preceding PCIe Transaction Layer Packets will have been processed | ||
| 2899 | * first.) But some Root Complexes have issues with Upstream | ||
| 2900 | * Transaction Layer Packets that have the Relaxed Ordering Attribute | ||
| 2901 | * set; PCIe devices below such Root Complexes get the Relaxed | ||
| 2902 | * Ordering bit cleared in their configuration space. So we check our | ||
| 2903 | * own PCIe configuration space to see if it's flagged with advice | ||
| 2904 | * against using Relaxed Ordering. | ||
| 2905 | */ | ||
| 2906 | if (!pcie_relaxed_ordering_enabled(pdev)) | ||
| 2907 | adapter->flags |= ROOT_NO_RELAXED_ORDERING; | ||
| 2908 | |||
| 2891 | err = adap_init0(adapter); | 2909 | err = adap_init0(adapter); |
| 2892 | if (err) | 2910 | if (err) |
| 2893 | goto err_unmap_bar; | 2911 | goto err_unmap_bar; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index e37dde2ba97f..05498e7f2840 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
| @@ -2205,6 +2205,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, | |||
| 2205 | struct port_info *pi = netdev_priv(dev); | 2205 | struct port_info *pi = netdev_priv(dev); |
| 2206 | struct fw_iq_cmd cmd, rpl; | 2206 | struct fw_iq_cmd cmd, rpl; |
| 2207 | int ret, iqandst, flsz = 0; | 2207 | int ret, iqandst, flsz = 0; |
| 2208 | int relaxed = !(adapter->flags & ROOT_NO_RELAXED_ORDERING); | ||
| 2208 | 2209 | ||
| 2209 | /* | 2210 | /* |
| 2210 | * If we're using MSI interrupts and we're not initializing the | 2211 | * If we're using MSI interrupts and we're not initializing the |
| @@ -2300,6 +2301,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, | |||
| 2300 | cpu_to_be32( | 2301 | cpu_to_be32( |
| 2301 | FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) | | 2302 | FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) | |
| 2302 | FW_IQ_CMD_FL0PACKEN_F | | 2303 | FW_IQ_CMD_FL0PACKEN_F | |
| 2304 | FW_IQ_CMD_FL0FETCHRO_V(relaxed) | | ||
| 2305 | FW_IQ_CMD_FL0DATARO_V(relaxed) | | ||
| 2303 | FW_IQ_CMD_FL0PADEN_F); | 2306 | FW_IQ_CMD_FL0PADEN_F); |
| 2304 | 2307 | ||
| 2305 | /* In T6, for egress queue type FL there is internal overhead | 2308 | /* In T6, for egress queue type FL there is internal overhead |
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 95bf5e89cfd1..59da7ac3c108 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c | |||
| @@ -125,7 +125,7 @@ static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr) | |||
| 125 | iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); | 125 | iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); |
| 126 | iowrite32(maccr | FTGMAC100_MACCR_SW_RST, | 126 | iowrite32(maccr | FTGMAC100_MACCR_SW_RST, |
| 127 | priv->base + FTGMAC100_OFFSET_MACCR); | 127 | priv->base + FTGMAC100_OFFSET_MACCR); |
| 128 | for (i = 0; i < 50; i++) { | 128 | for (i = 0; i < 200; i++) { |
| 129 | unsigned int maccr; | 129 | unsigned int maccr; |
| 130 | 130 | ||
| 131 | maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); | 131 | maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); |
| @@ -392,7 +392,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry, | |||
| 392 | struct net_device *netdev = priv->netdev; | 392 | struct net_device *netdev = priv->netdev; |
| 393 | struct sk_buff *skb; | 393 | struct sk_buff *skb; |
| 394 | dma_addr_t map; | 394 | dma_addr_t map; |
| 395 | int err; | 395 | int err = 0; |
| 396 | 396 | ||
| 397 | skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE); | 397 | skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE); |
| 398 | if (unlikely(!skb)) { | 398 | if (unlikely(!skb)) { |
| @@ -428,7 +428,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry, | |||
| 428 | else | 428 | else |
| 429 | rxdes->rxdes0 = 0; | 429 | rxdes->rxdes0 = 0; |
| 430 | 430 | ||
| 431 | return 0; | 431 | return err; |
| 432 | } | 432 | } |
| 433 | 433 | ||
| 434 | static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv, | 434 | static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv, |
| @@ -1682,6 +1682,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev) | |||
| 1682 | priv->mii_bus->name = "ftgmac100_mdio"; | 1682 | priv->mii_bus->name = "ftgmac100_mdio"; |
| 1683 | snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", | 1683 | snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", |
| 1684 | pdev->name, pdev->id); | 1684 | pdev->name, pdev->id); |
| 1685 | priv->mii_bus->parent = priv->dev; | ||
| 1685 | priv->mii_bus->priv = priv->netdev; | 1686 | priv->mii_bus->priv = priv->netdev; |
| 1686 | priv->mii_bus->read = ftgmac100_mdiobus_read; | 1687 | priv->mii_bus->read = ftgmac100_mdiobus_read; |
| 1687 | priv->mii_bus->write = ftgmac100_mdiobus_write; | 1688 | priv->mii_bus->write = ftgmac100_mdiobus_write; |
| @@ -1862,7 +1863,6 @@ err_setup_mdio: | |||
| 1862 | err_ioremap: | 1863 | err_ioremap: |
| 1863 | release_resource(priv->res); | 1864 | release_resource(priv->res); |
| 1864 | err_req_mem: | 1865 | err_req_mem: |
| 1865 | netif_napi_del(&priv->napi); | ||
| 1866 | free_netdev(netdev); | 1866 | free_netdev(netdev); |
| 1867 | err_alloc_etherdev: | 1867 | err_alloc_etherdev: |
| 1868 | return err; | 1868 | return err; |
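
Among the ftgmac100 fixes above, setting priv->mii_bus->parent before registration matters because the MDIO core uses the parent device for sysfs placement and device/OF lookups of the bus. A minimal, self-contained registration sketch under that assumption (the names and stub accessors are hypothetical):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/phy.h>

/* Stub accessors so the bus can be registered; a real driver talks to
 * its MDIO controller here.
 */
static int example_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	return 0xffff;			/* "no PHY responded" */
}

static int example_mdio_write(struct mii_bus *bus, int addr, int regnum,
			      u16 val)
{
	return 0;
}

static struct mii_bus *example_mdio_setup(struct device *parent, void *priv)
{
	struct mii_bus *bus = mdiobus_alloc();

	if (!bus)
		return NULL;

	bus->name = "example_mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio", dev_name(parent));
	bus->parent = parent;		/* what the hunk above adds */
	bus->priv = priv;
	bus->read = example_mdio_read;
	bus->write = example_mdio_write;

	if (mdiobus_register(bus)) {
		mdiobus_free(bus);
		return NULL;
	}
	return bus;
}
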
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 6e67d22fd0d5..1c7da16ad0ff 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c | |||
| @@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id, | |||
| 623 | goto no_mem; | 623 | goto no_mem; |
| 624 | } | 624 | } |
| 625 | 625 | ||
| 626 | pdev->dev.of_node = node; | ||
| 627 | pdev->dev.parent = priv->dev; | ||
| 626 | set_dma_ops(&pdev->dev, get_dma_ops(priv->dev)); | 628 | set_dma_ops(&pdev->dev, get_dma_ops(priv->dev)); |
| 627 | 629 | ||
| 628 | ret = platform_device_add_data(pdev, &data, sizeof(data)); | 630 | ret = platform_device_add_data(pdev, &data, sizeof(data)); |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index a3e694679635..c45e8e3b82d3 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); | |||
| 111 | static void send_request_unmap(struct ibmvnic_adapter *, u8); | 111 | static void send_request_unmap(struct ibmvnic_adapter *, u8); |
| 112 | static void send_login(struct ibmvnic_adapter *adapter); | 112 | static void send_login(struct ibmvnic_adapter *adapter); |
| 113 | static void send_cap_queries(struct ibmvnic_adapter *adapter); | 113 | static void send_cap_queries(struct ibmvnic_adapter *adapter); |
| 114 | static int init_sub_crqs(struct ibmvnic_adapter *); | ||
| 114 | static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); | 115 | static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); |
| 115 | static int ibmvnic_init(struct ibmvnic_adapter *); | 116 | static int ibmvnic_init(struct ibmvnic_adapter *); |
| 116 | static void release_crq_queue(struct ibmvnic_adapter *); | 117 | static void release_crq_queue(struct ibmvnic_adapter *); |
| @@ -651,6 +652,7 @@ static int ibmvnic_login(struct net_device *netdev) | |||
| 651 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | 652 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 652 | unsigned long timeout = msecs_to_jiffies(30000); | 653 | unsigned long timeout = msecs_to_jiffies(30000); |
| 653 | struct device *dev = &adapter->vdev->dev; | 654 | struct device *dev = &adapter->vdev->dev; |
| 655 | int rc; | ||
| 654 | 656 | ||
| 655 | do { | 657 | do { |
| 656 | if (adapter->renegotiate) { | 658 | if (adapter->renegotiate) { |
| @@ -664,6 +666,18 @@ static int ibmvnic_login(struct net_device *netdev) | |||
| 664 | dev_err(dev, "Capabilities query timeout\n"); | 666 | dev_err(dev, "Capabilities query timeout\n"); |
| 665 | return -1; | 667 | return -1; |
| 666 | } | 668 | } |
| 669 | rc = init_sub_crqs(adapter); | ||
| 670 | if (rc) { | ||
| 671 | dev_err(dev, | ||
| 672 | "Initialization of SCRQ's failed\n"); | ||
| 673 | return -1; | ||
| 674 | } | ||
| 675 | rc = init_sub_crq_irqs(adapter); | ||
| 676 | if (rc) { | ||
| 677 | dev_err(dev, | ||
| 678 | "Initialization of SCRQ's irqs failed\n"); | ||
| 679 | return -1; | ||
| 680 | } | ||
| 667 | } | 681 | } |
| 668 | 682 | ||
| 669 | reinit_completion(&adapter->init_done); | 683 | reinit_completion(&adapter->init_done); |
| @@ -3004,7 +3018,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, | |||
| 3004 | *req_value, | 3018 | *req_value, |
| 3005 | (long int)be64_to_cpu(crq->request_capability_rsp. | 3019 | (long int)be64_to_cpu(crq->request_capability_rsp. |
| 3006 | number), name); | 3020 | number), name); |
| 3007 | release_sub_crqs(adapter); | ||
| 3008 | *req_value = be64_to_cpu(crq->request_capability_rsp.number); | 3021 | *req_value = be64_to_cpu(crq->request_capability_rsp.number); |
| 3009 | ibmvnic_send_req_caps(adapter, 1); | 3022 | ibmvnic_send_req_caps(adapter, 1); |
| 3010 | return; | 3023 | return; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index b936febc315a..2194960d5855 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
| @@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) | |||
| 1113 | if (!tx_ring->tx_bi) | 1113 | if (!tx_ring->tx_bi) |
| 1114 | goto err; | 1114 | goto err; |
| 1115 | 1115 | ||
| 1116 | u64_stats_init(&tx_ring->syncp); | ||
| 1117 | |||
| 1116 | /* round up to nearest 4K */ | 1118 | /* round up to nearest 4K */ |
| 1117 | tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); | 1119 | tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); |
| 1118 | /* add u32 for head writeback, align after this takes care of | 1120 | /* add u32 for head writeback, align after this takes care of |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 084c53582793..032f8ac06357 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
| @@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) | |||
| 2988 | if (!tx_ring->tx_buffer_info) | 2988 | if (!tx_ring->tx_buffer_info) |
| 2989 | goto err; | 2989 | goto err; |
| 2990 | 2990 | ||
| 2991 | u64_stats_init(&tx_ring->syncp); | ||
| 2992 | |||
| 2991 | /* round up to nearest 4K */ | 2993 | /* round up to nearest 4K */ |
| 2992 | tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); | 2994 | tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); |
| 2993 | tx_ring->size = ALIGN(tx_ring->size, 4096); | 2995 | tx_ring->size = ALIGN(tx_ring->size, 4096); |
| @@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring) | |||
| 3046 | if (!rx_ring->rx_buffer_info) | 3048 | if (!rx_ring->rx_buffer_info) |
| 3047 | goto err; | 3049 | goto err; |
| 3048 | 3050 | ||
| 3051 | u64_stats_init(&rx_ring->syncp); | ||
| 3052 | |||
| 3049 | /* Round up to nearest 4K */ | 3053 | /* Round up to nearest 4K */ |
| 3050 | rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); | 3054 | rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); |
| 3051 | rx_ring->size = ALIGN(rx_ring->size, 4096); | 3055 | rx_ring->size = ALIGN(rx_ring->size, 4096); |
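
The i40e and ixgbevf hunks above add u64_stats_init() while the rings are being set up. On 64-bit kernels the sync object compiles away, but on 32-bit SMP it is a seqcount that readers spin on, and using it before initialization can trigger lockdep warnings. A minimal sketch of the init/update/read pattern the syncp serializes (the ring-stats names are hypothetical):

#include <linux/u64_stats_sync.h>

struct example_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Must run once before the first update or fetch. */
static void example_stats_init(struct example_ring_stats *s)
{
	u64_stats_init(&s->syncp);
}

/* Writer side: called from the ring's own context. */
static void example_stats_update(struct example_ring_stats *s,
				 unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry if a writer was mid-update. */
static void example_stats_read(struct example_ring_stats *s,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}
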
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 5794d98d946f..9c94ea9b2b80 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
| @@ -2734,7 +2734,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, | |||
| 2734 | ppd.shared = pdev; | 2734 | ppd.shared = pdev; |
| 2735 | 2735 | ||
| 2736 | memset(&res, 0, sizeof(res)); | 2736 | memset(&res, 0, sizeof(res)); |
| 2737 | if (!of_irq_to_resource(pnp, 0, &res)) { | 2737 | if (of_irq_to_resource(pnp, 0, &res) <= 0) { |
| 2738 | dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name); | 2738 | dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name); |
| 2739 | return -EINVAL; | 2739 | return -EINVAL; |
| 2740 | } | 2740 | } |
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 48d21c1e09f2..4d598ca8503a 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
| @@ -6504,7 +6504,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, | |||
| 6504 | struct resource *res; | 6504 | struct resource *res; |
| 6505 | const char *dt_mac_addr; | 6505 | const char *dt_mac_addr; |
| 6506 | const char *mac_from; | 6506 | const char *mac_from; |
| 6507 | char hw_mac_addr[ETH_ALEN]; | 6507 | char hw_mac_addr[ETH_ALEN] = {0}; |
| 6508 | u32 id; | 6508 | u32 id; |
| 6509 | int features; | 6509 | int features; |
| 6510 | int phy_mode; | 6510 | int phy_mode; |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b3d0c2e6347a..e588a0cdb074 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/if_vlan.h> | 22 | #include <linux/if_vlan.h> |
| 23 | #include <linux/reset.h> | 23 | #include <linux/reset.h> |
| 24 | #include <linux/tcp.h> | 24 | #include <linux/tcp.h> |
| 25 | #include <linux/interrupt.h> | ||
| 25 | 26 | ||
| 26 | #include "mtk_eth_soc.h" | 27 | #include "mtk_eth_soc.h" |
| 27 | 28 | ||
| @@ -947,6 +948,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, | |||
| 947 | RX_DMA_FPORT_MASK; | 948 | RX_DMA_FPORT_MASK; |
| 948 | mac--; | 949 | mac--; |
| 949 | 950 | ||
| 951 | if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || | ||
| 952 | !eth->netdev[mac])) | ||
| 953 | goto release_desc; | ||
| 954 | |||
| 950 | netdev = eth->netdev[mac]; | 955 | netdev = eth->netdev[mac]; |
| 951 | 956 | ||
| 952 | if (unlikely(test_bit(MTK_RESETTING, ð->state))) | 957 | if (unlikely(test_bit(MTK_RESETTING, ð->state))) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index c751a1d434ad..3d4e4a5d00d1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
| @@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev, | |||
| 223 | struct ethtool_wolinfo *wol) | 223 | struct ethtool_wolinfo *wol) |
| 224 | { | 224 | { |
| 225 | struct mlx4_en_priv *priv = netdev_priv(netdev); | 225 | struct mlx4_en_priv *priv = netdev_priv(netdev); |
| 226 | struct mlx4_caps *caps = &priv->mdev->dev->caps; | ||
| 226 | int err = 0; | 227 | int err = 0; |
| 227 | u64 config = 0; | 228 | u64 config = 0; |
| 228 | u64 mask; | 229 | u64 mask; |
| @@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev, | |||
| 235 | mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : | 236 | mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : |
| 236 | MLX4_DEV_CAP_FLAG_WOL_PORT2; | 237 | MLX4_DEV_CAP_FLAG_WOL_PORT2; |
| 237 | 238 | ||
| 238 | if (!(priv->mdev->dev->caps.flags & mask)) { | 239 | if (!(caps->flags & mask)) { |
| 239 | wol->supported = 0; | 240 | wol->supported = 0; |
| 240 | wol->wolopts = 0; | 241 | wol->wolopts = 0; |
| 241 | return; | 242 | return; |
| 242 | } | 243 | } |
| 243 | 244 | ||
| 245 | if (caps->wol_port[priv->port]) | ||
| 246 | wol->supported = WAKE_MAGIC; | ||
| 247 | else | ||
| 248 | wol->supported = 0; | ||
| 249 | |||
| 244 | err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); | 250 | err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); |
| 245 | if (err) { | 251 | if (err) { |
| 246 | en_err(priv, "Failed to get WoL information\n"); | 252 | en_err(priv, "Failed to get WoL information\n"); |
| 247 | return; | 253 | return; |
| 248 | } | 254 | } |
| 249 | 255 | ||
| 250 | if (config & MLX4_EN_WOL_MAGIC) | 256 | if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC)) |
| 251 | wol->supported = WAKE_MAGIC; | ||
| 252 | else | ||
| 253 | wol->supported = 0; | ||
| 254 | |||
| 255 | if (config & MLX4_EN_WOL_ENABLED) | ||
| 256 | wol->wolopts = WAKE_MAGIC; | 257 | wol->wolopts = WAKE_MAGIC; |
| 257 | else | 258 | else |
| 258 | wol->wolopts = 0; | 259 | wol->wolopts = 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 436f7689a032..bf1638044a7a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
| @@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum, | |||
| 574 | * header, the HW adds it. To address that, we are subtracting the pseudo | 574 | * header, the HW adds it. To address that, we are subtracting the pseudo |
| 575 | * header checksum from the checksum value provided by the HW. | 575 | * header checksum from the checksum value provided by the HW. |
| 576 | */ | 576 | */ |
| 577 | static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, | 577 | static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, |
| 578 | struct iphdr *iph) | 578 | struct iphdr *iph) |
| 579 | { | 579 | { |
| 580 | __u16 length_for_csum = 0; | 580 | __u16 length_for_csum = 0; |
| 581 | __wsum csum_pseudo_header = 0; | 581 | __wsum csum_pseudo_header = 0; |
| 582 | __u8 ipproto = iph->protocol; | ||
| 583 | |||
| 584 | if (unlikely(ipproto == IPPROTO_SCTP)) | ||
| 585 | return -1; | ||
| 582 | 586 | ||
| 583 | length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); | 587 | length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); |
| 584 | csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, | 588 | csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, |
| 585 | length_for_csum, iph->protocol, 0); | 589 | length_for_csum, ipproto, 0); |
| 586 | skb->csum = csum_sub(hw_checksum, csum_pseudo_header); | 590 | skb->csum = csum_sub(hw_checksum, csum_pseudo_header); |
| 591 | return 0; | ||
| 587 | } | 592 | } |
| 588 | 593 | ||
| 589 | #if IS_ENABLED(CONFIG_IPV6) | 594 | #if IS_ENABLED(CONFIG_IPV6) |
| @@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, | |||
| 594 | static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, | 599 | static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, |
| 595 | struct ipv6hdr *ipv6h) | 600 | struct ipv6hdr *ipv6h) |
| 596 | { | 601 | { |
| 602 | __u8 nexthdr = ipv6h->nexthdr; | ||
| 597 | __wsum csum_pseudo_hdr = 0; | 603 | __wsum csum_pseudo_hdr = 0; |
| 598 | 604 | ||
| 599 | if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT || | 605 | if (unlikely(nexthdr == IPPROTO_FRAGMENT || |
| 600 | ipv6h->nexthdr == IPPROTO_HOPOPTS)) | 606 | nexthdr == IPPROTO_HOPOPTS || |
| 607 | nexthdr == IPPROTO_SCTP)) | ||
| 601 | return -1; | 608 | return -1; |
| 602 | hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); | 609 | hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr)); |
| 603 | 610 | ||
| 604 | csum_pseudo_hdr = csum_partial(&ipv6h->saddr, | 611 | csum_pseudo_hdr = csum_partial(&ipv6h->saddr, |
| 605 | sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); | 612 | sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); |
| 606 | csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); | 613 | csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); |
| 607 | csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); | 614 | csum_pseudo_hdr = csum_add(csum_pseudo_hdr, |
| 615 | (__force __wsum)htons(nexthdr)); | ||
| 608 | 616 | ||
| 609 | skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); | 617 | skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); |
| 610 | skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); | 618 | skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); |
| @@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, | |||
| 627 | } | 635 | } |
| 628 | 636 | ||
| 629 | if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) | 637 | if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) |
| 630 | get_fixed_ipv4_csum(hw_checksum, skb, hdr); | 638 | return get_fixed_ipv4_csum(hw_checksum, skb, hdr); |
| 631 | #if IS_ENABLED(CONFIG_IPV6) | 639 | #if IS_ENABLED(CONFIG_IPV6) |
| 632 | else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) | 640 | if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) |
| 633 | if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr))) | 641 | return get_fixed_ipv6_csum(hw_checksum, skb, hdr); |
| 634 | return -1; | ||
| 635 | #endif | 642 | #endif |
| 636 | return 0; | 643 | return 0; |
| 637 | } | 644 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 37e84a59e751..041c0ed65929 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
| @@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) | |||
| 159 | [32] = "Loopback source checks support", | 159 | [32] = "Loopback source checks support", |
| 160 | [33] = "RoCEv2 support", | 160 | [33] = "RoCEv2 support", |
| 161 | [34] = "DMFS Sniffer support (UC & MC)", | 161 | [34] = "DMFS Sniffer support (UC & MC)", |
| 162 | [35] = "QinQ VST mode support", | 162 | [35] = "Diag counters per port", |
| 163 | [36] = "sl to vl mapping table change event support" | 163 | [36] = "QinQ VST mode support", |
| 164 | [37] = "sl to vl mapping table change event support", | ||
| 164 | }; | 165 | }; |
| 165 | int i; | 166 | int i; |
| 166 | 167 | ||
| @@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 764 | #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e | 765 | #define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e |
| 765 | #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f | 766 | #define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f |
| 766 | #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 | 767 | #define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 |
| 768 | #define QUERY_DEV_CAP_WOL_OFFSET 0x43 | ||
| 767 | #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 | 769 | #define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 |
| 768 | #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 | 770 | #define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 |
| 769 | #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 | 771 | #define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 |
| @@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 920 | MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); | 922 | MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); |
| 921 | MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); | 923 | MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); |
| 922 | dev_cap->flags = flags | (u64)ext_flags << 32; | 924 | dev_cap->flags = flags | (u64)ext_flags << 32; |
| 925 | MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET); | ||
| 926 | dev_cap->wol_port[1] = !!(field & 0x20); | ||
| 927 | dev_cap->wol_port[2] = !!(field & 0x40); | ||
| 923 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); | 928 | MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); |
| 924 | dev_cap->reserved_uars = field >> 4; | 929 | dev_cap->reserved_uars = field >> 4; |
| 925 | MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); | 930 | MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 5343a0599253..b52ba01aa486 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h | |||
| @@ -129,6 +129,7 @@ struct mlx4_dev_cap { | |||
| 129 | u32 dmfs_high_rate_qpn_range; | 129 | u32 dmfs_high_rate_qpn_range; |
| 130 | struct mlx4_rate_limit_caps rl_caps; | 130 | struct mlx4_rate_limit_caps rl_caps; |
| 131 | struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; | 131 | struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; |
| 132 | bool wol_port[MLX4_MAX_PORTS + 1]; | ||
| 132 | }; | 133 | }; |
| 133 | 134 | ||
| 134 | struct mlx4_func_cap { | 135 | struct mlx4_func_cap { |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index a27c9c13a36e..5fe5cdc51357 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -424,13 +424,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) | |||
| 424 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; | 424 | dev->caps.stat_rate_support = dev_cap->stat_rate_support; |
| 425 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; | 425 | dev->caps.max_gso_sz = dev_cap->max_gso_sz; |
| 426 | dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; | 426 | dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; |
| 427 | dev->caps.wol_port[1] = dev_cap->wol_port[1]; | ||
| 428 | dev->caps.wol_port[2] = dev_cap->wol_port[2]; | ||
| 427 | 429 | ||
| 428 | /* Save uar page shift */ | 430 | /* Save uar page shift */ |
| 429 | if (!mlx4_is_slave(dev)) { | 431 | if (!mlx4_is_slave(dev)) { |
| 430 | /* Virtual PCI function needs to determine UAR page size from | 432 | /* Virtual PCI function needs to determine UAR page size from |
| 431 | * firmware. Only master PCI function can set the uar page size | 433 | * firmware. Only master PCI function can set the uar page size |
| 432 | */ | 434 | */ |
| 433 | if (enable_4k_uar) | 435 | if (enable_4k_uar || !dev->persist->num_vfs) |
| 434 | dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; | 436 | dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; |
| 435 | else | 437 | else |
| 436 | dev->uar_page_shift = PAGE_SHIFT; | 438 | dev->uar_page_shift = PAGE_SHIFT; |
| @@ -2275,7 +2277,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) | |||
| 2275 | 2277 | ||
| 2276 | dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; | 2278 | dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; |
| 2277 | 2279 | ||
| 2278 | if (enable_4k_uar) { | 2280 | if (enable_4k_uar || !dev->persist->num_vfs) { |
| 2279 | init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + | 2281 | init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + |
| 2280 | PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; | 2282 | PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; |
| 2281 | init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; | 2283 | init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index f5a2c605749f..31cbe5e86a01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
| @@ -786,6 +786,10 @@ static void cb_timeout_handler(struct work_struct *work) | |||
| 786 | mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); | 786 | mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); |
| 787 | } | 787 | } |
| 788 | 788 | ||
| 789 | static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); | ||
| 790 | static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, | ||
| 791 | struct mlx5_cmd_msg *msg); | ||
| 792 | |||
| 789 | static void cmd_work_handler(struct work_struct *work) | 793 | static void cmd_work_handler(struct work_struct *work) |
| 790 | { | 794 | { |
| 791 | struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); | 795 | struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); |
| @@ -796,17 +800,28 @@ static void cmd_work_handler(struct work_struct *work) | |||
| 796 | struct semaphore *sem; | 800 | struct semaphore *sem; |
| 797 | unsigned long flags; | 801 | unsigned long flags; |
| 798 | bool poll_cmd = ent->polling; | 802 | bool poll_cmd = ent->polling; |
| 803 | int alloc_ret; | ||
| 799 | 804 | ||
| 800 | 805 | ||
| 801 | sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; | 806 | sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; |
| 802 | down(sem); | 807 | down(sem); |
| 803 | if (!ent->page_queue) { | 808 | if (!ent->page_queue) { |
| 804 | ent->idx = alloc_ent(cmd); | 809 | alloc_ret = alloc_ent(cmd); |
| 805 | if (ent->idx < 0) { | 810 | if (alloc_ret < 0) { |
| 806 | mlx5_core_err(dev, "failed to allocate command entry\n"); | 811 | mlx5_core_err(dev, "failed to allocate command entry\n"); |
| 812 | if (ent->callback) { | ||
| 813 | ent->callback(-EAGAIN, ent->context); | ||
| 814 | mlx5_free_cmd_msg(dev, ent->out); | ||
| 815 | free_msg(dev, ent->in); | ||
| 816 | free_cmd(ent); | ||
| 817 | } else { | ||
| 818 | ent->ret = -EAGAIN; | ||
| 819 | complete(&ent->done); | ||
| 820 | } | ||
| 807 | up(sem); | 821 | up(sem); |
| 808 | return; | 822 | return; |
| 809 | } | 823 | } |
| 824 | ent->idx = alloc_ret; | ||
| 810 | } else { | 825 | } else { |
| 811 | ent->idx = cmd->max_reg_cmds; | 826 | ent->idx = cmd->max_reg_cmds; |
| 812 | spin_lock_irqsave(&cmd->alloc_lock, flags); | 827 | spin_lock_irqsave(&cmd->alloc_lock, flags); |
| @@ -967,7 +982,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, | |||
| 967 | 982 | ||
| 968 | err = wait_func(dev, ent); | 983 | err = wait_func(dev, ent); |
| 969 | if (err == -ETIMEDOUT) | 984 | if (err == -ETIMEDOUT) |
| 970 | goto out_free; | 985 | goto out; |
| 971 | 986 | ||
| 972 | ds = ent->ts2 - ent->ts1; | 987 | ds = ent->ts2 - ent->ts1; |
| 973 | op = MLX5_GET(mbox_in, in->first.data, opcode); | 988 | op = MLX5_GET(mbox_in, in->first.data, opcode); |
| @@ -1430,6 +1445,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) | |||
| 1430 | mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n", | 1445 | mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n", |
| 1431 | ent->idx); | 1446 | ent->idx); |
| 1432 | free_ent(cmd, ent->idx); | 1447 | free_ent(cmd, ent->idx); |
| 1448 | free_cmd(ent); | ||
| 1433 | } | 1449 | } |
| 1434 | continue; | 1450 | continue; |
| 1435 | } | 1451 | } |
| @@ -1488,7 +1504,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) | |||
| 1488 | free_msg(dev, ent->in); | 1504 | free_msg(dev, ent->in); |
| 1489 | 1505 | ||
| 1490 | err = err ? err : ent->status; | 1506 | err = err ? err : ent->status; |
| 1491 | free_cmd(ent); | 1507 | if (!forced) |
| 1508 | free_cmd(ent); | ||
| 1492 | callback(err, context); | 1509 | callback(err, context); |
| 1493 | } else { | 1510 | } else { |
| 1494 | complete(&ent->done); | 1511 | complete(&ent->done); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e1b7ddfecd01..2f26fb34d741 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
| @@ -263,9 +263,18 @@ struct mlx5e_dcbx { | |||
| 263 | 263 | ||
| 264 | /* The only setting that cannot be read from FW */ | 264 | /* The only setting that cannot be read from FW */ |
| 265 | u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; | 265 | u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; |
| 266 | u8 cap; | ||
| 266 | }; | 267 | }; |
| 267 | #endif | 268 | #endif |
| 268 | 269 | ||
| 270 | #define MAX_PIN_NUM 8 | ||
| 271 | struct mlx5e_pps { | ||
| 272 | u8 pin_caps[MAX_PIN_NUM]; | ||
| 273 | struct work_struct out_work; | ||
| 274 | u64 start[MAX_PIN_NUM]; | ||
| 275 | u8 enabled; | ||
| 276 | }; | ||
| 277 | |||
| 269 | struct mlx5e_tstamp { | 278 | struct mlx5e_tstamp { |
| 270 | rwlock_t lock; | 279 | rwlock_t lock; |
| 271 | struct cyclecounter cycles; | 280 | struct cyclecounter cycles; |
| @@ -277,7 +286,7 @@ struct mlx5e_tstamp { | |||
| 277 | struct mlx5_core_dev *mdev; | 286 | struct mlx5_core_dev *mdev; |
| 278 | struct ptp_clock *ptp; | 287 | struct ptp_clock *ptp; |
| 279 | struct ptp_clock_info ptp_info; | 288 | struct ptp_clock_info ptp_info; |
| 280 | u8 *pps_pin_caps; | 289 | struct mlx5e_pps pps_info; |
| 281 | }; | 290 | }; |
| 282 | 291 | ||
| 283 | enum { | 292 | enum { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index 66f432385dbb..84dd63e74041 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | |||
| @@ -53,6 +53,15 @@ enum { | |||
| 53 | MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, | 53 | MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, |
| 54 | }; | 54 | }; |
| 55 | 55 | ||
| 56 | enum { | ||
| 57 | MLX5E_MTPPS_FS_ENABLE = BIT(0x0), | ||
| 58 | MLX5E_MTPPS_FS_PATTERN = BIT(0x2), | ||
| 59 | MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3), | ||
| 60 | MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4), | ||
| 61 | MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5), | ||
| 62 | MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7), | ||
| 63 | }; | ||
| 64 | |||
| 56 | void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, | 65 | void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, |
| 57 | struct skb_shared_hwtstamps *hwts) | 66 | struct skb_shared_hwtstamps *hwts) |
| 58 | { | 67 | { |
| @@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc) | |||
| 73 | return mlx5_read_internal_timer(tstamp->mdev) & cc->mask; | 82 | return mlx5_read_internal_timer(tstamp->mdev) & cc->mask; |
| 74 | } | 83 | } |
| 75 | 84 | ||
| 85 | static void mlx5e_pps_out(struct work_struct *work) | ||
| 86 | { | ||
| 87 | struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps, | ||
| 88 | out_work); | ||
| 89 | struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp, | ||
| 90 | pps_info); | ||
| 91 | u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; | ||
| 92 | unsigned long flags; | ||
| 93 | int i; | ||
| 94 | |||
| 95 | for (i = 0; i < tstamp->ptp_info.n_pins; i++) { | ||
| 96 | u64 tstart; | ||
| 97 | |||
| 98 | write_lock_irqsave(&tstamp->lock, flags); | ||
| 99 | tstart = tstamp->pps_info.start[i]; | ||
| 100 | tstamp->pps_info.start[i] = 0; | ||
| 101 | write_unlock_irqrestore(&tstamp->lock, flags); | ||
| 102 | if (!tstart) | ||
| 103 | continue; | ||
| 104 | |||
| 105 | MLX5_SET(mtpps_reg, in, pin, i); | ||
| 106 | MLX5_SET64(mtpps_reg, in, time_stamp, tstart); | ||
| 107 | MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP); | ||
| 108 | mlx5_set_mtpps(tstamp->mdev, in, sizeof(in)); | ||
| 109 | } | ||
| 110 | } | ||
| 111 | |||
| 76 | static void mlx5e_timestamp_overflow(struct work_struct *work) | 112 | static void mlx5e_timestamp_overflow(struct work_struct *work) |
| 77 | { | 113 | { |
| 78 | struct delayed_work *dwork = to_delayed_work(work); | 114 | struct delayed_work *dwork = to_delayed_work(work); |
| 79 | struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, | 115 | struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, |
| 80 | overflow_work); | 116 | overflow_work); |
| 117 | struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp); | ||
| 81 | unsigned long flags; | 118 | unsigned long flags; |
| 82 | 119 | ||
| 83 | write_lock_irqsave(&tstamp->lock, flags); | 120 | write_lock_irqsave(&tstamp->lock, flags); |
| 84 | timecounter_read(&tstamp->clock); | 121 | timecounter_read(&tstamp->clock); |
| 85 | write_unlock_irqrestore(&tstamp->lock, flags); | 122 | write_unlock_irqrestore(&tstamp->lock, flags); |
| 86 | schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period); | 123 | queue_delayed_work(priv->wq, &tstamp->overflow_work, |
| 124 | msecs_to_jiffies(tstamp->overflow_period * 1000)); | ||
| 87 | } | 125 | } |
| 88 | 126 | ||
| 89 | int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) | 127 | int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) |
| @@ -213,18 +251,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) | |||
| 213 | int neg_adj = 0; | 251 | int neg_adj = 0; |
| 214 | struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, | 252 | struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, |
| 215 | ptp_info); | 253 | ptp_info); |
| 216 | struct mlx5e_priv *priv = | ||
| 217 | container_of(tstamp, struct mlx5e_priv, tstamp); | ||
| 218 | |||
| 219 | if (MLX5_CAP_GEN(priv->mdev, pps_modify)) { | ||
| 220 | u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; | ||
| 221 | |||
| 222 | /* For future use need to add a loop for finding all 1PPS out pins */ | ||
| 223 | MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); | ||
| 224 | MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF); | ||
| 225 | |||
| 226 | mlx5_set_mtpps(priv->mdev, in, sizeof(in)); | ||
| 227 | } | ||
| 228 | 254 | ||
| 229 | if (delta < 0) { | 255 | if (delta < 0) { |
| 230 | neg_adj = 1; | 256 | neg_adj = 1; |
| @@ -253,12 +279,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp, | |||
| 253 | struct mlx5e_priv *priv = | 279 | struct mlx5e_priv *priv = |
| 254 | container_of(tstamp, struct mlx5e_priv, tstamp); | 280 | container_of(tstamp, struct mlx5e_priv, tstamp); |
| 255 | u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; | 281 | u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; |
| 282 | u32 field_select = 0; | ||
| 283 | u8 pin_mode = 0; | ||
| 256 | u8 pattern = 0; | 284 | u8 pattern = 0; |
| 257 | int pin = -1; | 285 | int pin = -1; |
| 258 | int err = 0; | 286 | int err = 0; |
| 259 | 287 | ||
| 260 | if (!MLX5_CAP_GEN(priv->mdev, pps) || | 288 | if (!MLX5_PPS_CAP(priv->mdev)) |
| 261 | !MLX5_CAP_GEN(priv->mdev, pps_modify)) | ||
| 262 | return -EOPNOTSUPP; | 289 | return -EOPNOTSUPP; |
| 263 | 290 | ||
| 264 | if (rq->extts.index >= tstamp->ptp_info.n_pins) | 291 | if (rq->extts.index >= tstamp->ptp_info.n_pins) |
| @@ -268,15 +295,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp, | |||
| 268 | pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); | 295 | pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); |
| 269 | if (pin < 0) | 296 | if (pin < 0) |
| 270 | return -EBUSY; | 297 | return -EBUSY; |
| 298 | pin_mode = MLX5E_PIN_MODE_IN; | ||
| 299 | pattern = !!(rq->extts.flags & PTP_FALLING_EDGE); | ||
| 300 | field_select = MLX5E_MTPPS_FS_PIN_MODE | | ||
| 301 | MLX5E_MTPPS_FS_PATTERN | | ||
| 302 | MLX5E_MTPPS_FS_ENABLE; | ||
| 303 | } else { | ||
| 304 | pin = rq->extts.index; | ||
| 305 | field_select = MLX5E_MTPPS_FS_ENABLE; | ||
| 271 | } | 306 | } |
| 272 | 307 | ||
| 273 | if (rq->extts.flags & PTP_FALLING_EDGE) | ||
| 274 | pattern = 1; | ||
| 275 | |||
| 276 | MLX5_SET(mtpps_reg, in, pin, pin); | 308 | MLX5_SET(mtpps_reg, in, pin, pin); |
| 277 | MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN); | 309 | MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); |
| 278 | MLX5_SET(mtpps_reg, in, pattern, pattern); | 310 | MLX5_SET(mtpps_reg, in, pattern, pattern); |
| 279 | MLX5_SET(mtpps_reg, in, enable, on); | 311 | MLX5_SET(mtpps_reg, in, enable, on); |
| 312 | MLX5_SET(mtpps_reg, in, field_select, field_select); | ||
| 280 | 313 | ||
| 281 | err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); | 314 | err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); |
| 282 | if (err) | 315 | if (err) |
| @@ -295,14 +328,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp, | |||
| 295 | struct mlx5e_priv *priv = | 328 | struct mlx5e_priv *priv = |
| 296 | container_of(tstamp, struct mlx5e_priv, tstamp); | 329 | container_of(tstamp, struct mlx5e_priv, tstamp); |
| 297 | u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; | 330 | u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; |
| 298 | u64 nsec_now, nsec_delta, time_stamp; | 331 | u64 nsec_now, nsec_delta, time_stamp = 0; |
| 299 | u64 cycles_now, cycles_delta; | 332 | u64 cycles_now, cycles_delta; |
| 300 | struct timespec64 ts; | 333 | struct timespec64 ts; |
| 301 | unsigned long flags; | 334 | unsigned long flags; |
| 335 | u32 field_select = 0; | ||
| 336 | u8 pin_mode = 0; | ||
| 337 | u8 pattern = 0; | ||
| 302 | int pin = -1; | 338 | int pin = -1; |
| 339 | int err = 0; | ||
| 303 | s64 ns; | 340 | s64 ns; |
| 304 | 341 | ||
| 305 | if (!MLX5_CAP_GEN(priv->mdev, pps_modify)) | 342 | if (!MLX5_PPS_CAP(priv->mdev)) |
| 306 | return -EOPNOTSUPP; | 343 | return -EOPNOTSUPP; |
| 307 | 344 | ||
| 308 | if (rq->perout.index >= tstamp->ptp_info.n_pins) | 345 | if (rq->perout.index >= tstamp->ptp_info.n_pins) |
| @@ -313,32 +350,60 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp, | |||
| 313 | rq->perout.index); | 350 | rq->perout.index); |
| 314 | if (pin < 0) | 351 | if (pin < 0) |
| 315 | return -EBUSY; | 352 | return -EBUSY; |
| 316 | } | ||
| 317 | 353 | ||
| 318 | ts.tv_sec = rq->perout.period.sec; | 354 | pin_mode = MLX5E_PIN_MODE_OUT; |
| 319 | ts.tv_nsec = rq->perout.period.nsec; | 355 | pattern = MLX5E_OUT_PATTERN_PERIODIC; |
| 320 | ns = timespec64_to_ns(&ts); | 356 | ts.tv_sec = rq->perout.period.sec; |
| 321 | if (on) | 357 | ts.tv_nsec = rq->perout.period.nsec; |
| 358 | ns = timespec64_to_ns(&ts); | ||
| 359 | |||
| 322 | if ((ns >> 1) != 500000000LL) | 360 | if ((ns >> 1) != 500000000LL) |
| 323 | return -EINVAL; | 361 | return -EINVAL; |
| 324 | ts.tv_sec = rq->perout.start.sec; | 362 | |
| 325 | ts.tv_nsec = rq->perout.start.nsec; | 363 | ts.tv_sec = rq->perout.start.sec; |
| 326 | ns = timespec64_to_ns(&ts); | 364 | ts.tv_nsec = rq->perout.start.nsec; |
| 327 | cycles_now = mlx5_read_internal_timer(tstamp->mdev); | 365 | ns = timespec64_to_ns(&ts); |
| 328 | write_lock_irqsave(&tstamp->lock, flags); | 366 | cycles_now = mlx5_read_internal_timer(tstamp->mdev); |
| 329 | nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); | 367 | write_lock_irqsave(&tstamp->lock, flags); |
| 330 | nsec_delta = ns - nsec_now; | 368 | nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); |
| 331 | cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, | 369 | nsec_delta = ns - nsec_now; |
| 332 | tstamp->cycles.mult); | 370 | cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, |
| 333 | write_unlock_irqrestore(&tstamp->lock, flags); | 371 | tstamp->cycles.mult); |
| 334 | time_stamp = cycles_now + cycles_delta; | 372 | write_unlock_irqrestore(&tstamp->lock, flags); |
| 373 | time_stamp = cycles_now + cycles_delta; | ||
| 374 | field_select = MLX5E_MTPPS_FS_PIN_MODE | | ||
| 375 | MLX5E_MTPPS_FS_PATTERN | | ||
| 376 | MLX5E_MTPPS_FS_ENABLE | | ||
| 377 | MLX5E_MTPPS_FS_TIME_STAMP; | ||
| 378 | } else { | ||
| 379 | pin = rq->perout.index; | ||
| 380 | field_select = MLX5E_MTPPS_FS_ENABLE; | ||
| 381 | } | ||
| 382 | |||
| 335 | MLX5_SET(mtpps_reg, in, pin, pin); | 383 | MLX5_SET(mtpps_reg, in, pin, pin); |
| 336 | MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); | 384 | MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); |
| 337 | MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC); | 385 | MLX5_SET(mtpps_reg, in, pattern, pattern); |
| 338 | MLX5_SET(mtpps_reg, in, enable, on); | 386 | MLX5_SET(mtpps_reg, in, enable, on); |
| 339 | MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); | 387 | MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); |
| 388 | MLX5_SET(mtpps_reg, in, field_select, field_select); | ||
| 389 | |||
| 390 | err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); | ||
| 391 | if (err) | ||
| 392 | return err; | ||
| 340 | 393 | ||
| 341 | return mlx5_set_mtpps(priv->mdev, in, sizeof(in)); | 394 | return mlx5_set_mtppse(priv->mdev, pin, 0, |
| 395 | MLX5E_EVENT_MODE_REPETETIVE & on); | ||
| 396 | } | ||
| 397 | |||
| 398 | static int mlx5e_pps_configure(struct ptp_clock_info *ptp, | ||
| 399 | struct ptp_clock_request *rq, | ||
| 400 | int on) | ||
| 401 | { | ||
| 402 | struct mlx5e_tstamp *tstamp = | ||
| 403 | container_of(ptp, struct mlx5e_tstamp, ptp_info); | ||
| 404 | |||
| 405 | tstamp->pps_info.enabled = !!on; | ||
| 406 | return 0; | ||
| 342 | } | 407 | } |
| 343 | 408 | ||
| 344 | static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, | 409 | static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, |
| @@ -350,6 +415,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, | |||
| 350 | return mlx5e_extts_configure(ptp, rq, on); | 415 | return mlx5e_extts_configure(ptp, rq, on); |
| 351 | case PTP_CLK_REQ_PEROUT: | 416 | case PTP_CLK_REQ_PEROUT: |
| 352 | return mlx5e_perout_configure(ptp, rq, on); | 417 | return mlx5e_perout_configure(ptp, rq, on); |
| 418 | case PTP_CLK_REQ_PPS: | ||
| 419 | return mlx5e_pps_configure(ptp, rq, on); | ||
| 353 | default: | 420 | default: |
| 354 | return -EOPNOTSUPP; | 421 | return -EOPNOTSUPP; |
| 355 | } | 422 | } |
| @@ -395,6 +462,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp) | |||
| 395 | return -ENOMEM; | 462 | return -ENOMEM; |
| 396 | tstamp->ptp_info.enable = mlx5e_ptp_enable; | 463 | tstamp->ptp_info.enable = mlx5e_ptp_enable; |
| 397 | tstamp->ptp_info.verify = mlx5e_ptp_verify; | 464 | tstamp->ptp_info.verify = mlx5e_ptp_verify; |
| 465 | tstamp->ptp_info.pps = 1; | ||
| 398 | 466 | ||
| 399 | for (i = 0; i < tstamp->ptp_info.n_pins; i++) { | 467 | for (i = 0; i < tstamp->ptp_info.n_pins; i++) { |
| 400 | snprintf(tstamp->ptp_info.pin_config[i].name, | 468 | snprintf(tstamp->ptp_info.pin_config[i].name, |
| @@ -422,22 +490,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv, | |||
| 422 | tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, | 490 | tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, |
| 423 | cap_max_num_of_pps_out_pins); | 491 | cap_max_num_of_pps_out_pins); |
| 424 | 492 | ||
| 425 | tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); | 493 | tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); |
| 426 | tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); | 494 | tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); |
| 427 | tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); | 495 | tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); |
| 428 | tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); | 496 | tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); |
| 429 | tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); | 497 | tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); |
| 430 | tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); | 498 | tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); |
| 431 | tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); | 499 | tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); |
| 432 | tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); | 500 | tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); |
| 433 | } | 501 | } |
| 434 | 502 | ||
| 435 | void mlx5e_pps_event_handler(struct mlx5e_priv *priv, | 503 | void mlx5e_pps_event_handler(struct mlx5e_priv *priv, |
| 436 | struct ptp_clock_event *event) | 504 | struct ptp_clock_event *event) |
| 437 | { | 505 | { |
| 506 | struct net_device *netdev = priv->netdev; | ||
| 438 | struct mlx5e_tstamp *tstamp = &priv->tstamp; | 507 | struct mlx5e_tstamp *tstamp = &priv->tstamp; |
| 508 | struct timespec64 ts; | ||
| 509 | u64 nsec_now, nsec_delta; | ||
| 510 | u64 cycles_now, cycles_delta; | ||
| 511 | int pin = event->index; | ||
| 512 | s64 ns; | ||
| 513 | unsigned long flags; | ||
| 439 | 514 | ||
| 440 | ptp_clock_event(tstamp->ptp, event); | 515 | switch (tstamp->ptp_info.pin_config[pin].func) { |
| 516 | case PTP_PF_EXTTS: | ||
| 517 | if (tstamp->pps_info.enabled) { | ||
| 518 | event->type = PTP_CLOCK_PPSUSR; | ||
| 519 | event->pps_times.ts_real = ns_to_timespec64(event->timestamp); | ||
| 520 | } else { | ||
| 521 | event->type = PTP_CLOCK_EXTTS; | ||
| 522 | } | ||
| 523 | ptp_clock_event(tstamp->ptp, event); | ||
| 524 | break; | ||
| 525 | case PTP_PF_PEROUT: | ||
| 526 | mlx5e_ptp_gettime(&tstamp->ptp_info, &ts); | ||
| 527 | cycles_now = mlx5_read_internal_timer(tstamp->mdev); | ||
| 528 | ts.tv_sec += 1; | ||
| 529 | ts.tv_nsec = 0; | ||
| 530 | ns = timespec64_to_ns(&ts); | ||
| 531 | write_lock_irqsave(&tstamp->lock, flags); | ||
| 532 | nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); | ||
| 533 | nsec_delta = ns - nsec_now; | ||
| 534 | cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, | ||
| 535 | tstamp->cycles.mult); | ||
| 536 | tstamp->pps_info.start[pin] = cycles_now + cycles_delta; | ||
| 537 | queue_work(priv->wq, &tstamp->pps_info.out_work); | ||
| 538 | write_unlock_irqrestore(&tstamp->lock, flags); | ||
| 539 | break; | ||
| 540 | default: | ||
| 541 | netdev_err(netdev, "%s: Unhandled event\n", __func__); | ||
| 542 | } | ||
| 441 | } | 543 | } |
| 442 | 544 | ||
| 443 | void mlx5e_timestamp_init(struct mlx5e_priv *priv) | 545 | void mlx5e_timestamp_init(struct mlx5e_priv *priv) |
| @@ -473,9 +575,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) | |||
| 473 | do_div(ns, NSEC_PER_SEC / 2 / HZ); | 575 | do_div(ns, NSEC_PER_SEC / 2 / HZ); |
| 474 | tstamp->overflow_period = ns; | 576 | tstamp->overflow_period = ns; |
| 475 | 577 | ||
| 578 | INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out); | ||
| 476 | INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow); | 579 | INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow); |
| 477 | if (tstamp->overflow_period) | 580 | if (tstamp->overflow_period) |
| 478 | schedule_delayed_work(&tstamp->overflow_work, 0); | 581 | queue_delayed_work(priv->wq, &tstamp->overflow_work, 0); |
| 479 | else | 582 | else |
| 480 | mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n"); | 583 | mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n"); |
| 481 | 584 | ||
| @@ -484,16 +587,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) | |||
| 484 | snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); | 587 | snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); |
| 485 | 588 | ||
| 486 | /* Initialize 1PPS data structures */ | 589 | /* Initialize 1PPS data structures */ |
| 487 | #define MAX_PIN_NUM 8 | 590 | if (MLX5_PPS_CAP(priv->mdev)) |
| 488 | tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL); | 591 | mlx5e_get_pps_caps(priv, tstamp); |
| 489 | if (tstamp->pps_pin_caps) { | 592 | if (tstamp->ptp_info.n_pins) |
| 490 | if (MLX5_CAP_GEN(priv->mdev, pps)) | 593 | mlx5e_init_pin_config(tstamp); |
| 491 | mlx5e_get_pps_caps(priv, tstamp); | ||
| 492 | if (tstamp->ptp_info.n_pins) | ||
| 493 | mlx5e_init_pin_config(tstamp); | ||
| 494 | } else { | ||
| 495 | mlx5_core_warn(priv->mdev, "1PPS initialization failed\n"); | ||
| 496 | } | ||
| 497 | 594 | ||
| 498 | tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, | 595 | tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, |
| 499 | &priv->mdev->pdev->dev); | 596 | &priv->mdev->pdev->dev); |
| @@ -516,8 +613,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv) | |||
| 516 | priv->tstamp.ptp = NULL; | 613 | priv->tstamp.ptp = NULL; |
| 517 | } | 614 | } |
| 518 | 615 | ||
| 519 | kfree(tstamp->pps_pin_caps); | 616 | cancel_work_sync(&tstamp->pps_info.out_work); |
| 520 | kfree(tstamp->ptp_info.pin_config); | ||
| 521 | |||
| 522 | cancel_delayed_work_sync(&tstamp->overflow_work); | 617 | cancel_delayed_work_sync(&tstamp->overflow_work); |
| 618 | kfree(tstamp->ptp_info.pin_config); | ||
| 523 | } | 619 | } |
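The perout path in the en_clock.c hunks above converts a requested wall-clock start time into a value of the device's free-running counter: read the current cycle count, convert it to nanoseconds through the timecounter, take the delta to the target time, and scale that delta back into cycles with the clock's mult/shift pair. The sketch below models only that arithmetic in userspace; the 125 MHz clock and the CC_MULT/CC_SHIFT values are invented for illustration and are not the values the driver derives from the hardware.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical cyclecounter parameters: ns = (cycles * mult) >> shift.
 * These model a 125 MHz counter (8 ns per tick); the real driver computes
 * mult/shift from the device clock frequency.
 */
#define CC_MULT  134217728ULL	/* 8 << 24, example only */
#define CC_SHIFT 24		/* example only */

/* cycles -> nanoseconds, mirroring timecounter_cyc2time() for small deltas */
static uint64_t cycles_to_ns(uint64_t cycles)
{
	return (cycles * CC_MULT) >> CC_SHIFT;
}

/* nanoseconds -> cycles, the inverse scaling used for the start time */
static uint64_t ns_to_cycles(uint64_t ns)
{
	return (ns << CC_SHIFT) / CC_MULT;	/* div64_u64() in the kernel */
}

int main(void)
{
	uint64_t cycles_now = 123456789ULL;		/* pretend HW counter read */
	uint64_t nsec_now = cycles_to_ns(cycles_now);
	uint64_t ns_target = nsec_now + 1000000000ULL;	/* start 1 s from now */

	uint64_t nsec_delta = ns_target - nsec_now;
	uint64_t cycles_delta = ns_to_cycles(nsec_delta);
	uint64_t time_stamp = cycles_now + cycles_delta;

	printf("program pulse at hw counter value %llu\n",
	       (unsigned long long)time_stamp);
	return 0;
}

The same delta-based conversion is what the PPS event handler reuses to re-arm the periodic output one second ahead of the current time.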
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 2eb54d36e16e..c1d384fca4dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | |||
| @@ -288,13 +288,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, | |||
| 288 | static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev) | 288 | static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev) |
| 289 | { | 289 | { |
| 290 | struct mlx5e_priv *priv = netdev_priv(dev); | 290 | struct mlx5e_priv *priv = netdev_priv(dev); |
| 291 | struct mlx5e_dcbx *dcbx = &priv->dcbx; | ||
| 292 | u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE; | ||
| 293 | |||
| 294 | if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) | ||
| 295 | mode |= DCB_CAP_DCBX_HOST; | ||
| 296 | 291 | ||
| 297 | return mode; | 292 | return priv->dcbx.cap; |
| 298 | } | 293 | } |
| 299 | 294 | ||
| 300 | static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) | 295 | static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) |
| @@ -312,6 +307,7 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) | |||
| 312 | /* set dcbx to fw controlled */ | 307 | /* set dcbx to fw controlled */ |
| 313 | if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) { | 308 | if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) { |
| 314 | dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO; | 309 | dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO; |
| 310 | dcbx->cap &= ~DCB_CAP_DCBX_HOST; | ||
| 315 | return 0; | 311 | return 0; |
| 316 | } | 312 | } |
| 317 | 313 | ||
| @@ -324,6 +320,8 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) | |||
| 324 | if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev))) | 320 | if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev))) |
| 325 | return 1; | 321 | return 1; |
| 326 | 322 | ||
| 323 | dcbx->cap = mode; | ||
| 324 | |||
| 327 | return 0; | 325 | return 0; |
| 328 | } | 326 | } |
| 329 | 327 | ||
| @@ -628,9 +626,9 @@ static u8 mlx5e_dcbnl_getcap(struct net_device *netdev, | |||
| 628 | *cap = false; | 626 | *cap = false; |
| 629 | break; | 627 | break; |
| 630 | case DCB_CAP_ATTR_DCBX: | 628 | case DCB_CAP_ATTR_DCBX: |
| 631 | *cap = (DCB_CAP_DCBX_LLD_MANAGED | | 629 | *cap = priv->dcbx.cap | |
| 632 | DCB_CAP_DCBX_VER_CEE | | 630 | DCB_CAP_DCBX_VER_CEE | |
| 633 | DCB_CAP_DCBX_STATIC); | 631 | DCB_CAP_DCBX_VER_IEEE; |
| 634 | break; | 632 | break; |
| 635 | default: | 633 | default: |
| 636 | *cap = 0; | 634 | *cap = 0; |
| @@ -754,8 +752,16 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) | |||
| 754 | { | 752 | { |
| 755 | struct mlx5e_dcbx *dcbx = &priv->dcbx; | 753 | struct mlx5e_dcbx *dcbx = &priv->dcbx; |
| 756 | 754 | ||
| 755 | if (!MLX5_CAP_GEN(priv->mdev, qos)) | ||
| 756 | return; | ||
| 757 | |||
| 757 | if (MLX5_CAP_GEN(priv->mdev, dcbx)) | 758 | if (MLX5_CAP_GEN(priv->mdev, dcbx)) |
| 758 | mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode); | 759 | mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode); |
| 759 | 760 | ||
| 761 | priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE | | ||
| 762 | DCB_CAP_DCBX_VER_IEEE; | ||
| 763 | if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) | ||
| 764 | priv->dcbx.cap |= DCB_CAP_DCBX_HOST; | ||
| 765 | |||
| 760 | mlx5e_ets_init(priv); | 766 | mlx5e_ets_init(priv); |
| 761 | } | 767 | } |
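The dcbnl change above caches a capability bitmask in priv->dcbx.cap and adds or clears the host bit as the operational mode moves between host-managed and firmware-managed, instead of recomputing the mask on every getdcbx() call. A toy sketch of that bookkeeping follows; the flag values are illustrative, not the DCB_CAP_DCBX_* uapi constants.

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values; the kernel uses DCB_CAP_DCBX_* from the dcbnl uapi. */
#define CAP_VER_CEE   0x1
#define CAP_VER_IEEE  0x2
#define CAP_HOST      0x4

enum dcbx_mode { DCBX_MODE_FW, DCBX_MODE_HOST };

struct dcbx_state {
	enum dcbx_mode mode;
	uint8_t cap;	/* cached capability mask returned by getdcbx() */
};

static void dcbx_init(struct dcbx_state *s, enum dcbx_mode mode)
{
	s->mode = mode;
	s->cap = CAP_VER_CEE | CAP_VER_IEEE;
	if (mode == DCBX_MODE_HOST)
		s->cap |= CAP_HOST;
}

static void dcbx_set_fw_controlled(struct dcbx_state *s)
{
	s->mode = DCBX_MODE_FW;
	s->cap &= ~CAP_HOST;	/* firmware owns DCBX, drop the host bit */
}

int main(void)
{
	struct dcbx_state s;

	dcbx_init(&s, DCBX_MODE_HOST);
	printf("host mode cap: 0x%x\n", s.cap);	/* 0x7 */
	dcbx_set_fw_controlled(&s);
	printf("fw mode cap:   0x%x\n", s.cap);	/* 0x3 */
	return 0;
}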
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 917fade5f5d5..f5594014715b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
| @@ -641,8 +641,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, | |||
| 641 | 641 | ||
| 642 | new_channels.params = priv->channels.params; | 642 | new_channels.params = priv->channels.params; |
| 643 | new_channels.params.num_channels = count; | 643 | new_channels.params.num_channels = count; |
| 644 | mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt, | 644 | if (!netif_is_rxfh_configured(priv->netdev)) |
| 645 | MLX5E_INDIR_RQT_SIZE, count); | 645 | mlx5e_build_default_indir_rqt(priv->mdev, |
| 646 | new_channels.params.indirection_rqt, | ||
| 647 | MLX5E_INDIR_RQT_SIZE, count); | ||
| 646 | 648 | ||
| 647 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { | 649 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { |
| 648 | priv->channels.params = new_channels.params; | 650 | priv->channels.params = new_channels.params; |
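The en_ethtool.c hunk only rebuilds the default RSS indirection table when the user has not installed one (netif_is_rxfh_configured()), so an explicit table survives a channel-count change. The default table itself is a round-robin spread of channel indices; the sketch below assumes that layout and a table size of 128, matching MLX5E_INDIR_RQT_SIZE in this driver.

#include <stdio.h>

#define INDIR_TABLE_SIZE 128

/* Fill a default indirection table: entry i steers to channel i % count. */
static void build_default_indir_table(unsigned int *table, int size,
				      unsigned int num_channels)
{
	int i;

	for (i = 0; i < size; i++)
		table[i] = i % num_channels;
}

int main(void)
{
	unsigned int table[INDIR_TABLE_SIZE];
	int user_configured = 0;	/* would come from netif_is_rxfh_configured() */
	unsigned int new_count = 6;

	/* Preserve a user-supplied table; otherwise rebuild the default one. */
	if (!user_configured)
		build_default_indir_table(table, INDIR_TABLE_SIZE, new_count);

	printf("entry 0 -> channel %u, entry 7 -> channel %u\n",
	       table[0], table[7]);	/* 0 and 1 */
	return 0;
}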
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index bdd82c9b3992..eafc59280ada 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | |||
| @@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv, | |||
| 276 | 276 | ||
| 277 | static bool outer_header_zero(u32 *match_criteria) | 277 | static bool outer_header_zero(u32 *match_criteria) |
| 278 | { | 278 | { |
| 279 | int size = MLX5_ST_SZ_BYTES(fte_match_param); | 279 | int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers); |
| 280 | char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria, | 280 | char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria, |
| 281 | outer_headers); | 281 | outer_headers); |
| 282 | 282 | ||
| @@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv, | |||
| 320 | 320 | ||
| 321 | spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria)); | 321 | spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria)); |
| 322 | flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; | 322 | flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; |
| 323 | rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1); | 323 | rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0); |
| 324 | if (IS_ERR(rule)) { | 324 | if (IS_ERR(rule)) { |
| 325 | err = PTR_ERR(rule); | 325 | err = PTR_ERR(rule); |
| 326 | netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n", | 326 | netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n", |
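The en_fs_ethtool.c fix makes outer_header_zero() scan only the outer_headers sub-field rather than the whole fte_match_param structure. The underlying check is simply "is this byte range all zeros"; a small sketch of the same idea is below, with the struct layout being illustrative only.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for fte_match_param: we only care about its sub-regions. */
struct match_param {
	unsigned char outer_headers[64];
	unsigned char misc_parameters[64];
	unsigned char inner_headers[64];
};

/* Return 1 if the given region is entirely zero (memchr_inv() in the kernel). */
static int region_is_zero(const void *p, size_t len)
{
	const unsigned char *b = p;
	size_t i;

	for (i = 0; i < len; i++)
		if (b[i])
			return 0;
	return 1;
}

int main(void)
{
	struct match_param mp;

	memset(&mp, 0, sizeof(mp));
	mp.misc_parameters[0] = 0xff;	/* criteria set outside outer_headers */

	/* Checking only the outer_headers region still reports "zero",
	 * which is the point of using the field size, not the struct size.
	 */
	printf("outer_headers zero: %d\n",
	       region_is_zero(mp.outer_headers, sizeof(mp.outer_headers)));
	return 0;
}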
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1eac5003084f..6ad7f07e7861 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -377,7 +377,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, | |||
| 377 | break; | 377 | break; |
| 378 | case MLX5_DEV_EVENT_PPS: | 378 | case MLX5_DEV_EVENT_PPS: |
| 379 | eqe = (struct mlx5_eqe *)param; | 379 | eqe = (struct mlx5_eqe *)param; |
| 380 | ptp_event.type = PTP_CLOCK_EXTTS; | ||
| 381 | ptp_event.index = eqe->data.pps.pin; | 380 | ptp_event.index = eqe->data.pps.pin; |
| 382 | ptp_event.timestamp = | 381 | ptp_event.timestamp = |
| 383 | timecounter_cyc2time(&priv->tstamp.clock, | 382 | timecounter_cyc2time(&priv->tstamp.clock, |
| @@ -1970,6 +1969,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, | |||
| 1970 | } | 1969 | } |
| 1971 | 1970 | ||
| 1972 | mlx5e_build_common_cq_param(priv, param); | 1971 | mlx5e_build_common_cq_param(priv, param); |
| 1972 | param->cq_period_mode = params->rx_cq_period_mode; | ||
| 1973 | } | 1973 | } |
| 1974 | 1974 | ||
| 1975 | static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, | 1975 | static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 325b2c8c1c6d..7344433259fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
| @@ -222,13 +222,13 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, | |||
| 222 | if (unlikely(!page)) | 222 | if (unlikely(!page)) |
| 223 | return -ENOMEM; | 223 | return -ENOMEM; |
| 224 | 224 | ||
| 225 | dma_info->page = page; | ||
| 226 | dma_info->addr = dma_map_page(rq->pdev, page, 0, | 225 | dma_info->addr = dma_map_page(rq->pdev, page, 0, |
| 227 | RQ_PAGE_SIZE(rq), rq->buff.map_dir); | 226 | RQ_PAGE_SIZE(rq), rq->buff.map_dir); |
| 228 | if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { | 227 | if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { |
| 229 | put_page(page); | 228 | put_page(page); |
| 230 | return -ENOMEM; | 229 | return -ENOMEM; |
| 231 | } | 230 | } |
| 231 | dma_info->page = page; | ||
| 232 | 232 | ||
| 233 | return 0; | 233 | return 0; |
| 234 | } | 234 | } |
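The en_rx.c change delays writing dma_info->page until dma_map_page() has succeeded, so a failed mapping never leaves a stale page pointer behind. A minimal model of that "publish the output only after every fallible step succeeded" ordering, with fake alloc/map helpers standing in for the DMA API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dma_info {
	void *page;
	uintptr_t addr;
};

/* Fake mapping step; flip 'fail' to exercise the error path. */
static int fake_map(void *page, uintptr_t *addr, int fail)
{
	if (fail)
		return -1;
	*addr = (uintptr_t)page;	/* pretend this is a bus address */
	return 0;
}

static int page_alloc_mapped(struct dma_info *di, int make_map_fail)
{
	void *page = malloc(4096);
	uintptr_t addr;

	if (!page)
		return -1;

	if (fake_map(page, &addr, make_map_fail)) {
		free(page);
		return -1;		/* di->page was never written */
	}

	/* Publish the results only once both steps have succeeded. */
	di->addr = addr;
	di->page = page;
	return 0;
}

int main(void)
{
	struct dma_info di = { 0 };

	if (page_alloc_mapped(&di, 1))
		printf("map failed, di.page still %p\n", di.page);
	if (!page_alloc_mapped(&di, 0))
		printf("mapped ok, di.page %p\n", di.page);
	free(di.page);
	return 0;
}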
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3c536f560dd2..7f282e8f4e7f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
| @@ -1443,12 +1443,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, | |||
| 1443 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | 1443 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; |
| 1444 | int ret; | 1444 | int ret; |
| 1445 | 1445 | ||
| 1446 | dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6); | 1446 | ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, |
| 1447 | ret = dst->error; | 1447 | fl6); |
| 1448 | if (ret) { | 1448 | if (ret < 0) |
| 1449 | dst_release(dst); | ||
| 1450 | return ret; | 1449 | return ret; |
| 1451 | } | ||
| 1452 | 1450 | ||
| 1453 | *out_ttl = ip6_dst_hoplimit(dst); | 1451 | *out_ttl = ip6_dst_hoplimit(dst); |
| 1454 | 1452 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index aaa0f4ebba9a..31353e5c3c78 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
| @@ -128,10 +128,10 @@ static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb) | |||
| 128 | return mlx5e_skb_l2_header_offset(skb); | 128 | return mlx5e_skb_l2_header_offset(skb); |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode, | 131 | static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, |
| 132 | struct sk_buff *skb) | 132 | struct sk_buff *skb) |
| 133 | { | 133 | { |
| 134 | int hlen; | 134 | u16 hlen; |
| 135 | 135 | ||
| 136 | switch (mode) { | 136 | switch (mode) { |
| 137 | case MLX5_INLINE_MODE_NONE: | 137 | case MLX5_INLINE_MODE_NONE: |
| @@ -140,19 +140,22 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode, | |||
| 140 | hlen = eth_get_headlen(skb->data, skb_headlen(skb)); | 140 | hlen = eth_get_headlen(skb->data, skb_headlen(skb)); |
| 141 | if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb)) | 141 | if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb)) |
| 142 | hlen += VLAN_HLEN; | 142 | hlen += VLAN_HLEN; |
| 143 | return hlen; | 143 | break; |
| 144 | case MLX5_INLINE_MODE_IP: | 144 | case MLX5_INLINE_MODE_IP: |
| 145 | /* When transport header is set to zero, it means no transport | 145 | /* When transport header is set to zero, it means no transport |
| 146 | * header. When transport header is set to 0xff's, it means | 146 | * header. When transport header is set to 0xff's, it means |
| 147 | * transport header wasn't set. | 147 | * transport header wasn't set. |
| 148 | */ | 148 | */ |
| 149 | if (skb_transport_offset(skb)) | 149 | if (skb_transport_offset(skb)) { |
| 150 | return mlx5e_skb_l3_header_offset(skb); | 150 | hlen = mlx5e_skb_l3_header_offset(skb); |
| 151 | break; | ||
| 152 | } | ||
| 151 | /* fall through */ | 153 | /* fall through */ |
| 152 | case MLX5_INLINE_MODE_L2: | 154 | case MLX5_INLINE_MODE_L2: |
| 153 | default: | 155 | default: |
| 154 | return mlx5e_skb_l2_header_offset(skb); | 156 | hlen = mlx5e_skb_l2_header_offset(skb); |
| 155 | } | 157 | } |
| 158 | return min_t(u16, hlen, skb->len); | ||
| 156 | } | 159 | } |
| 157 | 160 | ||
| 158 | static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, | 161 | static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, |
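The en_tx.c hunk restructures mlx5e_calc_min_inline() so every mode flows into one final clamp, min(hlen, skb->len), and the return type is narrowed to u16. The sketch below mirrors that shape with a hypothetical packet summary in place of the sk_buff accessors; the VLAN handling is simplified relative to the driver.

#include <stdint.h>
#include <stdio.h>

enum inline_mode { MODE_NONE, MODE_L2, MODE_IP, MODE_TCP_UDP };

#define ETH_HDR_LEN	14
#define VLAN_HDR_LEN	4

/* Hypothetical packet summary standing in for struct sk_buff. */
struct pkt {
	uint16_t len;		/* total packet length */
	uint16_t l3_offset;	/* offset of the L3 header, 0 if not parsed */
	int has_vlan_tag;	/* VLAN tag already present in metadata */
};

static uint16_t min_u16(uint16_t a, uint16_t b)
{
	return a < b ? a : b;
}

static uint16_t calc_min_inline(enum inline_mode mode, const struct pkt *p)
{
	uint16_t hlen;

	switch (mode) {
	case MODE_NONE:
		return 0;
	case MODE_TCP_UDP:
		hlen = ETH_HDR_LEN;		/* simplified: parsed L2 length */
		if (!p->has_vlan_tag)
			hlen += VLAN_HDR_LEN;	/* room for HW VLAN insertion */
		break;
	case MODE_IP:
		if (p->l3_offset) {
			hlen = p->l3_offset;
			break;
		}
		/* fall through */
	case MODE_L2:
	default:
		hlen = ETH_HDR_LEN;
	}
	/* Never report more inline bytes than the packet actually has. */
	return min_u16(hlen, p->len);
}

int main(void)
{
	struct pkt tiny = { .len = 10, .l3_offset = 14, .has_vlan_tag = 0 };

	printf("inline bytes: %u\n", (unsigned)calc_min_inline(MODE_IP, &tiny));
	return 0;
}

The clamp is what keeps a runt packet from requesting an inline segment larger than the packet itself.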
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index af51a5d2b912..52b9a64cd3a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
| @@ -698,7 +698,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) | |||
| 698 | else | 698 | else |
| 699 | mlx5_core_dbg(dev, "port_module_event is not set\n"); | 699 | mlx5_core_dbg(dev, "port_module_event is not set\n"); |
| 700 | 700 | ||
| 701 | if (MLX5_CAP_GEN(dev, pps)) | 701 | if (MLX5_PPS_CAP(dev)) |
| 702 | async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); | 702 | async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); |
| 703 | 703 | ||
| 704 | if (MLX5_CAP_GEN(dev, fpga)) | 704 | if (MLX5_CAP_GEN(dev, fpga)) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 89bfda419efe..8b18cc9ec026 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
| @@ -1668,7 +1668,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) | |||
| 1668 | int i; | 1668 | int i; |
| 1669 | 1669 | ||
| 1670 | if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || | 1670 | if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || |
| 1671 | MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) | 1671 | MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH || |
| 1672 | esw->mode == SRIOV_NONE) | ||
| 1672 | return; | 1673 | return; |
| 1673 | 1674 | ||
| 1674 | esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n", | 1675 | esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n", |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 95b64025ce36..5bc0593bd76e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
| @@ -815,7 +815,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) | |||
| 815 | struct mlx5_eswitch_rep *rep; | 815 | struct mlx5_eswitch_rep *rep; |
| 816 | int vport; | 816 | int vport; |
| 817 | 817 | ||
| 818 | for (vport = 0; vport < nvports; vport++) { | 818 | for (vport = nvports - 1; vport >= 0; vport--) { |
| 819 | rep = &esw->offloads.vport_reps[vport]; | 819 | rep = &esw->offloads.vport_reps[vport]; |
| 820 | if (!rep->valid) | 820 | if (!rep->valid) |
| 821 | continue; | 821 | continue; |
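The eswitch_offloads.c change walks the vport representors backwards during cleanup so teardown happens in the reverse order of setup. A generic sketch of that pattern, with placeholder representor slots:

#include <stdio.h>

#define NUM_REPS 4

struct rep { int valid; };

static void unload_rep(int i)
{
	printf("unload rep %d\n", i);
}

int main(void)
{
	struct rep reps[NUM_REPS] = { {1}, {0}, {1}, {1} };
	int i;

	/* Tear down in reverse creation order, skipping unused slots. */
	for (i = NUM_REPS - 1; i >= 0; i--) {
		if (!reps[i].valid)
			continue;
		unload_rep(i);
	}
	return 0;
}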
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 1ee5bce85901..85298051a3e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | |||
| @@ -178,8 +178,6 @@ out: | |||
| 178 | 178 | ||
| 179 | static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) | 179 | static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) |
| 180 | { | 180 | { |
| 181 | mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn); | ||
| 182 | |||
| 183 | mlx5_core_destroy_qp(mdev, qp); | 181 | mlx5_core_destroy_qp(mdev, qp); |
| 184 | } | 182 | } |
| 185 | 183 | ||
| @@ -194,8 +192,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv) | |||
| 194 | return err; | 192 | return err; |
| 195 | } | 193 | } |
| 196 | 194 | ||
| 197 | mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); | ||
| 198 | |||
| 199 | err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); | 195 | err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); |
| 200 | if (err) { | 196 | if (err) { |
| 201 | mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); | 197 | mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); |
| @@ -253,6 +249,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) | |||
| 253 | 249 | ||
| 254 | static int mlx5i_init_rx(struct mlx5e_priv *priv) | 250 | static int mlx5i_init_rx(struct mlx5e_priv *priv) |
| 255 | { | 251 | { |
| 252 | struct mlx5i_priv *ipriv = priv->ppriv; | ||
| 256 | int err; | 253 | int err; |
| 257 | 254 | ||
| 258 | err = mlx5e_create_indirect_rqt(priv); | 255 | err = mlx5e_create_indirect_rqt(priv); |
| @@ -271,12 +268,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) | |||
| 271 | if (err) | 268 | if (err) |
| 272 | goto err_destroy_indirect_tirs; | 269 | goto err_destroy_indirect_tirs; |
| 273 | 270 | ||
| 274 | err = mlx5i_create_flow_steering(priv); | 271 | err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); |
| 275 | if (err) | 272 | if (err) |
| 276 | goto err_destroy_direct_tirs; | 273 | goto err_destroy_direct_tirs; |
| 277 | 274 | ||
| 275 | err = mlx5i_create_flow_steering(priv); | ||
| 276 | if (err) | ||
| 277 | goto err_remove_rx_underlay_qpn; | ||
| 278 | |||
| 278 | return 0; | 279 | return 0; |
| 279 | 280 | ||
| 281 | err_remove_rx_underlay_qpn: | ||
| 282 | mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); | ||
| 280 | err_destroy_direct_tirs: | 283 | err_destroy_direct_tirs: |
| 281 | mlx5e_destroy_direct_tirs(priv); | 284 | mlx5e_destroy_direct_tirs(priv); |
| 282 | err_destroy_indirect_tirs: | 285 | err_destroy_indirect_tirs: |
| @@ -290,6 +293,9 @@ err_destroy_indirect_rqts: | |||
| 290 | 293 | ||
| 291 | static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) | 294 | static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) |
| 292 | { | 295 | { |
| 296 | struct mlx5i_priv *ipriv = priv->ppriv; | ||
| 297 | |||
| 298 | mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); | ||
| 293 | mlx5i_destroy_flow_steering(priv); | 299 | mlx5i_destroy_flow_steering(priv); |
| 294 | mlx5e_destroy_direct_tirs(priv); | 300 | mlx5e_destroy_direct_tirs(priv); |
| 295 | mlx5e_destroy_indirect_tirs(priv); | 301 | mlx5e_destroy_indirect_tirs(priv); |
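The ipoib hunks move the underlay QPN attach into mlx5i_init_rx() and give it its own unwind label, following the usual kernel pattern of one goto label per completed step, unwound in reverse on failure. A compact, self-contained sketch of that error-unwinding shape with placeholder step names:

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("do   %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

/* Each completed step gets an unwind label; a failure jumps to the label
 * that undoes everything done so far, in reverse order.
 */
static int init_rx(int fail_at_flow_steering)
{
	int err;

	err = step("create tirs", 0);
	if (err)
		return err;

	err = step("attach underlay qpn", 0);
	if (err)
		goto err_destroy_tirs;

	err = step("create flow steering", fail_at_flow_steering);
	if (err)
		goto err_detach_qpn;

	return 0;

err_detach_qpn:
	undo("attach underlay qpn");
err_destroy_tirs:
	undo("create tirs");
	return err;
}

int main(void)
{
	printf("init_rx returned %d\n", init_rx(1));
	return 0;
}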
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index a3a836bdcfd2..f26f97fe4666 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c | |||
| @@ -162,22 +162,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev) | |||
| 162 | static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, | 162 | static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, |
| 163 | u8 *port1, u8 *port2) | 163 | u8 *port1, u8 *port2) |
| 164 | { | 164 | { |
| 165 | if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { | 165 | *port1 = 1; |
| 166 | if (tracker->netdev_state[0].tx_enabled) { | 166 | *port2 = 2; |
| 167 | *port1 = 1; | 167 | if (!tracker->netdev_state[0].tx_enabled || |
| 168 | *port2 = 1; | 168 | !tracker->netdev_state[0].link_up) { |
| 169 | } else { | 169 | *port1 = 2; |
| 170 | *port1 = 2; | 170 | return; |
| 171 | *port2 = 2; | ||
| 172 | } | ||
| 173 | } else { | ||
| 174 | *port1 = 1; | ||
| 175 | *port2 = 2; | ||
| 176 | if (!tracker->netdev_state[0].link_up) | ||
| 177 | *port1 = 2; | ||
| 178 | else if (!tracker->netdev_state[1].link_up) | ||
| 179 | *port2 = 1; | ||
| 180 | } | 171 | } |
| 172 | |||
| 173 | if (!tracker->netdev_state[1].tx_enabled || | ||
| 174 | !tracker->netdev_state[1].link_up) | ||
| 175 | *port2 = 1; | ||
| 181 | } | 176 | } |
| 182 | 177 | ||
| 183 | static void mlx5_activate_lag(struct mlx5_lag *ldev, | 178 | static void mlx5_activate_lag(struct mlx5_lag *ldev, |
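The lag.c rewrite collapses the active-backup and hash cases into one rule: start from the identity mapping (port 1 -> 1, port 2 -> 2), move port 1's traffic to port 2 if slave 0 is unusable, otherwise move port 2's traffic to port 1 if slave 1 is unusable. A standalone sketch of that mapping:

#include <stdio.h>

struct slave_state {
	int tx_enabled;
	int link_up;
};

static void infer_tx_affinity(const struct slave_state s[2],
			      unsigned char *port1, unsigned char *port2)
{
	*port1 = 1;
	*port2 = 2;

	/* Slave 0 unusable: steer all traffic to port 2 and stop. */
	if (!s[0].tx_enabled || !s[0].link_up) {
		*port1 = 2;
		return;
	}

	/* Slave 1 unusable: steer its share back to port 1. */
	if (!s[1].tx_enabled || !s[1].link_up)
		*port2 = 1;
}

int main(void)
{
	struct slave_state s[2] = { { 1, 1 }, { 1, 0 } };	/* slave 1 link down */
	unsigned char p1, p2;

	infer_tx_affinity(s, &p1, &p2);
	printf("port1 -> %u, port2 -> %u\n", (unsigned)p1, (unsigned)p2);
	return 0;
}

Note that when slave 0 is down the function returns early, so if both slaves are down everything stays mapped to port 2, matching the new driver logic above.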
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c065132b956d..16885827367b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
| @@ -1186,7 +1186,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, | |||
| 1186 | } | 1186 | } |
| 1187 | } | 1187 | } |
| 1188 | 1188 | ||
| 1189 | clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); | ||
| 1190 | set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); | 1189 | set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); |
| 1191 | out: | 1190 | out: |
| 1192 | mutex_unlock(&dev->intf_state_mutex); | 1191 | mutex_unlock(&dev->intf_state_mutex); |
| @@ -1261,7 +1260,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, | |||
| 1261 | mlx5_drain_health_recovery(dev); | 1260 | mlx5_drain_health_recovery(dev); |
| 1262 | 1261 | ||
| 1263 | mutex_lock(&dev->intf_state_mutex); | 1262 | mutex_lock(&dev->intf_state_mutex); |
| 1264 | if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) { | 1263 | if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { |
| 1265 | dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", | 1264 | dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", |
| 1266 | __func__); | 1265 | __func__); |
| 1267 | if (cleanup) | 1266 | if (cleanup) |
| @@ -1270,7 +1269,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, | |||
| 1270 | } | 1269 | } |
| 1271 | 1270 | ||
| 1272 | clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); | 1271 | clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); |
| 1273 | set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); | ||
| 1274 | 1272 | ||
| 1275 | if (mlx5_device_registered(dev)) | 1273 | if (mlx5_device_registered(dev)) |
| 1276 | mlx5_detach_device(dev); | 1274 | mlx5_detach_device(dev); |
| @@ -1565,8 +1563,6 @@ static void shutdown(struct pci_dev *pdev) | |||
| 1565 | int err; | 1563 | int err; |
| 1566 | 1564 | ||
| 1567 | dev_info(&pdev->dev, "Shutdown was called\n"); | 1565 | dev_info(&pdev->dev, "Shutdown was called\n"); |
| 1568 | /* Notify mlx5 clients that the kernel is being shut down */ | ||
| 1569 | set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state); | ||
| 1570 | err = mlx5_try_fast_unload(dev); | 1566 | err = mlx5_try_fast_unload(dev); |
| 1571 | if (err) | 1567 | if (err) |
| 1572 | mlx5_unload_one(dev, priv, false); | 1568 | mlx5_unload_one(dev, priv, false); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 6a3d6bef7dd4..6a263e8d883a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | |||
| @@ -154,6 +154,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size); | |||
| 154 | int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); | 154 | int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); |
| 155 | int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); | 155 | int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); |
| 156 | 156 | ||
| 157 | #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \ | ||
| 158 | MLX5_CAP_GEN((mdev), pps_modify) && \ | ||
| 159 | MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \ | ||
| 160 | MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj)) | ||
| 161 | |||
| 157 | int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw); | 162 | int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw); |
| 158 | 163 | ||
| 159 | void mlx5e_init(void); | 164 | void mlx5e_init(void); |
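MLX5_PPS_CAP() in the header hunk above replaces scattered per-site checks with a single predicate requiring every capability bit needed for full 1PPS support. A sketch of the same "all required capabilities present" gating, with invented bit names rather than the firmware capability fields:

#include <stdio.h>

/* Invented capability bits for illustration. */
#define CAP_PPS            (1u << 0)
#define CAP_PPS_MODIFY     (1u << 1)
#define CAP_MTPPS_FS       (1u << 2)
#define CAP_MTPPS_PER_ADJ  (1u << 3)

struct dev_caps { unsigned int bits; };

/* Feature is usable only when every required capability is advertised. */
#define PPS_CAP(dev) \
	(((dev)->bits & (CAP_PPS | CAP_PPS_MODIFY | \
			 CAP_MTPPS_FS | CAP_MTPPS_PER_ADJ)) == \
	 (CAP_PPS | CAP_PPS_MODIFY | CAP_MTPPS_FS | CAP_MTPPS_PER_ADJ))

int main(void)
{
	struct dev_caps partial = { .bits = CAP_PPS | CAP_PPS_MODIFY };
	struct dev_caps full    = { .bits = CAP_PPS | CAP_PPS_MODIFY |
					    CAP_MTPPS_FS | CAP_MTPPS_PER_ADJ };

	printf("partial: %d, full: %d\n", PPS_CAP(&partial), PPS_CAP(&full));
	return 0;
}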
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index bcdf7779c48d..bf99d40e30b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c | |||
| @@ -88,7 +88,11 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) | |||
| 88 | int vf; | 88 | int vf; |
| 89 | 89 | ||
| 90 | if (!sriov->enabled_vfs) | 90 | if (!sriov->enabled_vfs) |
| 91 | #ifdef CONFIG_MLX5_CORE_EN | ||
| 92 | goto disable_sriov_resources; | ||
| 93 | #else | ||
| 91 | return; | 94 | return; |
| 95 | #endif | ||
| 92 | 96 | ||
| 93 | for (vf = 0; vf < sriov->num_vfs; vf++) { | 97 | for (vf = 0; vf < sriov->num_vfs; vf++) { |
| 94 | if (!sriov->vfs_ctx[vf].enabled) | 98 | if (!sriov->vfs_ctx[vf].enabled) |
| @@ -103,6 +107,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) | |||
| 103 | } | 107 | } |
| 104 | 108 | ||
| 105 | #ifdef CONFIG_MLX5_CORE_EN | 109 | #ifdef CONFIG_MLX5_CORE_EN |
| 110 | disable_sriov_resources: | ||
| 106 | mlx5_eswitch_disable_sriov(dev->priv.eswitch); | 111 | mlx5_eswitch_disable_sriov(dev->priv.eswitch); |
| 107 | #endif | 112 | #endif |
| 108 | 113 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index f774de6f5fcb..520f6382dfde 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c | |||
| @@ -201,13 +201,13 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev, | |||
| 201 | static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, | 201 | static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, |
| 202 | u16 lwm, int is_srq) | 202 | u16 lwm, int is_srq) |
| 203 | { | 203 | { |
| 204 | /* arm_srq structs missing using identical xrc ones */ | 204 | u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0}; |
| 205 | u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; | 205 | u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0}; |
| 206 | u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0}; | ||
| 207 | 206 | ||
| 208 | MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); | 207 | MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ); |
| 209 | MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn); | 208 | MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ); |
| 210 | MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm); | 209 | MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn); |
| 210 | MLX5_SET(arm_rq_in, srq_in, lwm, lwm); | ||
| 211 | 211 | ||
| 212 | return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), | 212 | return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), |
| 213 | srq_out, sizeof(srq_out)); | 213 | srq_out, sizeof(srq_out)); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 60bf8f27cc00..c6a3e61b53bd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
| @@ -4139,6 +4139,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, | |||
| 4139 | return -EINVAL; | 4139 | return -EINVAL; |
| 4140 | if (!info->linking) | 4140 | if (!info->linking) |
| 4141 | break; | 4141 | break; |
| 4142 | if (netdev_has_any_upper_dev(upper_dev)) | ||
| 4143 | return -EINVAL; | ||
| 4142 | if (netif_is_lag_master(upper_dev) && | 4144 | if (netif_is_lag_master(upper_dev) && |
| 4143 | !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, | 4145 | !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, |
| 4144 | info->upper_info)) | 4146 | info->upper_info)) |
| @@ -4258,6 +4260,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, | |||
| 4258 | upper_dev = info->upper_dev; | 4260 | upper_dev = info->upper_dev; |
| 4259 | if (!netif_is_bridge_master(upper_dev)) | 4261 | if (!netif_is_bridge_master(upper_dev)) |
| 4260 | return -EINVAL; | 4262 | return -EINVAL; |
| 4263 | if (!info->linking) | ||
| 4264 | break; | ||
| 4265 | if (netdev_has_any_upper_dev(upper_dev)) | ||
| 4266 | return -EINVAL; | ||
| 4261 | break; | 4267 | break; |
| 4262 | case NETDEV_CHANGEUPPER: | 4268 | case NETDEV_CHANGEUPPER: |
| 4263 | upper_dev = info->upper_dev; | 4269 | upper_dev = info->upper_dev; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 383fef5a8e24..4b2e0fd7d51e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
| @@ -1512,6 +1512,10 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, | |||
| 1512 | static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, | 1512 | static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, |
| 1513 | struct mlxsw_sp_fib_entry *fib_entry); | 1513 | struct mlxsw_sp_fib_entry *fib_entry); |
| 1514 | 1514 | ||
| 1515 | static bool | ||
| 1516 | mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node, | ||
| 1517 | const struct mlxsw_sp_fib_entry *fib_entry); | ||
| 1518 | |||
| 1515 | static int | 1519 | static int |
| 1516 | mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, | 1520 | mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, |
| 1517 | struct mlxsw_sp_nexthop_group *nh_grp) | 1521 | struct mlxsw_sp_nexthop_group *nh_grp) |
| @@ -1520,6 +1524,9 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, | |||
| 1520 | int err; | 1524 | int err; |
| 1521 | 1525 | ||
| 1522 | list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { | 1526 | list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { |
| 1527 | if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node, | ||
| 1528 | fib_entry)) | ||
| 1529 | continue; | ||
| 1523 | err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); | 1530 | err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); |
| 1524 | if (err) | 1531 | if (err) |
| 1525 | return err; | 1532 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 656b2d3f1bee..d39ffbfcc436 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
| @@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 626 | 626 | ||
| 627 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, | 627 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, |
| 628 | orig_dev); | 628 | orig_dev); |
| 629 | if (WARN_ON(!bridge_port)) | 629 | if (!bridge_port) |
| 630 | return -EINVAL; | 630 | return 0; |
| 631 | 631 | ||
| 632 | err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, | 632 | err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, |
| 633 | MLXSW_SP_FLOOD_TYPE_UC, | 633 | MLXSW_SP_FLOOD_TYPE_UC, |
| @@ -705,21 +705,28 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 705 | bool is_port_mc_router) | 705 | bool is_port_mc_router) |
| 706 | { | 706 | { |
| 707 | struct mlxsw_sp_bridge_port *bridge_port; | 707 | struct mlxsw_sp_bridge_port *bridge_port; |
| 708 | int err; | ||
| 708 | 709 | ||
| 709 | if (switchdev_trans_ph_prepare(trans)) | 710 | if (switchdev_trans_ph_prepare(trans)) |
| 710 | return 0; | 711 | return 0; |
| 711 | 712 | ||
| 712 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, | 713 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, |
| 713 | orig_dev); | 714 | orig_dev); |
| 714 | if (WARN_ON(!bridge_port)) | 715 | if (!bridge_port) |
| 715 | return -EINVAL; | 716 | return 0; |
| 716 | 717 | ||
| 717 | if (!bridge_port->bridge_device->multicast_enabled) | 718 | if (!bridge_port->bridge_device->multicast_enabled) |
| 718 | return 0; | 719 | goto out; |
| 719 | 720 | ||
| 720 | return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, | 721 | err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, |
| 721 | MLXSW_SP_FLOOD_TYPE_MC, | 722 | MLXSW_SP_FLOOD_TYPE_MC, |
| 722 | is_port_mc_router); | 723 | is_port_mc_router); |
| 724 | if (err) | ||
| 725 | return err; | ||
| 726 | |||
| 727 | out: | ||
| 728 | bridge_port->mrouter = is_port_mc_router; | ||
| 729 | return 0; | ||
| 723 | } | 730 | } |
| 724 | 731 | ||
| 725 | static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, | 732 | static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, |
| @@ -1283,15 +1290,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 1283 | return 0; | 1290 | return 0; |
| 1284 | 1291 | ||
| 1285 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); | 1292 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); |
| 1286 | if (WARN_ON(!bridge_port)) | 1293 | if (!bridge_port) |
| 1287 | return -EINVAL; | 1294 | return 0; |
| 1288 | 1295 | ||
| 1289 | bridge_device = bridge_port->bridge_device; | 1296 | bridge_device = bridge_port->bridge_device; |
| 1290 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, | 1297 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, |
| 1291 | bridge_device, | 1298 | bridge_device, |
| 1292 | mdb->vid); | 1299 | mdb->vid); |
| 1293 | if (WARN_ON(!mlxsw_sp_port_vlan)) | 1300 | if (!mlxsw_sp_port_vlan) |
| 1294 | return -EINVAL; | 1301 | return 0; |
| 1295 | 1302 | ||
| 1296 | fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); | 1303 | fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); |
| 1297 | 1304 | ||
| @@ -1407,15 +1414,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 1407 | int err = 0; | 1414 | int err = 0; |
| 1408 | 1415 | ||
| 1409 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); | 1416 | bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); |
| 1410 | if (WARN_ON(!bridge_port)) | 1417 | if (!bridge_port) |
| 1411 | return -EINVAL; | 1418 | return 0; |
| 1412 | 1419 | ||
| 1413 | bridge_device = bridge_port->bridge_device; | 1420 | bridge_device = bridge_port->bridge_device; |
| 1414 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, | 1421 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, |
| 1415 | bridge_device, | 1422 | bridge_device, |
| 1416 | mdb->vid); | 1423 | mdb->vid); |
| 1417 | if (WARN_ON(!mlxsw_sp_port_vlan)) | 1424 | if (!mlxsw_sp_port_vlan) |
| 1418 | return -EINVAL; | 1425 | return 0; |
| 1419 | 1426 | ||
| 1420 | fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); | 1427 | fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); |
| 1421 | 1428 | ||
| @@ -1974,6 +1981,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) | |||
| 1974 | 1981 | ||
| 1975 | } | 1982 | } |
| 1976 | 1983 | ||
| 1984 | static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp) | ||
| 1985 | { | ||
| 1986 | struct mlxsw_sp_mid *mid, *tmp; | ||
| 1987 | |||
| 1988 | list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) { | ||
| 1989 | list_del(&mid->list); | ||
| 1990 | clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); | ||
| 1991 | kfree(mid); | ||
| 1992 | } | ||
| 1993 | } | ||
| 1994 | |||
| 1977 | int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) | 1995 | int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) |
| 1978 | { | 1996 | { |
| 1979 | struct mlxsw_sp_bridge *bridge; | 1997 | struct mlxsw_sp_bridge *bridge; |
| @@ -1996,7 +2014,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) | |||
| 1996 | void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) | 2014 | void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) |
| 1997 | { | 2015 | { |
| 1998 | mlxsw_sp_fdb_fini(mlxsw_sp); | 2016 | mlxsw_sp_fdb_fini(mlxsw_sp); |
| 1999 | WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list)); | 2017 | mlxsw_sp_mids_fini(mlxsw_sp); |
| 2000 | WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); | 2018 | WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); |
| 2001 | kfree(mlxsw_sp->bridge); | 2019 | kfree(mlxsw_sp->bridge); |
| 2002 | } | 2020 | } |
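mlxsw_sp_mids_fini() above drains the multicast group list on teardown instead of warning when it is non-empty: each entry is unlinked, its bitmap slot released, and the memory freed, using the _safe iterator because entries are removed while walking. A userspace sketch of the same teardown with a plain singly linked list and a byte array standing in for the bitmap:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mid {
	unsigned int mid;	/* index into the allocation bitmap */
	struct mid *next;
};

struct bridge {
	struct mid *mids_list;
	unsigned char mids_bitmap[32];	/* one byte per slot, for simplicity */
};

static void mid_add(struct bridge *br, unsigned int idx)
{
	struct mid *m = malloc(sizeof(*m));

	if (!m)
		return;
	m->mid = idx;
	m->next = br->mids_list;
	br->mids_list = m;
	br->mids_bitmap[idx] = 1;
}

/* Free every remaining entry; safe because 'next' is saved before freeing. */
static void mids_fini(struct bridge *br)
{
	struct mid *m = br->mids_list, *tmp;

	while (m) {
		tmp = m->next;
		br->mids_bitmap[m->mid] = 0;
		free(m);
		m = tmp;
	}
	br->mids_list = NULL;
}

int main(void)
{
	struct bridge br;

	memset(&br, 0, sizeof(br));
	mid_add(&br, 3);
	mid_add(&br, 7);
	mids_fini(&br);
	printf("slot 3 in use: %d\n", br.mids_bitmap[3]);	/* 0 */
	return 0;
}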
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index dd7fa9cf225f..b0837b58c3a1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c | |||
| @@ -115,14 +115,10 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb) | |||
| 115 | return; | 115 | return; |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | if (link) { | 118 | if (link) |
| 119 | netif_carrier_on(netdev); | 119 | netif_carrier_on(netdev); |
| 120 | rtnl_lock(); | 120 | else |
| 121 | dev_set_mtu(netdev, be16_to_cpu(msg->mtu)); | ||
| 122 | rtnl_unlock(); | ||
| 123 | } else { | ||
| 124 | netif_carrier_off(netdev); | 121 | netif_carrier_off(netdev); |
| 125 | } | ||
| 126 | rcu_read_unlock(); | 122 | rcu_read_unlock(); |
| 127 | } | 123 | } |
| 128 | 124 | ||
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 0e08404480ef..d25b5038c3a2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c | |||
| @@ -42,33 +42,29 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame, | |||
| 42 | struct tc_cls_flower_offload *flow, u8 key_type, | 42 | struct tc_cls_flower_offload *flow, u8 key_type, |
| 43 | bool mask_version) | 43 | bool mask_version) |
| 44 | { | 44 | { |
| 45 | struct fl_flow_key *target = mask_version ? flow->mask : flow->key; | ||
| 45 | struct flow_dissector_key_vlan *flow_vlan; | 46 | struct flow_dissector_key_vlan *flow_vlan; |
| 46 | u16 tmp_tci; | 47 | u16 tmp_tci; |
| 47 | 48 | ||
| 49 | memset(frame, 0, sizeof(struct nfp_flower_meta_two)); | ||
| 48 | /* Populate the metadata frame. */ | 50 | /* Populate the metadata frame. */ |
| 49 | frame->nfp_flow_key_layer = key_type; | 51 | frame->nfp_flow_key_layer = key_type; |
| 50 | frame->mask_id = ~0; | 52 | frame->mask_id = ~0; |
| 51 | 53 | ||
| 52 | if (mask_version) { | 54 | if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) { |
| 53 | frame->tci = cpu_to_be16(~0); | 55 | flow_vlan = skb_flow_dissector_target(flow->dissector, |
| 54 | return; | 56 | FLOW_DISSECTOR_KEY_VLAN, |
| 55 | } | 57 | target); |
| 56 | 58 | /* Populate the tci field. */ | |
| 57 | flow_vlan = skb_flow_dissector_target(flow->dissector, | 59 | if (flow_vlan->vlan_id) { |
| 58 | FLOW_DISSECTOR_KEY_VLAN, | 60 | tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, |
| 59 | flow->key); | 61 | flow_vlan->vlan_priority) | |
| 60 | 62 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, | |
| 61 | /* Populate the tci field. */ | 63 | flow_vlan->vlan_id) | |
| 62 | if (!flow_vlan->vlan_id) { | 64 | NFP_FLOWER_MASK_VLAN_CFI; |
| 63 | tmp_tci = 0; | 65 | frame->tci = cpu_to_be16(tmp_tci); |
| 64 | } else { | 66 | } |
| 65 | tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, | ||
| 66 | flow_vlan->vlan_priority) | | ||
| 67 | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, | ||
| 68 | flow_vlan->vlan_id) | | ||
| 69 | NFP_FLOWER_MASK_VLAN_CFI; | ||
| 70 | } | 67 | } |
| 71 | frame->tci = cpu_to_be16(tmp_tci); | ||
| 72 | } | 68 | } |
| 73 | 69 | ||
| 74 | static void | 70 | static void |
| @@ -99,17 +95,18 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame, | |||
| 99 | bool mask_version) | 95 | bool mask_version) |
| 100 | { | 96 | { |
| 101 | struct fl_flow_key *target = mask_version ? flow->mask : flow->key; | 97 | struct fl_flow_key *target = mask_version ? flow->mask : flow->key; |
| 102 | struct flow_dissector_key_eth_addrs *flow_mac; | 98 | struct flow_dissector_key_eth_addrs *addr; |
| 103 | |||
| 104 | flow_mac = skb_flow_dissector_target(flow->dissector, | ||
| 105 | FLOW_DISSECTOR_KEY_ETH_ADDRS, | ||
| 106 | target); | ||
| 107 | 99 | ||
| 108 | memset(frame, 0, sizeof(struct nfp_flower_mac_mpls)); | 100 | memset(frame, 0, sizeof(struct nfp_flower_mac_mpls)); |
| 109 | 101 | ||
| 110 | /* Populate mac frame. */ | 102 | if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
| 111 | ether_addr_copy(frame->mac_dst, &flow_mac->dst[0]); | 103 | addr = skb_flow_dissector_target(flow->dissector, |
| 112 | ether_addr_copy(frame->mac_src, &flow_mac->src[0]); | 104 | FLOW_DISSECTOR_KEY_ETH_ADDRS, |
| 105 | target); | ||
| 106 | /* Populate mac frame. */ | ||
| 107 | ether_addr_copy(frame->mac_dst, &addr->dst[0]); | ||
| 108 | ether_addr_copy(frame->mac_src, &addr->src[0]); | ||
| 109 | } | ||
| 113 | 110 | ||
| 114 | if (mask_version) | 111 | if (mask_version) |
| 115 | frame->mpls_lse = cpu_to_be32(~0); | 112 | frame->mpls_lse = cpu_to_be32(~0); |
| @@ -121,14 +118,17 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame, | |||
| 121 | bool mask_version) | 118 | bool mask_version) |
| 122 | { | 119 | { |
| 123 | struct fl_flow_key *target = mask_version ? flow->mask : flow->key; | 120 | struct fl_flow_key *target = mask_version ? flow->mask : flow->key; |
| 124 | struct flow_dissector_key_ports *flow_tp; | 121 | struct flow_dissector_key_ports *tp; |
| 125 | 122 | ||
| 126 | flow_tp = skb_flow_dissector_target(flow->dissector, | 123 | memset(frame, 0, sizeof(struct nfp_flower_tp_ports)); |
| 127 | FLOW_DISSECTOR_KEY_PORTS, | ||
| 128 | target); | ||
| 129 | 124 | ||
| 130 | frame->port_src = flow_tp->src; | 125 | if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) { |
| 131 | frame->port_dst = flow_tp->dst; | 126 | tp = skb_flow_dissector_target(flow->dissector, |
| 127 | FLOW_DISSECTOR_KEY_PORTS, | ||
| 128 | target); | ||
| 129 | frame->port_src = tp->src; | ||
| 130 | frame->port_dst = tp->dst; | ||
| 131 | } | ||
| 132 | } | 132 | } |
| 133 | 133 | ||
| 134 | static void | 134 | static void |
| @@ -137,25 +137,27 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame, | |||
| 137 | bool mask_version) | 137 | bool mask_version) |
| 138 | { | 138 | { |
| 139 | struct fl_flow_key *target = mask_version ? flow->mask : flow->key; | 139 | struct fl_flow_key *target = mask_version ? flow->mask : flow->key; |
| 140 | struct flow_dissector_key_ipv4_addrs *flow_ipv4; | 140 | struct flow_dissector_key_ipv4_addrs *addr; |
| 141 | struct flow_dissector_key_basic *flow_basic; | 141 | struct flow_dissector_key_basic *basic; |
| 142 | |||
| 143 | flow_ipv4 = skb_flow_dissector_target(flow->dissector, | ||
| 144 | FLOW_DISSECTOR_KEY_IPV4_ADDRS, | ||
| 145 | target); | ||
| 146 | |||
| 147 | flow_basic = skb_flow_dissector_target(flow->dissector, | ||
| 148 | FLOW_DISSECTOR_KEY_BASIC, | ||
| 149 | target); | ||
| 150 | 142 | ||
| 151 | /* Populate IPv4 frame. */ | ||
| 152 | frame->reserved = 0; | ||
| 153 | frame->ipv4_src = flow_ipv4->src; | ||
| 154 | frame->ipv4_dst = flow_ipv4->dst; | ||
| 155 | frame->proto = flow_basic->ip_proto; | ||
| 156 | /* Wildcard TOS/TTL for now. */ | 143 | /* Wildcard TOS/TTL for now. */ |
| 157 | frame->tos = 0; | 144 | memset(frame, 0, sizeof(struct nfp_flower_ipv4)); |
| 158 | frame->ttl = 0; | 145 | |
| 146 | if (dissector_uses_key(flow->dissector, | ||
| 147 | FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { | ||
| 148 | addr = skb_flow_dissector_target(flow->dissector, | ||
| 149 | FLOW_DISSECTOR_KEY_IPV4_ADDRS, | ||
| 150 | target); | ||
| 151 | frame->ipv4_src = addr->src; | ||
| 152 | frame->ipv4_dst = addr->dst; | ||
| 153 | } | ||
| 154 | |||
| 155 | if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { | ||
| 156 | basic = skb_flow_dissector_target(flow->dissector, | ||
| 157 | FLOW_DISSECTOR_KEY_BASIC, | ||
| 158 | target); | ||
| 159 | frame->proto = basic->ip_proto; | ||
| 160 | } | ||
| 159 | } | 161 | } |
| 160 | 162 | ||
| 161 | static void | 163 | static void |
| @@ -164,26 +166,27 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame, | |||
| 164 | bool mask_version) | 166 | bool mask_version) |
| 165 | { | 167 | { |
| 166 | struct fl_flow_key *target = mask_version ? flow->mask : flow->key; | 168 | struct fl_flow_key *target = mask_version ? flow->mask : flow->key; |
| 167 | struct flow_dissector_key_ipv6_addrs *flow_ipv6; | 169 | struct flow_dissector_key_ipv6_addrs *addr; |
| 168 | struct flow_dissector_key_basic *flow_basic; | 170 | struct flow_dissector_key_basic *basic; |
| 169 | |||
| 170 | flow_ipv6 = skb_flow_dissector_target(flow->dissector, | ||
| 171 | FLOW_DISSECTOR_KEY_IPV6_ADDRS, | ||
| 172 | target); | ||
| 173 | 171 | ||
| 174 | flow_basic = skb_flow_dissector_target(flow->dissector, | ||
| 175 | FLOW_DISSECTOR_KEY_BASIC, | ||
| 176 | target); | ||
| 177 | |||
| 178 | /* Populate IPv6 frame. */ | ||
| 179 | frame->reserved = 0; | ||
| 180 | frame->ipv6_src = flow_ipv6->src; | ||
| 181 | frame->ipv6_dst = flow_ipv6->dst; | ||
| 182 | frame->proto = flow_basic->ip_proto; | ||
| 183 | /* Wildcard LABEL/TOS/TTL for now. */ | 172 | /* Wildcard LABEL/TOS/TTL for now. */ |
| 184 | frame->ipv6_flow_label_exthdr = 0; | 173 | memset(frame, 0, sizeof(struct nfp_flower_ipv6)); |
| 185 | frame->tos = 0; | 174 | |
| 186 | frame->ttl = 0; | 175 | if (dissector_uses_key(flow->dissector, |
| 176 | FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { | ||
| 177 | addr = skb_flow_dissector_target(flow->dissector, | ||
| 178 | FLOW_DISSECTOR_KEY_IPV6_ADDRS, | ||
| 179 | target); | ||
| 180 | frame->ipv6_src = addr->src; | ||
| 181 | frame->ipv6_dst = addr->dst; | ||
| 182 | } | ||
| 183 | |||
| 184 | if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { | ||
| 185 | basic = skb_flow_dissector_target(flow->dissector, | ||
| 186 | FLOW_DISSECTOR_KEY_BASIC, | ||
| 187 | target); | ||
| 188 | frame->proto = basic->ip_proto; | ||
| 189 | } | ||
| 187 | } | 190 | } |
| 188 | 191 | ||
| 189 | int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, | 192 | int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, |
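The match.c rework follows one pattern throughout: zero the output frame first, then populate each field only if the flow dissector actually carries the corresponding key (dissector_uses_key()), so absent keys end up wildcarded instead of being read from memory that was never filled in. A sketch of that "zero, then conditionally fill from optional keys" structure with a made-up key set and frame layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Made-up key identifiers and match structures for illustration. */
enum { KEY_ETH = 1u << 0, KEY_IPV4 = 1u << 1, KEY_PORTS = 1u << 2 };

struct flow_keys {
	unsigned int used;	/* bitmask of keys the classifier provided */
	uint8_t eth_dst[6];
	uint32_t ipv4_src, ipv4_dst;
	uint16_t port_src, port_dst;
};

struct hw_ipv4_frame {
	uint32_t ipv4_src, ipv4_dst;
	uint8_t proto;
	uint8_t pad[3];
};

static void compile_ipv4(struct hw_ipv4_frame *frame, const struct flow_keys *k)
{
	/* Start fully wildcarded and only copy the keys that are present. */
	memset(frame, 0, sizeof(*frame));

	if (k->used & KEY_IPV4) {
		frame->ipv4_src = k->ipv4_src;
		frame->ipv4_dst = k->ipv4_dst;
	}
}

int main(void)
{
	struct flow_keys k = { .used = KEY_ETH, .ipv4_src = 0x0a000001 };
	struct hw_ipv4_frame f;

	compile_ipv4(&f, &k);
	printf("ipv4_src in frame: 0x%x\n", (unsigned)f.ipv4_src);	/* 0, key absent */
	return 0;
}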
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 4ad10bd5e139..74a96d6bb05c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c | |||
| @@ -105,43 +105,62 @@ static int | |||
| 105 | nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, | 105 | nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, |
| 106 | struct tc_cls_flower_offload *flow) | 106 | struct tc_cls_flower_offload *flow) |
| 107 | { | 107 | { |
| 108 | struct flow_dissector_key_control *mask_enc_ctl; | 108 | struct flow_dissector_key_basic *mask_basic = NULL; |
| 109 | struct flow_dissector_key_basic *mask_basic; | 109 | struct flow_dissector_key_basic *key_basic = NULL; |
| 110 | struct flow_dissector_key_basic *key_basic; | 110 | struct flow_dissector_key_ip *mask_ip = NULL; |
| 111 | u32 key_layer_two; | 111 | u32 key_layer_two; |
| 112 | u8 key_layer; | 112 | u8 key_layer; |
| 113 | int key_size; | 113 | int key_size; |
| 114 | 114 | ||
| 115 | mask_enc_ctl = skb_flow_dissector_target(flow->dissector, | 115 | if (dissector_uses_key(flow->dissector, |
| 116 | FLOW_DISSECTOR_KEY_ENC_CONTROL, | 116 | FLOW_DISSECTOR_KEY_ENC_CONTROL)) { |
| 117 | flow->mask); | 117 | struct flow_dissector_key_control *mask_enc_ctl = |
| 118 | skb_flow_dissector_target(flow->dissector, | ||
| 119 | FLOW_DISSECTOR_KEY_ENC_CONTROL, | ||
| 120 | flow->mask); | ||
| 121 | /* We are expecting a tunnel. For now we ignore offloading. */ | ||
| 122 | if (mask_enc_ctl->addr_type) | ||
| 123 | return -EOPNOTSUPP; | ||
| 124 | } | ||
| 118 | 125 | ||
| 119 | mask_basic = skb_flow_dissector_target(flow->dissector, | 126 | if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { |
| 120 | FLOW_DISSECTOR_KEY_BASIC, | 127 | mask_basic = skb_flow_dissector_target(flow->dissector, |
| 121 | flow->mask); | 128 | FLOW_DISSECTOR_KEY_BASIC, |
| 129 | flow->mask); | ||
| 130 | |||
| 131 | key_basic = skb_flow_dissector_target(flow->dissector, | ||
| 132 | FLOW_DISSECTOR_KEY_BASIC, | ||
| 133 | flow->key); | ||
| 134 | } | ||
| 135 | |||
| 136 | if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) | ||
| 137 | mask_ip = skb_flow_dissector_target(flow->dissector, | ||
| 138 | FLOW_DISSECTOR_KEY_IP, | ||
| 139 | flow->mask); | ||
| 122 | 140 | ||
| 123 | key_basic = skb_flow_dissector_target(flow->dissector, | ||
| 124 | FLOW_DISSECTOR_KEY_BASIC, | ||
| 125 | flow->key); | ||
| 126 | key_layer_two = 0; | 141 | key_layer_two = 0; |
| 127 | key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC; | 142 | key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC; |
| 128 | key_size = sizeof(struct nfp_flower_meta_one) + | 143 | key_size = sizeof(struct nfp_flower_meta_one) + |
| 129 | sizeof(struct nfp_flower_in_port) + | 144 | sizeof(struct nfp_flower_in_port) + |
| 130 | sizeof(struct nfp_flower_mac_mpls); | 145 | sizeof(struct nfp_flower_mac_mpls); |
| 131 | 146 | ||
| 132 | /* We are expecting a tunnel. For now we ignore offloading. */ | 147 | if (mask_basic && mask_basic->n_proto) { |
| 133 | if (mask_enc_ctl->addr_type) | ||
| 134 | return -EOPNOTSUPP; | ||
| 135 | |||
| 136 | if (mask_basic->n_proto) { | ||
| 137 | /* Ethernet type is present in the key. */ | 148 | /* Ethernet type is present in the key. */ |
| 138 | switch (key_basic->n_proto) { | 149 | switch (key_basic->n_proto) { |
| 139 | case cpu_to_be16(ETH_P_IP): | 150 | case cpu_to_be16(ETH_P_IP): |
| 151 | if (mask_ip && mask_ip->tos) | ||
| 152 | return -EOPNOTSUPP; | ||
| 153 | if (mask_ip && mask_ip->ttl) | ||
| 154 | return -EOPNOTSUPP; | ||
| 140 | key_layer |= NFP_FLOWER_LAYER_IPV4; | 155 | key_layer |= NFP_FLOWER_LAYER_IPV4; |
| 141 | key_size += sizeof(struct nfp_flower_ipv4); | 156 | key_size += sizeof(struct nfp_flower_ipv4); |
| 142 | break; | 157 | break; |
| 143 | 158 | ||
| 144 | case cpu_to_be16(ETH_P_IPV6): | 159 | case cpu_to_be16(ETH_P_IPV6): |
| 160 | if (mask_ip && mask_ip->tos) | ||
| 161 | return -EOPNOTSUPP; | ||
| 162 | if (mask_ip && mask_ip->ttl) | ||
| 163 | return -EOPNOTSUPP; | ||
| 145 | key_layer |= NFP_FLOWER_LAYER_IPV6; | 164 | key_layer |= NFP_FLOWER_LAYER_IPV6; |
| 146 | key_size += sizeof(struct nfp_flower_ipv6); | 165 | key_size += sizeof(struct nfp_flower_ipv6); |
| 147 | break; | 166 | break; |
| @@ -152,6 +171,11 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, | |||
| 152 | case cpu_to_be16(ETH_P_ARP): | 171 | case cpu_to_be16(ETH_P_ARP): |
| 153 | return -EOPNOTSUPP; | 172 | return -EOPNOTSUPP; |
| 154 | 173 | ||
| 174 | /* Currently we do not offload MPLS. */ | ||
| 175 | case cpu_to_be16(ETH_P_MPLS_UC): | ||
| 176 | case cpu_to_be16(ETH_P_MPLS_MC): | ||
| 177 | return -EOPNOTSUPP; | ||
| 178 | |||
| 155 | /* Will be included in layer 2. */ | 179 | /* Will be included in layer 2. */ |
| 156 | case cpu_to_be16(ETH_P_8021Q): | 180 | case cpu_to_be16(ETH_P_8021Q): |
| 157 | break; | 181 | break; |
| @@ -166,7 +190,7 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, | |||
| 166 | } | 190 | } |
| 167 | } | 191 | } |
| 168 | 192 | ||
| 169 | if (mask_basic->ip_proto) { | 193 | if (mask_basic && mask_basic->ip_proto) { |
| 170 | /* Ethernet type is present in the key. */ | 194 | /* Ethernet type is present in the key. */ |
| 171 | switch (key_basic->ip_proto) { | 195 | switch (key_basic->ip_proto) { |
| 172 | case IPPROTO_TCP: | 196 | case IPPROTO_TCP: |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index d67969d3e484..3f199db2002e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c | |||
| @@ -98,21 +98,20 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) | |||
| 98 | struct nfp_pf *pf = pci_get_drvdata(pdev); | 98 | struct nfp_pf *pf = pci_get_drvdata(pdev); |
| 99 | int err; | 99 | int err; |
| 100 | 100 | ||
| 101 | mutex_lock(&pf->lock); | ||
| 102 | |||
| 103 | if (num_vfs > pf->limit_vfs) { | 101 | if (num_vfs > pf->limit_vfs) { |
| 104 | nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n", | 102 | nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n", |
| 105 | pf->limit_vfs); | 103 | pf->limit_vfs); |
| 106 | err = -EINVAL; | 104 | return -EINVAL; |
| 107 | goto err_unlock; | ||
| 108 | } | 105 | } |
| 109 | 106 | ||
| 110 | err = pci_enable_sriov(pdev, num_vfs); | 107 | err = pci_enable_sriov(pdev, num_vfs); |
| 111 | if (err) { | 108 | if (err) { |
| 112 | dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err); | 109 | dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err); |
| 113 | goto err_unlock; | 110 | return err; |
| 114 | } | 111 | } |
| 115 | 112 | ||
| 113 | mutex_lock(&pf->lock); | ||
| 114 | |||
| 116 | err = nfp_app_sriov_enable(pf->app, num_vfs); | 115 | err = nfp_app_sriov_enable(pf->app, num_vfs); |
| 117 | if (err) { | 116 | if (err) { |
| 118 | dev_warn(&pdev->dev, | 117 | dev_warn(&pdev->dev, |
| @@ -129,9 +128,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) | |||
| 129 | return num_vfs; | 128 | return num_vfs; |
| 130 | 129 | ||
| 131 | err_sriov_disable: | 130 | err_sriov_disable: |
| 132 | pci_disable_sriov(pdev); | ||
| 133 | err_unlock: | ||
| 134 | mutex_unlock(&pf->lock); | 131 | mutex_unlock(&pf->lock); |
| 132 | pci_disable_sriov(pdev); | ||
| 135 | return err; | 133 | return err; |
| 136 | #endif | 134 | #endif |
| 137 | return 0; | 135 | return 0; |
| @@ -158,10 +156,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev) | |||
| 158 | 156 | ||
| 159 | pf->num_vfs = 0; | 157 | pf->num_vfs = 0; |
| 160 | 158 | ||
| 159 | mutex_unlock(&pf->lock); | ||
| 160 | |||
| 161 | pci_disable_sriov(pdev); | 161 | pci_disable_sriov(pdev); |
| 162 | dev_dbg(&pdev->dev, "Removed VFs.\n"); | 162 | dev_dbg(&pdev->dev, "Removed VFs.\n"); |
| 163 | |||
| 164 | mutex_unlock(&pf->lock); | ||
| 165 | #endif | 163 | #endif |
| 166 | return 0; | 164 | return 0; |
| 167 | } | 165 | } |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 18750ff0ede6..66a09e490cf5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
| @@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, | |||
| 513 | tx_ring->idx = idx; | 513 | tx_ring->idx = idx; |
| 514 | tx_ring->r_vec = r_vec; | 514 | tx_ring->r_vec = r_vec; |
| 515 | tx_ring->is_xdp = is_xdp; | 515 | tx_ring->is_xdp = is_xdp; |
| 516 | u64_stats_init(&tx_ring->r_vec->tx_sync); | ||
| 516 | 517 | ||
| 517 | tx_ring->qcidx = tx_ring->idx * nn->stride_tx; | 518 | tx_ring->qcidx = tx_ring->idx * nn->stride_tx; |
| 518 | tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); | 519 | tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); |
| @@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, | |||
| 532 | 533 | ||
| 533 | rx_ring->idx = idx; | 534 | rx_ring->idx = idx; |
| 534 | rx_ring->r_vec = r_vec; | 535 | rx_ring->r_vec = r_vec; |
| 536 | u64_stats_init(&rx_ring->r_vec->rx_sync); | ||
| 535 | 537 | ||
| 536 | rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; | 538 | rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; |
| 537 | rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); | 539 | rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); |
| @@ -893,6 +895,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) | |||
| 893 | 895 | ||
| 894 | netdev_tx_sent_queue(nd_q, txbuf->real_len); | 896 | netdev_tx_sent_queue(nd_q, txbuf->real_len); |
| 895 | 897 | ||
| 898 | skb_tx_timestamp(skb); | ||
| 899 | |||
| 896 | tx_ring->wr_p += nr_frags + 1; | 900 | tx_ring->wr_p += nr_frags + 1; |
| 897 | if (nfp_net_tx_ring_should_stop(tx_ring)) | 901 | if (nfp_net_tx_ring_should_stop(tx_ring)) |
| 898 | nfp_net_tx_ring_stop(nd_q, tx_ring); | 902 | nfp_net_tx_ring_stop(nd_q, tx_ring); |
| @@ -901,13 +905,10 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) | |||
| 901 | if (!skb->xmit_more || netif_xmit_stopped(nd_q)) | 905 | if (!skb->xmit_more || netif_xmit_stopped(nd_q)) |
| 902 | nfp_net_tx_xmit_more_flush(tx_ring); | 906 | nfp_net_tx_xmit_more_flush(tx_ring); |
| 903 | 907 | ||
| 904 | skb_tx_timestamp(skb); | ||
| 905 | |||
| 906 | return NETDEV_TX_OK; | 908 | return NETDEV_TX_OK; |
| 907 | 909 | ||
| 908 | err_unmap: | 910 | err_unmap: |
| 909 | --f; | 911 | while (--f >= 0) { |
| 910 | while (f >= 0) { | ||
| 911 | frag = &skb_shinfo(skb)->frags[f]; | 912 | frag = &skb_shinfo(skb)->frags[f]; |
| 912 | dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, | 913 | dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, |
| 913 | skb_frag_size(frag), DMA_TO_DEVICE); | 914 | skb_frag_size(frag), DMA_TO_DEVICE); |
| @@ -1750,6 +1751,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) | |||
| 1750 | continue; | 1751 | continue; |
| 1751 | } | 1752 | } |
| 1752 | 1753 | ||
| 1754 | nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); | ||
| 1755 | |||
| 1756 | nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); | ||
| 1757 | |||
| 1753 | if (likely(!meta.portid)) { | 1758 | if (likely(!meta.portid)) { |
| 1754 | netdev = dp->netdev; | 1759 | netdev = dp->netdev; |
| 1755 | } else { | 1760 | } else { |
| @@ -1758,16 +1763,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) | |||
| 1758 | nn = netdev_priv(dp->netdev); | 1763 | nn = netdev_priv(dp->netdev); |
| 1759 | netdev = nfp_app_repr_get(nn->app, meta.portid); | 1764 | netdev = nfp_app_repr_get(nn->app, meta.portid); |
| 1760 | if (unlikely(!netdev)) { | 1765 | if (unlikely(!netdev)) { |
| 1761 | nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb); | 1766 | nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb); |
| 1762 | continue; | 1767 | continue; |
| 1763 | } | 1768 | } |
| 1764 | nfp_repr_inc_rx_stats(netdev, pkt_len); | 1769 | nfp_repr_inc_rx_stats(netdev, pkt_len); |
| 1765 | } | 1770 | } |
| 1766 | 1771 | ||
| 1767 | nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr); | ||
| 1768 | |||
| 1769 | nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr); | ||
| 1770 | |||
| 1771 | skb_reserve(skb, pkt_off); | 1772 | skb_reserve(skb, pkt_off); |
| 1772 | skb_put(skb, pkt_len); | 1773 | skb_put(skb, pkt_len); |
| 1773 | 1774 | ||
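The err_unmap change above is a small but classic cleanup-loop fix: folding the decrement into the loop condition guarantees the index moves backwards on every iteration regardless of what the body does, and the walk stops after undoing entry 0 (the index must be a signed int for the >= 0 test to terminate). A stand-alone sketch of the idiom, with a hypothetical unmap() helper:

	/* Undo entries [0, f) in reverse order after a failure at index f. */
	while (--f >= 0)
		unmap(&bufs[f]);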
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 5797dbf2b507..34b985384d26 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c | |||
| @@ -456,13 +456,9 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf) | |||
| 456 | { | 456 | { |
| 457 | int err; | 457 | int err; |
| 458 | 458 | ||
| 459 | err = nfp_net_pf_app_start_ctrl(pf); | ||
| 460 | if (err) | ||
| 461 | return err; | ||
| 462 | |||
| 463 | err = nfp_app_start(pf->app, pf->ctrl_vnic); | 459 | err = nfp_app_start(pf->app, pf->ctrl_vnic); |
| 464 | if (err) | 460 | if (err) |
| 465 | goto err_ctrl_stop; | 461 | return err; |
| 466 | 462 | ||
| 467 | if (pf->num_vfs) { | 463 | if (pf->num_vfs) { |
| 468 | err = nfp_app_sriov_enable(pf->app, pf->num_vfs); | 464 | err = nfp_app_sriov_enable(pf->app, pf->num_vfs); |
| @@ -474,8 +470,6 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf) | |||
| 474 | 470 | ||
| 475 | err_app_stop: | 471 | err_app_stop: |
| 476 | nfp_app_stop(pf->app); | 472 | nfp_app_stop(pf->app); |
| 477 | err_ctrl_stop: | ||
| 478 | nfp_net_pf_app_stop_ctrl(pf); | ||
| 479 | return err; | 473 | return err; |
| 480 | } | 474 | } |
| 481 | 475 | ||
| @@ -484,7 +478,6 @@ static void nfp_net_pf_app_stop(struct nfp_pf *pf) | |||
| 484 | if (pf->num_vfs) | 478 | if (pf->num_vfs) |
| 485 | nfp_app_sriov_disable(pf->app); | 479 | nfp_app_sriov_disable(pf->app); |
| 486 | nfp_app_stop(pf->app); | 480 | nfp_app_stop(pf->app); |
| 487 | nfp_net_pf_app_stop_ctrl(pf); | ||
| 488 | } | 481 | } |
| 489 | 482 | ||
| 490 | static void nfp_net_pci_unmap_mem(struct nfp_pf *pf) | 483 | static void nfp_net_pci_unmap_mem(struct nfp_pf *pf) |
| @@ -559,7 +552,7 @@ err_unmap_ctrl: | |||
| 559 | 552 | ||
| 560 | static void nfp_net_pci_remove_finish(struct nfp_pf *pf) | 553 | static void nfp_net_pci_remove_finish(struct nfp_pf *pf) |
| 561 | { | 554 | { |
| 562 | nfp_net_pf_app_stop(pf); | 555 | nfp_net_pf_app_stop_ctrl(pf); |
| 563 | /* stop app first, to avoid double free of ctrl vNIC's ddir */ | 556 | /* stop app first, to avoid double free of ctrl vNIC's ddir */ |
| 564 | nfp_net_debugfs_dir_clean(&pf->ddir); | 557 | nfp_net_debugfs_dir_clean(&pf->ddir); |
| 565 | 558 | ||
| @@ -690,6 +683,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf) | |||
| 690 | { | 683 | { |
| 691 | struct nfp_net_fw_version fw_ver; | 684 | struct nfp_net_fw_version fw_ver; |
| 692 | u8 __iomem *ctrl_bar, *qc_bar; | 685 | u8 __iomem *ctrl_bar, *qc_bar; |
| 686 | struct nfp_net *nn; | ||
| 693 | int stride; | 687 | int stride; |
| 694 | int err; | 688 | int err; |
| 695 | 689 | ||
| @@ -766,7 +760,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf) | |||
| 766 | if (err) | 760 | if (err) |
| 767 | goto err_free_vnics; | 761 | goto err_free_vnics; |
| 768 | 762 | ||
| 769 | err = nfp_net_pf_app_start(pf); | 763 | err = nfp_net_pf_app_start_ctrl(pf); |
| 770 | if (err) | 764 | if (err) |
| 771 | goto err_free_irqs; | 765 | goto err_free_irqs; |
| 772 | 766 | ||
| @@ -774,12 +768,20 @@ int nfp_net_pci_probe(struct nfp_pf *pf) | |||
| 774 | if (err) | 768 | if (err) |
| 775 | goto err_stop_app; | 769 | goto err_stop_app; |
| 776 | 770 | ||
| 771 | err = nfp_net_pf_app_start(pf); | ||
| 772 | if (err) | ||
| 773 | goto err_clean_vnics; | ||
| 774 | |||
| 777 | mutex_unlock(&pf->lock); | 775 | mutex_unlock(&pf->lock); |
| 778 | 776 | ||
| 779 | return 0; | 777 | return 0; |
| 780 | 778 | ||
| 779 | err_clean_vnics: | ||
| 780 | list_for_each_entry(nn, &pf->vnics, vnic_list) | ||
| 781 | if (nfp_net_is_data_vnic(nn)) | ||
| 782 | nfp_net_pf_clean_vnic(pf, nn); | ||
| 781 | err_stop_app: | 783 | err_stop_app: |
| 782 | nfp_net_pf_app_stop(pf); | 784 | nfp_net_pf_app_stop_ctrl(pf); |
| 783 | err_free_irqs: | 785 | err_free_irqs: |
| 784 | nfp_net_pf_free_irqs(pf); | 786 | nfp_net_pf_free_irqs(pf); |
| 785 | err_free_vnics: | 787 | err_free_vnics: |
| @@ -803,6 +805,8 @@ void nfp_net_pci_remove(struct nfp_pf *pf) | |||
| 803 | if (list_empty(&pf->vnics)) | 805 | if (list_empty(&pf->vnics)) |
| 804 | goto out; | 806 | goto out; |
| 805 | 807 | ||
| 808 | nfp_net_pf_app_stop(pf); | ||
| 809 | |||
| 806 | list_for_each_entry(nn, &pf->vnics, vnic_list) | 810 | list_for_each_entry(nn, &pf->vnics, vnic_list) |
| 807 | if (nfp_net_is_data_vnic(nn)) | 811 | if (nfp_net_is_data_vnic(nn)) |
| 808 | nfp_net_pf_clean_vnic(pf, nn); | 812 | nfp_net_pf_clean_vnic(pf, nn); |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index 66ff15d08bad..0a66389c06c2 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | |||
| @@ -2311,7 +2311,7 @@ netxen_md_rdqueue(struct netxen_adapter *adapter, | |||
| 2311 | loop_cnt++) { | 2311 | loop_cnt++) { |
| 2312 | NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id); | 2312 | NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id); |
| 2313 | read_addr = queueEntry->read_addr; | 2313 | read_addr = queueEntry->read_addr; |
| 2314 | for (k = 0; k < read_cnt; k--) { | 2314 | for (k = 0; k < read_cnt; k++) { |
| 2315 | NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, | 2315 | NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, |
| 2316 | &read_value); | 2316 | &read_value); |
| 2317 | *data_buff++ = read_value; | 2317 | *data_buff++ = read_value; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 9da91045d167..3eb241657368 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c | |||
| @@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
| 253 | size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); | 253 | size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); |
| 254 | p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); | 254 | p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); |
| 255 | p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); | 255 | p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); |
| 256 | if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) | 256 | if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow) |
| 257 | goto err; | 257 | goto err; |
| 258 | 258 | ||
| 259 | return 0; | 259 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c index 28ea0af89aef..e3223f2fe2ff 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | |||
| @@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header( | |||
| 724 | seg_hdr->cookie = MPI_COREDUMP_COOKIE; | 724 | seg_hdr->cookie = MPI_COREDUMP_COOKIE; |
| 725 | seg_hdr->segNum = seg_number; | 725 | seg_hdr->segNum = seg_number; |
| 726 | seg_hdr->segSize = seg_size; | 726 | seg_hdr->segSize = seg_size; |
| 727 | memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); | 727 | strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1); |
| 728 | } | 728 | } |
| 729 | 729 | ||
| 730 | /* | 730 | /* |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index bd07a15d3b7c..e03fcf914690 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -6863,8 +6863,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, | |||
| 6863 | rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, | 6863 | rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, |
| 6864 | tp->TxDescArray + entry); | 6864 | tp->TxDescArray + entry); |
| 6865 | if (skb) { | 6865 | if (skb) { |
| 6866 | tp->dev->stats.tx_dropped++; | 6866 | dev_consume_skb_any(skb); |
| 6867 | dev_kfree_skb_any(skb); | ||
| 6868 | tx_skb->skb = NULL; | 6867 | tx_skb->skb = NULL; |
| 6869 | } | 6868 | } |
| 6870 | } | 6869 | } |
| @@ -7319,7 +7318,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) | |||
| 7319 | tp->tx_stats.packets++; | 7318 | tp->tx_stats.packets++; |
| 7320 | tp->tx_stats.bytes += tx_skb->skb->len; | 7319 | tp->tx_stats.bytes += tx_skb->skb->len; |
| 7321 | u64_stats_update_end(&tp->tx_stats.syncp); | 7320 | u64_stats_update_end(&tp->tx_stats.syncp); |
| 7322 | dev_kfree_skb_any(tx_skb->skb); | 7321 | dev_consume_skb_any(tx_skb->skb); |
| 7323 | tx_skb->skb = NULL; | 7322 | tx_skb->skb = NULL; |
| 7324 | } | 7323 | } |
| 7325 | dirty_tx++; | 7324 | dirty_tx++; |
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index 73427e29df2a..fbd00cb0cb7d 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | |||
| @@ -47,6 +47,8 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev, | |||
| 47 | plat->mdio_bus_data = devm_kzalloc(&pdev->dev, | 47 | plat->mdio_bus_data = devm_kzalloc(&pdev->dev, |
| 48 | sizeof(*plat->mdio_bus_data), | 48 | sizeof(*plat->mdio_bus_data), |
| 49 | GFP_KERNEL); | 49 | GFP_KERNEL); |
| 50 | if (!plat->mdio_bus_data) | ||
| 51 | return -ENOMEM; | ||
| 50 | 52 | ||
| 51 | dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); | 53 | dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); |
| 52 | if (!dma_cfg) | 54 | if (!dma_cfg) |
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index c905971c5f3a..990a63d7fcb7 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c | |||
| @@ -938,7 +938,6 @@ enum efx_stats_action { | |||
| 938 | static int efx_mcdi_mac_stats(struct efx_nic *efx, | 938 | static int efx_mcdi_mac_stats(struct efx_nic *efx, |
| 939 | enum efx_stats_action action, int clear) | 939 | enum efx_stats_action action, int clear) |
| 940 | { | 940 | { |
| 941 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
| 942 | MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); | 941 | MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); |
| 943 | int rc; | 942 | int rc; |
| 944 | int change = action == EFX_STATS_PULL ? 0 : 1; | 943 | int change = action == EFX_STATS_PULL ? 0 : 1; |
| @@ -960,7 +959,12 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx, | |||
| 960 | MAC_STATS_IN_PERIODIC_NOEVENT, 1, | 959 | MAC_STATS_IN_PERIODIC_NOEVENT, 1, |
| 961 | MAC_STATS_IN_PERIOD_MS, period); | 960 | MAC_STATS_IN_PERIOD_MS, period); |
| 962 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); | 961 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); |
| 963 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id); | 962 | |
| 963 | if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) { | ||
| 964 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | ||
| 965 | |||
| 966 | MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id); | ||
| 967 | } | ||
| 964 | 968 | ||
| 965 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), | 969 | rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), |
| 966 | NULL, 0, NULL); | 970 | NULL, 0, NULL); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 17d4bbaeb65c..6e359572b9f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | |||
| @@ -269,7 +269,10 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) | |||
| 269 | ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); | 269 | ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); |
| 270 | ctrl |= val << reg_shift; | 270 | ctrl |= val << reg_shift; |
| 271 | 271 | ||
| 272 | if (dwmac->f2h_ptp_ref_clk) { | 272 | if (dwmac->f2h_ptp_ref_clk || |
| 273 | phymode == PHY_INTERFACE_MODE_MII || | ||
| 274 | phymode == PHY_INTERFACE_MODE_GMII || | ||
| 275 | phymode == PHY_INTERFACE_MODE_SGMII) { | ||
| 273 | ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); | 276 | ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); |
| 274 | regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, | 277 | regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, |
| 275 | &module); | 278 | &module); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index fffd6d5fc907..39c2122a4f26 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | |||
| @@ -979,14 +979,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) | |||
| 979 | } | 979 | } |
| 980 | 980 | ||
| 981 | static const struct of_device_id sun8i_dwmac_match[] = { | 981 | static const struct of_device_id sun8i_dwmac_match[] = { |
| 982 | { .compatible = "allwinner,sun8i-h3-emac", | ||
| 983 | .data = &emac_variant_h3 }, | ||
| 984 | { .compatible = "allwinner,sun8i-v3s-emac", | ||
| 985 | .data = &emac_variant_v3s }, | ||
| 986 | { .compatible = "allwinner,sun8i-a83t-emac", | ||
| 987 | .data = &emac_variant_a83t }, | ||
| 988 | { .compatible = "allwinner,sun50i-a64-emac", | ||
| 989 | .data = &emac_variant_a64 }, | ||
| 990 | { } | 982 | { } |
| 991 | }; | 983 | }; |
| 992 | MODULE_DEVICE_TABLE(of, sun8i_dwmac_match); | 984 | MODULE_DEVICE_TABLE(of, sun8i_dwmac_match); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 22cf6353ba04..7ecf549c7f1c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | |||
| @@ -205,7 +205,7 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) | |||
| 205 | { | 205 | { |
| 206 | int i; | 206 | int i; |
| 207 | 207 | ||
| 208 | for (i = 0; i < 23; i++) | 208 | for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++) |
| 209 | if ((i < 12) || (i > 17)) | 209 | if ((i < 12) || (i > 17)) |
| 210 | reg_space[DMA_BUS_MODE / 4 + i] = | 210 | reg_space[DMA_BUS_MODE / 4 + i] = |
| 211 | readl(ioaddr + DMA_BUS_MODE + i * 4); | 211 | readl(ioaddr + DMA_BUS_MODE + i * 4); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index eef2f222ce9a..6502b9aa3bf5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | |||
| @@ -70,7 +70,7 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space) | |||
| 70 | { | 70 | { |
| 71 | int i; | 71 | int i; |
| 72 | 72 | ||
| 73 | for (i = 0; i < 9; i++) | 73 | for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++) |
| 74 | reg_space[DMA_BUS_MODE / 4 + i] = | 74 | reg_space[DMA_BUS_MODE / 4 + i] = |
| 75 | readl(ioaddr + DMA_BUS_MODE + i * 4); | 75 | readl(ioaddr + DMA_BUS_MODE + i * 4); |
| 76 | 76 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 9091df86723a..adc54006f884 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | |||
| @@ -136,6 +136,9 @@ | |||
| 136 | #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ | 136 | #define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ |
| 137 | #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ | 137 | #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ |
| 138 | 138 | ||
| 139 | #define NUM_DWMAC100_DMA_REGS 9 | ||
| 140 | #define NUM_DWMAC1000_DMA_REGS 23 | ||
| 141 | |||
| 139 | void dwmac_enable_dma_transmission(void __iomem *ioaddr); | 142 | void dwmac_enable_dma_transmission(void __iomem *ioaddr); |
| 140 | void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan); | 143 | void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan); |
| 141 | void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan); | 144 | void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index babb39c646ff..af30b4857c3b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | |||
| @@ -33,6 +33,8 @@ | |||
| 33 | #define MAC100_ETHTOOL_NAME "st_mac100" | 33 | #define MAC100_ETHTOOL_NAME "st_mac100" |
| 34 | #define GMAC_ETHTOOL_NAME "st_gmac" | 34 | #define GMAC_ETHTOOL_NAME "st_gmac" |
| 35 | 35 | ||
| 36 | #define ETHTOOL_DMA_OFFSET 55 | ||
| 37 | |||
| 36 | struct stmmac_stats { | 38 | struct stmmac_stats { |
| 37 | char stat_string[ETH_GSTRING_LEN]; | 39 | char stat_string[ETH_GSTRING_LEN]; |
| 38 | int sizeof_stat; | 40 | int sizeof_stat; |
| @@ -442,6 +444,9 @@ static void stmmac_ethtool_gregs(struct net_device *dev, | |||
| 442 | 444 | ||
| 443 | priv->hw->mac->dump_regs(priv->hw, reg_space); | 445 | priv->hw->mac->dump_regs(priv->hw, reg_space); |
| 444 | priv->hw->dma->dump_regs(priv->ioaddr, reg_space); | 446 | priv->hw->dma->dump_regs(priv->ioaddr, reg_space); |
| 447 | /* Copy DMA registers to where ethtool expects them */ | ||
| 448 | memcpy(®_space[ETHTOOL_DMA_OFFSET], ®_space[DMA_BUS_MODE / 4], | ||
| 449 | NUM_DWMAC1000_DMA_REGS * 4); | ||
| 445 | } | 450 | } |
| 446 | 451 | ||
| 447 | static void | 452 | static void |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index db157a47000c..72ec711fcba2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | |||
| @@ -204,6 +204,7 @@ int stmmac_mdio_register(struct net_device *ndev) | |||
| 204 | struct stmmac_priv *priv = netdev_priv(ndev); | 204 | struct stmmac_priv *priv = netdev_priv(ndev); |
| 205 | struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; | 205 | struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; |
| 206 | struct device_node *mdio_node = priv->plat->mdio_node; | 206 | struct device_node *mdio_node = priv->plat->mdio_node; |
| 207 | struct device *dev = ndev->dev.parent; | ||
| 207 | int addr, found; | 208 | int addr, found; |
| 208 | 209 | ||
| 209 | if (!mdio_bus_data) | 210 | if (!mdio_bus_data) |
| @@ -237,7 +238,7 @@ int stmmac_mdio_register(struct net_device *ndev) | |||
| 237 | else | 238 | else |
| 238 | err = mdiobus_register(new_bus); | 239 | err = mdiobus_register(new_bus); |
| 239 | if (err != 0) { | 240 | if (err != 0) { |
| 240 | netdev_err(ndev, "Cannot register the MDIO bus\n"); | 241 | dev_err(dev, "Cannot register the MDIO bus\n"); |
| 241 | goto bus_register_fail; | 242 | goto bus_register_fail; |
| 242 | } | 243 | } |
| 243 | 244 | ||
| @@ -285,14 +286,12 @@ int stmmac_mdio_register(struct net_device *ndev) | |||
| 285 | irq_str = irq_num; | 286 | irq_str = irq_num; |
| 286 | break; | 287 | break; |
| 287 | } | 288 | } |
| 288 | netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", | 289 | phy_attached_info(phydev); |
| 289 | phydev->phy_id, addr, irq_str, phydev_name(phydev), | ||
| 290 | act ? " active" : ""); | ||
| 291 | found = 1; | 290 | found = 1; |
| 292 | } | 291 | } |
| 293 | 292 | ||
| 294 | if (!found && !mdio_node) { | 293 | if (!found && !mdio_node) { |
| 295 | netdev_warn(ndev, "No PHY found\n"); | 294 | dev_warn(dev, "No PHY found\n"); |
| 296 | mdiobus_unregister(new_bus); | 295 | mdiobus_unregister(new_bus); |
| 297 | mdiobus_free(new_bus); | 296 | mdiobus_free(new_bus); |
| 298 | return -ENODEV; | 297 | return -ENODEV; |
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h index 3af540adb3c5..fca1bca7f69d 100644 --- a/drivers/net/ethernet/sun/sunhme.h +++ b/drivers/net/ethernet/sun/sunhme.h | |||
| @@ -13,9 +13,9 @@ | |||
| 13 | /* Happy Meal global registers. */ | 13 | /* Happy Meal global registers. */ |
| 14 | #define GREG_SWRESET 0x000UL /* Software Reset */ | 14 | #define GREG_SWRESET 0x000UL /* Software Reset */ |
| 15 | #define GREG_CFG 0x004UL /* Config Register */ | 15 | #define GREG_CFG 0x004UL /* Config Register */ |
| 16 | #define GREG_STAT 0x108UL /* Status */ | 16 | #define GREG_STAT 0x100UL /* Status */ |
| 17 | #define GREG_IMASK 0x10cUL /* Interrupt Mask */ | 17 | #define GREG_IMASK 0x104UL /* Interrupt Mask */ |
| 18 | #define GREG_REG_SIZE 0x110UL | 18 | #define GREG_REG_SIZE 0x108UL |
| 19 | 19 | ||
| 20 | /* Global reset register. */ | 20 | /* Global reset register. */ |
| 21 | #define GREG_RESET_ETX 0x01 | 21 | #define GREG_RESET_ETX 0x01 |
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c index 56ba411421f0..38d1cc557c11 100644 --- a/drivers/net/ethernet/ti/cpsw-common.c +++ b/drivers/net/ethernet/ti/cpsw-common.c | |||
| @@ -96,7 +96,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) | |||
| 96 | if (of_machine_is_compatible("ti,dra7")) | 96 | if (of_machine_is_compatible("ti,dra7")) |
| 97 | return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr); | 97 | return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr); |
| 98 | 98 | ||
| 99 | dev_err(dev, "incompatible machine/device type for reading mac address\n"); | 99 | dev_info(dev, "incompatible machine/device type for reading mac address\n"); |
| 100 | return -ENOENT; | 100 | return -ENOENT; |
| 101 | } | 101 | } |
| 102 | EXPORT_SYMBOL_GPL(ti_cm_get_macid); | 102 | EXPORT_SYMBOL_GPL(ti_cm_get_macid); |
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 32279d21c836..c2121d214f08 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c | |||
| @@ -31,9 +31,18 @@ | |||
| 31 | 31 | ||
| 32 | #include "cpts.h" | 32 | #include "cpts.h" |
| 33 | 33 | ||
| 34 | #define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */ | ||
| 35 | |||
| 36 | struct cpts_skb_cb_data { | ||
| 37 | unsigned long tmo; | ||
| 38 | }; | ||
| 39 | |||
| 34 | #define cpts_read32(c, r) readl_relaxed(&c->reg->r) | 40 | #define cpts_read32(c, r) readl_relaxed(&c->reg->r) |
| 35 | #define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r) | 41 | #define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r) |
| 36 | 42 | ||
| 43 | static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, | ||
| 44 | u16 ts_seqid, u8 ts_msgtype); | ||
| 45 | |||
| 37 | static int event_expired(struct cpts_event *event) | 46 | static int event_expired(struct cpts_event *event) |
| 38 | { | 47 | { |
| 39 | return time_after(jiffies, event->tmo); | 48 | return time_after(jiffies, event->tmo); |
| @@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts) | |||
| 77 | return removed ? 0 : -1; | 86 | return removed ? 0 : -1; |
| 78 | } | 87 | } |
| 79 | 88 | ||
| 89 | static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event) | ||
| 90 | { | ||
| 91 | struct sk_buff *skb, *tmp; | ||
| 92 | u16 seqid; | ||
| 93 | u8 mtype; | ||
| 94 | bool found = false; | ||
| 95 | |||
| 96 | mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK; | ||
| 97 | seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK; | ||
| 98 | |||
| 99 | /* no need to grab txq.lock as access is always done under cpts->lock */ | ||
| 100 | skb_queue_walk_safe(&cpts->txq, skb, tmp) { | ||
| 101 | struct skb_shared_hwtstamps ssh; | ||
| 102 | unsigned int class = ptp_classify_raw(skb); | ||
| 103 | struct cpts_skb_cb_data *skb_cb = | ||
| 104 | (struct cpts_skb_cb_data *)skb->cb; | ||
| 105 | |||
| 106 | if (cpts_match(skb, class, seqid, mtype)) { | ||
| 107 | u64 ns = timecounter_cyc2time(&cpts->tc, event->low); | ||
| 108 | |||
| 109 | memset(&ssh, 0, sizeof(ssh)); | ||
| 110 | ssh.hwtstamp = ns_to_ktime(ns); | ||
| 111 | skb_tstamp_tx(skb, &ssh); | ||
| 112 | found = true; | ||
| 113 | __skb_unlink(skb, &cpts->txq); | ||
| 114 | dev_consume_skb_any(skb); | ||
| 115 | dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n", | ||
| 116 | mtype, seqid); | ||
| 117 | } else if (time_after(jiffies, skb_cb->tmo)) { | ||
| 118 | /* timeout any expired skbs over 1s */ | ||
| 119 | dev_dbg(cpts->dev, | ||
| 120 | "expiring tx timestamp mtype %u seqid %04x\n", | ||
| 121 | mtype, seqid); | ||
| 122 | __skb_unlink(skb, &cpts->txq); | ||
| 123 | dev_consume_skb_any(skb); | ||
| 124 | } | ||
| 125 | } | ||
| 126 | |||
| 127 | return found; | ||
| 128 | } | ||
| 129 | |||
| 80 | /* | 130 | /* |
| 81 | * Returns zero if matching event type was found. | 131 | * Returns zero if matching event type was found. |
| 82 | */ | 132 | */ |
| @@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match) | |||
| 101 | event->low = lo; | 151 | event->low = lo; |
| 102 | type = event_type(event); | 152 | type = event_type(event); |
| 103 | switch (type) { | 153 | switch (type) { |
| 154 | case CPTS_EV_TX: | ||
| 155 | if (cpts_match_tx_ts(cpts, event)) { | ||
| 156 | /* if the new event matches an existing skb, | ||
| 157 | * then don't queue it | ||
| 158 | */ | ||
| 159 | break; | ||
| 160 | } | ||
| 104 | case CPTS_EV_PUSH: | 161 | case CPTS_EV_PUSH: |
| 105 | case CPTS_EV_RX: | 162 | case CPTS_EV_RX: |
| 106 | case CPTS_EV_TX: | ||
| 107 | list_del_init(&event->list); | 163 | list_del_init(&event->list); |
| 108 | list_add_tail(&event->list, &cpts->events); | 164 | list_add_tail(&event->list, &cpts->events); |
| 109 | break; | 165 | break; |
| @@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp, | |||
| 224 | return -EOPNOTSUPP; | 280 | return -EOPNOTSUPP; |
| 225 | } | 281 | } |
| 226 | 282 | ||
| 283 | static long cpts_overflow_check(struct ptp_clock_info *ptp) | ||
| 284 | { | ||
| 285 | struct cpts *cpts = container_of(ptp, struct cpts, info); | ||
| 286 | unsigned long delay = cpts->ov_check_period; | ||
| 287 | struct timespec64 ts; | ||
| 288 | unsigned long flags; | ||
| 289 | |||
| 290 | spin_lock_irqsave(&cpts->lock, flags); | ||
| 291 | ts = ns_to_timespec64(timecounter_read(&cpts->tc)); | ||
| 292 | |||
| 293 | if (!skb_queue_empty(&cpts->txq)) | ||
| 294 | delay = CPTS_SKB_TX_WORK_TIMEOUT; | ||
| 295 | spin_unlock_irqrestore(&cpts->lock, flags); | ||
| 296 | |||
| 297 | pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); | ||
| 298 | return (long)delay; | ||
| 299 | } | ||
| 300 | |||
| 227 | static struct ptp_clock_info cpts_info = { | 301 | static struct ptp_clock_info cpts_info = { |
| 228 | .owner = THIS_MODULE, | 302 | .owner = THIS_MODULE, |
| 229 | .name = "CTPS timer", | 303 | .name = "CTPS timer", |
| @@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = { | |||
| 236 | .gettime64 = cpts_ptp_gettime, | 310 | .gettime64 = cpts_ptp_gettime, |
| 237 | .settime64 = cpts_ptp_settime, | 311 | .settime64 = cpts_ptp_settime, |
| 238 | .enable = cpts_ptp_enable, | 312 | .enable = cpts_ptp_enable, |
| 313 | .do_aux_work = cpts_overflow_check, | ||
| 239 | }; | 314 | }; |
| 240 | 315 | ||
| 241 | static void cpts_overflow_check(struct work_struct *work) | ||
| 242 | { | ||
| 243 | struct timespec64 ts; | ||
| 244 | struct cpts *cpts = container_of(work, struct cpts, overflow_work.work); | ||
| 245 | |||
| 246 | cpts_ptp_gettime(&cpts->info, &ts); | ||
| 247 | pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); | ||
| 248 | schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); | ||
| 249 | } | ||
| 250 | |||
| 251 | static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, | 316 | static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, |
| 252 | u16 ts_seqid, u8 ts_msgtype) | 317 | u16 ts_seqid, u8 ts_msgtype) |
| 253 | { | 318 | { |
| @@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type) | |||
| 299 | return 0; | 364 | return 0; |
| 300 | 365 | ||
| 301 | spin_lock_irqsave(&cpts->lock, flags); | 366 | spin_lock_irqsave(&cpts->lock, flags); |
| 302 | cpts_fifo_read(cpts, CPTS_EV_PUSH); | 367 | cpts_fifo_read(cpts, -1); |
| 303 | list_for_each_safe(this, next, &cpts->events) { | 368 | list_for_each_safe(this, next, &cpts->events) { |
| 304 | event = list_entry(this, struct cpts_event, list); | 369 | event = list_entry(this, struct cpts_event, list); |
| 305 | if (event_expired(event)) { | 370 | if (event_expired(event)) { |
| @@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type) | |||
| 317 | break; | 382 | break; |
| 318 | } | 383 | } |
| 319 | } | 384 | } |
| 385 | |||
| 386 | if (ev_type == CPTS_EV_TX && !ns) { | ||
| 387 | struct cpts_skb_cb_data *skb_cb = | ||
| 388 | (struct cpts_skb_cb_data *)skb->cb; | ||
| 389 | /* Not found, add frame to queue for processing later. | ||
| 390 | * The periodic FIFO check will handle this. | ||
| 391 | */ | ||
| 392 | skb_get(skb); | ||
| 393 | /* get the timestamp for timeouts */ | ||
| 394 | skb_cb->tmo = jiffies + msecs_to_jiffies(100); | ||
| 395 | __skb_queue_tail(&cpts->txq, skb); | ||
| 396 | ptp_schedule_worker(cpts->clock, 0); | ||
| 397 | } | ||
| 320 | spin_unlock_irqrestore(&cpts->lock, flags); | 398 | spin_unlock_irqrestore(&cpts->lock, flags); |
| 321 | 399 | ||
| 322 | return ns; | 400 | return ns; |
| @@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts) | |||
| 358 | { | 436 | { |
| 359 | int err, i; | 437 | int err, i; |
| 360 | 438 | ||
| 439 | skb_queue_head_init(&cpts->txq); | ||
| 361 | INIT_LIST_HEAD(&cpts->events); | 440 | INIT_LIST_HEAD(&cpts->events); |
| 362 | INIT_LIST_HEAD(&cpts->pool); | 441 | INIT_LIST_HEAD(&cpts->pool); |
| 363 | for (i = 0; i < CPTS_MAX_EVENTS; i++) | 442 | for (i = 0; i < CPTS_MAX_EVENTS; i++) |
| @@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts) | |||
| 378 | } | 457 | } |
| 379 | cpts->phc_index = ptp_clock_index(cpts->clock); | 458 | cpts->phc_index = ptp_clock_index(cpts->clock); |
| 380 | 459 | ||
| 381 | schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); | 460 | ptp_schedule_worker(cpts->clock, cpts->ov_check_period); |
| 382 | return 0; | 461 | return 0; |
| 383 | 462 | ||
| 384 | err_ptp: | 463 | err_ptp: |
| @@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts) | |||
| 392 | if (WARN_ON(!cpts->clock)) | 471 | if (WARN_ON(!cpts->clock)) |
| 393 | return; | 472 | return; |
| 394 | 473 | ||
| 395 | cancel_delayed_work_sync(&cpts->overflow_work); | ||
| 396 | |||
| 397 | ptp_clock_unregister(cpts->clock); | 474 | ptp_clock_unregister(cpts->clock); |
| 398 | cpts->clock = NULL; | 475 | cpts->clock = NULL; |
| 399 | 476 | ||
| 400 | cpts_write32(cpts, 0, int_enable); | 477 | cpts_write32(cpts, 0, int_enable); |
| 401 | cpts_write32(cpts, 0, control); | 478 | cpts_write32(cpts, 0, control); |
| 402 | 479 | ||
| 480 | /* Drop all packets */ | ||
| 481 | skb_queue_purge(&cpts->txq); | ||
| 482 | |||
| 403 | clk_disable(cpts->refclk); | 483 | clk_disable(cpts->refclk); |
| 404 | } | 484 | } |
| 405 | EXPORT_SYMBOL_GPL(cpts_unregister); | 485 | EXPORT_SYMBOL_GPL(cpts_unregister); |
| @@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs, | |||
| 476 | cpts->dev = dev; | 556 | cpts->dev = dev; |
| 477 | cpts->reg = (struct cpsw_cpts __iomem *)regs; | 557 | cpts->reg = (struct cpsw_cpts __iomem *)regs; |
| 478 | spin_lock_init(&cpts->lock); | 558 | spin_lock_init(&cpts->lock); |
| 479 | INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check); | ||
| 480 | 559 | ||
| 481 | ret = cpts_of_parse(cpts, node); | 560 | ret = cpts_of_parse(cpts, node); |
| 482 | if (ret) | 561 | if (ret) |
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h index 01ea82ba9cdc..73d73faf0f38 100644 --- a/drivers/net/ethernet/ti/cpts.h +++ b/drivers/net/ethernet/ti/cpts.h | |||
| @@ -119,13 +119,13 @@ struct cpts { | |||
| 119 | u32 cc_mult; /* for the nominal frequency */ | 119 | u32 cc_mult; /* for the nominal frequency */ |
| 120 | struct cyclecounter cc; | 120 | struct cyclecounter cc; |
| 121 | struct timecounter tc; | 121 | struct timecounter tc; |
| 122 | struct delayed_work overflow_work; | ||
| 123 | int phc_index; | 122 | int phc_index; |
| 124 | struct clk *refclk; | 123 | struct clk *refclk; |
| 125 | struct list_head events; | 124 | struct list_head events; |
| 126 | struct list_head pool; | 125 | struct list_head pool; |
| 127 | struct cpts_event pool_data[CPTS_MAX_EVENTS]; | 126 | struct cpts_event pool_data[CPTS_MAX_EVENTS]; |
| 128 | unsigned long ov_check_period; | 127 | unsigned long ov_check_period; |
| 128 | struct sk_buff_head txq; | ||
| 129 | }; | 129 | }; |
| 130 | 130 | ||
| 131 | void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); | 131 | void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); |
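The cpts changes above move the overflow watchdog from a driver-private delayed_work onto the PTP core's auxiliary worker: the driver sets .do_aux_work in its ptp_clock_info, kicks the worker with ptp_schedule_worker(), and the callback returns how many jiffies later it wants to run again (a negative return means no rescheduling). A hedged sketch of that contract — only ptp_schedule_worker() and the .do_aux_work hook are real API here, the my_* names are illustrative:

	static long my_aux_work(struct ptp_clock_info *info)
	{
		/* periodic housekeeping, e.g. timecounter_read(&priv->tc) */
		return HZ;				/* run again in ~1 s */
	}

	static struct ptp_clock_info my_info = {
		.owner		= THIS_MODULE,
		.name		= "my clock",
		.do_aux_work	= my_aux_work,
		/* ... other mandatory callbacks ... */
	};

	/* after a successful ptp_clock_register(): */
	ptp_schedule_worker(clock, HZ);			/* first run in ~1 s */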
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index d9db8a06afd2..cce9c9ed46aa 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c | |||
| @@ -1338,7 +1338,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) | |||
| 1338 | static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status) | 1338 | static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status) |
| 1339 | { | 1339 | { |
| 1340 | static int count; | 1340 | static int count; |
| 1341 | printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):", | 1341 | printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):", |
| 1342 | dev->name, status); | 1342 | dev->name, status); |
| 1343 | if (status & Int_IntPCI) | 1343 | if (status & Int_IntPCI) |
| 1344 | printk(" IntPCI"); | 1344 | printk(" IntPCI"); |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index de8156c6b292..2bbda71818ad 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
| @@ -1091,7 +1091,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], | |||
| 1091 | if (data[IFLA_GENEVE_ID]) { | 1091 | if (data[IFLA_GENEVE_ID]) { |
| 1092 | __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]); | 1092 | __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]); |
| 1093 | 1093 | ||
| 1094 | if (vni >= GENEVE_VID_MASK) | 1094 | if (vni >= GENEVE_N_VID) |
| 1095 | return -ERANGE; | 1095 | return -ERANGE; |
| 1096 | } | 1096 | } |
| 1097 | 1097 | ||
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 1542e837fdfa..f38e32a7ec9c 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c | |||
| @@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev) | |||
| 364 | 364 | ||
| 365 | gtp->dev = dev; | 365 | gtp->dev = dev; |
| 366 | 366 | ||
| 367 | dev->tstats = alloc_percpu(struct pcpu_sw_netstats); | 367 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
| 368 | if (!dev->tstats) | 368 | if (!dev->tstats) |
| 369 | return -ENOMEM; | 369 | return -ENOMEM; |
| 370 | 370 | ||
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index d6c25580f8dd..12cc64bfcff8 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
| @@ -765,7 +765,8 @@ struct netvsc_device { | |||
| 765 | u32 max_chn; | 765 | u32 max_chn; |
| 766 | u32 num_chn; | 766 | u32 num_chn; |
| 767 | 767 | ||
| 768 | refcount_t sc_offered; | 768 | atomic_t open_chn; |
| 769 | wait_queue_head_t subchan_open; | ||
| 769 | 770 | ||
| 770 | struct rndis_device *extension; | 771 | struct rndis_device *extension; |
| 771 | 772 | ||
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 0a9167dd72fb..d18c3326a1f7 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
| @@ -78,6 +78,7 @@ static struct netvsc_device *alloc_net_device(void) | |||
| 78 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; | 78 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; |
| 79 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; | 79 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; |
| 80 | init_completion(&net_device->channel_init_wait); | 80 | init_completion(&net_device->channel_init_wait); |
| 81 | init_waitqueue_head(&net_device->subchan_open); | ||
| 81 | 82 | ||
| 82 | return net_device; | 83 | return net_device; |
| 83 | } | 84 | } |
| @@ -1302,6 +1303,8 @@ int netvsc_device_add(struct hv_device *device, | |||
| 1302 | struct netvsc_channel *nvchan = &net_device->chan_table[i]; | 1303 | struct netvsc_channel *nvchan = &net_device->chan_table[i]; |
| 1303 | 1304 | ||
| 1304 | nvchan->channel = device->channel; | 1305 | nvchan->channel = device->channel; |
| 1306 | u64_stats_init(&nvchan->tx_stats.syncp); | ||
| 1307 | u64_stats_init(&nvchan->rx_stats.syncp); | ||
| 1305 | } | 1308 | } |
| 1306 | 1309 | ||
| 1307 | /* Enable NAPI handler before init callbacks */ | 1310 | /* Enable NAPI handler before init callbacks */ |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 63c98bbbc596..d91cbc6c3ca4 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
| @@ -315,14 +315,34 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, | |||
| 315 | return slots_used; | 315 | return slots_used; |
| 316 | } | 316 | } |
| 317 | 317 | ||
| 318 | /* Estimate number of page buffers neede to transmit | 318 | static int count_skb_frag_slots(struct sk_buff *skb) |
| 319 | * Need at most 2 for RNDIS header plus skb body and fragments. | ||
| 320 | */ | ||
| 321 | static unsigned int netvsc_get_slots(const struct sk_buff *skb) | ||
| 322 | { | 319 | { |
| 323 | return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb)) | 320 | int i, frags = skb_shinfo(skb)->nr_frags; |
| 324 | + skb_shinfo(skb)->nr_frags | 321 | int pages = 0; |
| 325 | + 2; | 322 | |
| 323 | for (i = 0; i < frags; i++) { | ||
| 324 | skb_frag_t *frag = skb_shinfo(skb)->frags + i; | ||
| 325 | unsigned long size = skb_frag_size(frag); | ||
| 326 | unsigned long offset = frag->page_offset; | ||
| 327 | |||
| 328 | /* Skip unused frames from start of page */ | ||
| 329 | offset &= ~PAGE_MASK; | ||
| 330 | pages += PFN_UP(offset + size); | ||
| 331 | } | ||
| 332 | return pages; | ||
| 333 | } | ||
| 334 | |||
| 335 | static int netvsc_get_slots(struct sk_buff *skb) | ||
| 336 | { | ||
| 337 | char *data = skb->data; | ||
| 338 | unsigned int offset = offset_in_page(data); | ||
| 339 | unsigned int len = skb_headlen(skb); | ||
| 340 | int slots; | ||
| 341 | int frag_slots; | ||
| 342 | |||
| 343 | slots = DIV_ROUND_UP(offset + len, PAGE_SIZE); | ||
| 344 | frag_slots = count_skb_frag_slots(skb); | ||
| 345 | return slots + frag_slots; | ||
| 326 | } | 346 | } |
| 327 | 347 | ||
| 328 | static u32 net_checksum_info(struct sk_buff *skb) | 348 | static u32 net_checksum_info(struct sk_buff *skb) |
| @@ -360,18 +380,21 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) | |||
| 360 | struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; | 380 | struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; |
| 361 | struct hv_page_buffer *pb = page_buf; | 381 | struct hv_page_buffer *pb = page_buf; |
| 362 | 382 | ||
| 363 | /* We can only transmit MAX_PAGE_BUFFER_COUNT number | 383 | /* We will need at most two pages to describe the rndis |
| 384 | * header. We can only transmit MAX_PAGE_BUFFER_COUNT number | ||
| 364 | * of pages in a single packet. If skb is scattered around | 385 | * of pages in a single packet. If skb is scattered around |
| 365 | * more pages we try linearizing it. | 386 | * more pages we try linearizing it. |
| 366 | */ | 387 | */ |
| 367 | num_data_pgs = netvsc_get_slots(skb); | 388 | |
| 389 | num_data_pgs = netvsc_get_slots(skb) + 2; | ||
| 390 | |||
| 368 | if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { | 391 | if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { |
| 369 | ++net_device_ctx->eth_stats.tx_scattered; | 392 | ++net_device_ctx->eth_stats.tx_scattered; |
| 370 | 393 | ||
| 371 | if (skb_linearize(skb)) | 394 | if (skb_linearize(skb)) |
| 372 | goto no_memory; | 395 | goto no_memory; |
| 373 | 396 | ||
| 374 | num_data_pgs = netvsc_get_slots(skb); | 397 | num_data_pgs = netvsc_get_slots(skb) + 2; |
| 375 | if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { | 398 | if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { |
| 376 | ++net_device_ctx->eth_stats.tx_too_big; | 399 | ++net_device_ctx->eth_stats.tx_too_big; |
| 377 | goto drop; | 400 | goto drop; |
| @@ -1246,7 +1269,12 @@ static void netvsc_link_change(struct work_struct *w) | |||
| 1246 | bool notify = false, reschedule = false; | 1269 | bool notify = false, reschedule = false; |
| 1247 | unsigned long flags, next_reconfig, delay; | 1270 | unsigned long flags, next_reconfig, delay; |
| 1248 | 1271 | ||
| 1249 | rtnl_lock(); | 1272 | /* if changes are happening, come back later */ |
| 1273 | if (!rtnl_trylock()) { | ||
| 1274 | schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT); | ||
| 1275 | return; | ||
| 1276 | } | ||
| 1277 | |||
| 1250 | net_device = rtnl_dereference(ndev_ctx->nvdev); | 1278 | net_device = rtnl_dereference(ndev_ctx->nvdev); |
| 1251 | if (!net_device) | 1279 | if (!net_device) |
| 1252 | goto out_unlock; | 1280 | goto out_unlock; |
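The netvsc_get_slots() rework above stops assuming one page buffer per fragment and instead counts the pages each fragment really touches, adding the fixed two slots for the RNDIS header afterwards. A quick worked example of why that matters (4 KiB pages assumed, numbers purely illustrative): a 6000-byte fragment that begins 4090 bytes into its first page needs PFN_UP(4090 + 6000) = PFN_UP(10090) = 3 page buffers, where the old per-frag estimate would have reserved only one.

	/* Illustrative arithmetic only (PAGE_SIZE == 4096): */
	unsigned long offset = 4090;		/* frag->page_offset & ~PAGE_MASK */
	unsigned long size   = 6000;		/* skb_frag_size(frag) */
	int pages = PFN_UP(offset + size);	/* (10090 + 4095) >> 12 == 3 */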
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 85c00e1c52b6..d6308ffda53e 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
| @@ -1048,8 +1048,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) | |||
| 1048 | else | 1048 | else |
| 1049 | netif_napi_del(&nvchan->napi); | 1049 | netif_napi_del(&nvchan->napi); |
| 1050 | 1050 | ||
| 1051 | if (refcount_dec_and_test(&nvscdev->sc_offered)) | 1051 | atomic_inc(&nvscdev->open_chn); |
| 1052 | complete(&nvscdev->channel_init_wait); | 1052 | wake_up(&nvscdev->subchan_open); |
| 1053 | } | 1053 | } |
| 1054 | 1054 | ||
| 1055 | int rndis_filter_device_add(struct hv_device *dev, | 1055 | int rndis_filter_device_add(struct hv_device *dev, |
| @@ -1090,8 +1090,6 @@ int rndis_filter_device_add(struct hv_device *dev, | |||
| 1090 | net_device->max_chn = 1; | 1090 | net_device->max_chn = 1; |
| 1091 | net_device->num_chn = 1; | 1091 | net_device->num_chn = 1; |
| 1092 | 1092 | ||
| 1093 | refcount_set(&net_device->sc_offered, 0); | ||
| 1094 | |||
| 1095 | net_device->extension = rndis_device; | 1093 | net_device->extension = rndis_device; |
| 1096 | rndis_device->ndev = net; | 1094 | rndis_device->ndev = net; |
| 1097 | 1095 | ||
| @@ -1221,11 +1219,11 @@ int rndis_filter_device_add(struct hv_device *dev, | |||
| 1221 | rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, | 1219 | rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, |
| 1222 | net_device->num_chn); | 1220 | net_device->num_chn); |
| 1223 | 1221 | ||
| 1222 | atomic_set(&net_device->open_chn, 1); | ||
| 1224 | num_rss_qs = net_device->num_chn - 1; | 1223 | num_rss_qs = net_device->num_chn - 1; |
| 1225 | if (num_rss_qs == 0) | 1224 | if (num_rss_qs == 0) |
| 1226 | return 0; | 1225 | return 0; |
| 1227 | 1226 | ||
| 1228 | refcount_set(&net_device->sc_offered, num_rss_qs); | ||
| 1229 | vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); | 1227 | vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); |
| 1230 | 1228 | ||
| 1231 | init_packet = &net_device->channel_init_pkt; | 1229 | init_packet = &net_device->channel_init_pkt; |
| @@ -1242,15 +1240,19 @@ int rndis_filter_device_add(struct hv_device *dev, | |||
| 1242 | if (ret) | 1240 | if (ret) |
| 1243 | goto out; | 1241 | goto out; |
| 1244 | 1242 | ||
| 1243 | wait_for_completion(&net_device->channel_init_wait); | ||
| 1245 | if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { | 1244 | if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { |
| 1246 | ret = -ENODEV; | 1245 | ret = -ENODEV; |
| 1247 | goto out; | 1246 | goto out; |
| 1248 | } | 1247 | } |
| 1249 | wait_for_completion(&net_device->channel_init_wait); | ||
| 1250 | 1248 | ||
| 1251 | net_device->num_chn = 1 + | 1249 | net_device->num_chn = 1 + |
| 1252 | init_packet->msg.v5_msg.subchn_comp.num_subchannels; | 1250 | init_packet->msg.v5_msg.subchn_comp.num_subchannels; |
| 1253 | 1251 | ||
| 1252 | /* wait for all sub channels to open */ | ||
| 1253 | wait_event(net_device->subchan_open, | ||
| 1254 | atomic_read(&net_device->open_chn) == net_device->num_chn); | ||
| 1255 | |||
| 1254 | /* ignore failures from setting rss parameters, still have channels */ | 1256 | /* ignore failures from setting rss parameters, still have channels */ |
| 1255 | rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, | 1257 | rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, |
| 1256 | net_device->num_chn); | 1258 | net_device->num_chn); |
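The rndis/netvsc hunks above replace the sc_offered refcount plus completion with an open-channel counter and a wait queue, so device add can sleep until every subchannel's open callback has actually run rather than merely been offered. The generic pattern, sketched with names that mirror the hunk (open_chn, subchan_open) but otherwise illustrative:

	/* setup, before requesting subchannels */
	atomic_set(&nvdev->open_chn, 1);	/* primary channel is open */
	init_waitqueue_head(&nvdev->subchan_open);

	/* per-subchannel open callback */
	atomic_inc(&nvdev->open_chn);
	wake_up(&nvdev->subchan_open);

	/* waiter, after learning num_chn from the host */
	wait_event(nvdev->subchan_open,
		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);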
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index f37e3c1fd4e7..8dab74a81303 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
| @@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev) | |||
| 192 | 192 | ||
| 193 | netdev_lockdep_set_classes(dev); | 193 | netdev_lockdep_set_classes(dev); |
| 194 | 194 | ||
| 195 | ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats); | 195 | ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats); |
| 196 | if (!ipvlan->pcpu_stats) | 196 | if (!ipvlan->pcpu_stats) |
| 197 | return -ENOMEM; | 197 | return -ENOMEM; |
| 198 | 198 | ||
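The one-line ipvlan change swaps a bare alloc_percpu() for netdev_alloc_pcpu_stats(), which also runs u64_stats_init() on every CPU's syncp. A minimal sketch of the pattern, assuming a stats struct with the conventional syncp member; the demo_* names are illustrative.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
        u64 rx_packets;
        u64 rx_bytes;
        struct u64_stats_sync syncp;   /* netdev_alloc_pcpu_stats() expects this member */
};

static int demo_stats_init(struct demo_pcpu_stats __percpu **stats)
{
        /* Allocates the per-CPU area and initializes each CPU's syncp,
         * which a plain alloc_percpu() would leave to the caller.
         */
        *stats = netdev_alloc_pcpu_stats(struct demo_pcpu_stats);
        return *stats ? 0 : -ENOMEM;
}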
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 6f6ed75b63c9..765de3bedb88 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c | |||
| @@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val) | |||
| 141 | static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val) | 141 | static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val) |
| 142 | { | 142 | { |
| 143 | struct usb_device *dev = mcs->usbdev; | 143 | struct usb_device *dev = mcs->usbdev; |
| 144 | int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, | 144 | void *dmabuf; |
| 145 | MCS_RD_RTYPE, 0, reg, val, 2, | 145 | int ret; |
| 146 | msecs_to_jiffies(MCS_CTRL_TIMEOUT)); | 146 | |
| 147 | dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL); | ||
| 148 | if (!dmabuf) | ||
| 149 | return -ENOMEM; | ||
| 150 | |||
| 151 | ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, | ||
| 152 | MCS_RD_RTYPE, 0, reg, dmabuf, 2, | ||
| 153 | msecs_to_jiffies(MCS_CTRL_TIMEOUT)); | ||
| 154 | |||
| 155 | memcpy(val, dmabuf, sizeof(__u16)); | ||
| 156 | kfree(dmabuf); | ||
| 147 | 157 | ||
| 148 | return ret; | 158 | return ret; |
| 149 | } | 159 | } |
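The mcs7780 fix above follows the general USB rule that usb_control_msg() must be given a kmalloc'd, DMA-capable buffer rather than a caller-supplied stack or embedded pointer. A generic sketch of the same pattern; DEMO_REQ, DEMO_RTYPE and DEMO_TIMEOUT_MS are placeholders for device-specific request constants, not values from the driver.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

#define DEMO_REQ         0x01          /* placeholder vendor request */
#define DEMO_RTYPE       (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
#define DEMO_TIMEOUT_MS  1000

static int demo_read_reg16(struct usb_device *udev, u16 reg, u16 *val)
{
        void *dmabuf;
        int ret;

        dmabuf = kmalloc(sizeof(*val), GFP_KERNEL);   /* DMA-safe, not on the stack */
        if (!dmabuf)
                return -ENOMEM;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), DEMO_REQ,
                              DEMO_RTYPE, 0, reg, dmabuf, sizeof(*val),
                              DEMO_TIMEOUT_MS);
        if (ret >= 0)
                memcpy(val, dmabuf, sizeof(*val));

        kfree(dmabuf);
        return ret;
}

Unlike the hunk, this sketch copies the result out only when the transfer did not report an error; the driver copies unconditionally.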
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 5e1ab1160856..98e4deaa3a6a 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
| @@ -3521,6 +3521,7 @@ module_init(macsec_init); | |||
| 3521 | module_exit(macsec_exit); | 3521 | module_exit(macsec_exit); |
| 3522 | 3522 | ||
| 3523 | MODULE_ALIAS_RTNL_LINK("macsec"); | 3523 | MODULE_ALIAS_RTNL_LINK("macsec"); |
| 3524 | MODULE_ALIAS_GENL_FAMILY("macsec"); | ||
| 3524 | 3525 | ||
| 3525 | MODULE_DESCRIPTION("MACsec IEEE 802.1AE"); | 3526 | MODULE_DESCRIPTION("MACsec IEEE 802.1AE"); |
| 3526 | MODULE_LICENSE("GPL v2"); | 3527 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 2dda72004a7d..928fd892f167 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
| @@ -7,7 +7,16 @@ menuconfig MDIO_DEVICE | |||
| 7 | help | 7 | help |
| 8 | MDIO devices and driver infrastructure code. | 8 | MDIO devices and driver infrastructure code. |
| 9 | 9 | ||
| 10 | if MDIO_DEVICE | 10 | config MDIO_BUS |
| 11 | tristate | ||
| 12 | default m if PHYLIB=m | ||
| 13 | default MDIO_DEVICE | ||
| 14 | help | ||
| 15 | This internal symbol is used for link time dependencies and it | ||
| 16 | reflects whether the mdio_bus/mdio_device code is built as a | ||
| 17 | loadable module or built-in. | ||
| 18 | |||
| 19 | if MDIO_BUS | ||
| 11 | 20 | ||
| 12 | config MDIO_BCM_IPROC | 21 | config MDIO_BCM_IPROC |
| 13 | tristate "Broadcom iProc MDIO bus controller" | 22 | tristate "Broadcom iProc MDIO bus controller" |
| @@ -28,7 +37,6 @@ config MDIO_BCM_UNIMAC | |||
| 28 | 37 | ||
| 29 | config MDIO_BITBANG | 38 | config MDIO_BITBANG |
| 30 | tristate "Bitbanged MDIO buses" | 39 | tristate "Bitbanged MDIO buses" |
| 31 | depends on !(MDIO_DEVICE=y && PHYLIB=m) | ||
| 32 | help | 40 | help |
| 33 | This module implements the MDIO bus protocol in software, | 41 | This module implements the MDIO bus protocol in software, |
| 34 | for use by low level drivers that export the ability to | 42 | for use by low level drivers that export the ability to |
| @@ -127,7 +135,6 @@ config MDIO_THUNDER | |||
| 127 | tristate "ThunderX SOCs MDIO buses" | 135 | tristate "ThunderX SOCs MDIO buses" |
| 128 | depends on 64BIT | 136 | depends on 64BIT |
| 129 | depends on PCI | 137 | depends on PCI |
| 130 | depends on !(MDIO_DEVICE=y && PHYLIB=m) | ||
| 131 | select MDIO_CAVIUM | 138 | select MDIO_CAVIUM |
| 132 | help | 139 | help |
| 133 | This driver supports the MDIO interfaces found on Cavium | 140 | This driver supports the MDIO interfaces found on Cavium |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 1790f7fec125..2f742ae5b92e 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
| @@ -864,15 +864,17 @@ EXPORT_SYMBOL(phy_attached_info); | |||
| 864 | #define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)" | 864 | #define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)" |
| 865 | void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) | 865 | void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) |
| 866 | { | 866 | { |
| 867 | const char *drv_name = phydev->drv ? phydev->drv->name : "unbound"; | ||
| 868 | |||
| 867 | if (!fmt) { | 869 | if (!fmt) { |
| 868 | dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n", | 870 | dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n", |
| 869 | phydev->drv->name, phydev_name(phydev), | 871 | drv_name, phydev_name(phydev), |
| 870 | phydev->irq); | 872 | phydev->irq); |
| 871 | } else { | 873 | } else { |
| 872 | va_list ap; | 874 | va_list ap; |
| 873 | 875 | ||
| 874 | dev_info(&phydev->mdio.dev, ATTACHED_FMT, | 876 | dev_info(&phydev->mdio.dev, ATTACHED_FMT, |
| 875 | phydev->drv->name, phydev_name(phydev), | 877 | drv_name, phydev_name(phydev), |
| 876 | phydev->irq); | 878 | phydev->irq); |
| 877 | 879 | ||
| 878 | va_start(ap, fmt); | 880 | va_start(ap, fmt); |
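The phy_device.c hunk guards against printing through a NULL phydev->drv. The helper below is just that guard factored out as a sketch; it is not a function the PHY core provides.

#include <linux/phy.h>

/* Name to print for a PHY that may not have a driver bound yet. */
static const char *demo_phy_drv_name(const struct phy_device *phydev)
{
        return phydev->drv ? phydev->drv->name : "unbound";
}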
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index bd4303944e44..a404552555d4 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
| @@ -1915,21 +1915,23 @@ static void __ppp_channel_push(struct channel *pch) | |||
| 1915 | spin_unlock(&pch->downl); | 1915 | spin_unlock(&pch->downl); |
| 1916 | /* see if there is anything from the attached unit to be sent */ | 1916 | /* see if there is anything from the attached unit to be sent */ |
| 1917 | if (skb_queue_empty(&pch->file.xq)) { | 1917 | if (skb_queue_empty(&pch->file.xq)) { |
| 1918 | read_lock(&pch->upl); | ||
| 1919 | ppp = pch->ppp; | 1918 | ppp = pch->ppp; |
| 1920 | if (ppp) | 1919 | if (ppp) |
| 1921 | ppp_xmit_process(ppp); | 1920 | __ppp_xmit_process(ppp); |
| 1922 | read_unlock(&pch->upl); | ||
| 1923 | } | 1921 | } |
| 1924 | } | 1922 | } |
| 1925 | 1923 | ||
| 1926 | static void ppp_channel_push(struct channel *pch) | 1924 | static void ppp_channel_push(struct channel *pch) |
| 1927 | { | 1925 | { |
| 1928 | local_bh_disable(); | 1926 | read_lock_bh(&pch->upl); |
| 1929 | 1927 | if (pch->ppp) { | |
| 1930 | __ppp_channel_push(pch); | 1928 | (*this_cpu_ptr(pch->ppp->xmit_recursion))++; |
| 1931 | 1929 | __ppp_channel_push(pch); | |
| 1932 | local_bh_enable(); | 1930 | (*this_cpu_ptr(pch->ppp->xmit_recursion))--; |
| 1931 | } else { | ||
| 1932 | __ppp_channel_push(pch); | ||
| 1933 | } | ||
| 1934 | read_unlock_bh(&pch->upl); | ||
| 1933 | } | 1935 | } |
| 1934 | 1936 | ||
| 1935 | /* | 1937 | /* |
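The ppp_generic change relies on a per-CPU recursion counter: the channel-push path bumps it around the transmit call so the transmit path can detect re-entry on the same CPU. A simplified sketch using a single static per-CPU counter (the driver keeps one per ppp unit, allocated with alloc_percpu()); the demo_* names are illustrative.

#include <linux/bottom_half.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_xmit_recursion);

static void demo_do_xmit(void)
{
        /* stand-in for the real transmit work */
}

static void demo_xmit_guarded(void)
{
        local_bh_disable();               /* keep the per-CPU counter stable */
        if (!__this_cpu_read(demo_xmit_recursion)) {
                __this_cpu_inc(demo_xmit_recursion);
                demo_do_xmit();
                __this_cpu_dec(demo_xmit_recursion);
        }
        /* else: already transmitting on this CPU, so the caller must defer */
        local_bh_enable();
}

In the patch itself, read_lock_bh(&pch->upl) already disables bottom halves, which is what makes the bare this_cpu_ptr() increment and decrement safe there.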
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index eac499c58aa7..6dde9a0cfe76 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c | |||
| @@ -131,7 +131,6 @@ static void del_chan(struct pppox_sock *sock) | |||
| 131 | clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); | 131 | clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); |
| 132 | RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL); | 132 | RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL); |
| 133 | spin_unlock(&chan_lock); | 133 | spin_unlock(&chan_lock); |
| 134 | synchronize_rcu(); | ||
| 135 | } | 134 | } |
| 136 | 135 | ||
| 137 | static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | 136 | static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) |
| @@ -520,6 +519,7 @@ static int pptp_release(struct socket *sock) | |||
| 520 | 519 | ||
| 521 | po = pppox_sk(sk); | 520 | po = pppox_sk(sk); |
| 522 | del_chan(po); | 521 | del_chan(po); |
| 522 | synchronize_rcu(); | ||
| 523 | 523 | ||
| 524 | pppox_unbind_sock(sk); | 524 | pppox_unbind_sock(sk); |
| 525 | sk->sk_state = PPPOX_DEAD; | 525 | sk->sk_state = PPPOX_DEAD; |
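The pptp change above keeps del_chan() as a pure unpublish (clear the RCU pointer under the lock) and moves the grace-period wait to the release path. A generic sketch of that split, with demo_* placeholders standing in for the driver's table and entry types.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_entry {
        int id;
};

static struct demo_entry __rcu *demo_slot;
static DEFINE_SPINLOCK(demo_lock);

/* Unpublish only; no sleeping, so it stays cheap for every caller. */
static void demo_del_entry(void)
{
        spin_lock(&demo_lock);
        RCU_INIT_POINTER(demo_slot, NULL);
        spin_unlock(&demo_lock);
}

/* Release path: one grace-period wait, then the entry can be freed. */
static void demo_release_entry(struct demo_entry *e)
{
        demo_del_entry();
        synchronize_rcu();        /* may sleep; done outside the spinlock */
        kfree(e);
}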
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 464570409796..ae53e899259f 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
| @@ -60,11 +60,11 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev) | |||
| 60 | static int __set_port_dev_addr(struct net_device *port_dev, | 60 | static int __set_port_dev_addr(struct net_device *port_dev, |
| 61 | const unsigned char *dev_addr) | 61 | const unsigned char *dev_addr) |
| 62 | { | 62 | { |
| 63 | struct sockaddr addr; | 63 | struct sockaddr_storage addr; |
| 64 | 64 | ||
| 65 | memcpy(addr.sa_data, dev_addr, port_dev->addr_len); | 65 | memcpy(addr.__data, dev_addr, port_dev->addr_len); |
| 66 | addr.sa_family = port_dev->type; | 66 | addr.ss_family = port_dev->type; |
| 67 | return dev_set_mac_address(port_dev, &addr); | 67 | return dev_set_mac_address(port_dev, (struct sockaddr *)&addr); |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | static int team_port_set_orig_dev_addr(struct team_port *port) | 70 | static int team_port_set_orig_dev_addr(struct team_port *port) |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 3d4c24572ecd..0a2c0a42283f 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -1879,6 +1879,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
| 1879 | 1879 | ||
| 1880 | err_detach: | 1880 | err_detach: |
| 1881 | tun_detach_all(dev); | 1881 | tun_detach_all(dev); |
| 1882 | /* register_netdevice() already called tun_free_netdev() */ | ||
| 1883 | goto err_free_dev; | ||
| 1884 | |||
| 1882 | err_free_flow: | 1885 | err_free_flow: |
| 1883 | tun_flow_uninit(tun); | 1886 | tun_flow_uninit(tun); |
| 1884 | security_tun_dev_free_security(tun->security); | 1887 | security_tun_dev_free_security(tun->security); |
| @@ -2598,8 +2601,16 @@ static int __init tun_init(void) | |||
| 2598 | goto err_misc; | 2601 | goto err_misc; |
| 2599 | } | 2602 | } |
| 2600 | 2603 | ||
| 2601 | register_netdevice_notifier(&tun_notifier_block); | 2604 | ret = register_netdevice_notifier(&tun_notifier_block); |
| 2605 | if (ret) { | ||
| 2606 | pr_err("Can't register netdevice notifier\n"); | ||
| 2607 | goto err_notifier; | ||
| 2608 | } | ||
| 2609 | |||
| 2602 | return 0; | 2610 | return 0; |
| 2611 | |||
| 2612 | err_notifier: | ||
| 2613 | misc_deregister(&tun_miscdev); | ||
| 2603 | err_misc: | 2614 | err_misc: |
| 2604 | rtnl_link_unregister(&tun_link_ops); | 2615 | rtnl_link_unregister(&tun_link_ops); |
| 2605 | err_linkops: | 2616 | err_linkops: |
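The tun_init() fix is the usual module-init unwind pattern: check every registration that can fail and, on error, undo only what already succeeded, in reverse order. A stripped-down sketch with demo_* placeholders; the two static objects are assumed to be filled in elsewhere.

#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>

static struct notifier_block demo_notifier;   /* .notifier_call assumed set elsewhere */
static struct miscdevice demo_miscdev;        /* .minor/.name/.fops assumed set elsewhere */

static int __init demo_init(void)
{
        int ret;

        ret = misc_register(&demo_miscdev);
        if (ret)
                return ret;

        ret = register_netdevice_notifier(&demo_notifier);
        if (ret)
                goto err_notifier;            /* undo only the earlier step */

        return 0;

err_notifier:
        misc_deregister(&demo_miscdev);
        return ret;
}
module_init(demo_init);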
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h index d1092421aaa7..9a4171b90947 100644 --- a/drivers/net/usb/asix.h +++ b/drivers/net/usb/asix.h | |||
| @@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, | |||
| 209 | int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, | 209 | int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, |
| 210 | struct asix_rx_fixup_info *rx); | 210 | struct asix_rx_fixup_info *rx); |
| 211 | int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb); | 211 | int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb); |
| 212 | void asix_rx_fixup_common_free(struct asix_common_private *dp); | ||
| 212 | 213 | ||
| 213 | struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | 214 | struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, |
| 214 | gfp_t flags); | 215 | gfp_t flags); |
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 7847436c441e..522d2900cd1d 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c | |||
| @@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index, | |||
| 75 | value, index, data, size); | 75 | value, index, data, size); |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx) | ||
| 79 | { | ||
| 80 | /* Reset the variables that have a lifetime outside of | ||
| 81 | * asix_rx_fixup_internal() so that future processing starts from a | ||
| 82 | * known set of initial conditions. | ||
| 83 | */ | ||
| 84 | |||
| 85 | if (rx->ax_skb) { | ||
| 86 | /* Discard any incomplete Ethernet frame in the netdev buffer */ | ||
| 87 | kfree_skb(rx->ax_skb); | ||
| 88 | rx->ax_skb = NULL; | ||
| 89 | } | ||
| 90 | |||
| 91 | /* Assume the Data header 32-bit word is at the start of the current | ||
| 92 | * or next URB socket buffer so reset all the state variables. | ||
| 93 | */ | ||
| 94 | rx->remaining = 0; | ||
| 95 | rx->split_head = false; | ||
| 96 | rx->header = 0; | ||
| 97 | } | ||
| 98 | |||
| 78 | int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, | 99 | int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, |
| 79 | struct asix_rx_fixup_info *rx) | 100 | struct asix_rx_fixup_info *rx) |
| 80 | { | 101 | { |
| @@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, | |||
| 99 | if (size != ((~rx->header >> 16) & 0x7ff)) { | 120 | if (size != ((~rx->header >> 16) & 0x7ff)) { |
| 100 | netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n", | 121 | netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n", |
| 101 | rx->remaining); | 122 | rx->remaining); |
| 102 | if (rx->ax_skb) { | 123 | reset_asix_rx_fixup_info(rx); |
| 103 | kfree_skb(rx->ax_skb); | ||
| 104 | rx->ax_skb = NULL; | ||
| 105 | /* Discard the incomplete netdev Ethernet frame | ||
| 106 | * and assume the Data header is at the start of | ||
| 107 | * the current URB socket buffer. | ||
| 108 | */ | ||
| 109 | } | ||
| 110 | rx->remaining = 0; | ||
| 111 | } | 124 | } |
| 112 | } | 125 | } |
| 113 | 126 | ||
| @@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, | |||
| 139 | if (size != ((~rx->header >> 16) & 0x7ff)) { | 152 | if (size != ((~rx->header >> 16) & 0x7ff)) { |
| 140 | netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n", | 153 | netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n", |
| 141 | rx->header, offset); | 154 | rx->header, offset); |
| 155 | reset_asix_rx_fixup_info(rx); | ||
| 142 | return 0; | 156 | return 0; |
| 143 | } | 157 | } |
| 144 | if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { | 158 | if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { |
| 145 | netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n", | 159 | netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n", |
| 146 | size); | 160 | size); |
| 161 | reset_asix_rx_fixup_info(rx); | ||
| 147 | return 0; | 162 | return 0; |
| 148 | } | 163 | } |
| 149 | 164 | ||
| @@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, | |||
| 168 | if (rx->ax_skb) { | 183 | if (rx->ax_skb) { |
| 169 | skb_put_data(rx->ax_skb, skb->data + offset, | 184 | skb_put_data(rx->ax_skb, skb->data + offset, |
| 170 | copy_length); | 185 | copy_length); |
| 171 | if (!rx->remaining) | 186 | if (!rx->remaining) { |
| 172 | usbnet_skb_return(dev, rx->ax_skb); | 187 | usbnet_skb_return(dev, rx->ax_skb); |
| 188 | rx->ax_skb = NULL; | ||
| 189 | } | ||
| 173 | } | 190 | } |
| 174 | 191 | ||
| 175 | offset += (copy_length + 1) & 0xfffe; | 192 | offset += (copy_length + 1) & 0xfffe; |
| @@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, | |||
| 178 | if (skb->len != offset) { | 195 | if (skb->len != offset) { |
| 179 | netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n", | 196 | netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n", |
| 180 | skb->len, offset); | 197 | skb->len, offset); |
| 198 | reset_asix_rx_fixup_info(rx); | ||
| 181 | return 0; | 199 | return 0; |
| 182 | } | 200 | } |
| 183 | 201 | ||
| @@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb) | |||
| 192 | return asix_rx_fixup_internal(dev, skb, rx); | 210 | return asix_rx_fixup_internal(dev, skb, rx); |
| 193 | } | 211 | } |
| 194 | 212 | ||
| 213 | void asix_rx_fixup_common_free(struct asix_common_private *dp) | ||
| 214 | { | ||
| 215 | struct asix_rx_fixup_info *rx; | ||
| 216 | |||
| 217 | if (!dp) | ||
| 218 | return; | ||
| 219 | |||
| 220 | rx = &dp->rx_fixup_info; | ||
| 221 | |||
| 222 | if (rx->ax_skb) { | ||
| 223 | kfree_skb(rx->ax_skb); | ||
| 224 | rx->ax_skb = NULL; | ||
| 225 | } | ||
| 226 | } | ||
| 227 | |||
| 195 | struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | 228 | struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, |
| 196 | gfp_t flags) | 229 | gfp_t flags) |
| 197 | { | 230 | { |
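The asix_common.c change centralizes one rule: any reassembly state carried across URBs must be thrown away on a framing error, including a half-built skb. The sketch below restates that rule with a generic state struct; the field names are illustrative, not the driver's.

#include <linux/skbuff.h>
#include <linux/types.h>

struct demo_rx_state {
        struct sk_buff *partial;   /* frame being reassembled across URBs */
        u32 remaining;             /* bytes still expected for that frame */
        bool split_head;           /* header split across two URBs */
};

/* On any framing error (or at unbind), return to a known-clean state so
 * the next URB is parsed from scratch and nothing leaks.
 */
static void demo_rx_reset(struct demo_rx_state *rx)
{
        if (rx->partial) {
                kfree_skb(rx->partial);
                rx->partial = NULL;
        }
        rx->remaining = 0;
        rx->split_head = false;
}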
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index a3aa0a27dfe5..b2ff88e69a81 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c | |||
| @@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 764 | 764 | ||
| 765 | static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) | 765 | static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) |
| 766 | { | 766 | { |
| 767 | asix_rx_fixup_common_free(dev->driver_priv); | ||
| 767 | kfree(dev->driver_priv); | 768 | kfree(dev->driver_priv); |
| 768 | } | 769 | } |
| 769 | 770 | ||
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 8f572b9f3625..9c80e80c5493 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
| @@ -1758,6 +1758,13 @@ static const struct usb_device_id cdc_devs[] = { | |||
| 1758 | .driver_info = (unsigned long)&wwan_noarp_info, | 1758 | .driver_info = (unsigned long)&wwan_noarp_info, |
| 1759 | }, | 1759 | }, |
| 1760 | 1760 | ||
| 1761 | /* u-blox TOBY-L4 */ | ||
| 1762 | { USB_DEVICE_AND_INTERFACE_INFO(0x1546, 0x1010, | ||
| 1763 | USB_CLASS_COMM, | ||
| 1764 | USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), | ||
| 1765 | .driver_info = (unsigned long)&wwan_info, | ||
| 1766 | }, | ||
| 1767 | |||
| 1761 | /* Generic CDC-NCM devices */ | 1768 | /* Generic CDC-NCM devices */ |
| 1762 | { USB_INTERFACE_INFO(USB_CLASS_COMM, | 1769 | { USB_INTERFACE_INFO(USB_CLASS_COMM, |
| 1763 | USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), | 1770 | USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), |
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 5833f7e2a127..b99a7fb09f8e 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c | |||
| @@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev) | |||
| 2367 | /* Init LTM */ | 2367 | /* Init LTM */ |
| 2368 | lan78xx_init_ltm(dev); | 2368 | lan78xx_init_ltm(dev); |
| 2369 | 2369 | ||
| 2370 | dev->net->hard_header_len += TX_OVERHEAD; | ||
| 2371 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | ||
| 2372 | |||
| 2373 | if (dev->udev->speed == USB_SPEED_SUPER) { | 2370 | if (dev->udev->speed == USB_SPEED_SUPER) { |
| 2374 | buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; | 2371 | buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; |
| 2375 | dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; | 2372 | dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; |
| @@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) | |||
| 2855 | return ret; | 2852 | return ret; |
| 2856 | } | 2853 | } |
| 2857 | 2854 | ||
| 2855 | dev->net->hard_header_len += TX_OVERHEAD; | ||
| 2856 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | ||
| 2857 | |||
| 2858 | /* Init all registers */ | 2858 | /* Init all registers */ |
| 2859 | ret = lan78xx_reset(dev); | 2859 | ret = lan78xx_reset(dev); |
| 2860 | 2860 | ||
| 2861 | lan78xx_mdio_init(dev); | 2861 | ret = lan78xx_mdio_init(dev); |
| 2862 | 2862 | ||
| 2863 | dev->net->flags |= IFF_MULTICAST; | 2863 | dev->net->flags |= IFF_MULTICAST; |
| 2864 | 2864 | ||
| 2865 | pdata->wol = WAKE_MAGIC; | 2865 | pdata->wol = WAKE_MAGIC; |
| 2866 | 2866 | ||
| 2867 | return 0; | 2867 | return ret; |
| 2868 | } | 2868 | } |
| 2869 | 2869 | ||
| 2870 | static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) | 2870 | static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) |
| @@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf, | |||
| 3525 | udev = interface_to_usbdev(intf); | 3525 | udev = interface_to_usbdev(intf); |
| 3526 | udev = usb_get_dev(udev); | 3526 | udev = usb_get_dev(udev); |
| 3527 | 3527 | ||
| 3528 | ret = -ENOMEM; | ||
| 3529 | netdev = alloc_etherdev(sizeof(struct lan78xx_net)); | 3528 | netdev = alloc_etherdev(sizeof(struct lan78xx_net)); |
| 3530 | if (!netdev) { | 3529 | if (!netdev) { |
| 3531 | dev_err(&intf->dev, "Error: OOM\n"); | 3530 | dev_err(&intf->dev, "Error: OOM\n"); |
| 3532 | goto out1; | 3531 | ret = -ENOMEM; |
| 3532 | goto out1; | ||
| 3533 | } | 3533 | } |
| 3534 | 3534 | ||
| 3535 | /* netdev_printk() needs this */ | 3535 | /* netdev_printk() needs this */ |
| @@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf, | |||
| 3610 | ret = register_netdev(netdev); | 3610 | ret = register_netdev(netdev); |
| 3611 | if (ret != 0) { | 3611 | if (ret != 0) { |
| 3612 | netif_err(dev, probe, netdev, "couldn't register the device\n"); | 3612 | netif_err(dev, probe, netdev, "couldn't register the device\n"); |
| 3613 | goto out2; | 3613 | goto out3; |
| 3614 | } | 3614 | } |
| 3615 | 3615 | ||
| 3616 | usb_set_intfdata(intf, dev); | 3616 | usb_set_intfdata(intf, dev); |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 5894e3c9468f..8c3733608271 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = { | |||
| 1175 | {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ | 1175 | {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ |
| 1176 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ | 1176 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ |
| 1177 | {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ | 1177 | {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ |
| 1178 | {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ | ||
| 1178 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ | 1179 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ |
| 1179 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ | 1180 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ |
| 1180 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ | 1181 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ |
| @@ -1340,10 +1341,14 @@ static int qmi_wwan_probe(struct usb_interface *intf, | |||
| 1340 | static void qmi_wwan_disconnect(struct usb_interface *intf) | 1341 | static void qmi_wwan_disconnect(struct usb_interface *intf) |
| 1341 | { | 1342 | { |
| 1342 | struct usbnet *dev = usb_get_intfdata(intf); | 1343 | struct usbnet *dev = usb_get_intfdata(intf); |
| 1343 | struct qmi_wwan_state *info = (void *)&dev->data; | 1344 | struct qmi_wwan_state *info; |
| 1344 | struct list_head *iter; | 1345 | struct list_head *iter; |
| 1345 | struct net_device *ldev; | 1346 | struct net_device *ldev; |
| 1346 | 1347 | ||
| 1348 | /* called twice if separate control and data intf */ | ||
| 1349 | if (!dev) | ||
| 1350 | return; | ||
| 1351 | info = (void *)&dev->data; | ||
| 1347 | if (info->flags & QMI_WWAN_FLAG_MUX) { | 1352 | if (info->flags & QMI_WWAN_FLAG_MUX) { |
| 1348 | if (!rtnl_trylock()) { | 1353 | if (!rtnl_trylock()) { |
| 1349 | restart_syscall(); | 1354 | restart_syscall(); |
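The qmi_wwan hunk handles a disconnect callback that can run once per bound interface: after the first call has torn everything down and cleared the interface data, later calls must see NULL and bail out. A minimal sketch of that guard; demo_priv is a stand-in for the driver's private state.

#include <linux/usb.h>

struct demo_priv {
        int dummy;                 /* placeholder for real per-device state */
};

static void demo_disconnect(struct usb_interface *intf)
{
        struct demo_priv *priv = usb_get_intfdata(intf);

        /* Already handled via the sibling (control or data) interface. */
        if (!priv)
                return;

        /* ...normal teardown of priv goes here... */

        usb_set_intfdata(intf, NULL);
}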
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 99a26a9efec1..b06169ea60dc 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -889,21 +889,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, | |||
| 889 | 889 | ||
| 890 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; | 890 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; |
| 891 | buf += headroom; /* advance address leaving hole at front of pkt */ | 891 | buf += headroom; /* advance address leaving hole at front of pkt */ |
| 892 | ctx = (void *)(unsigned long)len; | ||
| 893 | get_page(alloc_frag->page); | 892 | get_page(alloc_frag->page); |
| 894 | alloc_frag->offset += len + headroom; | 893 | alloc_frag->offset += len + headroom; |
| 895 | hole = alloc_frag->size - alloc_frag->offset; | 894 | hole = alloc_frag->size - alloc_frag->offset; |
| 896 | if (hole < len + headroom) { | 895 | if (hole < len + headroom) { |
| 897 | /* To avoid internal fragmentation, if there is very likely not | 896 | /* To avoid internal fragmentation, if there is very likely not |
| 898 | * enough space for another buffer, add the remaining space to | 897 | * enough space for another buffer, add the remaining space to |
| 899 | * the current buffer. This extra space is not included in | 898 | * the current buffer. |
| 900 | * the truesize stored in ctx. | ||
| 901 | */ | 899 | */ |
| 902 | len += hole; | 900 | len += hole; |
| 903 | alloc_frag->offset += hole; | 901 | alloc_frag->offset += hole; |
| 904 | } | 902 | } |
| 905 | 903 | ||
| 906 | sg_init_one(rq->sg, buf, len); | 904 | sg_init_one(rq->sg, buf, len); |
| 905 | ctx = (void *)(unsigned long)len; | ||
| 907 | err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); | 906 | err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); |
| 908 | if (err < 0) | 907 | if (err < 0) |
| 909 | put_page(virt_to_head_page(buf)); | 908 | put_page(virt_to_head_page(buf)); |
| @@ -1059,7 +1058,7 @@ static void free_old_xmit_skbs(struct send_queue *sq) | |||
| 1059 | bytes += skb->len; | 1058 | bytes += skb->len; |
| 1060 | packets++; | 1059 | packets++; |
| 1061 | 1060 | ||
| 1062 | dev_kfree_skb_any(skb); | 1061 | dev_consume_skb_any(skb); |
| 1063 | } | 1062 | } |
| 1064 | 1063 | ||
| 1065 | /* Avoid overhead when no packets have been processed | 1064 | /* Avoid overhead when no packets have been processed |
| @@ -2743,9 +2742,9 @@ module_init(virtio_net_driver_init); | |||
| 2743 | 2742 | ||
| 2744 | static __exit void virtio_net_driver_exit(void) | 2743 | static __exit void virtio_net_driver_exit(void) |
| 2745 | { | 2744 | { |
| 2745 | unregister_virtio_driver(&virtio_net_driver); | ||
| 2746 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); | 2746 | cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); |
| 2747 | cpuhp_remove_multi_state(virtionet_online); | 2747 | cpuhp_remove_multi_state(virtionet_online); |
| 2748 | unregister_virtio_driver(&virtio_net_driver); | ||
| 2749 | } | 2748 | } |
| 2750 | module_exit(virtio_net_driver_exit); | 2749 | module_exit(virtio_net_driver_exit); |
| 2751 | 2750 | ||
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 96aa7e6cf214..e17baac70f43 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk, | |||
| 623 | 623 | ||
| 624 | out: | 624 | out: |
| 625 | skb_gro_remcsum_cleanup(skb, &grc); | 625 | skb_gro_remcsum_cleanup(skb, &grc); |
| 626 | skb->remcsum_offload = 0; | ||
| 626 | NAPI_GRO_CB(skb)->flush |= flush; | 627 | NAPI_GRO_CB(skb)->flush |= flush; |
| 627 | 628 | ||
| 628 | return pp; | 629 | return pp; |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 2153e8062b4c..5cc3a07dda9e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | |||
| @@ -214,7 +214,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, | |||
| 214 | 214 | ||
| 215 | /* Make sure there's enough writeable headroom */ | 215 | /* Make sure there's enough writeable headroom */ |
| 216 | if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) { | 216 | if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) { |
| 217 | head_delta = drvr->hdrlen - skb_headroom(skb); | 217 | head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0); |
| 218 | 218 | ||
| 219 | brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n", | 219 | brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n", |
| 220 | brcmf_ifname(ifp), head_delta); | 220 | brcmf_ifname(ifp), head_delta); |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index d21258d277ce..f1b60740e020 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | |||
| @@ -159,8 +159,10 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) | |||
| 159 | 159 | ||
| 160 | brcmf_feat_firmware_capabilities(ifp); | 160 | brcmf_feat_firmware_capabilities(ifp); |
| 161 | memset(&gscan_cfg, 0, sizeof(gscan_cfg)); | 161 | memset(&gscan_cfg, 0, sizeof(gscan_cfg)); |
| 162 | brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN, "pfn_gscan_cfg", | 162 | if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID) |
| 163 | &gscan_cfg, sizeof(gscan_cfg)); | 163 | brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN, |
| 164 | "pfn_gscan_cfg", | ||
| 165 | &gscan_cfg, sizeof(gscan_cfg)); | ||
| 164 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn"); | 166 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn"); |
| 165 | if (drvr->bus_if->wowl_supported) | 167 | if (drvr->bus_if->wowl_supported) |
| 166 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); | 168 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index fbcbb4325936..f3556122c6ac 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | |||
| @@ -2053,12 +2053,13 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt) | |||
| 2053 | atomic_inc(&stats->pktcow_failed); | 2053 | atomic_inc(&stats->pktcow_failed); |
| 2054 | return -ENOMEM; | 2054 | return -ENOMEM; |
| 2055 | } | 2055 | } |
| 2056 | head_pad = 0; | ||
| 2056 | } | 2057 | } |
| 2057 | skb_push(pkt, head_pad); | 2058 | skb_push(pkt, head_pad); |
| 2058 | dat_buf = (u8 *)(pkt->data); | 2059 | dat_buf = (u8 *)(pkt->data); |
| 2059 | } | 2060 | } |
| 2060 | memset(dat_buf, 0, head_pad + bus->tx_hdrlen); | 2061 | memset(dat_buf, 0, head_pad + bus->tx_hdrlen); |
| 2061 | return 0; | 2062 | return head_pad; |
| 2062 | } | 2063 | } |
| 2063 | 2064 | ||
| 2064 | /** | 2065 | /** |
| @@ -4174,11 +4175,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) | |||
| 4174 | goto fail; | 4175 | goto fail; |
| 4175 | } | 4176 | } |
| 4176 | 4177 | ||
| 4177 | /* allocate scatter-gather table. sg support | ||
| 4178 | * will be disabled upon allocation failure. | ||
| 4179 | */ | ||
| 4180 | brcmf_sdiod_sgtable_alloc(bus->sdiodev); | ||
| 4181 | |||
| 4182 | /* Query the F2 block size, set roundup accordingly */ | 4178 | /* Query the F2 block size, set roundup accordingly */ |
| 4183 | bus->blocksize = bus->sdiodev->func[2]->cur_blksize; | 4179 | bus->blocksize = bus->sdiodev->func[2]->cur_blksize; |
| 4184 | bus->roundup = min(max_roundup, bus->blocksize); | 4180 | bus->roundup = min(max_roundup, bus->blocksize); |
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index b4ecd1fe1374..97208ce19f92 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c | |||
| @@ -154,7 +154,7 @@ static const struct iwl_tt_params iwl9000_tt_params = { | |||
| 154 | const struct iwl_cfg iwl9160_2ac_cfg = { | 154 | const struct iwl_cfg iwl9160_2ac_cfg = { |
| 155 | .name = "Intel(R) Dual Band Wireless AC 9160", | 155 | .name = "Intel(R) Dual Band Wireless AC 9160", |
| 156 | .fw_name_pre = IWL9260A_FW_PRE, | 156 | .fw_name_pre = IWL9260A_FW_PRE, |
| 157 | .fw_name_pre_next_step = IWL9260B_FW_PRE, | 157 | .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, |
| 158 | IWL_DEVICE_9000, | 158 | IWL_DEVICE_9000, |
| 159 | .ht_params = &iwl9000_ht_params, | 159 | .ht_params = &iwl9000_ht_params, |
| 160 | .nvm_ver = IWL9000_NVM_VERSION, | 160 | .nvm_ver = IWL9000_NVM_VERSION, |
| @@ -165,7 +165,7 @@ const struct iwl_cfg iwl9160_2ac_cfg = { | |||
| 165 | const struct iwl_cfg iwl9260_2ac_cfg = { | 165 | const struct iwl_cfg iwl9260_2ac_cfg = { |
| 166 | .name = "Intel(R) Dual Band Wireless AC 9260", | 166 | .name = "Intel(R) Dual Band Wireless AC 9260", |
| 167 | .fw_name_pre = IWL9260A_FW_PRE, | 167 | .fw_name_pre = IWL9260A_FW_PRE, |
| 168 | .fw_name_pre_next_step = IWL9260B_FW_PRE, | 168 | .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, |
| 169 | IWL_DEVICE_9000, | 169 | IWL_DEVICE_9000, |
| 170 | .ht_params = &iwl9000_ht_params, | 170 | .ht_params = &iwl9000_ht_params, |
| 171 | .nvm_ver = IWL9000_NVM_VERSION, | 171 | .nvm_ver = IWL9000_NVM_VERSION, |
| @@ -176,7 +176,7 @@ const struct iwl_cfg iwl9260_2ac_cfg = { | |||
| 176 | const struct iwl_cfg iwl9270_2ac_cfg = { | 176 | const struct iwl_cfg iwl9270_2ac_cfg = { |
| 177 | .name = "Intel(R) Dual Band Wireless AC 9270", | 177 | .name = "Intel(R) Dual Band Wireless AC 9270", |
| 178 | .fw_name_pre = IWL9260A_FW_PRE, | 178 | .fw_name_pre = IWL9260A_FW_PRE, |
| 179 | .fw_name_pre_next_step = IWL9260B_FW_PRE, | 179 | .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, |
| 180 | IWL_DEVICE_9000, | 180 | IWL_DEVICE_9000, |
| 181 | .ht_params = &iwl9000_ht_params, | 181 | .ht_params = &iwl9000_ht_params, |
| 182 | .nvm_ver = IWL9000_NVM_VERSION, | 182 | .nvm_ver = IWL9000_NVM_VERSION, |
| @@ -186,8 +186,8 @@ const struct iwl_cfg iwl9270_2ac_cfg = { | |||
| 186 | 186 | ||
| 187 | const struct iwl_cfg iwl9460_2ac_cfg = { | 187 | const struct iwl_cfg iwl9460_2ac_cfg = { |
| 188 | .name = "Intel(R) Dual Band Wireless AC 9460", | 188 | .name = "Intel(R) Dual Band Wireless AC 9460", |
| 189 | .fw_name_pre = IWL9000_FW_PRE, | 189 | .fw_name_pre = IWL9260A_FW_PRE, |
| 190 | .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, | 190 | .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, |
| 191 | IWL_DEVICE_9000, | 191 | IWL_DEVICE_9000, |
| 192 | .ht_params = &iwl9000_ht_params, | 192 | .ht_params = &iwl9000_ht_params, |
| 193 | .nvm_ver = IWL9000_NVM_VERSION, | 193 | .nvm_ver = IWL9000_NVM_VERSION, |
| @@ -198,8 +198,8 @@ const struct iwl_cfg iwl9460_2ac_cfg = { | |||
| 198 | 198 | ||
| 199 | const struct iwl_cfg iwl9560_2ac_cfg = { | 199 | const struct iwl_cfg iwl9560_2ac_cfg = { |
| 200 | .name = "Intel(R) Dual Band Wireless AC 9560", | 200 | .name = "Intel(R) Dual Band Wireless AC 9560", |
| 201 | .fw_name_pre = IWL9000_FW_PRE, | 201 | .fw_name_pre = IWL9260A_FW_PRE, |
| 202 | .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, | 202 | .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, |
| 203 | IWL_DEVICE_9000, | 203 | IWL_DEVICE_9000, |
| 204 | .ht_params = &iwl9000_ht_params, | 204 | .ht_params = &iwl9000_ht_params, |
| 205 | .nvm_ver = IWL9000_NVM_VERSION, | 205 | .nvm_ver = IWL9000_NVM_VERSION, |
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index adaa2f0097cc..fb40ddfced99 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c | |||
| @@ -1189,11 +1189,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) | |||
| 1189 | next_reclaimed; | 1189 | next_reclaimed; |
| 1190 | IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", | 1190 | IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", |
| 1191 | next_reclaimed); | 1191 | next_reclaimed); |
| 1192 | iwlagn_check_ratid_empty(priv, sta_id, tid); | ||
| 1192 | } | 1193 | } |
| 1193 | 1194 | ||
| 1194 | iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); | 1195 | iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); |
| 1195 | 1196 | ||
| 1196 | iwlagn_check_ratid_empty(priv, sta_id, tid); | ||
| 1197 | freed = 0; | 1197 | freed = 0; |
| 1198 | 1198 | ||
| 1199 | /* process frames */ | 1199 | /* process frames */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 0fa8c473f1e2..c73a6438ce8f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h | |||
| @@ -328,6 +328,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; | |||
| 328 | * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger | 328 | * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger |
| 329 | * command size (command version 4) that supports toggling ACK TX | 329 | * command size (command version 4) that supports toggling ACK TX |
| 330 | * power reduction. | 330 | * power reduction. |
| 331 | * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload | ||
| 331 | * | 332 | * |
| 332 | * @NUM_IWL_UCODE_TLV_CAPA: number of bits used | 333 | * @NUM_IWL_UCODE_TLV_CAPA: number of bits used |
| 333 | */ | 334 | */ |
| @@ -373,6 +374,7 @@ enum iwl_ucode_tlv_capa { | |||
| 373 | IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, | 374 | IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, |
| 374 | IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, | 375 | IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, |
| 375 | IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84, | 376 | IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84, |
| 377 | IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, | ||
| 376 | 378 | ||
| 377 | NUM_IWL_UCODE_TLV_CAPA | 379 | NUM_IWL_UCODE_TLV_CAPA |
| 378 | #ifdef __CHECKER__ | 380 | #ifdef __CHECKER__ |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index c52623cb7c2a..d19c74827fbb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h | |||
| @@ -276,10 +276,10 @@ struct iwl_pwr_tx_backoff { | |||
| 276 | * @fw_name_pre: Firmware filename prefix. The api version and extension | 276 | * @fw_name_pre: Firmware filename prefix. The api version and extension |
| 277 | * (.ucode) will be added to filename before loading from disk. The | 277 | * (.ucode) will be added to filename before loading from disk. The |
| 278 | * filename is constructed as fw_name_pre<api>.ucode. | 278 | * filename is constructed as fw_name_pre<api>.ucode. |
| 279 | * @fw_name_pre_next_step: same as @fw_name_pre, only for next step | 279 | * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps |
| 280 | * (if supported) | 280 | * (if supported) |
| 281 | * @fw_name_pre_rf_next_step: same as @fw_name_pre_next_step, only for rf next | 281 | * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf |
| 282 | * step. Supported only in integrated solutions. | 282 | * next step. Supported only in integrated solutions. |
| 283 | * @ucode_api_max: Highest version of uCode API supported by driver. | 283 | * @ucode_api_max: Highest version of uCode API supported by driver. |
| 284 | * @ucode_api_min: Lowest version of uCode API supported by driver. | 284 | * @ucode_api_min: Lowest version of uCode API supported by driver. |
| 285 | * @max_inst_size: The maximal length of the fw inst section | 285 | * @max_inst_size: The maximal length of the fw inst section |
| @@ -330,7 +330,7 @@ struct iwl_cfg { | |||
| 330 | /* params specific to an individual device within a device family */ | 330 | /* params specific to an individual device within a device family */ |
| 331 | const char *name; | 331 | const char *name; |
| 332 | const char *fw_name_pre; | 332 | const char *fw_name_pre; |
| 333 | const char *fw_name_pre_next_step; | 333 | const char *fw_name_pre_b_or_c_step; |
| 334 | const char *fw_name_pre_rf_next_step; | 334 | const char *fw_name_pre_rf_next_step; |
| 335 | /* params not likely to change within a device family */ | 335 | /* params not likely to change within a device family */ |
| 336 | const struct iwl_base_params *base_params; | 336 | const struct iwl_base_params *base_params; |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h index 545d14b0bc92..f5c1127253cb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h | |||
| @@ -55,8 +55,8 @@ static inline bool iwl_trace_data(struct sk_buff *skb) | |||
| 55 | /* also account for the RFC 1042 header, of course */ | 55 | /* also account for the RFC 1042 header, of course */ |
| 56 | offs += 6; | 56 | offs += 6; |
| 57 | 57 | ||
| 58 | return skb->len > offs + 2 && | 58 | return skb->len <= offs + 2 || |
| 59 | *(__be16 *)(skb->data + offs) == cpu_to_be16(ETH_P_PAE); | 59 | *(__be16 *)(skb->data + offs) != cpu_to_be16(ETH_P_PAE); |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, | 62 | static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 6fdb5921e17f..4e0f86fe0a6f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c | |||
| @@ -216,8 +216,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) | |||
| 216 | const char *fw_pre_name; | 216 | const char *fw_pre_name; |
| 217 | 217 | ||
| 218 | if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 && | 218 | if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 && |
| 219 | CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP) | 219 | (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP || |
| 220 | fw_pre_name = cfg->fw_name_pre_next_step; | 220 | CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP)) |
| 221 | fw_pre_name = cfg->fw_name_pre_b_or_c_step; | ||
| 221 | else if (drv->trans->cfg->integrated && | 222 | else if (drv->trans->cfg->integrated && |
| 222 | CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP && | 223 | CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP && |
| 223 | cfg->fw_name_pre_rf_next_step) | 224 | cfg->fw_name_pre_rf_next_step) |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 5c08f4d40f6a..3ee6767392b6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | |||
| @@ -785,7 +785,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 785 | int num_of_ch, __le32 *channels, u16 fw_mcc) | 785 | int num_of_ch, __le32 *channels, u16 fw_mcc) |
| 786 | { | 786 | { |
| 787 | int ch_idx; | 787 | int ch_idx; |
| 788 | u16 ch_flags, prev_ch_flags = 0; | 788 | u16 ch_flags; |
| 789 | u32 reg_rule_flags, prev_reg_rule_flags = 0; | ||
| 789 | const u8 *nvm_chan = cfg->ext_nvm ? | 790 | const u8 *nvm_chan = cfg->ext_nvm ? |
| 790 | iwl_ext_nvm_channels : iwl_nvm_channels; | 791 | iwl_ext_nvm_channels : iwl_nvm_channels; |
| 791 | struct ieee80211_regdomain *regd; | 792 | struct ieee80211_regdomain *regd; |
| @@ -834,8 +835,11 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 834 | continue; | 835 | continue; |
| 835 | } | 836 | } |
| 836 | 837 | ||
| 838 | reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, | ||
| 839 | ch_flags, cfg); | ||
| 840 | |||
| 837 | /* we can't continue the same rule */ | 841 | /* we can't continue the same rule */ |
| 838 | if (ch_idx == 0 || prev_ch_flags != ch_flags || | 842 | if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags || |
| 839 | center_freq - prev_center_freq > 20) { | 843 | center_freq - prev_center_freq > 20) { |
| 840 | valid_rules++; | 844 | valid_rules++; |
| 841 | new_rule = true; | 845 | new_rule = true; |
| @@ -854,18 +858,17 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 854 | rule->power_rule.max_eirp = | 858 | rule->power_rule.max_eirp = |
| 855 | DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); | 859 | DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); |
| 856 | 860 | ||
| 857 | rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, | 861 | rule->flags = reg_rule_flags; |
| 858 | ch_flags, cfg); | ||
| 859 | 862 | ||
| 860 | /* rely on auto-calculation to merge BW of contiguous chans */ | 863 | /* rely on auto-calculation to merge BW of contiguous chans */ |
| 861 | rule->flags |= NL80211_RRF_AUTO_BW; | 864 | rule->flags |= NL80211_RRF_AUTO_BW; |
| 862 | rule->freq_range.max_bandwidth_khz = 0; | 865 | rule->freq_range.max_bandwidth_khz = 0; |
| 863 | 866 | ||
| 864 | prev_ch_flags = ch_flags; | ||
| 865 | prev_center_freq = center_freq; | 867 | prev_center_freq = center_freq; |
| 868 | prev_reg_rule_flags = reg_rule_flags; | ||
| 866 | 869 | ||
| 867 | IWL_DEBUG_DEV(dev, IWL_DL_LAR, | 870 | IWL_DEBUG_DEV(dev, IWL_DL_LAR, |
| 868 | "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n", | 871 | "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x) reg_flags 0x%x: %s\n", |
| 869 | center_freq, | 872 | center_freq, |
| 870 | band == NL80211_BAND_5GHZ ? "5.2" : "2.4", | 873 | band == NL80211_BAND_5GHZ ? "5.2" : "2.4", |
| 871 | CHECK_AND_PRINT_I(VALID), | 874 | CHECK_AND_PRINT_I(VALID), |
| @@ -877,10 +880,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
| 877 | CHECK_AND_PRINT_I(160MHZ), | 880 | CHECK_AND_PRINT_I(160MHZ), |
| 878 | CHECK_AND_PRINT_I(INDOOR_ONLY), | 881 | CHECK_AND_PRINT_I(INDOOR_ONLY), |
| 879 | CHECK_AND_PRINT_I(GO_CONCURRENT), | 882 | CHECK_AND_PRINT_I(GO_CONCURRENT), |
| 880 | ch_flags, | 883 | ch_flags, reg_rule_flags, |
| 881 | ((ch_flags & NVM_CHANNEL_ACTIVE) && | 884 | ((ch_flags & NVM_CHANNEL_ACTIVE) && |
| 882 | !(ch_flags & NVM_CHANNEL_RADAR)) | 885 | !(ch_flags & NVM_CHANNEL_RADAR)) |
| 883 | ? "" : "not "); | 886 | ? "Ad-Hoc" : ""); |
| 884 | } | 887 | } |
| 885 | 888 | ||
| 886 | regd->n_reg_rules = valid_rules; | 889 | regd->n_reg_rules = valid_rules; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 79e7a7a285dc..82863e9273eb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c | |||
| @@ -1275,8 +1275,10 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm) | |||
| 1275 | 1275 | ||
| 1276 | entry = &wifi_pkg->package.elements[idx++]; | 1276 | entry = &wifi_pkg->package.elements[idx++]; |
| 1277 | if ((entry->type != ACPI_TYPE_INTEGER) || | 1277 | if ((entry->type != ACPI_TYPE_INTEGER) || |
| 1278 | (entry->integer.value > U8_MAX)) | 1278 | (entry->integer.value > U8_MAX)) { |
| 1279 | return -EINVAL; | 1279 | ret = -EINVAL; |
| 1280 | goto out_free; | ||
| 1281 | } | ||
| 1280 | 1282 | ||
| 1281 | mvm->geo_profiles[i].values[j] = entry->integer.value; | 1283 | mvm->geo_profiles[i].values[j] = entry->integer.value; |
| 1282 | } | 1284 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index bcde1ba0f1c8..ce901be5fba8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
| @@ -1084,7 +1084,13 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) | |||
| 1084 | 1084 | ||
| 1085 | lockdep_assert_held(&mvm->mutex); | 1085 | lockdep_assert_held(&mvm->mutex); |
| 1086 | 1086 | ||
| 1087 | if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { | 1087 | if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { |
| 1088 | /* | ||
| 1089 | * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART | ||
| 1090 | * so later code will - from now on - see that we're doing it. | ||
| 1091 | */ | ||
| 1092 | set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); | ||
| 1093 | clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); | ||
| 1088 | /* Clean up some internal and mac80211 state on restart */ | 1094 | /* Clean up some internal and mac80211 state on restart */ |
| 1089 | iwl_mvm_restart_cleanup(mvm); | 1095 | iwl_mvm_restart_cleanup(mvm); |
| 1090 | } else { | 1096 | } else { |
| @@ -2591,8 +2597,18 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, | |||
| 2591 | spin_lock_bh(&mvm_sta->lock); | 2597 | spin_lock_bh(&mvm_sta->lock); |
| 2592 | for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { | 2598 | for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { |
| 2593 | tid_data = &mvm_sta->tid_data[i]; | 2599 | tid_data = &mvm_sta->tid_data[i]; |
| 2594 | while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) | 2600 | |
| 2601 | while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) { | ||
| 2602 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
| 2603 | |||
| 2604 | /* | ||
| 2605 | * The first deferred frame should've stopped the MAC | ||
| 2606 | * queues, so we should never get a second deferred | ||
| 2607 | * frame for the RA/TID. | ||
| 2608 | */ | ||
| 2609 | iwl_mvm_start_mac_queues(mvm, info->hw_queue); | ||
| 2595 | ieee80211_free_txskb(mvm->hw, skb); | 2610 | ieee80211_free_txskb(mvm->hw, skb); |
| 2611 | } | ||
| 2596 | } | 2612 | } |
| 2597 | spin_unlock_bh(&mvm_sta->lock); | 2613 | spin_unlock_bh(&mvm_sta->lock); |
| 2598 | } | 2614 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index eaacfaf37206..ddd8719f27b8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | |||
| @@ -1090,6 +1090,7 @@ struct iwl_mvm { | |||
| 1090 | * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted | 1090 | * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted |
| 1091 | * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active | 1091 | * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active |
| 1092 | * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running | 1092 | * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running |
| 1093 | * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested | ||
| 1093 | * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active | 1094 | * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active |
| 1094 | * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3 | 1095 | * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3 |
| 1095 | * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running | 1096 | * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running |
| @@ -1101,6 +1102,7 @@ enum iwl_mvm_status { | |||
| 1101 | IWL_MVM_STATUS_HW_RFKILL, | 1102 | IWL_MVM_STATUS_HW_RFKILL, |
| 1102 | IWL_MVM_STATUS_HW_CTKILL, | 1103 | IWL_MVM_STATUS_HW_CTKILL, |
| 1103 | IWL_MVM_STATUS_ROC_RUNNING, | 1104 | IWL_MVM_STATUS_ROC_RUNNING, |
| 1105 | IWL_MVM_STATUS_HW_RESTART_REQUESTED, | ||
| 1104 | IWL_MVM_STATUS_IN_HW_RESTART, | 1106 | IWL_MVM_STATUS_IN_HW_RESTART, |
| 1105 | IWL_MVM_STATUS_IN_D0I3, | 1107 | IWL_MVM_STATUS_IN_D0I3, |
| 1106 | IWL_MVM_STATUS_ROC_AUX_RUNNING, | 1108 | IWL_MVM_STATUS_ROC_AUX_RUNNING, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 4d1188b8736a..9c175d5e9d67 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
| @@ -1235,9 +1235,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) | |||
| 1235 | */ | 1235 | */ |
| 1236 | if (!mvm->fw_restart && fw_error) { | 1236 | if (!mvm->fw_restart && fw_error) { |
| 1237 | iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, | 1237 | iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, |
| 1238 | NULL); | 1238 | NULL); |
| 1239 | } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, | 1239 | } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { |
| 1240 | &mvm->status)) { | ||
| 1241 | struct iwl_mvm_reprobe *reprobe; | 1240 | struct iwl_mvm_reprobe *reprobe; |
| 1242 | 1241 | ||
| 1243 | IWL_ERR(mvm, | 1242 | IWL_ERR(mvm, |
| @@ -1268,6 +1267,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) | |||
| 1268 | 1267 | ||
| 1269 | if (fw_error && mvm->fw_restart > 0) | 1268 | if (fw_error && mvm->fw_restart > 0) |
| 1270 | mvm->fw_restart--; | 1269 | mvm->fw_restart--; |
| 1270 | set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); | ||
| 1271 | ieee80211_restart_hw(mvm->hw); | 1271 | ieee80211_restart_hw(mvm->hw); |
| 1272 | } | 1272 | } |
| 1273 | } | 1273 | } |
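The mvm changes split HW restart handling into two status bits: the error path only records that a restart was requested, and the start path converts that into "in restart" exactly once before doing restart-specific cleanup. A small sketch of the two-bit handshake with demo_* names; the real driver keeps these bits in mvm->status and calls ieee80211_restart_hw() from the error path.

#include <linux/bitops.h>

enum demo_status_bit {
        DEMO_STATUS_HW_RESTART_REQUESTED,
        DEMO_STATUS_IN_HW_RESTART,
};

static unsigned long demo_status;

/* Error/recovery path: only note the request, then kick the restart. */
static void demo_request_restart(void)
{
        set_bit(DEMO_STATUS_HW_RESTART_REQUESTED, &demo_status);
        /* ieee80211_restart_hw(hw) would be called here */
}

/* Start path: promote "requested" to "in progress" exactly once. */
static void demo_mac_start(void)
{
        if (test_bit(DEMO_STATUS_HW_RESTART_REQUESTED, &demo_status)) {
                set_bit(DEMO_STATUS_IN_HW_RESTART, &demo_status);
                clear_bit(DEMO_STATUS_HW_RESTART_REQUESTED, &demo_status);
                /* restart-specific cleanup runs here */
        }
}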
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 65beca3a457a..8999a1199d60 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c | |||
| @@ -1291,7 +1291,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 1291 | * first index into rate scale table. | 1291 | * first index into rate scale table. |
| 1292 | */ | 1292 | */ |
| 1293 | if (info->flags & IEEE80211_TX_STAT_AMPDU) { | 1293 | if (info->flags & IEEE80211_TX_STAT_AMPDU) { |
| 1294 | rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index, | 1294 | rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index, |
| 1295 | info->status.ampdu_len, | 1295 | info->status.ampdu_len, |
| 1296 | info->status.ampdu_ack_len, | 1296 | info->status.ampdu_ack_len, |
| 1297 | reduced_txp); | 1297 | reduced_txp); |
| @@ -1312,7 +1312,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 1312 | if (info->status.ampdu_ack_len == 0) | 1312 | if (info->status.ampdu_ack_len == 0) |
| 1313 | info->status.ampdu_len = 1; | 1313 | info->status.ampdu_len = 1; |
| 1314 | 1314 | ||
| 1315 | rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index, | 1315 | rs_collect_tlc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index, |
| 1316 | info->status.ampdu_len, | 1316 | info->status.ampdu_len, |
| 1317 | info->status.ampdu_ack_len); | 1317 | info->status.ampdu_ack_len); |
| 1318 | 1318 | ||
| @@ -1348,11 +1348,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 1348 | continue; | 1348 | continue; |
| 1349 | 1349 | ||
| 1350 | rs_collect_tpc_data(mvm, lq_sta, tmp_tbl, | 1350 | rs_collect_tpc_data(mvm, lq_sta, tmp_tbl, |
| 1351 | lq_rate.index, 1, | 1351 | tx_resp_rate.index, 1, |
| 1352 | i < retries ? 0 : legacy_success, | 1352 | i < retries ? 0 : legacy_success, |
| 1353 | reduced_txp); | 1353 | reduced_txp); |
| 1354 | rs_collect_tlc_data(mvm, lq_sta, tmp_tbl, | 1354 | rs_collect_tlc_data(mvm, lq_sta, tmp_tbl, |
| 1355 | lq_rate.index, 1, | 1355 | tx_resp_rate.index, 1, |
| 1356 | i < retries ? 0 : legacy_success); | 1356 | i < retries ? 0 : legacy_success); |
| 1357 | } | 1357 | } |
| 1358 | 1358 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index f3e608196369..71c8b800ffa9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | |||
| @@ -636,9 +636,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, | |||
| 636 | 636 | ||
| 637 | baid_data = rcu_dereference(mvm->baid_map[baid]); | 637 | baid_data = rcu_dereference(mvm->baid_map[baid]); |
| 638 | if (!baid_data) { | 638 | if (!baid_data) { |
| 639 | WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN), | 639 | IWL_DEBUG_RX(mvm, |
| 640 | "Received baid %d, but no data exists for this BAID\n", | 640 | "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n", |
| 641 | baid); | 641 | baid, reorder); |
| 642 | return false; | 642 | return false; |
| 643 | } | 643 | } |
| 644 | 644 | ||
| @@ -759,7 +759,9 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, | |||
| 759 | 759 | ||
| 760 | data = rcu_dereference(mvm->baid_map[baid]); | 760 | data = rcu_dereference(mvm->baid_map[baid]); |
| 761 | if (!data) { | 761 | if (!data) { |
| 762 | WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN)); | 762 | IWL_DEBUG_RX(mvm, |
| 763 | "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n", | ||
| 764 | baid, reorder_data); | ||
| 763 | goto out; | 765 | goto out; |
| 764 | } | 766 | } |
| 765 | 767 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 4df5f13fcdae..027ee5e72172 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c | |||
| @@ -121,7 +121,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
| 121 | .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), | 121 | .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), |
| 122 | .add_modify = update ? 1 : 0, | 122 | .add_modify = update ? 1 : 0, |
| 123 | .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | | 123 | .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | |
| 124 | STA_FLG_MIMO_EN_MSK), | 124 | STA_FLG_MIMO_EN_MSK | |
| 125 | STA_FLG_RTS_MIMO_PROT), | ||
| 125 | .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg), | 126 | .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg), |
| 126 | }; | 127 | }; |
| 127 | int ret; | 128 | int ret; |
| @@ -277,9 +278,21 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data) | |||
| 277 | 278 | ||
| 278 | /* Timer expired */ | 279 | /* Timer expired */ |
| 279 | sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); | 280 | sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); |
| 281 | |||
| 282 | /* | ||
| 283 | * sta should be valid unless the following happens: | ||
| 284 | * The firmware asserts, which triggers a reconfig flow, but | ||
| 285 | * the reconfig fails before we set the pointer to sta into | ||
| 286 | * the fw_id_to_mac_id pointer table. Mac80211 can't stop | ||
| 287 | * A-MPDU and hence the timer continues to run. Then, the | ||
| 288 | * timer expires and sta is NULL. | ||
| 289 | */ | ||
| 290 | if (!sta) | ||
| 291 | goto unlock; | ||
| 292 | |||
| 280 | mvm_sta = iwl_mvm_sta_from_mac80211(sta); | 293 | mvm_sta = iwl_mvm_sta_from_mac80211(sta); |
| 281 | ieee80211_stop_rx_ba_session_offl(mvm_sta->vif, | 294 | ieee80211_rx_ba_timer_expired(mvm_sta->vif, |
| 282 | sta->addr, ba_data->tid); | 295 | sta->addr, ba_data->tid); |
| 283 | unlock: | 296 | unlock: |
| 284 | rcu_read_unlock(); | 297 | rcu_read_unlock(); |
| 285 | } | 298 | } |
| @@ -2015,7 +2028,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
| 2015 | IWL_MAX_TID_COUNT, | 2028 | IWL_MAX_TID_COUNT, |
| 2016 | wdg_timeout); | 2029 | wdg_timeout); |
| 2017 | 2030 | ||
| 2018 | if (vif->type == NL80211_IFTYPE_AP) | 2031 | if (vif->type == NL80211_IFTYPE_AP || |
| 2032 | vif->type == NL80211_IFTYPE_ADHOC) | ||
| 2019 | mvm->probe_queue = queue; | 2033 | mvm->probe_queue = queue; |
| 2020 | else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) | 2034 | else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) |
| 2021 | mvm->p2p_dev_queue = queue; | 2035 | mvm->p2p_dev_queue = queue; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 60360ed73f26..5fcc9dd6be56 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
| @@ -185,8 +185,14 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
| 185 | else | 185 | else |
| 186 | udp_hdr(skb)->check = 0; | 186 | udp_hdr(skb)->check = 0; |
| 187 | 187 | ||
| 188 | /* mac header len should include IV, size is in words */ | 188 | /* |
| 189 | if (info->control.hw_key) | 189 | * mac header len should include IV, size is in words unless |
| 190 | * the IV is added by the firmware like in WEP. | ||
| 191 | * In new Tx API, the IV is always added by the firmware. | ||
| 192 | */ | ||
| 193 | if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key && | ||
| 194 | info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 && | ||
| 195 | info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104) | ||
| 190 | mh_len += info->control.hw_key->iv_len; | 196 | mh_len += info->control.hw_key->iv_len; |
| 191 | mh_len /= 2; | 197 | mh_len /= 2; |
| 192 | offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; | 198 | offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE; |
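As a rough illustration of the header-length math in this hunk (the sizes below are assumed, not taken from the patch): a 3-address QoS data frame carries a 26-byte MAC header, and a driver-inserted CCMP IV adds 8 bytes, so the firmware must skip 34 bytes, which the offload_assist field encodes as 17 two-byte words. A minimal sketch:

/* Hypothetical helper mirroring the logic above; all values are examples. */
static u16 example_mh_words(u16 hdr_len, u16 iv_len, bool fw_adds_iv)
{
	u16 mh_len = hdr_len;		/* e.g. 26 for a 3-address QoS data frame */

	if (!fw_adds_iv)
		mh_len += iv_len;	/* e.g. 8 for CCMP -> 34 bytes */

	return mh_len / 2;		/* firmware expects 16-bit words -> 17 */
}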
| @@ -1815,6 +1821,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) | |||
| 1815 | struct iwl_mvm_tid_data *tid_data; | 1821 | struct iwl_mvm_tid_data *tid_data; |
| 1816 | struct iwl_mvm_sta *mvmsta; | 1822 | struct iwl_mvm_sta *mvmsta; |
| 1817 | 1823 | ||
| 1824 | ba_info.flags = IEEE80211_TX_STAT_AMPDU; | ||
| 1825 | |||
| 1818 | if (iwl_mvm_has_new_tx_api(mvm)) { | 1826 | if (iwl_mvm_has_new_tx_api(mvm)) { |
| 1819 | struct iwl_mvm_compressed_ba_notif *ba_res = | 1827 | struct iwl_mvm_compressed_ba_notif *ba_res = |
| 1820 | (void *)pkt->data; | 1828 | (void *)pkt->data; |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index f16c1bb9bf94..84f4ba01e14f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c | |||
| @@ -510,9 +510,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
| 510 | 510 | ||
| 511 | /* 9000 Series */ | 511 | /* 9000 Series */ |
| 512 | {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, | 512 | {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, |
| 513 | {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, | ||
| 514 | {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, | ||
| 513 | {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)}, | 515 | {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)}, |
| 514 | {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, | 516 | {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, |
| 517 | {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)}, | ||
| 518 | {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, | ||
| 519 | {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, | ||
| 520 | {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)}, | ||
| 521 | {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)}, | ||
| 515 | {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, | 522 | {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, |
| 523 | {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, | ||
| 516 | {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)}, | 524 | {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)}, |
| 517 | {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)}, | 525 | {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)}, |
| 518 | {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)}, | 526 | {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)}, |
| @@ -527,10 +535,22 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
| 527 | {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)}, | 535 | {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)}, |
| 528 | {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)}, | 536 | {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)}, |
| 529 | {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)}, | 537 | {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)}, |
| 538 | {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)}, | ||
| 539 | {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)}, | ||
| 540 | {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)}, | ||
| 541 | {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, | ||
| 542 | {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)}, | ||
| 543 | {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)}, | ||
| 544 | {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)}, | ||
| 530 | {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)}, | 545 | {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)}, |
| 531 | {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)}, | 546 | {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)}, |
| 532 | {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)}, | 547 | {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)}, |
| 533 | {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, | 548 | {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, |
| 549 | {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, | ||
| 550 | {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)}, | ||
| 551 | {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)}, | ||
| 552 | {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)}, | ||
| 553 | {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)}, | ||
| 534 | {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)}, | 554 | {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)}, |
| 535 | {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)}, | 555 | {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)}, |
| 536 | {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)}, | 556 | {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)}, |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index fa315d84e98e..a1ea9ef97ed9 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h | |||
| @@ -787,6 +787,8 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); | |||
| 787 | 787 | ||
| 788 | void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); | 788 | void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); |
| 789 | 789 | ||
| 790 | void iwl_pcie_rx_allocator_work(struct work_struct *data); | ||
| 791 | |||
| 790 | /* common functions that are used by gen2 transport */ | 792 | /* common functions that are used by gen2 transport */ |
| 791 | void iwl_pcie_apm_config(struct iwl_trans *trans); | 793 | void iwl_pcie_apm_config(struct iwl_trans *trans); |
| 792 | int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); | 794 | int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 351c4423125a..942736d3fa75 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c | |||
| @@ -597,7 +597,7 @@ static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans, | |||
| 597 | rxq->free_count += RX_CLAIM_REQ_ALLOC; | 597 | rxq->free_count += RX_CLAIM_REQ_ALLOC; |
| 598 | } | 598 | } |
| 599 | 599 | ||
| 600 | static void iwl_pcie_rx_allocator_work(struct work_struct *data) | 600 | void iwl_pcie_rx_allocator_work(struct work_struct *data) |
| 601 | { | 601 | { |
| 602 | struct iwl_rb_allocator *rba_p = | 602 | struct iwl_rb_allocator *rba_p = |
| 603 | container_of(data, struct iwl_rb_allocator, rx_alloc); | 603 | container_of(data, struct iwl_rb_allocator, rx_alloc); |
| @@ -900,10 +900,6 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) | |||
| 900 | return err; | 900 | return err; |
| 901 | } | 901 | } |
| 902 | def_rxq = trans_pcie->rxq; | 902 | def_rxq = trans_pcie->rxq; |
| 903 | if (!rba->alloc_wq) | ||
| 904 | rba->alloc_wq = alloc_workqueue("rb_allocator", | ||
| 905 | WQ_HIGHPRI | WQ_UNBOUND, 1); | ||
| 906 | INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); | ||
| 907 | 903 | ||
| 908 | spin_lock(&rba->lock); | 904 | spin_lock(&rba->lock); |
| 909 | atomic_set(&rba->req_pending, 0); | 905 | atomic_set(&rba->req_pending, 0); |
| @@ -1017,10 +1013,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) | |||
| 1017 | } | 1013 | } |
| 1018 | 1014 | ||
| 1019 | cancel_work_sync(&rba->rx_alloc); | 1015 | cancel_work_sync(&rba->rx_alloc); |
| 1020 | if (rba->alloc_wq) { | ||
| 1021 | destroy_workqueue(rba->alloc_wq); | ||
| 1022 | rba->alloc_wq = NULL; | ||
| 1023 | } | ||
| 1024 | 1016 | ||
| 1025 | iwl_pcie_free_rbs_pool(trans); | 1017 | iwl_pcie_free_rbs_pool(trans); |
| 1026 | 1018 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 92b3a55d0fbc..3927bbf04f72 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c | |||
| @@ -1786,6 +1786,11 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) | |||
| 1786 | iwl_pcie_tx_free(trans); | 1786 | iwl_pcie_tx_free(trans); |
| 1787 | iwl_pcie_rx_free(trans); | 1787 | iwl_pcie_rx_free(trans); |
| 1788 | 1788 | ||
| 1789 | if (trans_pcie->rba.alloc_wq) { | ||
| 1790 | destroy_workqueue(trans_pcie->rba.alloc_wq); | ||
| 1791 | trans_pcie->rba.alloc_wq = NULL; | ||
| 1792 | } | ||
| 1793 | |||
| 1789 | if (trans_pcie->msix_enabled) { | 1794 | if (trans_pcie->msix_enabled) { |
| 1790 | for (i = 0; i < trans_pcie->alloc_vecs; i++) { | 1795 | for (i = 0; i < trans_pcie->alloc_vecs; i++) { |
| 1791 | irq_set_affinity_hint( | 1796 | irq_set_affinity_hint( |
| @@ -3150,7 +3155,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
| 3150 | init_waitqueue_head(&trans_pcie->d0i3_waitq); | 3155 | init_waitqueue_head(&trans_pcie->d0i3_waitq); |
| 3151 | 3156 | ||
| 3152 | if (trans_pcie->msix_enabled) { | 3157 | if (trans_pcie->msix_enabled) { |
| 3153 | if (iwl_pcie_init_msix_handler(pdev, trans_pcie)) | 3158 | ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); |
| 3159 | if (ret) | ||
| 3154 | goto out_no_pci; | 3160 | goto out_no_pci; |
| 3155 | } else { | 3161 | } else { |
| 3156 | ret = iwl_pcie_alloc_ict(trans); | 3162 | ret = iwl_pcie_alloc_ict(trans); |
| @@ -3168,6 +3174,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
| 3168 | trans_pcie->inta_mask = CSR_INI_SET_MASK; | 3174 | trans_pcie->inta_mask = CSR_INI_SET_MASK; |
| 3169 | } | 3175 | } |
| 3170 | 3176 | ||
| 3177 | trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", | ||
| 3178 | WQ_HIGHPRI | WQ_UNBOUND, 1); | ||
| 3179 | INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); | ||
| 3180 | |||
| 3171 | #ifdef CONFIG_IWLWIFI_PCIE_RTPM | 3181 | #ifdef CONFIG_IWLWIFI_PCIE_RTPM |
| 3172 | trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; | 3182 | trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; |
| 3173 | #else | 3183 | #else |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index de50418adae5..034bdb4a0b06 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c | |||
| @@ -298,6 +298,9 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) | |||
| 298 | for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { | 298 | for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { |
| 299 | struct iwl_txq *txq = trans_pcie->txq[i]; | 299 | struct iwl_txq *txq = trans_pcie->txq[i]; |
| 300 | 300 | ||
| 301 | if (!test_bit(i, trans_pcie->queue_used)) | ||
| 302 | continue; | ||
| 303 | |||
| 301 | spin_lock_bh(&txq->lock); | 304 | spin_lock_bh(&txq->lock); |
| 302 | if (txq->need_update) { | 305 | if (txq->need_update) { |
| 303 | iwl_pcie_txq_inc_wr_ptr(trans, txq); | 306 | iwl_pcie_txq_inc_wr_ptr(trans, txq); |
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 2a7ad5ffe997..cd5dc6dcb19f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | |||
| @@ -846,9 +846,6 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw) | |||
| 846 | return false; | 846 | return false; |
| 847 | } | 847 | } |
| 848 | 848 | ||
| 849 | if (rtlpriv->cfg->ops->get_btc_status()) | ||
| 850 | rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv); | ||
| 851 | |||
| 852 | bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); | 849 | bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); |
| 853 | rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3)); | 850 | rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3)); |
| 854 | 851 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index fb1ebb01133f..70723e67b7d7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h | |||
| @@ -2547,7 +2547,6 @@ struct bt_coexist_info { | |||
| 2547 | struct rtl_btc_ops { | 2547 | struct rtl_btc_ops { |
| 2548 | void (*btc_init_variables) (struct rtl_priv *rtlpriv); | 2548 | void (*btc_init_variables) (struct rtl_priv *rtlpriv); |
| 2549 | void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv); | 2549 | void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv); |
| 2550 | void (*btc_power_on_setting)(struct rtl_priv *rtlpriv); | ||
| 2551 | void (*btc_init_hw_config) (struct rtl_priv *rtlpriv); | 2550 | void (*btc_init_hw_config) (struct rtl_priv *rtlpriv); |
| 2552 | void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type); | 2551 | void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type); |
| 2553 | void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type); | 2552 | void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type); |
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 08f0477f78d9..9915d83a4a30 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c | |||
| @@ -1571,6 +1571,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void) | |||
| 1571 | 1571 | ||
| 1572 | wl->state = WL1251_STATE_OFF; | 1572 | wl->state = WL1251_STATE_OFF; |
| 1573 | mutex_init(&wl->mutex); | 1573 | mutex_init(&wl->mutex); |
| 1574 | spin_lock_init(&wl->wl_lock); | ||
| 1574 | 1575 | ||
| 1575 | wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE; | 1576 | wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE; |
| 1576 | wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE; | 1577 | wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE; |
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 9a03c5871efe..f58d8e305323 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c | |||
| @@ -924,10 +924,8 @@ out1: | |||
| 924 | ntb_free_mw(nt, i); | 924 | ntb_free_mw(nt, i); |
| 925 | 925 | ||
| 926 | /* if there's an actual failure, we should just bail */ | 926 | /* if there's an actual failure, we should just bail */ |
| 927 | if (rc < 0) { | 927 | if (rc < 0) |
| 928 | ntb_link_disable(ndev); | ||
| 929 | return; | 928 | return; |
| 930 | } | ||
| 931 | 929 | ||
| 932 | out: | 930 | out: |
| 933 | if (ntb_link_is_up(ndev, NULL, NULL) == 1) | 931 | if (ntb_link_is_up(ndev, NULL, NULL) == 1) |
| @@ -1059,7 +1057,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) | |||
| 1059 | int node; | 1057 | int node; |
| 1060 | int rc, i; | 1058 | int rc, i; |
| 1061 | 1059 | ||
| 1062 | mw_count = ntb_mw_count(ndev, PIDX); | 1060 | mw_count = ntb_peer_mw_count(ndev); |
| 1063 | 1061 | ||
| 1064 | if (!ndev->ops->mw_set_trans) { | 1062 | if (!ndev->ops->mw_set_trans) { |
| 1065 | dev_err(&ndev->dev, "Inbound MW based NTB API is required\n"); | 1063 | dev_err(&ndev->dev, "Inbound MW based NTB API is required\n"); |
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c index f002bf48a08d..a69815c45ce6 100644 --- a/drivers/ntb/test/ntb_tool.c +++ b/drivers/ntb/test/ntb_tool.c | |||
| @@ -959,7 +959,7 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb) | |||
| 959 | tc->ntb = ntb; | 959 | tc->ntb = ntb; |
| 960 | init_waitqueue_head(&tc->link_wq); | 960 | init_waitqueue_head(&tc->link_wq); |
| 961 | 961 | ||
| 962 | tc->mw_count = min(ntb_mw_count(tc->ntb, PIDX), MAX_MWS); | 962 | tc->mw_count = min(ntb_peer_mw_count(tc->ntb), MAX_MWS); |
| 963 | for (i = 0; i < tc->mw_count; i++) { | 963 | for (i = 0; i < tc->mw_count; i++) { |
| 964 | rc = tool_init_mw(tc, i); | 964 | rc = tool_init_mw(tc, i); |
| 965 | if (rc) | 965 | if (rc) |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 3b77cfe5aa1e..37046ac2c441 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
| @@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl, | |||
| 336 | 336 | ||
| 337 | c.directive.opcode = nvme_admin_directive_recv; | 337 | c.directive.opcode = nvme_admin_directive_recv; |
| 338 | c.directive.nsid = cpu_to_le32(nsid); | 338 | c.directive.nsid = cpu_to_le32(nsid); |
| 339 | c.directive.numd = cpu_to_le32(sizeof(*s)); | 339 | c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1); |
| 340 | c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; | 340 | c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; |
| 341 | c.directive.dtype = NVME_DIR_STREAMS; | 341 | c.directive.dtype = NVME_DIR_STREAMS; |
| 342 | 342 | ||
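For context on the numd fix above: NUMD in NVMe is a zero-based dword count, so the value sent must be the payload size in bytes divided by four, minus one. A minimal sketch of the arithmetic, with the structure size assumed rather than taken from the driver:

/* Illustrative only; payload_bytes stands in for sizeof(*s). */
static u32 example_numd(size_t payload_bytes)
{
	/* e.g. a 32-byte parameter structure gives 32 / 4 - 1 = 7 */
	return (payload_bytes >> 2) - 1;
}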
| @@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, | |||
| 1509 | blk_queue_write_cache(q, vwc, vwc); | 1509 | blk_queue_write_cache(q, vwc, vwc); |
| 1510 | } | 1510 | } |
| 1511 | 1511 | ||
| 1512 | static void nvme_configure_apst(struct nvme_ctrl *ctrl) | 1512 | static int nvme_configure_apst(struct nvme_ctrl *ctrl) |
| 1513 | { | 1513 | { |
| 1514 | /* | 1514 | /* |
| 1515 | * APST (Autonomous Power State Transition) lets us program a | 1515 | * APST (Autonomous Power State Transition) lets us program a |
| @@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) | |||
| 1538 | * then don't do anything. | 1538 | * then don't do anything. |
| 1539 | */ | 1539 | */ |
| 1540 | if (!ctrl->apsta) | 1540 | if (!ctrl->apsta) |
| 1541 | return; | 1541 | return 0; |
| 1542 | 1542 | ||
| 1543 | if (ctrl->npss > 31) { | 1543 | if (ctrl->npss > 31) { |
| 1544 | dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); | 1544 | dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); |
| 1545 | return; | 1545 | return 0; |
| 1546 | } | 1546 | } |
| 1547 | 1547 | ||
| 1548 | table = kzalloc(sizeof(*table), GFP_KERNEL); | 1548 | table = kzalloc(sizeof(*table), GFP_KERNEL); |
| 1549 | if (!table) | 1549 | if (!table) |
| 1550 | return; | 1550 | return 0; |
| 1551 | 1551 | ||
| 1552 | if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { | 1552 | if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { |
| 1553 | /* Turn off APST. */ | 1553 | /* Turn off APST. */ |
| @@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl) | |||
| 1629 | dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); | 1629 | dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); |
| 1630 | 1630 | ||
| 1631 | kfree(table); | 1631 | kfree(table); |
| 1632 | return ret; | ||
| 1632 | } | 1633 | } |
| 1633 | 1634 | ||
| 1634 | static void nvme_set_latency_tolerance(struct device *dev, s32 val) | 1635 | static void nvme_set_latency_tolerance(struct device *dev, s32 val) |
| @@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) | |||
| 1835 | * In fabrics we need to verify the cntlid matches the | 1836 | * In fabrics we need to verify the cntlid matches the |
| 1836 | * admin connect | 1837 | * admin connect |
| 1837 | */ | 1838 | */ |
| 1838 | if (ctrl->cntlid != le16_to_cpu(id->cntlid)) | 1839 | if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { |
| 1839 | ret = -EINVAL; | 1840 | ret = -EINVAL; |
| 1841 | goto out_free; | ||
| 1842 | } | ||
| 1840 | 1843 | ||
| 1841 | if (!ctrl->opts->discovery_nqn && !ctrl->kas) { | 1844 | if (!ctrl->opts->discovery_nqn && !ctrl->kas) { |
| 1842 | dev_err(ctrl->device, | 1845 | dev_err(ctrl->device, |
| 1843 | "keep-alive support is mandatory for fabrics\n"); | 1846 | "keep-alive support is mandatory for fabrics\n"); |
| 1844 | ret = -EINVAL; | 1847 | ret = -EINVAL; |
| 1848 | goto out_free; | ||
| 1845 | } | 1849 | } |
| 1846 | } else { | 1850 | } else { |
| 1847 | ctrl->cntlid = le16_to_cpu(id->cntlid); | 1851 | ctrl->cntlid = le16_to_cpu(id->cntlid); |
| @@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) | |||
| 1856 | else if (!ctrl->apst_enabled && prev_apst_enabled) | 1860 | else if (!ctrl->apst_enabled && prev_apst_enabled) |
| 1857 | dev_pm_qos_hide_latency_tolerance(ctrl->device); | 1861 | dev_pm_qos_hide_latency_tolerance(ctrl->device); |
| 1858 | 1862 | ||
| 1859 | nvme_configure_apst(ctrl); | 1863 | ret = nvme_configure_apst(ctrl); |
| 1860 | nvme_configure_directives(ctrl); | 1864 | if (ret < 0) |
| 1865 | return ret; | ||
| 1866 | |||
| 1867 | ret = nvme_configure_directives(ctrl); | ||
| 1868 | if (ret < 0) | ||
| 1869 | return ret; | ||
| 1861 | 1870 | ||
| 1862 | ctrl->identified = true; | 1871 | ctrl->identified = true; |
| 1863 | 1872 | ||
| 1873 | return 0; | ||
| 1874 | |||
| 1875 | out_free: | ||
| 1876 | kfree(id); | ||
| 1864 | return ret; | 1877 | return ret; |
| 1865 | } | 1878 | } |
| 1866 | EXPORT_SYMBOL_GPL(nvme_init_identify); | 1879 | EXPORT_SYMBOL_GPL(nvme_init_identify); |
| @@ -1995,15 +2008,20 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, | |||
| 1995 | int serial_len = sizeof(ctrl->serial); | 2008 | int serial_len = sizeof(ctrl->serial); |
| 1996 | int model_len = sizeof(ctrl->model); | 2009 | int model_len = sizeof(ctrl->model); |
| 1997 | 2010 | ||
| 2011 | if (!uuid_is_null(&ns->uuid)) | ||
| 2012 | return sprintf(buf, "uuid.%pU\n", &ns->uuid); | ||
| 2013 | |||
| 1998 | if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) | 2014 | if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) |
| 1999 | return sprintf(buf, "eui.%16phN\n", ns->nguid); | 2015 | return sprintf(buf, "eui.%16phN\n", ns->nguid); |
| 2000 | 2016 | ||
| 2001 | if (memchr_inv(ns->eui, 0, sizeof(ns->eui))) | 2017 | if (memchr_inv(ns->eui, 0, sizeof(ns->eui))) |
| 2002 | return sprintf(buf, "eui.%8phN\n", ns->eui); | 2018 | return sprintf(buf, "eui.%8phN\n", ns->eui); |
| 2003 | 2019 | ||
| 2004 | while (ctrl->serial[serial_len - 1] == ' ') | 2020 | while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' || |
| 2021 | ctrl->serial[serial_len - 1] == '\0')) | ||
| 2005 | serial_len--; | 2022 | serial_len--; |
| 2006 | while (ctrl->model[model_len - 1] == ' ') | 2023 | while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' || |
| 2024 | ctrl->model[model_len - 1] == '\0')) | ||
| 2007 | model_len--; | 2025 | model_len--; |
| 2008 | 2026 | ||
| 2009 | return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid, | 2027 | return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid, |
| @@ -2709,7 +2727,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) | |||
| 2709 | mutex_lock(&ctrl->namespaces_mutex); | 2727 | mutex_lock(&ctrl->namespaces_mutex); |
| 2710 | 2728 | ||
| 2711 | /* Forcibly unquiesce queues to avoid blocking dispatch */ | 2729 | /* Forcibly unquiesce queues to avoid blocking dispatch */ |
| 2712 | blk_mq_unquiesce_queue(ctrl->admin_q); | 2730 | if (ctrl->admin_q) |
| 2731 | blk_mq_unquiesce_queue(ctrl->admin_q); | ||
| 2713 | 2732 | ||
| 2714 | list_for_each_entry(ns, &ctrl->namespaces, list) { | 2733 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
| 2715 | /* | 2734 | /* |
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 2e582a240943..5f5cd306f76d 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c | |||
| @@ -794,7 +794,8 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts, | |||
| 794 | int i; | 794 | int i; |
| 795 | 795 | ||
| 796 | for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) { | 796 | for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) { |
| 797 | if (opt_tokens[i].token & ~allowed_opts) { | 797 | if ((opt_tokens[i].token & opts->mask) && |
| 798 | (opt_tokens[i].token & ~allowed_opts)) { | ||
| 798 | pr_warn("invalid parameter '%s'\n", | 799 | pr_warn("invalid parameter '%s'\n", |
| 799 | opt_tokens[i].pattern); | 800 | opt_tokens[i].pattern); |
| 800 | } | 801 | } |
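The intent of the added opts->mask test, sketched with made-up token bits (the real assignments live in the fabrics option table): warn only about options the user actually supplied and that this transport does not allow, instead of warning for every option outside the allowed set.

/* Hypothetical token bits, for illustration only. */
#define EX_OPT_TRADDR		(1 << 0)
#define EX_OPT_NR_IO_QUEUES	(1 << 1)

static bool example_should_warn(unsigned int token, unsigned int user_mask,
				unsigned int allowed_opts)
{
	/* set by the user AND not in the transport's allowed set */
	return (token & user_mask) && (token & ~allowed_opts);
}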
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index d666ada39a9b..5c2a08ef08ba 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
| @@ -1888,7 +1888,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, | |||
| 1888 | * the target device is present | 1888 | * the target device is present |
| 1889 | */ | 1889 | */ |
| 1890 | if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) | 1890 | if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) |
| 1891 | return BLK_STS_IOERR; | 1891 | goto busy; |
| 1892 | 1892 | ||
| 1893 | if (!nvme_fc_ctrl_get(ctrl)) | 1893 | if (!nvme_fc_ctrl_get(ctrl)) |
| 1894 | return BLK_STS_IOERR; | 1894 | return BLK_STS_IOERR; |
| @@ -1958,22 +1958,25 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, | |||
| 1958 | queue->lldd_handle, &op->fcp_req); | 1958 | queue->lldd_handle, &op->fcp_req); |
| 1959 | 1959 | ||
| 1960 | if (ret) { | 1960 | if (ret) { |
| 1961 | if (op->rq) /* normal request */ | 1961 | if (!(op->flags & FCOP_FLAGS_AEN)) |
| 1962 | nvme_fc_unmap_data(ctrl, op->rq, op); | 1962 | nvme_fc_unmap_data(ctrl, op->rq, op); |
| 1963 | /* else - aen. no cleanup needed */ | ||
| 1964 | 1963 | ||
| 1965 | nvme_fc_ctrl_put(ctrl); | 1964 | nvme_fc_ctrl_put(ctrl); |
| 1966 | 1965 | ||
| 1967 | if (ret != -EBUSY) | 1966 | if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && |
| 1967 | ret != -EBUSY) | ||
| 1968 | return BLK_STS_IOERR; | 1968 | return BLK_STS_IOERR; |
| 1969 | 1969 | ||
| 1970 | if (op->rq) | 1970 | goto busy; |
| 1971 | blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY); | ||
| 1972 | |||
| 1973 | return BLK_STS_RESOURCE; | ||
| 1974 | } | 1971 | } |
| 1975 | 1972 | ||
| 1976 | return BLK_STS_OK; | 1973 | return BLK_STS_OK; |
| 1974 | |||
| 1975 | busy: | ||
| 1976 | if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx) | ||
| 1977 | blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY); | ||
| 1978 | |||
| 1979 | return BLK_STS_RESOURCE; | ||
| 1977 | } | 1980 | } |
| 1978 | 1981 | ||
| 1979 | static blk_status_t | 1982 | static blk_status_t |
| @@ -2802,66 +2805,70 @@ out_fail: | |||
| 2802 | return ERR_PTR(ret); | 2805 | return ERR_PTR(ret); |
| 2803 | } | 2806 | } |
| 2804 | 2807 | ||
| 2805 | enum { | ||
| 2806 | FCT_TRADDR_ERR = 0, | ||
| 2807 | FCT_TRADDR_WWNN = 1 << 0, | ||
| 2808 | FCT_TRADDR_WWPN = 1 << 1, | ||
| 2809 | }; | ||
| 2810 | 2808 | ||
| 2811 | struct nvmet_fc_traddr { | 2809 | struct nvmet_fc_traddr { |
| 2812 | u64 nn; | 2810 | u64 nn; |
| 2813 | u64 pn; | 2811 | u64 pn; |
| 2814 | }; | 2812 | }; |
| 2815 | 2813 | ||
| 2816 | static const match_table_t traddr_opt_tokens = { | ||
| 2817 | { FCT_TRADDR_WWNN, "nn-%s" }, | ||
| 2818 | { FCT_TRADDR_WWPN, "pn-%s" }, | ||
| 2819 | { FCT_TRADDR_ERR, NULL } | ||
| 2820 | }; | ||
| 2821 | |||
| 2822 | static int | 2814 | static int |
| 2823 | nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf) | 2815 | __nvme_fc_parse_u64(substring_t *sstr, u64 *val) |
| 2824 | { | 2816 | { |
| 2825 | substring_t args[MAX_OPT_ARGS]; | ||
| 2826 | char *options, *o, *p; | ||
| 2827 | int token, ret = 0; | ||
| 2828 | u64 token64; | 2817 | u64 token64; |
| 2829 | 2818 | ||
| 2830 | options = o = kstrdup(buf, GFP_KERNEL); | 2819 | if (match_u64(sstr, &token64)) |
| 2831 | if (!options) | 2820 | return -EINVAL; |
| 2832 | return -ENOMEM; | 2821 | *val = token64; |
| 2833 | 2822 | ||
| 2834 | while ((p = strsep(&o, ":\n")) != NULL) { | 2823 | return 0; |
| 2835 | if (!*p) | 2824 | } |
| 2836 | continue; | ||
| 2837 | 2825 | ||
| 2838 | token = match_token(p, traddr_opt_tokens, args); | 2826 | /* |
| 2839 | switch (token) { | 2827 | * This routine validates and extracts the WWNs from the TRADDR string. |
| 2840 | case FCT_TRADDR_WWNN: | 2828 | * As kernel parsers need the 0x to determine number base, universally |
| 2841 | if (match_u64(args, &token64)) { | 2829 | * build string to parse with 0x prefix before parsing name strings. |
| 2842 | ret = -EINVAL; | 2830 | */ |
| 2843 | goto out; | 2831 | static int |
| 2844 | } | 2832 | nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) |
| 2845 | traddr->nn = token64; | 2833 | { |
| 2846 | break; | 2834 | char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; |
| 2847 | case FCT_TRADDR_WWPN: | 2835 | substring_t wwn = { name, &name[sizeof(name)-1] }; |
| 2848 | if (match_u64(args, &token64)) { | 2836 | int nnoffset, pnoffset; |
| 2849 | ret = -EINVAL; | 2837 | |
| 2850 | goto out; | 2838 | /* validate the string is in one of the 2 allowed formats */ |
| 2851 | } | 2839 | if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && |
| 2852 | traddr->pn = token64; | 2840 | !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && |
| 2853 | break; | 2841 | !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], |
| 2854 | default: | 2842 | "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { |
| 2855 | pr_warn("unknown traddr token or missing value '%s'\n", | 2843 | nnoffset = NVME_FC_TRADDR_OXNNLEN; |
| 2856 | p); | 2844 | pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + |
| 2857 | ret = -EINVAL; | 2845 | NVME_FC_TRADDR_OXNNLEN; |
| 2858 | goto out; | 2846 | } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && |
| 2859 | } | 2847 | !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && |
| 2860 | } | 2848 | !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], |
| 2849 | "pn-", NVME_FC_TRADDR_NNLEN))) { | ||
| 2850 | nnoffset = NVME_FC_TRADDR_NNLEN; | ||
| 2851 | pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; | ||
| 2852 | } else | ||
| 2853 | goto out_einval; | ||
| 2861 | 2854 | ||
| 2862 | out: | 2855 | name[0] = '0'; |
| 2863 | kfree(options); | 2856 | name[1] = 'x'; |
| 2864 | return ret; | 2857 | name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; |
| 2858 | |||
| 2859 | memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); | ||
| 2860 | if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) | ||
| 2861 | goto out_einval; | ||
| 2862 | |||
| 2863 | memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); | ||
| 2864 | if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) | ||
| 2865 | goto out_einval; | ||
| 2866 | |||
| 2867 | return 0; | ||
| 2868 | |||
| 2869 | out_einval: | ||
| 2870 | pr_warn("%s: bad traddr string\n", __func__); | ||
| 2871 | return -EINVAL; | ||
| 2865 | } | 2872 | } |
| 2866 | 2873 | ||
| 2867 | static struct nvme_ctrl * | 2874 | static struct nvme_ctrl * |
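Example traddr strings in the two accepted layouts (the WWN values here are made up): "nn-0x20000090fa942779:pn-0x10000090fa942779" and "nn-20000090fa942779:pn-10000090fa942779". In both cases the parser copies the 16 hex digits that follow each prefix into a small scratch buffer that already starts with "0x", so the kernel's number parser can treat them as base 16. A sketch of that step, with kstrtoull() standing in for the match_u64() call used by the driver:

/* Sketch only; buf and offset come from the surrounding parser. */
static u64 example_parse_wwn(const char *buf, int offset)
{
	char name[2 + 16 + 1] = "0x";	/* "0x" + 16 hex digits + NUL */
	u64 val = 0;

	memcpy(&name[2], &buf[offset], 16);
	if (kstrtoull(name, 0, &val))	/* base auto-detected from "0x" */
		return 0;
	return val;
}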
| @@ -2875,11 +2882,11 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) | |||
| 2875 | unsigned long flags; | 2882 | unsigned long flags; |
| 2876 | int ret; | 2883 | int ret; |
| 2877 | 2884 | ||
| 2878 | ret = nvme_fc_parse_address(&raddr, opts->traddr); | 2885 | ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); |
| 2879 | if (ret || !raddr.nn || !raddr.pn) | 2886 | if (ret || !raddr.nn || !raddr.pn) |
| 2880 | return ERR_PTR(-EINVAL); | 2887 | return ERR_PTR(-EINVAL); |
| 2881 | 2888 | ||
| 2882 | ret = nvme_fc_parse_address(&laddr, opts->host_traddr); | 2889 | ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); |
| 2883 | if (ret || !laddr.nn || !laddr.pn) | 2890 | if (ret || !laddr.nn || !laddr.pn) |
| 2884 | return ERR_PTR(-EINVAL); | 2891 | return ERR_PTR(-EINVAL); |
| 2885 | 2892 | ||
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 8569ee771269..ea892e732268 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -109,6 +109,7 @@ struct nvme_dev { | |||
| 109 | /* host memory buffer support: */ | 109 | /* host memory buffer support: */ |
| 110 | u64 host_mem_size; | 110 | u64 host_mem_size; |
| 111 | u32 nr_host_mem_descs; | 111 | u32 nr_host_mem_descs; |
| 112 | dma_addr_t host_mem_descs_dma; | ||
| 112 | struct nvme_host_mem_buf_desc *host_mem_descs; | 113 | struct nvme_host_mem_buf_desc *host_mem_descs; |
| 113 | void **host_mem_desc_bufs; | 114 | void **host_mem_desc_bufs; |
| 114 | }; | 115 | }; |
| @@ -801,6 +802,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, | |||
| 801 | return; | 802 | return; |
| 802 | } | 803 | } |
| 803 | 804 | ||
| 805 | nvmeq->cqe_seen = 1; | ||
| 804 | req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id); | 806 | req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id); |
| 805 | nvme_end_request(req, cqe->status, cqe->result); | 807 | nvme_end_request(req, cqe->status, cqe->result); |
| 806 | } | 808 | } |
| @@ -830,10 +832,8 @@ static void nvme_process_cq(struct nvme_queue *nvmeq) | |||
| 830 | consumed++; | 832 | consumed++; |
| 831 | } | 833 | } |
| 832 | 834 | ||
| 833 | if (consumed) { | 835 | if (consumed) |
| 834 | nvme_ring_cq_doorbell(nvmeq); | 836 | nvme_ring_cq_doorbell(nvmeq); |
| 835 | nvmeq->cqe_seen = 1; | ||
| 836 | } | ||
| 837 | } | 837 | } |
| 838 | 838 | ||
| 839 | static irqreturn_t nvme_irq(int irq, void *data) | 839 | static irqreturn_t nvme_irq(int irq, void *data) |
| @@ -1558,26 +1558,18 @@ static inline void nvme_release_cmb(struct nvme_dev *dev) | |||
| 1558 | if (dev->cmb) { | 1558 | if (dev->cmb) { |
| 1559 | iounmap(dev->cmb); | 1559 | iounmap(dev->cmb); |
| 1560 | dev->cmb = NULL; | 1560 | dev->cmb = NULL; |
| 1561 | if (dev->cmbsz) { | 1561 | sysfs_remove_file_from_group(&dev->ctrl.device->kobj, |
| 1562 | sysfs_remove_file_from_group(&dev->ctrl.device->kobj, | 1562 | &dev_attr_cmb.attr, NULL); |
| 1563 | &dev_attr_cmb.attr, NULL); | 1563 | dev->cmbsz = 0; |
| 1564 | dev->cmbsz = 0; | ||
| 1565 | } | ||
| 1566 | } | 1564 | } |
| 1567 | } | 1565 | } |
| 1568 | 1566 | ||
| 1569 | static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) | 1567 | static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) |
| 1570 | { | 1568 | { |
| 1571 | size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs); | 1569 | u64 dma_addr = dev->host_mem_descs_dma; |
| 1572 | struct nvme_command c; | 1570 | struct nvme_command c; |
| 1573 | u64 dma_addr; | ||
| 1574 | int ret; | 1571 | int ret; |
| 1575 | 1572 | ||
| 1576 | dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len, | ||
| 1577 | DMA_TO_DEVICE); | ||
| 1578 | if (dma_mapping_error(dev->dev, dma_addr)) | ||
| 1579 | return -ENOMEM; | ||
| 1580 | |||
| 1581 | memset(&c, 0, sizeof(c)); | 1573 | memset(&c, 0, sizeof(c)); |
| 1582 | c.features.opcode = nvme_admin_set_features; | 1574 | c.features.opcode = nvme_admin_set_features; |
| 1583 | c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); | 1575 | c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); |
| @@ -1594,7 +1586,6 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) | |||
| 1594 | "failed to set host mem (err %d, flags %#x).\n", | 1586 | "failed to set host mem (err %d, flags %#x).\n", |
| 1595 | ret, bits); | 1587 | ret, bits); |
| 1596 | } | 1588 | } |
| 1597 | dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE); | ||
| 1598 | return ret; | 1589 | return ret; |
| 1599 | } | 1590 | } |
| 1600 | 1591 | ||
| @@ -1612,14 +1603,17 @@ static void nvme_free_host_mem(struct nvme_dev *dev) | |||
| 1612 | 1603 | ||
| 1613 | kfree(dev->host_mem_desc_bufs); | 1604 | kfree(dev->host_mem_desc_bufs); |
| 1614 | dev->host_mem_desc_bufs = NULL; | 1605 | dev->host_mem_desc_bufs = NULL; |
| 1615 | kfree(dev->host_mem_descs); | 1606 | dma_free_coherent(dev->dev, |
| 1607 | dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), | ||
| 1608 | dev->host_mem_descs, dev->host_mem_descs_dma); | ||
| 1616 | dev->host_mem_descs = NULL; | 1609 | dev->host_mem_descs = NULL; |
| 1617 | } | 1610 | } |
| 1618 | 1611 | ||
| 1619 | static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) | 1612 | static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) |
| 1620 | { | 1613 | { |
| 1621 | struct nvme_host_mem_buf_desc *descs; | 1614 | struct nvme_host_mem_buf_desc *descs; |
| 1622 | u32 chunk_size, max_entries; | 1615 | u32 chunk_size, max_entries, len; |
| 1616 | dma_addr_t descs_dma; | ||
| 1623 | int i = 0; | 1617 | int i = 0; |
| 1624 | void **bufs; | 1618 | void **bufs; |
| 1625 | u64 size = 0, tmp; | 1619 | u64 size = 0, tmp; |
| @@ -1630,7 +1624,8 @@ retry: | |||
| 1630 | tmp = (preferred + chunk_size - 1); | 1624 | tmp = (preferred + chunk_size - 1); |
| 1631 | do_div(tmp, chunk_size); | 1625 | do_div(tmp, chunk_size); |
| 1632 | max_entries = tmp; | 1626 | max_entries = tmp; |
| 1633 | descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL); | 1627 | descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs), |
| 1628 | &descs_dma, GFP_KERNEL); | ||
| 1634 | if (!descs) | 1629 | if (!descs) |
| 1635 | goto out; | 1630 | goto out; |
| 1636 | 1631 | ||
| @@ -1638,10 +1633,10 @@ retry: | |||
| 1638 | if (!bufs) | 1633 | if (!bufs) |
| 1639 | goto out_free_descs; | 1634 | goto out_free_descs; |
| 1640 | 1635 | ||
| 1641 | for (size = 0; size < preferred; size += chunk_size) { | 1636 | for (size = 0; size < preferred; size += len) { |
| 1642 | u32 len = min_t(u64, chunk_size, preferred - size); | ||
| 1643 | dma_addr_t dma_addr; | 1637 | dma_addr_t dma_addr; |
| 1644 | 1638 | ||
| 1639 | len = min_t(u64, chunk_size, preferred - size); | ||
| 1645 | bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, | 1640 | bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, |
| 1646 | DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); | 1641 | DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); |
| 1647 | if (!bufs[i]) | 1642 | if (!bufs[i]) |
| @@ -1664,6 +1659,7 @@ retry: | |||
| 1664 | dev->nr_host_mem_descs = i; | 1659 | dev->nr_host_mem_descs = i; |
| 1665 | dev->host_mem_size = size; | 1660 | dev->host_mem_size = size; |
| 1666 | dev->host_mem_descs = descs; | 1661 | dev->host_mem_descs = descs; |
| 1662 | dev->host_mem_descs_dma = descs_dma; | ||
| 1667 | dev->host_mem_desc_bufs = bufs; | 1663 | dev->host_mem_desc_bufs = bufs; |
| 1668 | return 0; | 1664 | return 0; |
| 1669 | 1665 | ||
| @@ -1677,7 +1673,8 @@ out_free_bufs: | |||
| 1677 | 1673 | ||
| 1678 | kfree(bufs); | 1674 | kfree(bufs); |
| 1679 | out_free_descs: | 1675 | out_free_descs: |
| 1680 | kfree(descs); | 1676 | dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, |
| 1677 | descs_dma); | ||
| 1681 | out: | 1678 | out: |
| 1682 | /* try a smaller chunk size if we failed early */ | 1679 | /* try a smaller chunk size if we failed early */ |
| 1683 | if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) { | 1680 | if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) { |
| @@ -1953,16 +1950,14 @@ static int nvme_pci_enable(struct nvme_dev *dev) | |||
| 1953 | 1950 | ||
| 1954 | /* | 1951 | /* |
| 1955 | * CMBs can currently only exist on >=1.2 PCIe devices. We only | 1952 | * CMBs can currently only exist on >=1.2 PCIe devices. We only |
| 1956 | * populate sysfs if a CMB is implemented. Note that we add the | 1953 | * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group |
| 1957 | * CMB attribute to the nvme_ctrl kobj which removes the need to remove | 1954 | * has no name we can pass NULL as final argument to |
| 1958 | * it on exit. Since nvme_dev_attrs_group has no name we can pass | 1955 | * sysfs_add_file_to_group. |
| 1959 | * NULL as final argument to sysfs_add_file_to_group. | ||
| 1960 | */ | 1956 | */ |
| 1961 | 1957 | ||
| 1962 | if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { | 1958 | if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) { |
| 1963 | dev->cmb = nvme_map_cmb(dev); | 1959 | dev->cmb = nvme_map_cmb(dev); |
| 1964 | 1960 | if (dev->cmb) { | |
| 1965 | if (dev->cmbsz) { | ||
| 1966 | if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, | 1961 | if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, |
| 1967 | &dev_attr_cmb.attr, NULL)) | 1962 | &dev_attr_cmb.attr, NULL)) |
| 1968 | dev_warn(dev->ctrl.device, | 1963 | dev_warn(dev->ctrl.device, |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index da04df1af231..a03299d77922 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
| @@ -920,7 +920,11 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue, | |||
| 920 | struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; | 920 | struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; |
| 921 | int nr; | 921 | int nr; |
| 922 | 922 | ||
| 923 | nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE); | 923 | /* |
| 924 | * Align the MR to a 4K page size to match the ctrl page size and | ||
| 925 | * the block virtual boundary. | ||
| 926 | */ | ||
| 927 | nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K); | ||
| 924 | if (nr < count) { | 928 | if (nr < count) { |
| 925 | if (nr < 0) | 929 | if (nr < 0) |
| 926 | return nr; | 930 | return nr; |
| @@ -1583,7 +1587,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) | |||
| 1583 | goto out_cleanup_queue; | 1587 | goto out_cleanup_queue; |
| 1584 | 1588 | ||
| 1585 | ctrl->ctrl.max_hw_sectors = | 1589 | ctrl->ctrl.max_hw_sectors = |
| 1586 | (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9); | 1590 | (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9); |
| 1587 | 1591 | ||
| 1588 | error = nvme_init_identify(&ctrl->ctrl); | 1592 | error = nvme_init_identify(&ctrl->ctrl); |
| 1589 | if (error) | 1593 | if (error) |
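A worked example of the max_hw_sectors line above, with the per-MR page limit assumed rather than read from real hardware: if max_fr_pages is 256, then (256 - 1) << (ilog2(SZ_4K) - 9) = 255 << 3 = 2040 512-byte sectors, roughly 1020 KiB per request, and the result no longer depends on the CPU page size.

/* Illustrative only; max_fr_pages comes from the device in practice. */
static u32 example_max_hw_sectors(u32 max_fr_pages)
{
	/* e.g. 256 pages per MR: (256 - 1) << 3 = 2040 sectors (~1020 KiB) */
	return (max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
}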
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 2d7a98ab53fb..a53bb6635b83 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c | |||
| @@ -199,12 +199,6 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) | |||
| 199 | copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1); | 199 | copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1); |
| 200 | copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE)); | 200 | copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE)); |
| 201 | 201 | ||
| 202 | memset(id->mn, ' ', sizeof(id->mn)); | ||
| 203 | strncpy((char *)id->mn, "Linux", sizeof(id->mn)); | ||
| 204 | |||
| 205 | memset(id->fr, ' ', sizeof(id->fr)); | ||
| 206 | strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr)); | ||
| 207 | |||
| 208 | id->rab = 6; | 202 | id->rab = 6; |
| 209 | 203 | ||
| 210 | /* | 204 | /* |
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index d5801c150b1c..309c84aa7595 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c | |||
| @@ -114,6 +114,11 @@ struct nvmet_fc_tgtport { | |||
| 114 | struct kref ref; | 114 | struct kref ref; |
| 115 | }; | 115 | }; |
| 116 | 116 | ||
| 117 | struct nvmet_fc_defer_fcp_req { | ||
| 118 | struct list_head req_list; | ||
| 119 | struct nvmefc_tgt_fcp_req *fcp_req; | ||
| 120 | }; | ||
| 121 | |||
| 117 | struct nvmet_fc_tgt_queue { | 122 | struct nvmet_fc_tgt_queue { |
| 118 | bool ninetypercent; | 123 | bool ninetypercent; |
| 119 | u16 qid; | 124 | u16 qid; |
| @@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue { | |||
| 132 | struct nvmet_fc_tgt_assoc *assoc; | 137 | struct nvmet_fc_tgt_assoc *assoc; |
| 133 | struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ | 138 | struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ |
| 134 | struct list_head fod_list; | 139 | struct list_head fod_list; |
| 140 | struct list_head pending_cmd_list; | ||
| 141 | struct list_head avail_defer_list; | ||
| 135 | struct workqueue_struct *work_q; | 142 | struct workqueue_struct *work_q; |
| 136 | struct kref ref; | 143 | struct kref ref; |
| 137 | } __aligned(sizeof(unsigned long long)); | 144 | } __aligned(sizeof(unsigned long long)); |
| @@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); | |||
| 223 | static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); | 230 | static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); |
| 224 | static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); | 231 | static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); |
| 225 | static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); | 232 | static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); |
| 233 | static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, | ||
| 234 | struct nvmet_fc_fcp_iod *fod); | ||
| 226 | 235 | ||
| 227 | 236 | ||
| 228 | /* *********************** FC-NVME DMA Handling **************************** */ | 237 | /* *********************** FC-NVME DMA Handling **************************** */ |
| @@ -385,7 +394,7 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport) | |||
| 385 | static struct nvmet_fc_ls_iod * | 394 | static struct nvmet_fc_ls_iod * |
| 386 | nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) | 395 | nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) |
| 387 | { | 396 | { |
| 388 | static struct nvmet_fc_ls_iod *iod; | 397 | struct nvmet_fc_ls_iod *iod; |
| 389 | unsigned long flags; | 398 | unsigned long flags; |
| 390 | 399 | ||
| 391 | spin_lock_irqsave(&tgtport->lock, flags); | 400 | spin_lock_irqsave(&tgtport->lock, flags); |
| @@ -462,10 +471,10 @@ nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport, | |||
| 462 | static struct nvmet_fc_fcp_iod * | 471 | static struct nvmet_fc_fcp_iod * |
| 463 | nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) | 472 | nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) |
| 464 | { | 473 | { |
| 465 | static struct nvmet_fc_fcp_iod *fod; | 474 | struct nvmet_fc_fcp_iod *fod; |
| 466 | unsigned long flags; | 475 | |
| 476 | lockdep_assert_held(&queue->qlock); | ||
| 467 | 477 | ||
| 468 | spin_lock_irqsave(&queue->qlock, flags); | ||
| 469 | fod = list_first_entry_or_null(&queue->fod_list, | 478 | fod = list_first_entry_or_null(&queue->fod_list, |
| 470 | struct nvmet_fc_fcp_iod, fcp_list); | 479 | struct nvmet_fc_fcp_iod, fcp_list); |
| 471 | if (fod) { | 480 | if (fod) { |
| @@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) | |||
| 477 | * will "inherit" that reference. | 486 | * will "inherit" that reference. |
| 478 | */ | 487 | */ |
| 479 | } | 488 | } |
| 480 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
| 481 | return fod; | 489 | return fod; |
| 482 | } | 490 | } |
| 483 | 491 | ||
| 484 | 492 | ||
| 485 | static void | 493 | static void |
| 494 | nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport, | ||
| 495 | struct nvmet_fc_tgt_queue *queue, | ||
| 496 | struct nvmefc_tgt_fcp_req *fcpreq) | ||
| 497 | { | ||
| 498 | struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; | ||
| 499 | |||
| 500 | /* | ||
| 501 | * put all admin cmds on hw queue id 0. All io commands go to | ||
| 502 | * the respective hw queue based on a modulo basis | ||
| 503 | */ | ||
| 504 | fcpreq->hwqid = queue->qid ? | ||
| 505 | ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; | ||
| 506 | |||
| 507 | if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) | ||
| 508 | queue_work_on(queue->cpu, queue->work_q, &fod->work); | ||
| 509 | else | ||
| 510 | nvmet_fc_handle_fcp_rqst(tgtport, fod); | ||
| 511 | } | ||
| 512 | |||
| 513 | static void | ||
| 486 | nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, | 514 | nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, |
| 487 | struct nvmet_fc_fcp_iod *fod) | 515 | struct nvmet_fc_fcp_iod *fod) |
| 488 | { | 516 | { |
| 489 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; | 517 | struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; |
| 490 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; | 518 | struct nvmet_fc_tgtport *tgtport = fod->tgtport; |
| 519 | struct nvmet_fc_defer_fcp_req *deferfcp; | ||
| 491 | unsigned long flags; | 520 | unsigned long flags; |
| 492 | 521 | ||
| 493 | fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, | 522 | fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, |
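To make the hw-queue mapping in nvmet_fc_queue_fcp_req() concrete (max_hw_queues is assumed to be 4 here): the admin queue, qid 0, always lands on hwqid 0, io queue qid 1 maps to 0, qid 4 to 3, and qid 5 wraps back to 0. A one-line sketch of the same rule:

static u16 example_hwqid(u16 qid, u16 max_hw_queues)
{
	return qid ? (qid - 1) % max_hw_queues : 0;	/* admin cmds stay on hwq 0 */
}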
| @@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, | |||
| 495 | 524 | ||
| 496 | fcpreq->nvmet_fc_private = NULL; | 525 | fcpreq->nvmet_fc_private = NULL; |
| 497 | 526 | ||
| 498 | spin_lock_irqsave(&queue->qlock, flags); | ||
| 499 | list_add_tail(&fod->fcp_list, &fod->queue->fod_list); | ||
| 500 | fod->active = false; | 527 | fod->active = false; |
| 501 | fod->abort = false; | 528 | fod->abort = false; |
| 502 | fod->aborted = false; | 529 | fod->aborted = false; |
| 503 | fod->writedataactive = false; | 530 | fod->writedataactive = false; |
| 504 | fod->fcpreq = NULL; | 531 | fod->fcpreq = NULL; |
| 532 | |||
| 533 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); | ||
| 534 | |||
| 535 | spin_lock_irqsave(&queue->qlock, flags); | ||
| 536 | deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, | ||
| 537 | struct nvmet_fc_defer_fcp_req, req_list); | ||
| 538 | if (!deferfcp) { | ||
| 539 | list_add_tail(&fod->fcp_list, &fod->queue->fod_list); | ||
| 540 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
| 541 | |||
| 542 | /* Release reference taken at queue lookup and fod allocation */ | ||
| 543 | nvmet_fc_tgt_q_put(queue); | ||
| 544 | return; | ||
| 545 | } | ||
| 546 | |||
| 547 | /* Re-use the fod for the next pending cmd that was deferred */ | ||
| 548 | list_del(&deferfcp->req_list); | ||
| 549 | |||
| 550 | fcpreq = deferfcp->fcp_req; | ||
| 551 | |||
| 552 | /* deferfcp can be reused for another IO at a later date */ | ||
| 553 | list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); | ||
| 554 | |||
| 505 | spin_unlock_irqrestore(&queue->qlock, flags); | 555 | spin_unlock_irqrestore(&queue->qlock, flags); |
| 506 | 556 | ||
| 557 | /* Save NVME CMD IO in fod */ | ||
| 558 | memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); | ||
| 559 | |||
| 560 | /* Setup new fcpreq to be processed */ | ||
| 561 | fcpreq->rspaddr = NULL; | ||
| 562 | fcpreq->rsplen = 0; | ||
| 563 | fcpreq->nvmet_fc_private = fod; | ||
| 564 | fod->fcpreq = fcpreq; | ||
| 565 | fod->active = true; | ||
| 566 | |||
| 567 | /* inform LLDD IO is now being processed */ | ||
| 568 | tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); | ||
| 569 | |||
| 570 | /* Submit deferred IO for processing */ | ||
| 571 | nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); | ||
| 572 | |||
| 507 | /* | 573 | /* |
| 508 | * release the reference taken at queue lookup and fod allocation | 574 | * Leave the queue lookup get reference taken when |
| 575 | * fod was originally allocated. | ||
| 509 | */ | 576 | */ |
| 510 | nvmet_fc_tgt_q_put(queue); | ||
| 511 | |||
| 512 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); | ||
| 513 | } | 577 | } |
| 514 | 578 | ||
| 515 | static int | 579 | static int |
| @@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, | |||
| 569 | queue->port = assoc->tgtport->port; | 633 | queue->port = assoc->tgtport->port; |
| 570 | queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); | 634 | queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); |
| 571 | INIT_LIST_HEAD(&queue->fod_list); | 635 | INIT_LIST_HEAD(&queue->fod_list); |
| 636 | INIT_LIST_HEAD(&queue->avail_defer_list); | ||
| 637 | INIT_LIST_HEAD(&queue->pending_cmd_list); | ||
| 572 | atomic_set(&queue->connected, 0); | 638 | atomic_set(&queue->connected, 0); |
| 573 | atomic_set(&queue->sqtail, 0); | 639 | atomic_set(&queue->sqtail, 0); |
| 574 | atomic_set(&queue->rsn, 1); | 640 | atomic_set(&queue->rsn, 1); |
| @@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) | |||
| 638 | { | 704 | { |
| 639 | struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; | 705 | struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; |
| 640 | struct nvmet_fc_fcp_iod *fod = queue->fod; | 706 | struct nvmet_fc_fcp_iod *fod = queue->fod; |
| 707 | struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr; | ||
| 641 | unsigned long flags; | 708 | unsigned long flags; |
| 642 | int i, writedataactive; | 709 | int i, writedataactive; |
| 643 | bool disconnect; | 710 | bool disconnect; |
| @@ -666,6 +733,36 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) | |||
| 666 | } | 733 | } |
| 667 | } | 734 | } |
| 668 | } | 735 | } |
| 736 | |||
| 737 | /* Cleanup defer'ed IOs in queue */ | ||
| 738 | list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, | ||
| 739 | req_list) { | ||
| 740 | list_del(&deferfcp->req_list); | ||
| 741 | kfree(deferfcp); | ||
| 742 | } | ||
| 743 | |||
| 744 | for (;;) { | ||
| 745 | deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, | ||
| 746 | struct nvmet_fc_defer_fcp_req, req_list); | ||
| 747 | if (!deferfcp) | ||
| 748 | break; | ||
| 749 | |||
| 750 | list_del(&deferfcp->req_list); | ||
| 751 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
| 752 | |||
| 753 | tgtport->ops->defer_rcv(&tgtport->fc_target_port, | ||
| 754 | deferfcp->fcp_req); | ||
| 755 | |||
| 756 | tgtport->ops->fcp_abort(&tgtport->fc_target_port, | ||
| 757 | deferfcp->fcp_req); | ||
| 758 | |||
| 759 | tgtport->ops->fcp_req_release(&tgtport->fc_target_port, | ||
| 760 | deferfcp->fcp_req); | ||
| 761 | |||
| 762 | kfree(deferfcp); | ||
| 763 | |||
| 764 | spin_lock_irqsave(&queue->qlock, flags); | ||
| 765 | } | ||
| 669 | spin_unlock_irqrestore(&queue->qlock, flags); | 766 | spin_unlock_irqrestore(&queue->qlock, flags); |
| 670 | 767 | ||
| 671 | flush_workqueue(queue->work_q); | 768 | flush_workqueue(queue->work_q); |
| @@ -2172,11 +2269,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work) | |||
| 2172 | * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc | 2269 | * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc |
| 2173 | * layer for processing. | 2270 | * layer for processing. |
| 2174 | * | 2271 | * |
| 2175 | * The nvmet-fc layer will copy cmd payload to an internal structure for | 2272 | * The nvmet_fc layer allocates a local job structure (struct |
| 2176 | * processing. As such, upon completion of the routine, the LLDD may | 2273 | * nvmet_fc_fcp_iod) from the queue for the io and copies the |
| 2177 | * immediately free/reuse the CMD IU buffer passed in the call. | 2274 | * CMD IU buffer to the job structure. As such, on a successful |
| 2275 | * completion (returns 0), the LLDD may immediately free/reuse | ||
| 2276 | * the CMD IU buffer passed in the call. | ||
| 2277 | * | ||
| 2278 | * However, in some circumstances, due to the packetized nature of FC | ||
| 2279 | * and the API of the FC LLDD - which may issue a hw command to send | ||
| 2280 | * the response but not receive the hw completion for that command, and | ||
| 2281 | * so not upcall the nvmet_fc layer, before a new command is | ||
| 2282 | * asynchronously received - it's possible for a command to be received | ||
| 2283 | * before the LLDD and nvmet_fc have recycled the job structure. This | ||
| 2284 | * gives the appearance of more commands received than fit in the sq. | ||
| 2285 | * To alleviate this scenario, a temporary queue is maintained in the | ||
| 2286 | * transport for pending LLDD requests waiting for a queue job structure. | ||
| 2287 | * In these "overrun" cases, a temporary queue element is allocated, | ||
| 2288 | * the LLDD request and CMD IU buffer information are remembered, and | ||
| 2289 | * the routine returns a -EOVERFLOW status. Subsequently, when a queue | ||
| 2290 | * job structure is freed, it is immediately reallocated for anything on | ||
| 2291 | * the pending request list. The LLDD's defer_rcv() callback is called, | ||
| 2292 | * informing the LLDD that it may reuse the CMD IU buffer, and the io | ||
| 2293 | * is then started normally with the transport. | ||
| 2178 | * | 2294 | * |
| 2179 | * If this routine returns error, the lldd should abort the exchange. | 2295 | * The LLDD, when receiving an -EOVERFLOW completion status, is to treat |
| 2296 | * the completion as successful but must not reuse the CMD IU buffer | ||
| 2297 | * until the LLDD's defer_rcv() callback has been called for the | ||
| 2298 | * corresponding struct nvmefc_tgt_fcp_req pointer. | ||
| 2299 | * | ||
| 2300 | * If there is any other condition in which an error occurs, the | ||
| 2301 | * transport will return a non-zero status indicating the error. | ||
| 2302 | * In all cases other than -EOVERFLOW, the transport has not accepted the | ||
| 2303 | * request and the LLDD should abort the exchange. | ||
| 2180 | * | 2304 | * |
| 2181 | * @target_port: pointer to the (registered) target port the FCP CMD IU | 2305 | * @target_port: pointer to the (registered) target port the FCP CMD IU |
| 2182 | * was received on. | 2306 | * was received on. |
| @@ -2194,6 +2318,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, | |||
| 2194 | struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; | 2318 | struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; |
| 2195 | struct nvmet_fc_tgt_queue *queue; | 2319 | struct nvmet_fc_tgt_queue *queue; |
| 2196 | struct nvmet_fc_fcp_iod *fod; | 2320 | struct nvmet_fc_fcp_iod *fod; |
| 2321 | struct nvmet_fc_defer_fcp_req *deferfcp; | ||
| 2322 | unsigned long flags; | ||
| 2197 | 2323 | ||
| 2198 | /* validate iu, so the connection id can be used to find the queue */ | 2324 | /* validate iu, so the connection id can be used to find the queue */ |
| 2199 | if ((cmdiubuf_len != sizeof(*cmdiu)) || | 2325 | if ((cmdiubuf_len != sizeof(*cmdiu)) || |
| @@ -2214,29 +2340,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port, | |||
| 2214 | * when the fod is freed. | 2340 | * when the fod is freed. |
| 2215 | */ | 2341 | */ |
| 2216 | 2342 | ||
| 2343 | spin_lock_irqsave(&queue->qlock, flags); | ||
| 2344 | |||
| 2217 | fod = nvmet_fc_alloc_fcp_iod(queue); | 2345 | fod = nvmet_fc_alloc_fcp_iod(queue); |
| 2218 | if (!fod) { | 2346 | if (fod) { |
| 2347 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
| 2348 | |||
| 2349 | fcpreq->nvmet_fc_private = fod; | ||
| 2350 | fod->fcpreq = fcpreq; | ||
| 2351 | |||
| 2352 | memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); | ||
| 2353 | |||
| 2354 | nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq); | ||
| 2355 | |||
| 2356 | return 0; | ||
| 2357 | } | ||
| 2358 | |||
| 2359 | if (!tgtport->ops->defer_rcv) { | ||
| 2360 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
| 2219 | /* release the queue lookup reference */ | 2361 | /* release the queue lookup reference */ |
| 2220 | nvmet_fc_tgt_q_put(queue); | 2362 | nvmet_fc_tgt_q_put(queue); |
| 2221 | return -ENOENT; | 2363 | return -ENOENT; |
| 2222 | } | 2364 | } |
| 2223 | 2365 | ||
| 2224 | fcpreq->nvmet_fc_private = fod; | 2366 | deferfcp = list_first_entry_or_null(&queue->avail_defer_list, |
| 2225 | fod->fcpreq = fcpreq; | 2367 | struct nvmet_fc_defer_fcp_req, req_list); |
| 2226 | /* | 2368 | if (deferfcp) { |
| 2227 | * put all admin cmds on hw queue id 0. All io commands go to | 2369 | /* Just re-use one that was previously allocated */ |
| 2228 | * the respective hw queue based on a modulo basis | 2370 | list_del(&deferfcp->req_list); |
| 2229 | */ | 2371 | } else { |
| 2230 | fcpreq->hwqid = queue->qid ? | 2372 | spin_unlock_irqrestore(&queue->qlock, flags); |
| 2231 | ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; | ||
| 2232 | memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); | ||
| 2233 | 2373 | ||
| 2234 | if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) | 2374 | /* Now we need to dynamically allocate one */ |
| 2235 | queue_work_on(queue->cpu, queue->work_q, &fod->work); | 2375 | deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL); |
| 2236 | else | 2376 | if (!deferfcp) { |
| 2237 | nvmet_fc_handle_fcp_rqst(tgtport, fod); | 2377 | /* release the queue lookup reference */ |
| 2378 | nvmet_fc_tgt_q_put(queue); | ||
| 2379 | return -ENOMEM; | ||
| 2380 | } | ||
| 2381 | spin_lock_irqsave(&queue->qlock, flags); | ||
| 2382 | } | ||
| 2238 | 2383 | ||
| 2239 | return 0; | 2384 | /* For now, use rspaddr / rsplen to save payload information */ |
| 2385 | fcpreq->rspaddr = cmdiubuf; | ||
| 2386 | fcpreq->rsplen = cmdiubuf_len; | ||
| 2387 | deferfcp->fcp_req = fcpreq; | ||
| 2388 | |||
| 2389 | /* defer processing till a fod becomes available */ | ||
| 2390 | list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); | ||
| 2391 | |||
| 2392 | /* NOTE: the queue lookup reference is still valid */ | ||
| 2393 | |||
| 2394 | spin_unlock_irqrestore(&queue->qlock, flags); | ||
| 2395 | |||
| 2396 | return -EOVERFLOW; | ||
| 2240 | } | 2397 | } |
| 2241 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); | 2398 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); |
| 2242 | 2399 | ||
| @@ -2293,66 +2450,70 @@ nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port, | |||
| 2293 | } | 2450 | } |
| 2294 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); | 2451 | EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); |
| 2295 | 2452 | ||
| 2296 | enum { | ||
| 2297 | FCT_TRADDR_ERR = 0, | ||
| 2298 | FCT_TRADDR_WWNN = 1 << 0, | ||
| 2299 | FCT_TRADDR_WWPN = 1 << 1, | ||
| 2300 | }; | ||
| 2301 | 2453 | ||
| 2302 | struct nvmet_fc_traddr { | 2454 | struct nvmet_fc_traddr { |
| 2303 | u64 nn; | 2455 | u64 nn; |
| 2304 | u64 pn; | 2456 | u64 pn; |
| 2305 | }; | 2457 | }; |
| 2306 | 2458 | ||
| 2307 | static const match_table_t traddr_opt_tokens = { | ||
| 2308 | { FCT_TRADDR_WWNN, "nn-%s" }, | ||
| 2309 | { FCT_TRADDR_WWPN, "pn-%s" }, | ||
| 2310 | { FCT_TRADDR_ERR, NULL } | ||
| 2311 | }; | ||
| 2312 | |||
| 2313 | static int | 2459 | static int |
| 2314 | nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf) | 2460 | __nvme_fc_parse_u64(substring_t *sstr, u64 *val) |
| 2315 | { | 2461 | { |
| 2316 | substring_t args[MAX_OPT_ARGS]; | ||
| 2317 | char *options, *o, *p; | ||
| 2318 | int token, ret = 0; | ||
| 2319 | u64 token64; | 2462 | u64 token64; |
| 2320 | 2463 | ||
| 2321 | options = o = kstrdup(buf, GFP_KERNEL); | 2464 | if (match_u64(sstr, &token64)) |
| 2322 | if (!options) | 2465 | return -EINVAL; |
| 2323 | return -ENOMEM; | 2466 | *val = token64; |
| 2324 | 2467 | ||
| 2325 | while ((p = strsep(&o, ":\n")) != NULL) { | 2468 | return 0; |
| 2326 | if (!*p) | 2469 | } |
| 2327 | continue; | ||
| 2328 | 2470 | ||
| 2329 | token = match_token(p, traddr_opt_tokens, args); | 2471 | /* |
| 2330 | switch (token) { | 2472 | * This routine validates and extracts the WWNs from the TRADDR string. |
| 2331 | case FCT_TRADDR_WWNN: | 2473 | * As kernel parsers need the 0x to determine number base, universally |
| 2332 | if (match_u64(args, &token64)) { | 2474 | * build string to parse with 0x prefix before parsing name strings. |
| 2333 | ret = -EINVAL; | 2475 | */ |
| 2334 | goto out; | 2476 | static int |
| 2335 | } | 2477 | nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) |
| 2336 | traddr->nn = token64; | 2478 | { |
| 2337 | break; | 2479 | char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; |
| 2338 | case FCT_TRADDR_WWPN: | 2480 | substring_t wwn = { name, &name[sizeof(name)-1] }; |
| 2339 | if (match_u64(args, &token64)) { | 2481 | int nnoffset, pnoffset; |
| 2340 | ret = -EINVAL; | 2482 | |
| 2341 | goto out; | 2483 | /* validate that the string is one of the 2 allowed formats */ |
| 2342 | } | 2484 | if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && |
| 2343 | traddr->pn = token64; | 2485 | !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && |
| 2344 | break; | 2486 | !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], |
| 2345 | default: | 2487 | "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { |
| 2346 | pr_warn("unknown traddr token or missing value '%s'\n", | 2488 | nnoffset = NVME_FC_TRADDR_OXNNLEN; |
| 2347 | p); | 2489 | pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + |
| 2348 | ret = -EINVAL; | 2490 | NVME_FC_TRADDR_OXNNLEN; |
| 2349 | goto out; | 2491 | } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && |
| 2350 | } | 2492 | !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && |
| 2351 | } | 2493 | !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], |
| 2494 | "pn-", NVME_FC_TRADDR_NNLEN))) { | ||
| 2495 | nnoffset = NVME_FC_TRADDR_NNLEN; | ||
| 2496 | pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; | ||
| 2497 | } else | ||
| 2498 | goto out_einval; | ||
| 2499 | |||
| 2500 | name[0] = '0'; | ||
| 2501 | name[1] = 'x'; | ||
| 2502 | name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; | ||
| 2503 | |||
| 2504 | memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); | ||
| 2505 | if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) | ||
| 2506 | goto out_einval; | ||
| 2507 | |||
| 2508 | memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); | ||
| 2509 | if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) | ||
| 2510 | goto out_einval; | ||
| 2352 | 2511 | ||
| 2353 | out: | 2512 | return 0; |
| 2354 | kfree(options); | 2513 | |
| 2355 | return ret; | 2514 | out_einval: |
| 2515 | pr_warn("%s: bad traddr string\n", __func__); | ||
| 2516 | return -EINVAL; | ||
| 2356 | } | 2517 | } |
| 2357 | 2518 | ||
| 2358 | static int | 2519 | static int |
| @@ -2370,7 +2531,8 @@ nvmet_fc_add_port(struct nvmet_port *port) | |||
| 2370 | 2531 | ||
| 2371 | /* map the traddr address info to a target port */ | 2532 | /* map the traddr address info to a target port */ |
| 2372 | 2533 | ||
| 2373 | ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr); | 2534 | ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, |
| 2535 | sizeof(port->disc_addr.traddr)); | ||
| 2374 | if (ret) | 2536 | if (ret) |
| 2375 | return ret; | 2537 | return ret; |
| 2376 | 2538 | ||
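
The -EOVERFLOW contract documented above is easiest to see from the LLDD side. The following is only a sketch, not part of the patch: struct my_lport, struct my_cmd and the lport_* helpers are invented for illustration, while nvmet_fc_rcv_fcp_req() and the defer_rcv() template callback are the real nvmet-fc API being exercised.

    struct my_cmd {
    	struct my_lport			*lport;
    	void				*cmdiu;	/* raw CMD IU from the wire */
    	u32				cmdiu_len;
    	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
    };

    /* LLDD receive path honouring the 0 / -EOVERFLOW / error returns */
    static void lport_handle_cmd(struct my_lport *lport, struct my_cmd *cmd)
    {
    	int ret;

    	ret = nvmet_fc_rcv_fcp_req(lport->targetport, &cmd->tgt_fcp_req,
    				   cmd->cmdiu, cmd->cmdiu_len);
    	switch (ret) {
    	case 0:
    		/* transport copied the CMD IU; the buffer may be reused now */
    		lport_recycle_cmd_buf(lport, cmd);
    		break;
    	case -EOVERFLOW:
    		/*
    		 * Accepted but queued: keep cmd->cmdiu untouched until our
    		 * defer_rcv() callback fires for this request.
    		 */
    		break;
    	default:
    		/* not accepted by the transport: abort the exchange */
    		lport_abort_exchange(lport, cmd);
    		break;
    	}
    }

    /* .defer_rcv entry of the LLDD's nvmet_fc_target_template */
    static void lport_defer_rcv(struct nvmet_fc_target_port *tgtport,
    			    struct nvmefc_tgt_fcp_req *fcpreq)
    {
    	struct my_cmd *cmd = container_of(fcpreq, struct my_cmd,
    					  tgt_fcp_req);

    	/* the transport has now consumed the CMD IU; the buffer is free */
    	lport_recycle_cmd_buf(cmd->lport, cmd);
    }
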
diff --git a/drivers/of/device.c b/drivers/of/device.c index 28c38c756f92..e0a28ea341fe 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c | |||
| @@ -89,6 +89,7 @@ int of_dma_configure(struct device *dev, struct device_node *np) | |||
| 89 | bool coherent; | 89 | bool coherent; |
| 90 | unsigned long offset; | 90 | unsigned long offset; |
| 91 | const struct iommu_ops *iommu; | 91 | const struct iommu_ops *iommu; |
| 92 | u64 mask; | ||
| 92 | 93 | ||
| 93 | /* | 94 | /* |
| 94 | * Set default coherent_dma_mask to 32 bit. Drivers are expected to | 95 | * Set default coherent_dma_mask to 32 bit. Drivers are expected to |
| @@ -134,10 +135,9 @@ int of_dma_configure(struct device *dev, struct device_node *np) | |||
| 134 | * Limit coherent and dma mask based on size and default mask | 135 | * Limit coherent and dma mask based on size and default mask |
| 135 | * set by the driver. | 136 | * set by the driver. |
| 136 | */ | 137 | */ |
| 137 | dev->coherent_dma_mask = min(dev->coherent_dma_mask, | 138 | mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1); |
| 138 | DMA_BIT_MASK(ilog2(dma_addr + size))); | 139 | dev->coherent_dma_mask &= mask; |
| 139 | *dev->dma_mask = min((*dev->dma_mask), | 140 | *dev->dma_mask &= mask; |
| 140 | DMA_BIT_MASK(ilog2(dma_addr + size))); | ||
| 141 | 141 | ||
| 142 | coherent = of_dma_is_coherent(np); | 142 | coherent = of_dma_is_coherent(np); |
| 143 | dev_dbg(dev, "device is%sdma coherent\n", | 143 | dev_dbg(dev, "device is%sdma coherent\n", |
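
The mask computation above is worth spelling out. The number of address bits a window needs is determined by its highest addressable bus address, dma_addr + size - 1, so the required width is ilog2(dma_addr + size - 1) + 1. A worked example (not from the patch) with dma_addr = 0 and a 2 GiB + 4 KiB window:

    old:  ilog2(0x80001000)         = 31  ->  DMA_BIT_MASK(31) = 0x7fffffff
    new:  ilog2(0x80001000 - 1) + 1 = 32  ->  DMA_BIT_MASK(32) = 0xffffffff

The old expression rounds down whenever dma_addr + size is not a power of two, so the resulting mask cannot reach the last page of the window; the new form also sidesteps the 64-bit overflow of dma_addr + size when the window ends exactly at 2^64. For masks of the all-ones form 2^n - 1 the &= is equivalent to the old min(), and it arguably behaves more sensibly if a driver has installed a mask that is not of that form.
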
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 6ce72aa65425..ab21c846eb27 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
| @@ -476,7 +476,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res, | |||
| 476 | int i; | 476 | int i; |
| 477 | 477 | ||
| 478 | for (i = 0; i < nr_irqs; i++, res++) | 478 | for (i = 0; i < nr_irqs; i++, res++) |
| 479 | if (!of_irq_to_resource(dev, i, res)) | 479 | if (of_irq_to_resource(dev, i, res) <= 0) |
| 480 | break; | 480 | break; |
| 481 | 481 | ||
| 482 | return i; | 482 | return i; |
diff --git a/drivers/of/property.c b/drivers/of/property.c index eda50b4be934..067f9fab7b77 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c | |||
| @@ -708,6 +708,15 @@ struct device_node *of_graph_get_port_parent(struct device_node *node) | |||
| 708 | { | 708 | { |
| 709 | unsigned int depth; | 709 | unsigned int depth; |
| 710 | 710 | ||
| 711 | if (!node) | ||
| 712 | return NULL; | ||
| 713 | |||
| 714 | /* | ||
| 715 | * Preserve the usecount of the passed-in node, as | ||
| 716 | * of_get_next_parent() will do of_node_put() on it. | ||
| 717 | */ | ||
| 718 | of_node_get(node); | ||
| 719 | |||
| 711 | /* Walk 3 levels up only if there is 'ports' node. */ | 720 | /* Walk 3 levels up only if there is 'ports' node. */ |
| 712 | for (depth = 3; depth && node; depth--) { | 721 | for (depth = 3; depth && node; depth--) { |
| 713 | node = of_get_next_parent(node); | 722 | node = of_get_next_parent(node); |
| @@ -728,12 +737,16 @@ EXPORT_SYMBOL(of_graph_get_port_parent); | |||
| 728 | struct device_node *of_graph_get_remote_port_parent( | 737 | struct device_node *of_graph_get_remote_port_parent( |
| 729 | const struct device_node *node) | 738 | const struct device_node *node) |
| 730 | { | 739 | { |
| 731 | struct device_node *np; | 740 | struct device_node *np, *pp; |
| 732 | 741 | ||
| 733 | /* Get remote endpoint node. */ | 742 | /* Get remote endpoint node. */ |
| 734 | np = of_graph_get_remote_endpoint(node); | 743 | np = of_graph_get_remote_endpoint(node); |
| 735 | 744 | ||
| 736 | return of_graph_get_port_parent(np); | 745 | pp = of_graph_get_port_parent(np); |
| 746 | |||
| 747 | of_node_put(np); | ||
| 748 | |||
| 749 | return pp; | ||
| 737 | } | 750 | } |
| 738 | EXPORT_SYMBOL(of_graph_get_remote_port_parent); | 751 | EXPORT_SYMBOL(of_graph_get_remote_port_parent); |
| 739 | 752 | ||
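
With this change, of_graph_get_port_parent() no longer consumes the caller's reference on the endpoint node, and of_graph_get_remote_port_parent() drops the intermediate endpoint reference itself. A caller-side sketch, assuming a device node with an OF-graph endpoint (the lookup is only illustrative):

    struct device_node *ep, *parent;

    ep = of_graph_get_next_endpoint(dev->of_node, NULL);	/* +1 on ep */
    parent = of_graph_get_port_parent(ep);			/* ep refcount unchanged */

    /* ... use parent ... */

    of_node_put(parent);	/* drop the returned port-parent reference */
    of_node_put(ep);	/* the caller still owns its endpoint reference */
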
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 5c63b920b471..ed92c1254cff 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c | |||
| @@ -956,7 +956,7 @@ static int __init dino_probe(struct parisc_device *dev) | |||
| 956 | 956 | ||
| 957 | dino_dev->hba.dev = dev; | 957 | dino_dev->hba.dev = dev; |
| 958 | dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096); | 958 | dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096); |
| 959 | dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */ | 959 | dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND; |
| 960 | spin_lock_init(&dino_dev->dinosaur_pen); | 960 | spin_lock_init(&dino_dev->dinosaur_pen); |
| 961 | dino_dev->hba.iommu = ccio_get_iommu(dev); | 961 | dino_dev->hba.iommu = ccio_get_iommu(dev); |
| 962 | 962 | ||
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index 055f83fddc18..b1ff46fe4547 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c | |||
| @@ -333,11 +333,11 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun | |||
| 333 | 333 | ||
| 334 | /* Update the symlink to the real device */ | 334 | /* Update the symlink to the real device */ |
| 335 | sysfs_remove_link(&entry->kobj, "device"); | 335 | sysfs_remove_link(&entry->kobj, "device"); |
| 336 | write_unlock(&entry->rw_lock); | ||
| 337 | |||
| 336 | ret = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device"); | 338 | ret = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device"); |
| 337 | WARN_ON(ret); | 339 | WARN_ON(ret); |
| 338 | 340 | ||
| 339 | write_unlock(&entry->rw_lock); | ||
| 340 | |||
| 341 | printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" path to \"%s\"\n", | 341 | printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" path to \"%s\"\n", |
| 342 | entry->name, buf); | 342 | entry->name, buf); |
| 343 | 343 | ||
| @@ -954,7 +954,7 @@ static struct attribute *pdcs_subsys_attrs[] = { | |||
| 954 | NULL, | 954 | NULL, |
| 955 | }; | 955 | }; |
| 956 | 956 | ||
| 957 | static struct attribute_group pdcs_attr_group = { | 957 | static const struct attribute_group pdcs_attr_group = { |
| 958 | .attrs = pdcs_subsys_attrs, | 958 | .attrs = pdcs_subsys_attrs, |
| 959 | }; | 959 | }; |
| 960 | 960 | ||
| @@ -998,6 +998,7 @@ pdcs_register_pathentries(void) | |||
| 998 | /* kobject is now registered */ | 998 | /* kobject is now registered */ |
| 999 | write_lock(&entry->rw_lock); | 999 | write_lock(&entry->rw_lock); |
| 1000 | entry->ready = 2; | 1000 | entry->ready = 2; |
| 1001 | write_unlock(&entry->rw_lock); | ||
| 1001 | 1002 | ||
| 1002 | /* Add a nice symlink to the real device */ | 1003 | /* Add a nice symlink to the real device */ |
| 1003 | if (entry->dev) { | 1004 | if (entry->dev) { |
| @@ -1005,7 +1006,6 @@ pdcs_register_pathentries(void) | |||
| 1005 | WARN_ON(err); | 1006 | WARN_ON(err); |
| 1006 | } | 1007 | } |
| 1007 | 1008 | ||
| 1008 | write_unlock(&entry->rw_lock); | ||
| 1009 | kobject_uevent(&entry->kobj, KOBJ_ADD); | 1009 | kobject_uevent(&entry->kobj, KOBJ_ADD); |
| 1010 | } | 1010 | } |
| 1011 | 1011 | ||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 253d92409bb3..2225afc1cbbb 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
| @@ -538,12 +538,9 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd) | |||
| 538 | struct msi_desc *entry; | 538 | struct msi_desc *entry; |
| 539 | u16 control; | 539 | u16 control; |
| 540 | 540 | ||
| 541 | if (affd) { | 541 | if (affd) |
| 542 | masks = irq_create_affinity_masks(nvec, affd); | 542 | masks = irq_create_affinity_masks(nvec, affd); |
| 543 | if (!masks) | 543 | |
| 544 | dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n", | ||
| 545 | nvec); | ||
| 546 | } | ||
| 547 | 544 | ||
| 548 | /* MSI Entry Initialization */ | 545 | /* MSI Entry Initialization */ |
| 549 | entry = alloc_msi_entry(&dev->dev, nvec, masks); | 546 | entry = alloc_msi_entry(&dev->dev, nvec, masks); |
| @@ -679,12 +676,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, | |||
| 679 | struct msi_desc *entry; | 676 | struct msi_desc *entry; |
| 680 | int ret, i; | 677 | int ret, i; |
| 681 | 678 | ||
| 682 | if (affd) { | 679 | if (affd) |
| 683 | masks = irq_create_affinity_masks(nvec, affd); | 680 | masks = irq_create_affinity_masks(nvec, affd); |
| 684 | if (!masks) | ||
| 685 | dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n", | ||
| 686 | nvec); | ||
| 687 | } | ||
| 688 | 681 | ||
| 689 | for (i = 0, curmsk = masks; i < nvec; i++) { | 682 | for (i = 0, curmsk = masks; i < nvec; i++) { |
| 690 | entry = alloc_msi_entry(&dev->dev, 1, curmsk); | 683 | entry = alloc_msi_entry(&dev->dev, 1, curmsk); |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index af0cc3456dc1..fdf65a6c13f6 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -514,7 +514,7 @@ EXPORT_SYMBOL(pci_find_resource); | |||
| 514 | */ | 514 | */ |
| 515 | struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev) | 515 | struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev) |
| 516 | { | 516 | { |
| 517 | struct pci_dev *bridge, *highest_pcie_bridge = NULL; | 517 | struct pci_dev *bridge, *highest_pcie_bridge = dev; |
| 518 | 518 | ||
| 519 | bridge = pci_upstream_bridge(dev); | 519 | bridge = pci_upstream_bridge(dev); |
| 520 | while (bridge && pci_is_pcie(bridge)) { | 520 | while (bridge && pci_is_pcie(bridge)) { |
| @@ -4260,6 +4260,41 @@ int pci_reset_function(struct pci_dev *dev) | |||
| 4260 | EXPORT_SYMBOL_GPL(pci_reset_function); | 4260 | EXPORT_SYMBOL_GPL(pci_reset_function); |
| 4261 | 4261 | ||
| 4262 | /** | 4262 | /** |
| 4263 | * pci_reset_function_locked - quiesce and reset a PCI device function | ||
| 4264 | * @dev: PCI device to reset | ||
| 4265 | * | ||
| 4266 | * Some devices allow an individual function to be reset without affecting | ||
| 4267 | * other functions in the same device. The PCI device must be responsive | ||
| 4268 | * to PCI config space in order to use this function. | ||
| 4269 | * | ||
| 4270 | * This function does not just reset the PCI portion of a device, but | ||
| 4271 | * clears all the state associated with the device. This function differs | ||
| 4272 | * from __pci_reset_function() in that it saves and restores device state | ||
| 4273 | * over the reset. It also differs from pci_reset_function() in that it | ||
| 4274 | * requires the PCI device lock to be held. | ||
| 4275 | * | ||
| 4276 | * Returns 0 if the device function was successfully reset or negative if the | ||
| 4277 | * device doesn't support resetting a single function. | ||
| 4278 | */ | ||
| 4279 | int pci_reset_function_locked(struct pci_dev *dev) | ||
| 4280 | { | ||
| 4281 | int rc; | ||
| 4282 | |||
| 4283 | rc = pci_probe_reset_function(dev); | ||
| 4284 | if (rc) | ||
| 4285 | return rc; | ||
| 4286 | |||
| 4287 | pci_dev_save_and_disable(dev); | ||
| 4288 | |||
| 4289 | rc = __pci_reset_function_locked(dev); | ||
| 4290 | |||
| 4291 | pci_dev_restore(dev); | ||
| 4292 | |||
| 4293 | return rc; | ||
| 4294 | } | ||
| 4295 | EXPORT_SYMBOL_GPL(pci_reset_function_locked); | ||
| 4296 | |||
| 4297 | /** | ||
| 4263 | * pci_try_reset_function - quiesce and reset a PCI device function | 4298 | * pci_try_reset_function - quiesce and reset a PCI device function |
| 4264 | * @dev: PCI device to reset | 4299 | * @dev: PCI device to reset |
| 4265 | * | 4300 | * |
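
The new helper is intended for contexts that already hold the PCI device lock, such as a driver's ->probe(), which the driver core runs with the device lock held. A sketch under that assumption; my_device_needs_reset() is an invented predicate:

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
    	int ret;

    	ret = pci_enable_device(pdev);
    	if (ret)
    		return ret;

    	if (my_device_needs_reset(pdev)) {
    		/* device lock is held across probe, so _locked is safe */
    		ret = pci_reset_function_locked(pdev);
    		if (ret)
    			dev_warn(&pdev->dev,
    				 "function reset failed: %d\n", ret);
    	}

    	return 0;
    }
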
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index c31310db0404..e6a917b4acd3 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -1762,6 +1762,48 @@ static void pci_configure_extended_tags(struct pci_dev *dev) | |||
| 1762 | PCI_EXP_DEVCTL_EXT_TAG); | 1762 | PCI_EXP_DEVCTL_EXT_TAG); |
| 1763 | } | 1763 | } |
| 1764 | 1764 | ||
| 1765 | /** | ||
| 1766 | * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable | ||
| 1767 | * @dev: PCI device to query | ||
| 1768 | * | ||
| 1769 | * Returns true if the device has the relaxed ordering attribute enabled. | ||
| 1770 | */ | ||
| 1771 | bool pcie_relaxed_ordering_enabled(struct pci_dev *dev) | ||
| 1772 | { | ||
| 1773 | u16 v; | ||
| 1774 | |||
| 1775 | pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v); | ||
| 1776 | |||
| 1777 | return !!(v & PCI_EXP_DEVCTL_RELAX_EN); | ||
| 1778 | } | ||
| 1779 | EXPORT_SYMBOL(pcie_relaxed_ordering_enabled); | ||
| 1780 | |||
| 1781 | static void pci_configure_relaxed_ordering(struct pci_dev *dev) | ||
| 1782 | { | ||
| 1783 | struct pci_dev *root; | ||
| 1784 | |||
| 1785 | /* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */ | ||
| 1786 | if (dev->is_virtfn) | ||
| 1787 | return; | ||
| 1788 | |||
| 1789 | if (!pcie_relaxed_ordering_enabled(dev)) | ||
| 1790 | return; | ||
| 1791 | |||
| 1792 | /* | ||
| 1793 | * For now, we only deal with Relaxed Ordering issues with Root | ||
| 1794 | * Ports. Peer-to-Peer DMA is another can of worms. | ||
| 1795 | */ | ||
| 1796 | root = pci_find_pcie_root_port(dev); | ||
| 1797 | if (!root) | ||
| 1798 | return; | ||
| 1799 | |||
| 1800 | if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) { | ||
| 1801 | pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, | ||
| 1802 | PCI_EXP_DEVCTL_RELAX_EN); | ||
| 1803 | dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n"); | ||
| 1804 | } | ||
| 1805 | } | ||
| 1806 | |||
| 1765 | static void pci_configure_device(struct pci_dev *dev) | 1807 | static void pci_configure_device(struct pci_dev *dev) |
| 1766 | { | 1808 | { |
| 1767 | struct hotplug_params hpp; | 1809 | struct hotplug_params hpp; |
| @@ -1769,6 +1811,7 @@ static void pci_configure_device(struct pci_dev *dev) | |||
| 1769 | 1811 | ||
| 1770 | pci_configure_mps(dev); | 1812 | pci_configure_mps(dev); |
| 1771 | pci_configure_extended_tags(dev); | 1813 | pci_configure_extended_tags(dev); |
| 1814 | pci_configure_relaxed_ordering(dev); | ||
| 1772 | 1815 | ||
| 1773 | memset(&hpp, 0, sizeof(hpp)); | 1816 | memset(&hpp, 0, sizeof(hpp)); |
| 1774 | ret = pci_get_hp_params(dev, &hpp); | 1817 | ret = pci_get_hp_params(dev, &hpp); |
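
Endpoint drivers are the consumers of pcie_relaxed_ordering_enabled(): if pci_configure_relaxed_ordering() cleared the enable bit because of a Root Port quirk, the helper returns false and the driver should stop requesting RO in its DMA descriptors. A sketch; the foo_adapter structure and FOO_USE_RO flag are invented:

    static void foo_setup_dma_attrs(struct foo_adapter *adap)
    {
    	/* honour whatever pci_configure_relaxed_ordering() decided */
    	if (pcie_relaxed_ordering_enabled(adap->pdev))
    		adap->flags |= FOO_USE_RO;
    	else
    		adap->flags &= ~FOO_USE_RO;
    }
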
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 6967c6b4cf6b..140760403f36 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -4016,6 +4016,95 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8, | |||
| 4016 | quirk_tw686x_class); | 4016 | quirk_tw686x_class); |
| 4017 | 4017 | ||
| 4018 | /* | 4018 | /* |
| 4019 | * Some devices have problems with Transaction Layer Packets with the Relaxed | ||
| 4020 | * Ordering Attribute set. Such devices should mark themselves, and other | ||
| 4021 | * device drivers should check for the flag before sending TLPs with RO set. | ||
| 4022 | */ | ||
| 4023 | static void quirk_relaxedordering_disable(struct pci_dev *dev) | ||
| 4024 | { | ||
| 4025 | dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING; | ||
| 4026 | dev_info(&dev->dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n"); | ||
| 4027 | } | ||
| 4028 | |||
| 4029 | /* | ||
| 4030 | * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root | ||
| 4031 | * Complex has a Flow Control Credit issue which can cause performance | ||
| 4032 | * problems with Upstream Transaction Layer Packets with Relaxed Ordering set. | ||
| 4033 | */ | ||
| 4034 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4035 | quirk_relaxedordering_disable); | ||
| 4036 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4037 | quirk_relaxedordering_disable); | ||
| 4038 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4039 | quirk_relaxedordering_disable); | ||
| 4040 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4041 | quirk_relaxedordering_disable); | ||
| 4042 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4043 | quirk_relaxedordering_disable); | ||
| 4044 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4045 | quirk_relaxedordering_disable); | ||
| 4046 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4047 | quirk_relaxedordering_disable); | ||
| 4048 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4049 | quirk_relaxedordering_disable); | ||
| 4050 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4051 | quirk_relaxedordering_disable); | ||
| 4052 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4053 | quirk_relaxedordering_disable); | ||
| 4054 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4055 | quirk_relaxedordering_disable); | ||
| 4056 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4057 | quirk_relaxedordering_disable); | ||
| 4058 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4059 | quirk_relaxedordering_disable); | ||
| 4060 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4061 | quirk_relaxedordering_disable); | ||
| 4062 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4063 | quirk_relaxedordering_disable); | ||
| 4064 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4065 | quirk_relaxedordering_disable); | ||
| 4066 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4067 | quirk_relaxedordering_disable); | ||
| 4068 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4069 | quirk_relaxedordering_disable); | ||
| 4070 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4071 | quirk_relaxedordering_disable); | ||
| 4072 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4073 | quirk_relaxedordering_disable); | ||
| 4074 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4075 | quirk_relaxedordering_disable); | ||
| 4076 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4077 | quirk_relaxedordering_disable); | ||
| 4078 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4079 | quirk_relaxedordering_disable); | ||
| 4080 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4081 | quirk_relaxedordering_disable); | ||
| 4082 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4083 | quirk_relaxedordering_disable); | ||
| 4084 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4085 | quirk_relaxedordering_disable); | ||
| 4086 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4087 | quirk_relaxedordering_disable); | ||
| 4088 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4089 | quirk_relaxedordering_disable); | ||
| 4090 | |||
| 4091 | /* | ||
| 4092 | * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex | ||
| 4093 | * where Upstream Transaction Layer Packets with the Relaxed Ordering | ||
| 4094 | * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering | ||
| 4095 | * set. This is a violation of the PCIe 3.0 Transaction Ordering Rules | ||
| 4096 | * outlined in Section 2.4.1 (PCI Express(r) Base Specification Revision 3.0 | ||
| 4097 | * November 10, 2010). As a result, on this platform we can't use Relaxed | ||
| 4098 | * Ordering for Upstream TLPs. | ||
| 4099 | */ | ||
| 4100 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4101 | quirk_relaxedordering_disable); | ||
| 4102 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4103 | quirk_relaxedordering_disable); | ||
| 4104 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8, | ||
| 4105 | quirk_relaxedordering_disable); | ||
| 4106 | |||
| 4107 | /* | ||
| 4019 | * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same | 4108 | * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same |
| 4020 | * values for the Attribute as were supplied in the header of the | 4109 | * values for the Attribute as were supplied in the header of the |
| 4021 | * corresponding Request, except as explicitly allowed when IDO is used." | 4110 | * corresponding Request, except as explicitly allowed when IDO is used." |
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index dc459eb1246b..1c5e0f333779 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
| @@ -569,22 +569,41 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu) | |||
| 569 | if (irq != other_irq) { | 569 | if (irq != other_irq) { |
| 570 | pr_warn("mismatched PPIs detected.\n"); | 570 | pr_warn("mismatched PPIs detected.\n"); |
| 571 | err = -EINVAL; | 571 | err = -EINVAL; |
| 572 | goto err_out; | ||
| 572 | } | 573 | } |
| 573 | } else { | 574 | } else { |
| 574 | err = request_irq(irq, handler, | 575 | struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu); |
| 575 | IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", | 576 | unsigned long irq_flags; |
| 577 | |||
| 578 | err = irq_force_affinity(irq, cpumask_of(cpu)); | ||
| 579 | |||
| 580 | if (err && num_possible_cpus() > 1) { | ||
| 581 | pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", | ||
| 582 | irq, cpu); | ||
| 583 | goto err_out; | ||
| 584 | } | ||
| 585 | |||
| 586 | if (platdata && platdata->irq_flags) { | ||
| 587 | irq_flags = platdata->irq_flags; | ||
| 588 | } else { | ||
| 589 | irq_flags = IRQF_PERCPU | | ||
| 590 | IRQF_NOBALANCING | | ||
| 591 | IRQF_NO_THREAD; | ||
| 592 | } | ||
| 593 | |||
| 594 | err = request_irq(irq, handler, irq_flags, "arm-pmu", | ||
| 576 | per_cpu_ptr(&hw_events->percpu_pmu, cpu)); | 595 | per_cpu_ptr(&hw_events->percpu_pmu, cpu)); |
| 577 | } | 596 | } |
| 578 | 597 | ||
| 579 | if (err) { | 598 | if (err) |
| 580 | pr_err("unable to request IRQ%d for ARM PMU counters\n", | 599 | goto err_out; |
| 581 | irq); | ||
| 582 | return err; | ||
| 583 | } | ||
| 584 | 600 | ||
| 585 | cpumask_set_cpu(cpu, &armpmu->active_irqs); | 601 | cpumask_set_cpu(cpu, &armpmu->active_irqs); |
| 586 | |||
| 587 | return 0; | 602 | return 0; |
| 603 | |||
| 604 | err_out: | ||
| 605 | pr_err("unable to request IRQ%d for ARM PMU counters\n", irq); | ||
| 606 | return err; | ||
| 588 | } | 607 | } |
| 589 | 608 | ||
| 590 | int armpmu_request_irqs(struct arm_pmu *armpmu) | 609 | int armpmu_request_irqs(struct arm_pmu *armpmu) |
| @@ -628,12 +647,6 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node) | |||
| 628 | enable_percpu_irq(irq, IRQ_TYPE_NONE); | 647 | enable_percpu_irq(irq, IRQ_TYPE_NONE); |
| 629 | return 0; | 648 | return 0; |
| 630 | } | 649 | } |
| 631 | |||
| 632 | if (irq_force_affinity(irq, cpumask_of(cpu)) && | ||
| 633 | num_possible_cpus() > 1) { | ||
| 634 | pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", | ||
| 635 | irq, cpu); | ||
| 636 | } | ||
| 637 | } | 650 | } |
| 638 | 651 | ||
| 639 | return 0; | 652 | return 0; |
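
The platdata path added above lets a platform override the default SPI request flags. A sketch of a board file doing so; the flag choice is purely illustrative (for example, a platform that relays the PMU interrupt through another handler might need to drop IRQF_NO_THREAD from the defaults):

    static struct arm_pmu_platdata my_pmu_platdata = {
    	/* illustrative: allow the PMU handler to be threaded/relayed */
    	.irq_flags = IRQF_NOBALANCING,
    };

    static struct platform_device my_pmu_device = {
    	.name			= "arm-pmu",
    	.id			= -1,
    	.dev.platform_data	= &my_pmu_platdata,
    };
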
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c index 69255f53057a..4eafa7a42e52 100644 --- a/drivers/perf/arm_pmu_platform.c +++ b/drivers/perf/arm_pmu_platform.c | |||
| @@ -131,8 +131,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) | |||
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | if (!pmu_has_irq_affinity(pdev->dev.of_node)) { | 133 | if (!pmu_has_irq_affinity(pdev->dev.of_node)) { |
| 134 | pr_warn("no interrupt-affinity property for %s, guessing.\n", | 134 | pr_warn("no interrupt-affinity property for %pOF, guessing.\n", |
| 135 | of_node_full_name(pdev->dev.of_node)); | 135 | pdev->dev.of_node); |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | /* | 138 | /* |
| @@ -211,7 +211,7 @@ int arm_pmu_device_probe(struct platform_device *pdev, | |||
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | if (ret) { | 213 | if (ret) { |
| 214 | pr_info("%s: failed to probe PMU!\n", of_node_full_name(node)); | 214 | pr_info("%pOF: failed to probe PMU!\n", node); |
| 215 | goto out_free; | 215 | goto out_free; |
| 216 | } | 216 | } |
| 217 | 217 | ||
| @@ -228,8 +228,7 @@ int arm_pmu_device_probe(struct platform_device *pdev, | |||
| 228 | out_free_irqs: | 228 | out_free_irqs: |
| 229 | armpmu_free_irqs(pmu); | 229 | armpmu_free_irqs(pmu); |
| 230 | out_free: | 230 | out_free: |
| 231 | pr_info("%s: failed to register PMU devices!\n", | 231 | pr_info("%pOF: failed to register PMU devices!\n", node); |
| 232 | of_node_full_name(node)); | ||
| 233 | armpmu_free(pmu); | 232 | armpmu_free(pmu); |
| 234 | return ret; | 233 | return ret; |
| 235 | } | 234 | } |
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index c259848228b4..b242cce10468 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c | |||
| @@ -546,6 +546,7 @@ static int l2_cache_event_init(struct perf_event *event) | |||
| 546 | } | 546 | } |
| 547 | 547 | ||
| 548 | if ((event != event->group_leader) && | 548 | if ((event != event->group_leader) && |
| 549 | !is_software_event(event->group_leader) && | ||
| 549 | (L2_EVT_GROUP(event->group_leader->attr.config) == | 550 | (L2_EVT_GROUP(event->group_leader->attr.config) == |
| 550 | L2_EVT_GROUP(event->attr.config))) { | 551 | L2_EVT_GROUP(event->attr.config))) { |
| 551 | dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, | 552 | dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, |
| @@ -558,6 +559,7 @@ static int l2_cache_event_init(struct perf_event *event) | |||
| 558 | list_for_each_entry(sibling, &event->group_leader->sibling_list, | 559 | list_for_each_entry(sibling, &event->group_leader->sibling_list, |
| 559 | group_entry) { | 560 | group_entry) { |
| 560 | if ((sibling != event) && | 561 | if ((sibling != event) && |
| 562 | !is_software_event(sibling) && | ||
| 561 | (L2_EVT_GROUP(sibling->attr.config) == | 563 | (L2_EVT_GROUP(sibling->attr.config) == |
| 562 | L2_EVT_GROUP(event->attr.config))) { | 564 | L2_EVT_GROUP(event->attr.config))) { |
| 563 | dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, | 565 | dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, |
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig index 37371b89b14f..64fc59c3ae6d 100644 --- a/drivers/phy/broadcom/Kconfig +++ b/drivers/phy/broadcom/Kconfig | |||
| @@ -30,8 +30,8 @@ config PHY_BCM_NS_USB3 | |||
| 30 | tristate "Broadcom Northstar USB 3.0 PHY Driver" | 30 | tristate "Broadcom Northstar USB 3.0 PHY Driver" |
| 31 | depends on ARCH_BCM_IPROC || COMPILE_TEST | 31 | depends on ARCH_BCM_IPROC || COMPILE_TEST |
| 32 | depends on HAS_IOMEM && OF | 32 | depends on HAS_IOMEM && OF |
| 33 | depends on MDIO_BUS | ||
| 33 | select GENERIC_PHY | 34 | select GENERIC_PHY |
| 34 | select MDIO_DEVICE | ||
| 35 | help | 35 | help |
| 36 | Enable this to support Broadcom USB 3.0 PHY connected to the USB | 36 | Enable this to support Broadcom USB 3.0 PHY connected to the USB |
| 37 | controller on Northstar family. | 37 | controller on Northstar family. |
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 20f1b4493994..04e929fd0ffe 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
| @@ -1548,6 +1548,13 @@ static const struct dmi_system_id chv_no_valid_mask[] = { | |||
| 1548 | }, | 1548 | }, |
| 1549 | }, | 1549 | }, |
| 1550 | { | 1550 | { |
| 1551 | .ident = "HP Chromebook 11 G5 (Setzer)", | ||
| 1552 | .matches = { | ||
| 1553 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), | ||
| 1554 | DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), | ||
| 1555 | }, | ||
| 1556 | }, | ||
| 1557 | { | ||
| 1551 | .ident = "Acer Chromebook R11 (Cyan)", | 1558 | .ident = "Acer Chromebook R11 (Cyan)", |
| 1552 | .matches = { | 1559 | .matches = { |
| 1553 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | 1560 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), |
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index 4d4ef42a39b5..86c4b3fab7b0 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c | |||
| @@ -343,9 +343,9 @@ static const struct pinctrl_pin_desc mrfld_pins[] = { | |||
| 343 | 343 | ||
| 344 | static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 }; | 344 | static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 }; |
| 345 | static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 }; | 345 | static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 }; |
| 346 | static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 }; | 346 | static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 }; |
| 347 | static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 }; | 347 | static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 }; |
| 348 | static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 }; | 348 | static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 }; |
| 349 | static const unsigned int mrfld_pwm0_pins[] = { 144 }; | 349 | static const unsigned int mrfld_pwm0_pins[] = { 144 }; |
| 350 | static const unsigned int mrfld_pwm1_pins[] = { 145 }; | 350 | static const unsigned int mrfld_pwm1_pins[] = { 145 }; |
| 351 | static const unsigned int mrfld_pwm2_pins[] = { 132 }; | 351 | static const unsigned int mrfld_pwm2_pins[] = { 132 }; |
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index f024e25787fc..0c6d7812d6fd 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | |||
| @@ -37,7 +37,7 @@ | |||
| 37 | #define IRQ_STATUS 0x10 | 37 | #define IRQ_STATUS 0x10 |
| 38 | #define IRQ_WKUP 0x18 | 38 | #define IRQ_WKUP 0x18 |
| 39 | 39 | ||
| 40 | #define NB_FUNCS 2 | 40 | #define NB_FUNCS 3 |
| 41 | #define GPIO_PER_REG 32 | 41 | #define GPIO_PER_REG 32 |
| 42 | 42 | ||
| 43 | /** | 43 | /** |
| @@ -126,6 +126,16 @@ struct armada_37xx_pinctrl { | |||
| 126 | .funcs = {_func1, "gpio"} \ | 126 | .funcs = {_func1, "gpio"} \ |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | #define PIN_GRP_GPIO_3(_name, _start, _nr, _mask, _v1, _v2, _v3, _f1, _f2) \ | ||
| 130 | { \ | ||
| 131 | .name = _name, \ | ||
| 132 | .start_pin = _start, \ | ||
| 133 | .npins = _nr, \ | ||
| 134 | .reg_mask = _mask, \ | ||
| 135 | .val = {_v1, _v2, _v3}, \ | ||
| 136 | .funcs = {_f1, _f2, "gpio"} \ | ||
| 137 | } | ||
| 138 | |||
| 129 | #define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \ | 139 | #define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \ |
| 130 | _f1, _f2) \ | 140 | _f1, _f2) \ |
| 131 | { \ | 141 | { \ |
| @@ -171,12 +181,13 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = { | |||
| 171 | PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"), | 181 | PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"), |
| 172 | PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"), | 182 | PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"), |
| 173 | PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"), | 183 | PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"), |
| 174 | PIN_GRP_EXTRA("rgmii", 6, 12, BIT(3), 0, BIT(3), 23, 1, "mii", "gpio"), | 184 | PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"), |
| 175 | PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"), | 185 | PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"), |
| 176 | PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"), | 186 | PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"), |
| 177 | PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"), | 187 | PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"), |
| 178 | PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"), | 188 | PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"), |
| 179 | PIN_GRP("mii_col", 23, 1, BIT(8), "mii", "mii_err"), | 189 | PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14), |
| 190 | "mii", "mii_err"), | ||
| 180 | }; | 191 | }; |
| 181 | 192 | ||
| 182 | const struct armada_37xx_pin_data armada_37xx_pin_nb = { | 193 | const struct armada_37xx_pin_data armada_37xx_pin_nb = { |
| @@ -187,7 +198,7 @@ const struct armada_37xx_pin_data armada_37xx_pin_nb = { | |||
| 187 | }; | 198 | }; |
| 188 | 199 | ||
| 189 | const struct armada_37xx_pin_data armada_37xx_pin_sb = { | 200 | const struct armada_37xx_pin_data armada_37xx_pin_sb = { |
| 190 | .nr_pins = 29, | 201 | .nr_pins = 30, |
| 191 | .name = "GPIO2", | 202 | .name = "GPIO2", |
| 192 | .groups = armada_37xx_sb_groups, | 203 | .groups = armada_37xx_sb_groups, |
| 193 | .ngroups = ARRAY_SIZE(armada_37xx_sb_groups), | 204 | .ngroups = ARRAY_SIZE(armada_37xx_sb_groups), |
| @@ -208,7 +219,7 @@ static int armada_37xx_get_func_reg(struct armada_37xx_pin_group *grp, | |||
| 208 | { | 219 | { |
| 209 | int f; | 220 | int f; |
| 210 | 221 | ||
| 211 | for (f = 0; f < NB_FUNCS; f++) | 222 | for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) |
| 212 | if (!strcmp(grp->funcs[f], func)) | 223 | if (!strcmp(grp->funcs[f], func)) |
| 213 | return f; | 224 | return f; |
| 214 | 225 | ||
| @@ -795,7 +806,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info) | |||
| 795 | for (j = 0; j < grp->extra_npins; j++) | 806 | for (j = 0; j < grp->extra_npins; j++) |
| 796 | grp->pins[i+j] = grp->extra_pin + j; | 807 | grp->pins[i+j] = grp->extra_pin + j; |
| 797 | 808 | ||
| 798 | for (f = 0; f < NB_FUNCS; f++) { | 809 | for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) { |
| 799 | int ret; | 810 | int ret; |
| 800 | /* check for unique functions and count groups */ | 811 | /* check for unique functions and count groups */ |
| 801 | ret = armada_37xx_add_function(info->funcs, &funcsize, | 812 | ret = armada_37xx_add_function(info->funcs, &funcsize, |
| @@ -847,7 +858,7 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info) | |||
| 847 | struct armada_37xx_pin_group *gp = &info->groups[g]; | 858 | struct armada_37xx_pin_group *gp = &info->groups[g]; |
| 848 | int f; | 859 | int f; |
| 849 | 860 | ||
| 850 | for (f = 0; f < NB_FUNCS; f++) { | 861 | for (f = 0; (f < NB_FUNCS) && gp->funcs[f]; f++) { |
| 851 | if (strcmp(gp->funcs[f], name) == 0) { | 862 | if (strcmp(gp->funcs[f], name) == 0) { |
| 852 | *groups = gp->name; | 863 | *groups = gp->name; |
| 853 | groups++; | 864 | groups++; |
diff --git a/drivers/pinctrl/stm32/Kconfig b/drivers/pinctrl/stm32/Kconfig index 3b8026fca057..7e1fe39a56a5 100644 --- a/drivers/pinctrl/stm32/Kconfig +++ b/drivers/pinctrl/stm32/Kconfig | |||
| @@ -6,29 +6,30 @@ config PINCTRL_STM32 | |||
| 6 | select PINMUX | 6 | select PINMUX |
| 7 | select GENERIC_PINCONF | 7 | select GENERIC_PINCONF |
| 8 | select GPIOLIB | 8 | select GPIOLIB |
| 9 | select IRQ_DOMAIN_HIERARCHY | ||
| 9 | select MFD_SYSCON | 10 | select MFD_SYSCON |
| 10 | 11 | ||
| 11 | config PINCTRL_STM32F429 | 12 | config PINCTRL_STM32F429 |
| 12 | bool "STMicroelectronics STM32F429 pin control" if COMPILE_TEST && !MACH_STM32F429 | 13 | bool "STMicroelectronics STM32F429 pin control" if COMPILE_TEST && !MACH_STM32F429 |
| 13 | depends on OF && IRQ_DOMAIN_HIERARCHY | 14 | depends on OF |
| 14 | default MACH_STM32F429 | 15 | default MACH_STM32F429 |
| 15 | select PINCTRL_STM32 | 16 | select PINCTRL_STM32 |
| 16 | 17 | ||
| 17 | config PINCTRL_STM32F469 | 18 | config PINCTRL_STM32F469 |
| 18 | bool "STMicroelectronics STM32F469 pin control" if COMPILE_TEST && !MACH_STM32F469 | 19 | bool "STMicroelectronics STM32F469 pin control" if COMPILE_TEST && !MACH_STM32F469 |
| 19 | depends on OF && IRQ_DOMAIN_HIERARCHY | 20 | depends on OF |
| 20 | default MACH_STM32F469 | 21 | default MACH_STM32F469 |
| 21 | select PINCTRL_STM32 | 22 | select PINCTRL_STM32 |
| 22 | 23 | ||
| 23 | config PINCTRL_STM32F746 | 24 | config PINCTRL_STM32F746 |
| 24 | bool "STMicroelectronics STM32F746 pin control" if COMPILE_TEST && !MACH_STM32F746 | 25 | bool "STMicroelectronics STM32F746 pin control" if COMPILE_TEST && !MACH_STM32F746 |
| 25 | depends on OF && IRQ_DOMAIN_HIERARCHY | 26 | depends on OF |
| 26 | default MACH_STM32F746 | 27 | default MACH_STM32F746 |
| 27 | select PINCTRL_STM32 | 28 | select PINCTRL_STM32 |
| 28 | 29 | ||
| 29 | config PINCTRL_STM32H743 | 30 | config PINCTRL_STM32H743 |
| 30 | bool "STMicroelectronics STM32H743 pin control" if COMPILE_TEST && !MACH_STM32H743 | 31 | bool "STMicroelectronics STM32H743 pin control" if COMPILE_TEST && !MACH_STM32H743 |
| 31 | depends on OF && IRQ_DOMAIN_HIERARCHY | 32 | depends on OF |
| 32 | default MACH_STM32H743 | 33 | default MACH_STM32H743 |
| 33 | select PINCTRL_STM32 | 34 | select PINCTRL_STM32 |
| 34 | endif | 35 | endif |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c index 159580c04b14..47a392bc73c8 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c | |||
| @@ -918,6 +918,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = { | |||
| 918 | SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD1 */ | 918 | SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD1 */ |
| 919 | PINCTRL_SUN7I_A20), | 919 | PINCTRL_SUN7I_A20), |
| 920 | SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */ | 920 | SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */ |
| 921 | SUNXI_FUNCTION(0x5, "sim"), /* DET */ | ||
| 921 | SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */ | 922 | SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */ |
| 922 | SUNXI_FUNCTION(0x7, "csi1")), /* D16 */ | 923 | SUNXI_FUNCTION(0x7, "csi1")), /* D16 */ |
| 923 | SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17), | 924 | SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17), |
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c index a433a306a2d0..c75e094b2d90 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c | |||
| @@ -1084,7 +1084,7 @@ static const unsigned usb1_pins[] = {182, 183}; | |||
| 1084 | static const int usb1_muxvals[] = {0, 0}; | 1084 | static const int usb1_muxvals[] = {0, 0}; |
| 1085 | static const unsigned usb2_pins[] = {184, 185}; | 1085 | static const unsigned usb2_pins[] = {184, 185}; |
| 1086 | static const int usb2_muxvals[] = {0, 0}; | 1086 | static const int usb2_muxvals[] = {0, 0}; |
| 1087 | static const unsigned usb3_pins[] = {186, 187}; | 1087 | static const unsigned usb3_pins[] = {187, 188}; |
| 1088 | static const int usb3_muxvals[] = {0, 0}; | 1088 | static const int usb3_muxvals[] = {0, 0}; |
| 1089 | static const unsigned port_range0_pins[] = { | 1089 | static const unsigned port_range0_pins[] = { |
| 1090 | 300, 301, 302, 303, 304, 305, 306, 307, /* PORT0x */ | 1090 | 300, 301, 302, 303, 304, 305, 306, 307, /* PORT0x */ |
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c index 787e3967bd5c..f828ee340a98 100644 --- a/drivers/pinctrl/zte/pinctrl-zx.c +++ b/drivers/pinctrl/zte/pinctrl-zx.c | |||
| @@ -64,10 +64,8 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector, | |||
| 64 | struct zx_pinctrl_soc_info *info = zpctl->info; | 64 | struct zx_pinctrl_soc_info *info = zpctl->info; |
| 65 | const struct pinctrl_pin_desc *pindesc = info->pins + group_selector; | 65 | const struct pinctrl_pin_desc *pindesc = info->pins + group_selector; |
| 66 | struct zx_pin_data *data = pindesc->drv_data; | 66 | struct zx_pin_data *data = pindesc->drv_data; |
| 67 | struct zx_mux_desc *mux = data->muxes; | 67 | struct zx_mux_desc *mux; |
| 68 | u32 mask = (1 << data->width) - 1; | 68 | u32 mask, offset, bitpos; |
| 69 | u32 offset = data->offset; | ||
| 70 | u32 bitpos = data->bitpos; | ||
| 71 | struct function_desc *func; | 69 | struct function_desc *func; |
| 72 | unsigned long flags; | 70 | unsigned long flags; |
| 73 | u32 val, mval; | 71 | u32 val, mval; |
| @@ -76,6 +74,11 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector, | |||
| 76 | if (!data) | 74 | if (!data) |
| 77 | return -EINVAL; | 75 | return -EINVAL; |
| 78 | 76 | ||
| 77 | mux = data->muxes; | ||
| 78 | mask = (1 << data->width) - 1; | ||
| 79 | offset = data->offset; | ||
| 80 | bitpos = data->bitpos; | ||
| 81 | |||
| 79 | func = pinmux_generic_get_function(pctldev, func_selector); | 82 | func = pinmux_generic_get_function(pctldev, func_selector); |
| 80 | if (!func) | 83 | if (!func) |
| 81 | return -EINVAL; | 84 | return -EINVAL; |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index b04860703740..80b87954f6dd 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -675,6 +675,7 @@ config PEAQ_WMI | |||
| 675 | tristate "PEAQ 2-in-1 WMI hotkey driver" | 675 | tristate "PEAQ 2-in-1 WMI hotkey driver" |
| 676 | depends on ACPI_WMI | 676 | depends on ACPI_WMI |
| 677 | depends on INPUT | 677 | depends on INPUT |
| 678 | select INPUT_POLLDEV | ||
| 678 | help | 679 | help |
| 679 | Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s. | 680 | Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s. |
| 680 | 681 | ||
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index f8978464df31..dad8f4afa17c 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c | |||
| @@ -626,7 +626,7 @@ static void dell_wmi_input_destroy(struct wmi_device *wdev) | |||
| 626 | * WMI Interface Version 8 4 <version> | 626 | * WMI Interface Version 8 4 <version> |
| 627 | * WMI buffer length 12 4 4096 | 627 | * WMI buffer length 12 4 4096 |
| 628 | */ | 628 | */ |
| 629 | static int __init dell_wmi_check_descriptor_buffer(void) | 629 | static int dell_wmi_check_descriptor_buffer(void) |
| 630 | { | 630 | { |
| 631 | struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; | 631 | struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 632 | union acpi_object *obj; | 632 | union acpi_object *obj; |
| @@ -717,9 +717,15 @@ static int dell_wmi_events_set_enabled(bool enable) | |||
| 717 | 717 | ||
| 718 | static int dell_wmi_probe(struct wmi_device *wdev) | 718 | static int dell_wmi_probe(struct wmi_device *wdev) |
| 719 | { | 719 | { |
| 720 | int err; | ||
| 721 | |||
| 720 | struct dell_wmi_priv *priv = devm_kzalloc( | 722 | struct dell_wmi_priv *priv = devm_kzalloc( |
| 721 | &wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL); | 723 | &wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL); |
| 722 | 724 | ||
| 725 | err = dell_wmi_check_descriptor_buffer(); | ||
| 726 | if (err) | ||
| 727 | return err; | ||
| 728 | |||
| 723 | dev_set_drvdata(&wdev->dev, priv); | 729 | dev_set_drvdata(&wdev->dev, priv); |
| 724 | 730 | ||
| 725 | return dell_wmi_input_setup(wdev); | 731 | return dell_wmi_input_setup(wdev); |
| @@ -749,10 +755,6 @@ static int __init dell_wmi_init(void) | |||
| 749 | { | 755 | { |
| 750 | int err; | 756 | int err; |
| 751 | 757 | ||
| 752 | err = dell_wmi_check_descriptor_buffer(); | ||
| 753 | if (err) | ||
| 754 | return err; | ||
| 755 | |||
| 756 | dmi_check_system(dell_wmi_smbios_list); | 758 | dmi_check_system(dell_wmi_smbios_list); |
| 757 | 759 | ||
| 758 | if (wmi_requires_smbios_request) { | 760 | if (wmi_requires_smbios_request) { |
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index 61f106377661..480926786cb8 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c | |||
| @@ -36,8 +36,8 @@ static const struct acpi_device_id intel_vbtn_ids[] = { | |||
| 36 | 36 | ||
| 37 | /* In theory, these are HID usages. */ | 37 | /* In theory, these are HID usages. */ |
| 38 | static const struct key_entry intel_vbtn_keymap[] = { | 38 | static const struct key_entry intel_vbtn_keymap[] = { |
| 39 | { KE_IGNORE, 0xC0, { KEY_POWER } }, /* power key press */ | 39 | { KE_KEY, 0xC0, { KEY_POWER } }, /* power key press */ |
| 40 | { KE_KEY, 0xC1, { KEY_POWER } }, /* power key release */ | 40 | { KE_IGNORE, 0xC1, { KEY_POWER } }, /* power key release */ |
| 41 | { KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* volume-up key press */ | 41 | { KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* volume-up key press */ |
| 42 | { KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* volume-up key release */ | 42 | { KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* volume-up key release */ |
| 43 | { KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* volume-down key press */ | 43 | { KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* volume-down key press */ |
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 1a764e311e11..e32ba575e8d9 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c | |||
| @@ -1252,12 +1252,12 @@ static int __init acpi_wmi_init(void) | |||
| 1252 | 1252 | ||
| 1253 | return 0; | 1253 | return 0; |
| 1254 | 1254 | ||
| 1255 | err_unreg_class: | ||
| 1256 | class_unregister(&wmi_bus_class); | ||
| 1257 | |||
| 1258 | err_unreg_bus: | 1255 | err_unreg_bus: |
| 1259 | bus_unregister(&wmi_bus_type); | 1256 | bus_unregister(&wmi_bus_type); |
| 1260 | 1257 | ||
| 1258 | err_unreg_class: | ||
| 1259 | class_unregister(&wmi_bus_class); | ||
| 1260 | |||
| 1261 | return error; | 1261 | return error; |
| 1262 | } | 1262 | } |
| 1263 | 1263 | ||
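The wmi.c hunk above restores the usual unwind order in acpi_wmi_init(): the bus type, registered after the class, is torn down first, and control falls through to unregister the class. A minimal sketch of that goto-unwind idiom, using placeholder class/bus/driver objects rather than the wmi ones:

```c
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct class example_class = { .name = "example" };
static struct bus_type example_bus = { .name = "example-bus" };
static struct platform_driver example_drv = {
	.driver = { .name = "example-drv" },
};

static int __init example_init(void)
{
	int err;

	err = class_register(&example_class);		/* step 1 */
	if (err)
		return err;

	err = bus_register(&example_bus);		/* step 2 */
	if (err)
		goto err_unreg_class;			/* undo step 1 */

	err = platform_driver_register(&example_drv);	/* step 3 */
	if (err)
		goto err_unreg_bus;			/* undo steps 2 and 1 */

	return 0;

err_unreg_bus:
	bus_unregister(&example_bus);			/* reverse of step 2 */
err_unreg_class:
	class_unregister(&example_class);		/* reverse of step 1 */
	return err;
}
```

Each error label undoes exactly one setup step and falls through to the labels for the earlier steps, so a failure at step N unwinds steps N-1 down to 1 in reverse order of registration.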
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c index 031a34372191..75f63e38a8d1 100644 --- a/drivers/power/avs/rockchip-io-domain.c +++ b/drivers/power/avs/rockchip-io-domain.c | |||
| @@ -349,6 +349,36 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3399_pmu = { | |||
| 349 | .init = rk3399_pmu_iodomain_init, | 349 | .init = rk3399_pmu_iodomain_init, |
| 350 | }; | 350 | }; |
| 351 | 351 | ||
| 352 | static const struct rockchip_iodomain_soc_data soc_data_rv1108 = { | ||
| 353 | .grf_offset = 0x404, | ||
| 354 | .supply_names = { | ||
| 355 | NULL, | ||
| 356 | NULL, | ||
| 357 | NULL, | ||
| 358 | NULL, | ||
| 359 | NULL, | ||
| 360 | NULL, | ||
| 361 | NULL, | ||
| 362 | NULL, | ||
| 363 | NULL, | ||
| 364 | NULL, | ||
| 365 | NULL, | ||
| 366 | "vccio1", | ||
| 367 | "vccio2", | ||
| 368 | "vccio3", | ||
| 369 | "vccio5", | ||
| 370 | "vccio6", | ||
| 371 | }, | ||
| 372 | |||
| 373 | }; | ||
| 374 | |||
| 375 | static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = { | ||
| 376 | .grf_offset = 0x104, | ||
| 377 | .supply_names = { | ||
| 378 | "pmu", | ||
| 379 | }, | ||
| 380 | }; | ||
| 381 | |||
| 352 | static const struct of_device_id rockchip_iodomain_match[] = { | 382 | static const struct of_device_id rockchip_iodomain_match[] = { |
| 353 | { | 383 | { |
| 354 | .compatible = "rockchip,rk3188-io-voltage-domain", | 384 | .compatible = "rockchip,rk3188-io-voltage-domain", |
| @@ -382,6 +412,14 @@ static const struct of_device_id rockchip_iodomain_match[] = { | |||
| 382 | .compatible = "rockchip,rk3399-pmu-io-voltage-domain", | 412 | .compatible = "rockchip,rk3399-pmu-io-voltage-domain", |
| 383 | .data = (void *)&soc_data_rk3399_pmu | 413 | .data = (void *)&soc_data_rk3399_pmu |
| 384 | }, | 414 | }, |
| 415 | { | ||
| 416 | .compatible = "rockchip,rv1108-io-voltage-domain", | ||
| 417 | .data = (void *)&soc_data_rv1108 | ||
| 418 | }, | ||
| 419 | { | ||
| 420 | .compatible = "rockchip,rv1108-pmu-io-voltage-domain", | ||
| 421 | .data = (void *)&soc_data_rv1108_pmu | ||
| 422 | }, | ||
| 385 | { /* sentinel */ }, | 423 | { /* sentinel */ }, |
| 386 | }; | 424 | }; |
| 387 | MODULE_DEVICE_TABLE(of, rockchip_iodomain_match); | 425 | MODULE_DEVICE_TABLE(of, rockchip_iodomain_match); |
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index b77435783ef3..7eacc1c4b3b1 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/syscalls.h> | 29 | #include <linux/syscalls.h> |
| 30 | #include <linux/uaccess.h> | 30 | #include <linux/uaccess.h> |
| 31 | #include <uapi/linux/sched/types.h> | ||
| 31 | 32 | ||
| 32 | #include "ptp_private.h" | 33 | #include "ptp_private.h" |
| 33 | 34 | ||
| @@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc) | |||
| 184 | kfree(ptp); | 185 | kfree(ptp); |
| 185 | } | 186 | } |
| 186 | 187 | ||
| 188 | static void ptp_aux_kworker(struct kthread_work *work) | ||
| 189 | { | ||
| 190 | struct ptp_clock *ptp = container_of(work, struct ptp_clock, | ||
| 191 | aux_work.work); | ||
| 192 | struct ptp_clock_info *info = ptp->info; | ||
| 193 | long delay; | ||
| 194 | |||
| 195 | delay = info->do_aux_work(info); | ||
| 196 | |||
| 197 | if (delay >= 0) | ||
| 198 | kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay); | ||
| 199 | } | ||
| 200 | |||
| 187 | /* public interface */ | 201 | /* public interface */ |
| 188 | 202 | ||
| 189 | struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, | 203 | struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, |
| @@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, | |||
| 217 | mutex_init(&ptp->pincfg_mux); | 231 | mutex_init(&ptp->pincfg_mux); |
| 218 | init_waitqueue_head(&ptp->tsev_wq); | 232 | init_waitqueue_head(&ptp->tsev_wq); |
| 219 | 233 | ||
| 234 | if (ptp->info->do_aux_work) { | ||
| 235 | char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index); | ||
| 236 | |||
| 237 | kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker); | ||
| 238 | ptp->kworker = kthread_create_worker(0, worker_name ? | ||
| 239 | worker_name : info->name); | ||
| 240 | kfree(worker_name); | ||
| 241 | if (IS_ERR(ptp->kworker)) { | ||
| 242 | err = PTR_ERR(ptp->kworker); | ||
| 243 | pr_err("failed to create ptp aux_worker %d\n", err); | ||
| 244 | goto kworker_err; | ||
| 245 | } | ||
| 246 | } | ||
| 247 | |||
| 220 | err = ptp_populate_pin_groups(ptp); | 248 | err = ptp_populate_pin_groups(ptp); |
| 221 | if (err) | 249 | if (err) |
| 222 | goto no_pin_groups; | 250 | goto no_pin_groups; |
| @@ -259,6 +287,9 @@ no_pps: | |||
| 259 | no_device: | 287 | no_device: |
| 260 | ptp_cleanup_pin_groups(ptp); | 288 | ptp_cleanup_pin_groups(ptp); |
| 261 | no_pin_groups: | 289 | no_pin_groups: |
| 290 | if (ptp->kworker) | ||
| 291 | kthread_destroy_worker(ptp->kworker); | ||
| 292 | kworker_err: | ||
| 262 | mutex_destroy(&ptp->tsevq_mux); | 293 | mutex_destroy(&ptp->tsevq_mux); |
| 263 | mutex_destroy(&ptp->pincfg_mux); | 294 | mutex_destroy(&ptp->pincfg_mux); |
| 264 | ida_simple_remove(&ptp_clocks_map, index); | 295 | ida_simple_remove(&ptp_clocks_map, index); |
| @@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp) | |||
| 274 | ptp->defunct = 1; | 305 | ptp->defunct = 1; |
| 275 | wake_up_interruptible(&ptp->tsev_wq); | 306 | wake_up_interruptible(&ptp->tsev_wq); |
| 276 | 307 | ||
| 308 | if (ptp->kworker) { | ||
| 309 | kthread_cancel_delayed_work_sync(&ptp->aux_work); | ||
| 310 | kthread_destroy_worker(ptp->kworker); | ||
| 311 | } | ||
| 312 | |||
| 277 | /* Release the clock's resources. */ | 313 | /* Release the clock's resources. */ |
| 278 | if (ptp->pps_source) | 314 | if (ptp->pps_source) |
| 279 | pps_unregister_source(ptp->pps_source); | 315 | pps_unregister_source(ptp->pps_source); |
| @@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp, | |||
| 339 | } | 375 | } |
| 340 | EXPORT_SYMBOL(ptp_find_pin); | 376 | EXPORT_SYMBOL(ptp_find_pin); |
| 341 | 377 | ||
| 378 | int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay) | ||
| 379 | { | ||
| 380 | return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay); | ||
| 381 | } | ||
| 382 | EXPORT_SYMBOL(ptp_schedule_worker); | ||
| 383 | |||
| 342 | /* module operations */ | 384 | /* module operations */ |
| 343 | 385 | ||
| 344 | static void __exit ptp_exit(void) | 386 | static void __exit ptp_exit(void) |
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index d95888974d0c..b86f1bfecd6f 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | 22 | ||
| 23 | #include <linux/cdev.h> | 23 | #include <linux/cdev.h> |
| 24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
| 25 | #include <linux/kthread.h> | ||
| 25 | #include <linux/mutex.h> | 26 | #include <linux/mutex.h> |
| 26 | #include <linux/posix-clock.h> | 27 | #include <linux/posix-clock.h> |
| 27 | #include <linux/ptp_clock.h> | 28 | #include <linux/ptp_clock.h> |
| @@ -56,6 +57,8 @@ struct ptp_clock { | |||
| 56 | struct attribute_group pin_attr_group; | 57 | struct attribute_group pin_attr_group; |
| 57 | /* 1st entry is a pointer to the real group, 2nd is NULL terminator */ | 58 | /* 1st entry is a pointer to the real group, 2nd is NULL terminator */ |
| 58 | const struct attribute_group *pin_attr_groups[2]; | 59 | const struct attribute_group *pin_attr_groups[2]; |
| 60 | struct kthread_worker *kworker; | ||
| 61 | struct kthread_delayed_work aux_work; | ||
| 59 | }; | 62 | }; |
| 60 | 63 | ||
| 61 | /* | 64 | /* |
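The ptp_clock changes above give every PTP clock an optional kthread worker: a driver that fills in the new do_aux_work() callback gets a worker created at ptp_clock_register() time, the callback's return value is the delay (in jiffies) before it runs again, and ptp_schedule_worker() lets the driver (re)arm it explicitly. A minimal sketch of a hypothetical "foo" PHC driver using the hooks; the foo_* names and the polling placeholder are assumptions, not code from this series:

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

struct foo_phc {
	struct ptp_clock *clock;
	struct ptp_clock_info caps;	/* registered copy of the template */
	unsigned long polls;		/* how often the worker has run */
};

/* Runs in the per-clock kthread worker created by ptp_clock_register(). */
static long foo_do_aux_work(struct ptp_clock_info *info)
{
	struct foo_phc *phc = container_of(info, struct foo_phc, caps);

	phc->polls++;	/* stand-in for real work, e.g. draining TS FIFOs */

	return msecs_to_jiffies(100);	/* run again in ~100 ms; <0 stops */
}

static const struct ptp_clock_info foo_caps_template = {
	.owner		= THIS_MODULE,
	.name		= "foo-phc",
	.max_adj	= 100000000,
	.do_aux_work	= foo_do_aux_work,
	/* .gettime64/.settime64/.adjfreq/.adjtime etc. omitted here */
};

static int foo_phc_register(struct foo_phc *phc, struct device *parent)
{
	phc->caps = foo_caps_template;
	phc->clock = ptp_clock_register(&phc->caps, parent);
	if (IS_ERR(phc->clock))
		return PTR_ERR(phc->clock);

	/* Arm the worker immediately; it reschedules itself afterwards.
	 * An interrupt handler could also call this, e.g. with delay 0. */
	ptp_schedule_worker(phc->clock, 0);
	return 0;
}
```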
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 4fac49e55d47..4b43aa62fbc7 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c | |||
| @@ -1301,7 +1301,6 @@ static void ds1307_clks_register(struct ds1307 *ds1307) | |||
| 1301 | static const struct regmap_config regmap_config = { | 1301 | static const struct regmap_config regmap_config = { |
| 1302 | .reg_bits = 8, | 1302 | .reg_bits = 8, |
| 1303 | .val_bits = 8, | 1303 | .val_bits = 8, |
| 1304 | .max_register = 0x12, | ||
| 1305 | }; | 1304 | }; |
| 1306 | 1305 | ||
| 1307 | static int ds1307_probe(struct i2c_client *client, | 1306 | static int ds1307_probe(struct i2c_client *client, |
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 7e0d4f724dda..432fc40990bd 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c | |||
| @@ -559,6 +559,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1, | |||
| 559 | chpid.id = crw0->rsid; | 559 | chpid.id = crw0->rsid; |
| 560 | switch (crw0->erc) { | 560 | switch (crw0->erc) { |
| 561 | case CRW_ERC_IPARM: /* Path has come. */ | 561 | case CRW_ERC_IPARM: /* Path has come. */ |
| 562 | case CRW_ERC_INIT: | ||
| 562 | if (!chp_is_registered(chpid)) | 563 | if (!chp_is_registered(chpid)) |
| 563 | chp_new(chpid); | 564 | chp_new(chpid); |
| 564 | chsc_chp_online(chpid); | 565 | chsc_chp_online(chpid); |
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index ba6ac83a6c25..5ccfdc80d0ec 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c | |||
| @@ -481,7 +481,7 @@ static int ccwchain_fetch_tic(struct ccwchain *chain, | |||
| 481 | ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1); | 481 | ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1); |
| 482 | 482 | ||
| 483 | if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) { | 483 | if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) { |
| 484 | ccw->cda = (__u32) (addr_t) (iter->ch_ccw + | 484 | ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) + |
| 485 | (ccw->cda - ccw_head)); | 485 | (ccw->cda - ccw_head)); |
| 486 | return 0; | 486 | return 0; |
| 487 | } | 487 | } |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 8975cd321390..d42e758518ed 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
| @@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | |||
| 2512 | struct rtable *rt = (struct rtable *) dst; | 2512 | struct rtable *rt = (struct rtable *) dst; |
| 2513 | __be32 *pkey = &ip_hdr(skb)->daddr; | 2513 | __be32 *pkey = &ip_hdr(skb)->daddr; |
| 2514 | 2514 | ||
| 2515 | if (rt->rt_gateway) | 2515 | if (rt && rt->rt_gateway) |
| 2516 | pkey = &rt->rt_gateway; | 2516 | pkey = &rt->rt_gateway; |
| 2517 | 2517 | ||
| 2518 | /* IPv4 */ | 2518 | /* IPv4 */ |
| @@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | |||
| 2523 | struct rt6_info *rt = (struct rt6_info *) dst; | 2523 | struct rt6_info *rt = (struct rt6_info *) dst; |
| 2524 | struct in6_addr *pkey = &ipv6_hdr(skb)->daddr; | 2524 | struct in6_addr *pkey = &ipv6_hdr(skb)->daddr; |
| 2525 | 2525 | ||
| 2526 | if (!ipv6_addr_any(&rt->rt6i_gateway)) | 2526 | if (rt && !ipv6_addr_any(&rt->rt6i_gateway)) |
| 2527 | pkey = &rt->rt6i_gateway; | 2527 | pkey = &rt->rt6i_gateway; |
| 2528 | 2528 | ||
| 2529 | /* IPv6 */ | 2529 | /* IPv6 */ |
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 04efed171c88..f32765d3cbd8 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c | |||
| @@ -212,8 +212,8 @@ static int d7s_probe(struct platform_device *op) | |||
| 212 | 212 | ||
| 213 | writeb(regs, p->regs); | 213 | writeb(regs, p->regs); |
| 214 | 214 | ||
| 215 | printk(KERN_INFO PFX "7-Segment Display%s at [%s:0x%llx] %s\n", | 215 | printk(KERN_INFO PFX "7-Segment Display%pOF at [%s:0x%llx] %s\n", |
| 216 | op->dev.of_node->full_name, | 216 | op->dev.of_node, |
| 217 | (regs & D7S_FLIP) ? " (FLIPPED)" : "", | 217 | (regs & D7S_FLIP) ? " (FLIPPED)" : "", |
| 218 | op->resource[0].start, | 218 | op->resource[0].start, |
| 219 | sol_compat ? "in sol_compat mode" : ""); | 219 | sol_compat ? "in sol_compat mode" : ""); |
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c index 216f923161d1..a610b8d3d11f 100644 --- a/drivers/sbus/char/flash.c +++ b/drivers/sbus/char/flash.c | |||
| @@ -181,8 +181,8 @@ static int flash_probe(struct platform_device *op) | |||
| 181 | } | 181 | } |
| 182 | flash.busy = 0; | 182 | flash.busy = 0; |
| 183 | 183 | ||
| 184 | printk(KERN_INFO "%s: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n", | 184 | printk(KERN_INFO "%pOF: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n", |
| 185 | op->dev.of_node->full_name, | 185 | op->dev.of_node, |
| 186 | flash.read_base, flash.read_size, | 186 | flash.read_base, flash.read_size, |
| 187 | flash.write_base, flash.write_size); | 187 | flash.write_base, flash.write_size); |
| 188 | 188 | ||
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c index 57696fc0b482..0a5013350acd 100644 --- a/drivers/sbus/char/uctrl.c +++ b/drivers/sbus/char/uctrl.c | |||
| @@ -379,8 +379,8 @@ static int uctrl_probe(struct platform_device *op) | |||
| 379 | } | 379 | } |
| 380 | 380 | ||
| 381 | sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr); | 381 | sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr); |
| 382 | printk(KERN_INFO "%s: uctrl regs[0x%p] (irq %d)\n", | 382 | printk(KERN_INFO "%pOF: uctrl regs[0x%p] (irq %d)\n", |
| 383 | op->dev.of_node->full_name, p->regs, p->irq); | 383 | op->dev.of_node, p->regs, p->irq); |
| 384 | uctrl_get_event_status(p); | 384 | uctrl_get_event_status(p); |
| 385 | uctrl_get_external_status(p); | 385 | uctrl_get_external_status(p); |
| 386 | 386 | ||
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index d384f4f86c26..d145e0d90227 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
| @@ -47,6 +47,17 @@ config SCSI_NETLINK | |||
| 47 | default n | 47 | default n |
| 48 | depends on NET | 48 | depends on NET |
| 49 | 49 | ||
| 50 | config SCSI_MQ_DEFAULT | ||
| 51 | bool "SCSI: use blk-mq I/O path by default" | ||
| 52 | depends on SCSI | ||
| 53 | ---help--- | ||
| 54 | This option enables the new blk-mq based I/O path for SCSI | ||
| 55 | devices by default. With the option the scsi_mod.use_blk_mq | ||
| 56 | module/boot option defaults to Y, without it to N, but it can | ||
| 57 | still be overridden either way. | ||
| 58 | |||
| 59 | If unsure say N. | ||
| 60 | |||
| 50 | config SCSI_PROC_FS | 61 | config SCSI_PROC_FS |
| 51 | bool "legacy /proc/scsi/ support" | 62 | bool "legacy /proc/scsi/ support" |
| 52 | depends on SCSI && PROC_FS | 63 | depends on SCSI && PROC_FS |
| @@ -1230,6 +1241,8 @@ config SCSI_LPFC | |||
| 1230 | tristate "Emulex LightPulse Fibre Channel Support" | 1241 | tristate "Emulex LightPulse Fibre Channel Support" |
| 1231 | depends on PCI && SCSI | 1242 | depends on PCI && SCSI |
| 1232 | depends on SCSI_FC_ATTRS | 1243 | depends on SCSI_FC_ATTRS |
| 1244 | depends on NVME_TARGET_FC || NVME_TARGET_FC=n | ||
| 1245 | depends on NVME_FC || NVME_FC=n | ||
| 1233 | select CRC_T10DIF | 1246 | select CRC_T10DIF |
| 1234 | ---help--- | 1247 | ---help--- |
| 1235 | This lpfc driver supports the Emulex LightPulse | 1248 | This lpfc driver supports the Emulex LightPulse |
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 707ee2f5954d..a1a2c71e1626 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c | |||
| @@ -549,7 +549,9 @@ static void get_container_name_callback(void *context, struct fib * fibptr) | |||
| 549 | if ((le32_to_cpu(get_name_reply->status) == CT_OK) | 549 | if ((le32_to_cpu(get_name_reply->status) == CT_OK) |
| 550 | && (get_name_reply->data[0] != '\0')) { | 550 | && (get_name_reply->data[0] != '\0')) { |
| 551 | char *sp = get_name_reply->data; | 551 | char *sp = get_name_reply->data; |
| 552 | sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0'; | 552 | int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data); |
| 553 | |||
| 554 | sp[data_size - 1] = '\0'; | ||
| 553 | while (*sp == ' ') | 555 | while (*sp == ' ') |
| 554 | ++sp; | 556 | ++sp; |
| 555 | if (*sp) { | 557 | if (*sp) { |
| @@ -579,12 +581,15 @@ static void get_container_name_callback(void *context, struct fib * fibptr) | |||
| 579 | static int aac_get_container_name(struct scsi_cmnd * scsicmd) | 581 | static int aac_get_container_name(struct scsi_cmnd * scsicmd) |
| 580 | { | 582 | { |
| 581 | int status; | 583 | int status; |
| 584 | int data_size; | ||
| 582 | struct aac_get_name *dinfo; | 585 | struct aac_get_name *dinfo; |
| 583 | struct fib * cmd_fibcontext; | 586 | struct fib * cmd_fibcontext; |
| 584 | struct aac_dev * dev; | 587 | struct aac_dev * dev; |
| 585 | 588 | ||
| 586 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; | 589 | dev = (struct aac_dev *)scsicmd->device->host->hostdata; |
| 587 | 590 | ||
| 591 | data_size = FIELD_SIZEOF(struct aac_get_name_resp, data); | ||
| 592 | |||
| 588 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); | 593 | cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); |
| 589 | 594 | ||
| 590 | aac_fib_init(cmd_fibcontext); | 595 | aac_fib_init(cmd_fibcontext); |
| @@ -593,7 +598,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd) | |||
| 593 | dinfo->command = cpu_to_le32(VM_ContainerConfig); | 598 | dinfo->command = cpu_to_le32(VM_ContainerConfig); |
| 594 | dinfo->type = cpu_to_le32(CT_READ_NAME); | 599 | dinfo->type = cpu_to_le32(CT_READ_NAME); |
| 595 | dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); | 600 | dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); |
| 596 | dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); | 601 | dinfo->count = cpu_to_le32(data_size - 1); |
| 597 | 602 | ||
| 598 | status = aac_fib_send(ContainerCommand, | 603 | status = aac_fib_send(ContainerCommand, |
| 599 | cmd_fibcontext, | 604 | cmd_fibcontext, |
| @@ -3198,10 +3203,11 @@ static int query_disk(struct aac_dev *dev, void __user *arg) | |||
| 3198 | return -EBUSY; | 3203 | return -EBUSY; |
| 3199 | if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) | 3204 | if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) |
| 3200 | return -EFAULT; | 3205 | return -EFAULT; |
| 3201 | if (qd.cnum == -1) | 3206 | if (qd.cnum == -1) { |
| 3207 | if (qd.id < 0 || qd.id >= dev->maximum_num_containers) | ||
| 3208 | return -EINVAL; | ||
| 3202 | qd.cnum = qd.id; | 3209 | qd.cnum = qd.id; |
| 3203 | else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) | 3210 | } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) { |
| 3204 | { | ||
| 3205 | if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) | 3211 | if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) |
| 3206 | return -EINVAL; | 3212 | return -EINVAL; |
| 3207 | qd.instance = dev->scsi_host_ptr->host_no; | 3213 | qd.instance = dev->scsi_host_ptr->host_no; |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index d31a9bc2ba69..ee2667e20e42 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
| @@ -2274,7 +2274,7 @@ struct aac_get_name_resp { | |||
| 2274 | __le32 parm3; | 2274 | __le32 parm3; |
| 2275 | __le32 parm4; | 2275 | __le32 parm4; |
| 2276 | __le32 parm5; | 2276 | __le32 parm5; |
| 2277 | u8 data[16]; | 2277 | u8 data[17]; |
| 2278 | }; | 2278 | }; |
| 2279 | 2279 | ||
| 2280 | #define CT_CID_TO_32BITS_UID 165 | 2280 | #define CT_CID_TO_32BITS_UID 165 |
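The aacraid change above sizes the container-name buffer with FIELD_SIZEOF() (the sizeof of a named struct member), grows the response field by one byte, and always NUL-terminates it before parsing. A tiny illustration of the helper with a made-up reply structure, not the aacraid one:

```c
#include <linux/kernel.h>
#include <linux/types.h>

struct example_reply {
	__le32 status;
	u8 data[17];	/* 16 payload bytes + room for a trailing NUL */
};

static void example_terminate(struct example_reply *r)
{
	int data_size = FIELD_SIZEOF(struct example_reply, data);	/* 17 */

	/* Request at most data_size - 1 bytes from firmware, then make
	 * sure the buffer is a proper C string before walking it. */
	r->data[data_size - 1] = '\0';
}
```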
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile index 741d81861d17..07b60a780c06 100644 --- a/drivers/scsi/aic7xxx/Makefile +++ b/drivers/scsi/aic7xxx/Makefile | |||
| @@ -55,9 +55,9 @@ aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \ | |||
| 55 | 55 | ||
| 56 | ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y) | 56 | ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y) |
| 57 | $(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm | 57 | $(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm |
| 58 | $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic7xxx_reg.h \ | 58 | $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic7xxx_reg.h \ |
| 59 | $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \ | 59 | $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \ |
| 60 | $(src)/aic7xxx.seq | 60 | $(srctree)/$(src)/aic7xxx.seq |
| 61 | 61 | ||
| 62 | $(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h | 62 | $(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h |
| 63 | else | 63 | else |
| @@ -72,14 +72,14 @@ aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \ | |||
| 72 | 72 | ||
| 73 | ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y) | 73 | ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y) |
| 74 | $(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm | 74 | $(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm |
| 75 | $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic79xx_reg.h \ | 75 | $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic79xx_reg.h \ |
| 76 | $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \ | 76 | $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \ |
| 77 | $(src)/aic79xx.seq | 77 | $(srctree)/$(src)/aic79xx.seq |
| 78 | 78 | ||
| 79 | $(aic79xx-gen-y): $(obj)/aic79xx_seq.h | 79 | $(aic79xx-gen-y): $(obj)/aic79xx_seq.h |
| 80 | else | 80 | else |
| 81 | $(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped | 81 | $(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped |
| 82 | endif | 82 | endif |
| 83 | 83 | ||
| 84 | $(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl] | 84 | $(obj)/aicasm/aicasm: $(srctree)/$(src)/aicasm/*.[chyl] |
| 85 | $(MAKE) -C $(src)/aicasm | 85 | $(MAKE) -C $(srctree)/$(src)/aicasm OUTDIR=$(shell pwd)/$(obj)/aicasm/ |
diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile index b98c5c1056c3..45e2d49c1fff 100644 --- a/drivers/scsi/aic7xxx/aicasm/Makefile +++ b/drivers/scsi/aic7xxx/aicasm/Makefile | |||
| @@ -1,19 +1,21 @@ | |||
| 1 | PROG= aicasm | 1 | PROG= aicasm |
| 2 | 2 | ||
| 3 | OUTDIR ?= ./ | ||
| 4 | |||
| 3 | .SUFFIXES= .l .y .c .h | 5 | .SUFFIXES= .l .y .c .h |
| 4 | 6 | ||
| 5 | CSRCS= aicasm.c aicasm_symbol.c | 7 | CSRCS= aicasm.c aicasm_symbol.c |
| 6 | YSRCS= aicasm_gram.y aicasm_macro_gram.y | 8 | YSRCS= aicasm_gram.y aicasm_macro_gram.y |
| 7 | LSRCS= aicasm_scan.l aicasm_macro_scan.l | 9 | LSRCS= aicasm_scan.l aicasm_macro_scan.l |
| 8 | 10 | ||
| 9 | GENHDRS= aicdb.h $(YSRCS:.y=.h) | 11 | GENHDRS= $(addprefix ${OUTDIR}/,aicdb.h $(YSRCS:.y=.h)) |
| 10 | GENSRCS= $(YSRCS:.y=.c) $(LSRCS:.l=.c) | 12 | GENSRCS= $(addprefix ${OUTDIR}/,$(YSRCS:.y=.c) $(LSRCS:.l=.c)) |
| 11 | 13 | ||
| 12 | SRCS= ${CSRCS} ${GENSRCS} | 14 | SRCS= ${CSRCS} ${GENSRCS} |
| 13 | LIBS= -ldb | 15 | LIBS= -ldb |
| 14 | clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG) | 16 | clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG) |
| 15 | # Override default kernel CFLAGS. This is a userland app. | 17 | # Override default kernel CFLAGS. This is a userland app. |
| 16 | AICASM_CFLAGS:= -I/usr/include -I. | 18 | AICASM_CFLAGS:= -I/usr/include -I. -I$(OUTDIR) |
| 17 | LEX= flex | 19 | LEX= flex |
| 18 | YACC= bison | 20 | YACC= bison |
| 19 | YFLAGS= -d | 21 | YFLAGS= -d |
| @@ -32,22 +34,25 @@ YFLAGS+= -t -v | |||
| 32 | LFLAGS= -d | 34 | LFLAGS= -d |
| 33 | endif | 35 | endif |
| 34 | 36 | ||
| 35 | $(PROG): ${GENHDRS} $(SRCS) | 37 | $(PROG): $(OUTDIR) ${GENHDRS} $(SRCS) |
| 36 | $(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(PROG) $(LIBS) | 38 | $(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(OUTDIR)/$(PROG) $(LIBS) |
| 39 | |||
| 40 | $(OUTDIR): | ||
| 41 | mkdir -p $(OUTDIR) | ||
| 37 | 42 | ||
| 38 | aicdb.h: | 43 | $(OUTDIR)/aicdb.h: |
| 39 | @if [ -e "/usr/include/db4/db_185.h" ]; then \ | 44 | @if [ -e "/usr/include/db4/db_185.h" ]; then \ |
| 40 | echo "#include <db4/db_185.h>" > aicdb.h; \ | 45 | echo "#include <db4/db_185.h>" > $@; \ |
| 41 | elif [ -e "/usr/include/db3/db_185.h" ]; then \ | 46 | elif [ -e "/usr/include/db3/db_185.h" ]; then \ |
| 42 | echo "#include <db3/db_185.h>" > aicdb.h; \ | 47 | echo "#include <db3/db_185.h>" > $@; \ |
| 43 | elif [ -e "/usr/include/db2/db_185.h" ]; then \ | 48 | elif [ -e "/usr/include/db2/db_185.h" ]; then \ |
| 44 | echo "#include <db2/db_185.h>" > aicdb.h; \ | 49 | echo "#include <db2/db_185.h>" > $@; \ |
| 45 | elif [ -e "/usr/include/db1/db_185.h" ]; then \ | 50 | elif [ -e "/usr/include/db1/db_185.h" ]; then \ |
| 46 | echo "#include <db1/db_185.h>" > aicdb.h; \ | 51 | echo "#include <db1/db_185.h>" > $@; \ |
| 47 | elif [ -e "/usr/include/db/db_185.h" ]; then \ | 52 | elif [ -e "/usr/include/db/db_185.h" ]; then \ |
| 48 | echo "#include <db/db_185.h>" > aicdb.h; \ | 53 | echo "#include <db/db_185.h>" > $@; \ |
| 49 | elif [ -e "/usr/include/db_185.h" ]; then \ | 54 | elif [ -e "/usr/include/db_185.h" ]; then \ |
| 50 | echo "#include <db_185.h>" > aicdb.h; \ | 55 | echo "#include <db_185.h>" > $@; \ |
| 51 | else \ | 56 | else \ |
| 52 | echo "*** Install db development libraries"; \ | 57 | echo "*** Install db development libraries"; \ |
| 53 | fi | 58 | fi |
| @@ -58,23 +63,23 @@ clean: | |||
| 58 | # Create a dependency chain in generated files | 63 | # Create a dependency chain in generated files |
| 59 | # to avoid concurrent invocations of the single | 64 | # to avoid concurrent invocations of the single |
| 60 | # rule that builds them all. | 65 | # rule that builds them all. |
| 61 | aicasm_gram.c: aicasm_gram.h | 66 | $(OUTDIR)/aicasm_gram.c: $(OUTDIR)/aicasm_gram.h |
| 62 | aicasm_gram.c aicasm_gram.h: aicasm_gram.y | 67 | $(OUTDIR)/aicasm_gram.c $(OUTDIR)/aicasm_gram.h: aicasm_gram.y |
| 63 | $(YACC) $(YFLAGS) -b $(<:.y=) $< | 68 | $(YACC) $(YFLAGS) -b $(<:.y=) $< |
| 64 | mv $(<:.y=).tab.c $(<:.y=.c) | 69 | mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c) |
| 65 | mv $(<:.y=).tab.h $(<:.y=.h) | 70 | mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h) |
| 66 | 71 | ||
| 67 | # Create a dependency chain in generated files | 72 | # Create a dependency chain in generated files |
| 68 | # to avoid concurrent invocations of the single | 73 | # to avoid concurrent invocations of the single |
| 69 | # rule that builds them all. | 74 | # rule that builds them all. |
| 70 | aicasm_macro_gram.c: aicasm_macro_gram.h | 75 | $(OUTDIR)/aicasm_macro_gram.c: $(OUTDIR)/aicasm_macro_gram.h |
| 71 | aicasm_macro_gram.c aicasm_macro_gram.h: aicasm_macro_gram.y | 76 | $(OUTDIR)/aicasm_macro_gram.c $(OUTDIR)/aicasm_macro_gram.h: aicasm_macro_gram.y |
| 72 | $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $< | 77 | $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $< |
| 73 | mv $(<:.y=).tab.c $(<:.y=.c) | 78 | mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c) |
| 74 | mv $(<:.y=).tab.h $(<:.y=.h) | 79 | mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h) |
| 75 | 80 | ||
| 76 | aicasm_scan.c: aicasm_scan.l | 81 | $(OUTDIR)/aicasm_scan.c: aicasm_scan.l |
| 77 | $(LEX) $(LFLAGS) -o$@ $< | 82 | $(LEX) $(LFLAGS) -o $@ $< |
| 78 | 83 | ||
| 79 | aicasm_macro_scan.c: aicasm_macro_scan.l | 84 | $(OUTDIR)/aicasm_macro_scan.c: aicasm_macro_scan.l |
| 80 | $(LEX) $(LFLAGS) -Pmm -o$@ $< | 85 | $(LEX) $(LFLAGS) -Pmm -o $@ $< |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 7dfe709a7138..6844ba361616 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
| @@ -2624,12 +2624,11 @@ static struct fcoe_transport bnx2fc_transport = { | |||
| 2624 | }; | 2624 | }; |
| 2625 | 2625 | ||
| 2626 | /** | 2626 | /** |
| 2627 | * bnx2fc_percpu_thread_create - Create a receive thread for an | 2627 | * bnx2fc_cpu_online - Create a receive thread for an online CPU |
| 2628 | * online CPU | ||
| 2629 | * | 2628 | * |
| 2630 | * @cpu: cpu index for the online cpu | 2629 | * @cpu: cpu index for the online cpu |
| 2631 | */ | 2630 | */ |
| 2632 | static void bnx2fc_percpu_thread_create(unsigned int cpu) | 2631 | static int bnx2fc_cpu_online(unsigned int cpu) |
| 2633 | { | 2632 | { |
| 2634 | struct bnx2fc_percpu_s *p; | 2633 | struct bnx2fc_percpu_s *p; |
| 2635 | struct task_struct *thread; | 2634 | struct task_struct *thread; |
| @@ -2639,15 +2638,17 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu) | |||
| 2639 | thread = kthread_create_on_node(bnx2fc_percpu_io_thread, | 2638 | thread = kthread_create_on_node(bnx2fc_percpu_io_thread, |
| 2640 | (void *)p, cpu_to_node(cpu), | 2639 | (void *)p, cpu_to_node(cpu), |
| 2641 | "bnx2fc_thread/%d", cpu); | 2640 | "bnx2fc_thread/%d", cpu); |
| 2641 | if (IS_ERR(thread)) | ||
| 2642 | return PTR_ERR(thread); | ||
| 2643 | |||
| 2642 | /* bind thread to the cpu */ | 2644 | /* bind thread to the cpu */ |
| 2643 | if (likely(!IS_ERR(thread))) { | 2645 | kthread_bind(thread, cpu); |
| 2644 | kthread_bind(thread, cpu); | 2646 | p->iothread = thread; |
| 2645 | p->iothread = thread; | 2647 | wake_up_process(thread); |
| 2646 | wake_up_process(thread); | 2648 | return 0; |
| 2647 | } | ||
| 2648 | } | 2649 | } |
| 2649 | 2650 | ||
| 2650 | static void bnx2fc_percpu_thread_destroy(unsigned int cpu) | 2651 | static int bnx2fc_cpu_offline(unsigned int cpu) |
| 2651 | { | 2652 | { |
| 2652 | struct bnx2fc_percpu_s *p; | 2653 | struct bnx2fc_percpu_s *p; |
| 2653 | struct task_struct *thread; | 2654 | struct task_struct *thread; |
| @@ -2661,7 +2662,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu) | |||
| 2661 | thread = p->iothread; | 2662 | thread = p->iothread; |
| 2662 | p->iothread = NULL; | 2663 | p->iothread = NULL; |
| 2663 | 2664 | ||
| 2664 | |||
| 2665 | /* Free all work in the list */ | 2665 | /* Free all work in the list */ |
| 2666 | list_for_each_entry_safe(work, tmp, &p->work_list, list) { | 2666 | list_for_each_entry_safe(work, tmp, &p->work_list, list) { |
| 2667 | list_del_init(&work->list); | 2667 | list_del_init(&work->list); |
| @@ -2673,20 +2673,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu) | |||
| 2673 | 2673 | ||
| 2674 | if (thread) | 2674 | if (thread) |
| 2675 | kthread_stop(thread); | 2675 | kthread_stop(thread); |
| 2676 | } | ||
| 2677 | |||
| 2678 | |||
| 2679 | static int bnx2fc_cpu_online(unsigned int cpu) | ||
| 2680 | { | ||
| 2681 | printk(PFX "CPU %x online: Create Rx thread\n", cpu); | ||
| 2682 | bnx2fc_percpu_thread_create(cpu); | ||
| 2683 | return 0; | ||
| 2684 | } | ||
| 2685 | |||
| 2686 | static int bnx2fc_cpu_dead(unsigned int cpu) | ||
| 2687 | { | ||
| 2688 | printk(PFX "CPU %x offline: Remove Rx thread\n", cpu); | ||
| 2689 | bnx2fc_percpu_thread_destroy(cpu); | ||
| 2690 | return 0; | 2676 | return 0; |
| 2691 | } | 2677 | } |
| 2692 | 2678 | ||
| @@ -2761,30 +2747,16 @@ static int __init bnx2fc_mod_init(void) | |||
| 2761 | spin_lock_init(&p->fp_work_lock); | 2747 | spin_lock_init(&p->fp_work_lock); |
| 2762 | } | 2748 | } |
| 2763 | 2749 | ||
| 2764 | get_online_cpus(); | 2750 | rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online", |
| 2765 | 2751 | bnx2fc_cpu_online, bnx2fc_cpu_offline); | |
| 2766 | for_each_online_cpu(cpu) | ||
| 2767 | bnx2fc_percpu_thread_create(cpu); | ||
| 2768 | |||
| 2769 | rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, | ||
| 2770 | "scsi/bnx2fc:online", | ||
| 2771 | bnx2fc_cpu_online, NULL); | ||
| 2772 | if (rc < 0) | 2752 | if (rc < 0) |
| 2773 | goto stop_threads; | 2753 | goto stop_thread; |
| 2774 | bnx2fc_online_state = rc; | 2754 | bnx2fc_online_state = rc; |
| 2775 | 2755 | ||
| 2776 | cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead", | ||
| 2777 | NULL, bnx2fc_cpu_dead); | ||
| 2778 | put_online_cpus(); | ||
| 2779 | |||
| 2780 | cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); | 2756 | cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); |
| 2781 | |||
| 2782 | return 0; | 2757 | return 0; |
| 2783 | 2758 | ||
| 2784 | stop_threads: | 2759 | stop_thread: |
| 2785 | for_each_online_cpu(cpu) | ||
| 2786 | bnx2fc_percpu_thread_destroy(cpu); | ||
| 2787 | put_online_cpus(); | ||
| 2788 | kthread_stop(l2_thread); | 2760 | kthread_stop(l2_thread); |
| 2789 | free_wq: | 2761 | free_wq: |
| 2790 | destroy_workqueue(bnx2fc_wq); | 2762 | destroy_workqueue(bnx2fc_wq); |
| @@ -2803,7 +2775,6 @@ static void __exit bnx2fc_mod_exit(void) | |||
| 2803 | struct fcoe_percpu_s *bg; | 2775 | struct fcoe_percpu_s *bg; |
| 2804 | struct task_struct *l2_thread; | 2776 | struct task_struct *l2_thread; |
| 2805 | struct sk_buff *skb; | 2777 | struct sk_buff *skb; |
| 2806 | unsigned int cpu = 0; | ||
| 2807 | 2778 | ||
| 2808 | /* | 2779 | /* |
| 2809 | * NOTE: Since cnic calls register_driver routine rtnl_lock, | 2780 | * NOTE: Since cnic calls register_driver routine rtnl_lock, |
| @@ -2844,16 +2815,7 @@ static void __exit bnx2fc_mod_exit(void) | |||
| 2844 | if (l2_thread) | 2815 | if (l2_thread) |
| 2845 | kthread_stop(l2_thread); | 2816 | kthread_stop(l2_thread); |
| 2846 | 2817 | ||
| 2847 | get_online_cpus(); | 2818 | cpuhp_remove_state(bnx2fc_online_state); |
| 2848 | /* Destroy per cpu threads */ | ||
| 2849 | for_each_online_cpu(cpu) { | ||
| 2850 | bnx2fc_percpu_thread_destroy(cpu); | ||
| 2851 | } | ||
| 2852 | |||
| 2853 | cpuhp_remove_state_nocalls(bnx2fc_online_state); | ||
| 2854 | cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD); | ||
| 2855 | |||
| 2856 | put_online_cpus(); | ||
| 2857 | 2819 | ||
| 2858 | destroy_workqueue(bnx2fc_wq); | 2820 | destroy_workqueue(bnx2fc_wq); |
| 2859 | /* | 2821 | /* |
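The bnx2fc conversion above (and the matching bnx2i one further down) replaces the open-coded get_online_cpus()/for_each_online_cpu() loops and the extra CPUHP_SCSI_*_DEAD state with a single dynamically allocated hotplug state carrying both an online and an offline callback; the core then runs the online callback for every CPU that is already up and serializes against hotplug itself. A minimal sketch of that pattern with placeholder names:

```c
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/module.h>

static enum cpuhp_state example_online_state;

static int example_cpu_online(unsigned int cpu)
{
	/* set up the per-CPU resource (e.g. bind a worker thread) for @cpu */
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	/* flush pending work and tear the per-CPU resource down again */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN allocates a dynamic state.  The online
	 * callback is invoked for all currently online CPUs before
	 * cpuhp_setup_state() returns, so no manual loop is needed.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_online, example_cpu_offline);
	if (ret < 0)
		return ret;

	example_online_state = ret;
	return 0;
}

static void __exit example_exit(void)
{
	/* Runs the offline callback on every online CPU, then frees the state. */
	cpuhp_remove_state(example_online_state);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```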
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 913c750205ce..26de61d65a4d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c | |||
| @@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) | |||
| 1008 | return work; | 1008 | return work; |
| 1009 | } | 1009 | } |
| 1010 | 1010 | ||
| 1011 | /* Pending work request completion */ | ||
| 1012 | static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe) | ||
| 1013 | { | ||
| 1014 | unsigned int cpu = wqe % num_possible_cpus(); | ||
| 1015 | struct bnx2fc_percpu_s *fps; | ||
| 1016 | struct bnx2fc_work *work; | ||
| 1017 | |||
| 1018 | fps = &per_cpu(bnx2fc_percpu, cpu); | ||
| 1019 | spin_lock_bh(&fps->fp_work_lock); | ||
| 1020 | if (fps->iothread) { | ||
| 1021 | work = bnx2fc_alloc_work(tgt, wqe); | ||
| 1022 | if (work) { | ||
| 1023 | list_add_tail(&work->list, &fps->work_list); | ||
| 1024 | wake_up_process(fps->iothread); | ||
| 1025 | spin_unlock_bh(&fps->fp_work_lock); | ||
| 1026 | return; | ||
| 1027 | } | ||
| 1028 | } | ||
| 1029 | spin_unlock_bh(&fps->fp_work_lock); | ||
| 1030 | bnx2fc_process_cq_compl(tgt, wqe); | ||
| 1031 | } | ||
| 1032 | |||
| 1011 | int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) | 1033 | int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) |
| 1012 | { | 1034 | { |
| 1013 | struct fcoe_cqe *cq; | 1035 | struct fcoe_cqe *cq; |
| @@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) | |||
| 1042 | /* Unsolicited event notification */ | 1064 | /* Unsolicited event notification */ |
| 1043 | bnx2fc_process_unsol_compl(tgt, wqe); | 1065 | bnx2fc_process_unsol_compl(tgt, wqe); |
| 1044 | } else { | 1066 | } else { |
| 1045 | /* Pending work request completion */ | 1067 | bnx2fc_pending_work(tgt, wqe); |
| 1046 | struct bnx2fc_work *work = NULL; | ||
| 1047 | struct bnx2fc_percpu_s *fps = NULL; | ||
| 1048 | unsigned int cpu = wqe % num_possible_cpus(); | ||
| 1049 | |||
| 1050 | fps = &per_cpu(bnx2fc_percpu, cpu); | ||
| 1051 | spin_lock_bh(&fps->fp_work_lock); | ||
| 1052 | if (unlikely(!fps->iothread)) | ||
| 1053 | goto unlock; | ||
| 1054 | |||
| 1055 | work = bnx2fc_alloc_work(tgt, wqe); | ||
| 1056 | if (work) | ||
| 1057 | list_add_tail(&work->list, | ||
| 1058 | &fps->work_list); | ||
| 1059 | unlock: | ||
| 1060 | spin_unlock_bh(&fps->fp_work_lock); | ||
| 1061 | |||
| 1062 | /* Pending work request completion */ | ||
| 1063 | if (fps->iothread && work) | ||
| 1064 | wake_up_process(fps->iothread); | ||
| 1065 | else | ||
| 1066 | bnx2fc_process_cq_compl(tgt, wqe); | ||
| 1067 | num_free_sqes++; | 1068 | num_free_sqes++; |
| 1068 | } | 1069 | } |
| 1069 | cqe++; | 1070 | cqe++; |
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 86afc002814c..4ebcda8d9500 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c | |||
| @@ -404,12 +404,11 @@ int bnx2i_get_stats(void *handle) | |||
| 404 | 404 | ||
| 405 | 405 | ||
| 406 | /** | 406 | /** |
| 407 | * bnx2i_percpu_thread_create - Create a receive thread for an | 407 | * bnx2i_cpu_online - Create a receive thread for an online CPU |
| 408 | * online CPU | ||
| 409 | * | 408 | * |
| 410 | * @cpu: cpu index for the online cpu | 409 | * @cpu: cpu index for the online cpu |
| 411 | */ | 410 | */ |
| 412 | static void bnx2i_percpu_thread_create(unsigned int cpu) | 411 | static int bnx2i_cpu_online(unsigned int cpu) |
| 413 | { | 412 | { |
| 414 | struct bnx2i_percpu_s *p; | 413 | struct bnx2i_percpu_s *p; |
| 415 | struct task_struct *thread; | 414 | struct task_struct *thread; |
| @@ -419,16 +418,17 @@ static void bnx2i_percpu_thread_create(unsigned int cpu) | |||
| 419 | thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, | 418 | thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, |
| 420 | cpu_to_node(cpu), | 419 | cpu_to_node(cpu), |
| 421 | "bnx2i_thread/%d", cpu); | 420 | "bnx2i_thread/%d", cpu); |
| 421 | if (IS_ERR(thread)) | ||
| 422 | return PTR_ERR(thread); | ||
| 423 | |||
| 422 | /* bind thread to the cpu */ | 424 | /* bind thread to the cpu */ |
| 423 | if (likely(!IS_ERR(thread))) { | 425 | kthread_bind(thread, cpu); |
| 424 | kthread_bind(thread, cpu); | 426 | p->iothread = thread; |
| 425 | p->iothread = thread; | 427 | wake_up_process(thread); |
| 426 | wake_up_process(thread); | 428 | return 0; |
| 427 | } | ||
| 428 | } | 429 | } |
| 429 | 430 | ||
| 430 | 431 | static int bnx2i_cpu_offline(unsigned int cpu) | |
| 431 | static void bnx2i_percpu_thread_destroy(unsigned int cpu) | ||
| 432 | { | 432 | { |
| 433 | struct bnx2i_percpu_s *p; | 433 | struct bnx2i_percpu_s *p; |
| 434 | struct task_struct *thread; | 434 | struct task_struct *thread; |
| @@ -451,19 +451,6 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu) | |||
| 451 | spin_unlock_bh(&p->p_work_lock); | 451 | spin_unlock_bh(&p->p_work_lock); |
| 452 | if (thread) | 452 | if (thread) |
| 453 | kthread_stop(thread); | 453 | kthread_stop(thread); |
| 454 | } | ||
| 455 | |||
| 456 | static int bnx2i_cpu_online(unsigned int cpu) | ||
| 457 | { | ||
| 458 | pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu); | ||
| 459 | bnx2i_percpu_thread_create(cpu); | ||
| 460 | return 0; | ||
| 461 | } | ||
| 462 | |||
| 463 | static int bnx2i_cpu_dead(unsigned int cpu) | ||
| 464 | { | ||
| 465 | pr_info("CPU %x offline: Remove Rx thread\n", cpu); | ||
| 466 | bnx2i_percpu_thread_destroy(cpu); | ||
| 467 | return 0; | 454 | return 0; |
| 468 | } | 455 | } |
| 469 | 456 | ||
| @@ -511,27 +498,14 @@ static int __init bnx2i_mod_init(void) | |||
| 511 | p->iothread = NULL; | 498 | p->iothread = NULL; |
| 512 | } | 499 | } |
| 513 | 500 | ||
| 514 | get_online_cpus(); | 501 | err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online", |
| 515 | 502 | bnx2i_cpu_online, bnx2i_cpu_offline); | |
| 516 | for_each_online_cpu(cpu) | ||
| 517 | bnx2i_percpu_thread_create(cpu); | ||
| 518 | |||
| 519 | err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, | ||
| 520 | "scsi/bnx2i:online", | ||
| 521 | bnx2i_cpu_online, NULL); | ||
| 522 | if (err < 0) | 503 | if (err < 0) |
| 523 | goto remove_threads; | 504 | goto unreg_driver; |
| 524 | bnx2i_online_state = err; | 505 | bnx2i_online_state = err; |
| 525 | |||
| 526 | cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead", | ||
| 527 | NULL, bnx2i_cpu_dead); | ||
| 528 | put_online_cpus(); | ||
| 529 | return 0; | 506 | return 0; |
| 530 | 507 | ||
| 531 | remove_threads: | 508 | unreg_driver: |
| 532 | for_each_online_cpu(cpu) | ||
| 533 | bnx2i_percpu_thread_destroy(cpu); | ||
| 534 | put_online_cpus(); | ||
| 535 | cnic_unregister_driver(CNIC_ULP_ISCSI); | 509 | cnic_unregister_driver(CNIC_ULP_ISCSI); |
| 536 | unreg_xport: | 510 | unreg_xport: |
| 537 | iscsi_unregister_transport(&bnx2i_iscsi_transport); | 511 | iscsi_unregister_transport(&bnx2i_iscsi_transport); |
| @@ -551,7 +525,6 @@ out: | |||
| 551 | static void __exit bnx2i_mod_exit(void) | 525 | static void __exit bnx2i_mod_exit(void) |
| 552 | { | 526 | { |
| 553 | struct bnx2i_hba *hba; | 527 | struct bnx2i_hba *hba; |
| 554 | unsigned cpu = 0; | ||
| 555 | 528 | ||
| 556 | mutex_lock(&bnx2i_dev_lock); | 529 | mutex_lock(&bnx2i_dev_lock); |
| 557 | while (!list_empty(&adapter_list)) { | 530 | while (!list_empty(&adapter_list)) { |
| @@ -569,14 +542,7 @@ static void __exit bnx2i_mod_exit(void) | |||
| 569 | } | 542 | } |
| 570 | mutex_unlock(&bnx2i_dev_lock); | 543 | mutex_unlock(&bnx2i_dev_lock); |
| 571 | 544 | ||
| 572 | get_online_cpus(); | 545 | cpuhp_remove_state(bnx2i_online_state); |
| 573 | |||
| 574 | for_each_online_cpu(cpu) | ||
| 575 | bnx2i_percpu_thread_destroy(cpu); | ||
| 576 | |||
| 577 | cpuhp_remove_state_nocalls(bnx2i_online_state); | ||
| 578 | cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD); | ||
| 579 | put_online_cpus(); | ||
| 580 | 546 | ||
| 581 | iscsi_unregister_transport(&bnx2i_iscsi_transport); | 547 | iscsi_unregister_transport(&bnx2i_iscsi_transport); |
| 582 | cnic_unregister_driver(CNIC_ULP_ISCSI); | 548 | cnic_unregister_driver(CNIC_ULP_ISCSI); |
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 2029ad225121..5be0086142ca 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c | |||
| @@ -3845,8 +3845,10 @@ csio_hw_start(struct csio_hw *hw) | |||
| 3845 | 3845 | ||
| 3846 | if (csio_is_hw_ready(hw)) | 3846 | if (csio_is_hw_ready(hw)) |
| 3847 | return 0; | 3847 | return 0; |
| 3848 | else | 3848 | else if (csio_match_state(hw, csio_hws_uninit)) |
| 3849 | return -EINVAL; | 3849 | return -EINVAL; |
| 3850 | else | ||
| 3851 | return -ENODEV; | ||
| 3850 | } | 3852 | } |
| 3851 | 3853 | ||
| 3852 | int | 3854 | int |
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index ea0c31086cc6..dcd074169aa9 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c | |||
| @@ -969,10 +969,14 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 969 | 969 | ||
| 970 | pci_set_drvdata(pdev, hw); | 970 | pci_set_drvdata(pdev, hw); |
| 971 | 971 | ||
| 972 | if (csio_hw_start(hw) != 0) { | 972 | rv = csio_hw_start(hw); |
| 973 | dev_err(&pdev->dev, | 973 | if (rv) { |
| 974 | "Failed to start FW, continuing in debug mode.\n"); | 974 | if (rv == -EINVAL) { |
| 975 | return 0; | 975 | dev_err(&pdev->dev, |
| 976 | "Failed to start FW, continuing in debug mode.\n"); | ||
| 977 | return 0; | ||
| 978 | } | ||
| 979 | goto err_lnode_exit; | ||
| 976 | } | 980 | } |
| 977 | 981 | ||
| 978 | sprintf(hw->fwrev_str, "%u.%u.%u.%u\n", | 982 | sprintf(hw->fwrev_str, "%u.%u.%u.%u\n", |
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index a69a9ac836f5..1d02cf9fe06c 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | |||
| @@ -1635,6 +1635,9 @@ static int init_act_open(struct cxgbi_sock *csk) | |||
| 1635 | goto rel_resource; | 1635 | goto rel_resource; |
| 1636 | } | 1636 | } |
| 1637 | 1637 | ||
| 1638 | if (!(n->nud_state & NUD_VALID)) | ||
| 1639 | neigh_event_send(n, NULL); | ||
| 1640 | |||
| 1638 | csk->atid = cxgb4_alloc_atid(lldi->tids, csk); | 1641 | csk->atid = cxgb4_alloc_atid(lldi->tids, csk); |
| 1639 | if (csk->atid < 0) { | 1642 | if (csk->atid < 0) { |
| 1640 | pr_err("%s, NO atid available.\n", ndev->name); | 1643 | pr_err("%s, NO atid available.\n", ndev->name); |
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index e4c83b7c96a8..1a4cfa562a60 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c | |||
| @@ -2128,6 +2128,13 @@ void cxgbi_cleanup_task(struct iscsi_task *task) | |||
| 2128 | struct iscsi_tcp_task *tcp_task = task->dd_data; | 2128 | struct iscsi_tcp_task *tcp_task = task->dd_data; |
| 2129 | struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); | 2129 | struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); |
| 2130 | 2130 | ||
| 2131 | if (!tcp_task || !tdata || (tcp_task->dd_data != tdata)) { | ||
| 2132 | pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n", | ||
| 2133 | task, task->sc, tcp_task, | ||
| 2134 | tcp_task ? tcp_task->dd_data : NULL, tdata); | ||
| 2135 | return; | ||
| 2136 | } | ||
| 2137 | |||
| 2131 | log_debug(1 << CXGBI_DBG_ISCSI, | 2138 | log_debug(1 << CXGBI_DBG_ISCSI, |
| 2132 | "task 0x%p, skb 0x%p, itt 0x%x.\n", | 2139 | "task 0x%p, skb 0x%p, itt 0x%x.\n", |
| 2133 | task, tdata->skb, task->hdr_itt); | 2140 | task, tdata->skb, task->hdr_itt); |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 8914eab84337..4f7cdb28bd38 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
| @@ -938,7 +938,7 @@ static struct scsi_host_template hpsa_driver_template = { | |||
| 938 | #endif | 938 | #endif |
| 939 | .sdev_attrs = hpsa_sdev_attrs, | 939 | .sdev_attrs = hpsa_sdev_attrs, |
| 940 | .shost_attrs = hpsa_shost_attrs, | 940 | .shost_attrs = hpsa_shost_attrs, |
| 941 | .max_sectors = 8192, | 941 | .max_sectors = 1024, |
| 942 | .no_write_same = 1, | 942 | .no_write_same = 1, |
| 943 | }; | 943 | }; |
| 944 | 944 | ||
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index b0c68d24db01..f838bd73befa 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
| @@ -3351,6 +3351,16 @@ static void ipr_worker_thread(struct work_struct *work) | |||
| 3351 | return; | 3351 | return; |
| 3352 | } | 3352 | } |
| 3353 | 3353 | ||
| 3354 | if (ioa_cfg->scsi_unblock) { | ||
| 3355 | ioa_cfg->scsi_unblock = 0; | ||
| 3356 | ioa_cfg->scsi_blocked = 0; | ||
| 3357 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3358 | scsi_unblock_requests(ioa_cfg->host); | ||
| 3359 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
| 3360 | if (ioa_cfg->scsi_blocked) | ||
| 3361 | scsi_block_requests(ioa_cfg->host); | ||
| 3362 | } | ||
| 3363 | |||
| 3354 | if (!ioa_cfg->scan_enabled) { | 3364 | if (!ioa_cfg->scan_enabled) { |
| 3355 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 3365 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
| 3356 | return; | 3366 | return; |
| @@ -4935,6 +4945,7 @@ static int ipr_slave_configure(struct scsi_device *sdev) | |||
| 4935 | } | 4945 | } |
| 4936 | if (ipr_is_vset_device(res)) { | 4946 | if (ipr_is_vset_device(res)) { |
| 4937 | sdev->scsi_level = SCSI_SPC_3; | 4947 | sdev->scsi_level = SCSI_SPC_3; |
| 4948 | sdev->no_report_opcodes = 1; | ||
| 4938 | blk_queue_rq_timeout(sdev->request_queue, | 4949 | blk_queue_rq_timeout(sdev->request_queue, |
| 4939 | IPR_VSET_RW_TIMEOUT); | 4950 | IPR_VSET_RW_TIMEOUT); |
| 4940 | blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); | 4951 | blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); |
| @@ -7211,9 +7222,8 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) | |||
| 7211 | ENTER; | 7222 | ENTER; |
| 7212 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { | 7223 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { |
| 7213 | ipr_trace; | 7224 | ipr_trace; |
| 7214 | spin_unlock_irq(ioa_cfg->host->host_lock); | 7225 | ioa_cfg->scsi_unblock = 1; |
| 7215 | scsi_unblock_requests(ioa_cfg->host); | 7226 | schedule_work(&ioa_cfg->work_q); |
| 7216 | spin_lock_irq(ioa_cfg->host->host_lock); | ||
| 7217 | } | 7227 | } |
| 7218 | 7228 | ||
| 7219 | ioa_cfg->in_reset_reload = 0; | 7229 | ioa_cfg->in_reset_reload = 0; |
| @@ -7287,13 +7297,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd) | |||
| 7287 | list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); | 7297 | list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); |
| 7288 | wake_up_all(&ioa_cfg->reset_wait_q); | 7298 | wake_up_all(&ioa_cfg->reset_wait_q); |
| 7289 | 7299 | ||
| 7290 | spin_unlock(ioa_cfg->host->host_lock); | 7300 | ioa_cfg->scsi_unblock = 1; |
| 7291 | scsi_unblock_requests(ioa_cfg->host); | ||
| 7292 | spin_lock(ioa_cfg->host->host_lock); | ||
| 7293 | |||
| 7294 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) | ||
| 7295 | scsi_block_requests(ioa_cfg->host); | ||
| 7296 | |||
| 7297 | schedule_work(&ioa_cfg->work_q); | 7301 | schedule_work(&ioa_cfg->work_q); |
| 7298 | LEAVE; | 7302 | LEAVE; |
| 7299 | return IPR_RC_JOB_RETURN; | 7303 | return IPR_RC_JOB_RETURN; |
| @@ -9249,8 +9253,11 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, | |||
| 9249 | spin_unlock(&ioa_cfg->hrrq[i]._lock); | 9253 | spin_unlock(&ioa_cfg->hrrq[i]._lock); |
| 9250 | } | 9254 | } |
| 9251 | wmb(); | 9255 | wmb(); |
| 9252 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) | 9256 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { |
| 9257 | ioa_cfg->scsi_unblock = 0; | ||
| 9258 | ioa_cfg->scsi_blocked = 1; | ||
| 9253 | scsi_block_requests(ioa_cfg->host); | 9259 | scsi_block_requests(ioa_cfg->host); |
| 9260 | } | ||
| 9254 | 9261 | ||
| 9255 | ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); | 9262 | ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); |
| 9256 | ioa_cfg->reset_cmd = ipr_cmd; | 9263 | ioa_cfg->reset_cmd = ipr_cmd; |
| @@ -9306,9 +9313,8 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, | |||
| 9306 | wake_up_all(&ioa_cfg->reset_wait_q); | 9313 | wake_up_all(&ioa_cfg->reset_wait_q); |
| 9307 | 9314 | ||
| 9308 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { | 9315 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { |
| 9309 | spin_unlock_irq(ioa_cfg->host->host_lock); | 9316 | ioa_cfg->scsi_unblock = 1; |
| 9310 | scsi_unblock_requests(ioa_cfg->host); | 9317 | schedule_work(&ioa_cfg->work_q); |
| 9311 | spin_lock_irq(ioa_cfg->host->host_lock); | ||
| 9312 | } | 9318 | } |
| 9313 | return; | 9319 | return; |
| 9314 | } else { | 9320 | } else { |
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index e98a87a65335..c7f0e9e3cd7d 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
| @@ -1488,6 +1488,8 @@ struct ipr_ioa_cfg { | |||
| 1488 | u8 cfg_locked:1; | 1488 | u8 cfg_locked:1; |
| 1489 | u8 clear_isr:1; | 1489 | u8 clear_isr:1; |
| 1490 | u8 probe_done:1; | 1490 | u8 probe_done:1; |
| 1491 | u8 scsi_unblock:1; | ||
| 1492 | u8 scsi_blocked:1; | ||
| 1491 | 1493 | ||
| 1492 | u8 revid; | 1494 | u8 revid; |
| 1493 | 1495 | ||
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 4ed48ed38e79..7ee1a94c0b33 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
| @@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
| 205 | atomic_read(&tgtp->xmt_ls_rsp_error)); | 205 | atomic_read(&tgtp->xmt_ls_rsp_error)); |
| 206 | 206 | ||
| 207 | len += snprintf(buf+len, PAGE_SIZE-len, | 207 | len += snprintf(buf+len, PAGE_SIZE-len, |
| 208 | "FCP: Rcv %08x Release %08x Drop %08x\n", | 208 | "FCP: Rcv %08x Defer %08x Release %08x " |
| 209 | "Drop %08x\n", | ||
| 209 | atomic_read(&tgtp->rcv_fcp_cmd_in), | 210 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
| 211 | atomic_read(&tgtp->rcv_fcp_cmd_defer), | ||
| 210 | atomic_read(&tgtp->xmt_fcp_release), | 212 | atomic_read(&tgtp->xmt_fcp_release), |
| 211 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); | 213 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); |
| 212 | 214 | ||
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 5cc8b0f7d885..744f3f395b64 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
| @@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) | |||
| 782 | atomic_read(&tgtp->xmt_ls_rsp_error)); | 782 | atomic_read(&tgtp->xmt_ls_rsp_error)); |
| 783 | 783 | ||
| 784 | len += snprintf(buf + len, size - len, | 784 | len += snprintf(buf + len, size - len, |
| 785 | "FCP: Rcv %08x Drop %08x\n", | 785 | "FCP: Rcv %08x Defer %08x Release %08x " |
| 786 | "Drop %08x\n", | ||
| 786 | atomic_read(&tgtp->rcv_fcp_cmd_in), | 787 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
| 788 | atomic_read(&tgtp->rcv_fcp_cmd_defer), | ||
| 789 | atomic_read(&tgtp->xmt_fcp_release), | ||
| 787 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); | 790 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); |
| 788 | 791 | ||
| 789 | if (atomic_read(&tgtp->rcv_fcp_cmd_in) != | 792 | if (atomic_read(&tgtp->rcv_fcp_cmd_in) != |
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index fbeec344c6cc..bbbd0f84160d 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c | |||
| @@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, | |||
| 841 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); | 841 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); |
| 842 | } | 842 | } |
| 843 | 843 | ||
| 844 | static void | ||
| 845 | lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, | ||
| 846 | struct nvmefc_tgt_fcp_req *rsp) | ||
| 847 | { | ||
| 848 | struct lpfc_nvmet_tgtport *tgtp; | ||
| 849 | struct lpfc_nvmet_rcv_ctx *ctxp = | ||
| 850 | container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); | ||
| 851 | struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; | ||
| 852 | struct lpfc_hba *phba = ctxp->phba; | ||
| 853 | |||
| 854 | lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", | ||
| 855 | ctxp->oxid, ctxp->size, smp_processor_id()); | ||
| 856 | |||
| 857 | tgtp = phba->targetport->private; | ||
| 858 | atomic_inc(&tgtp->rcv_fcp_cmd_defer); | ||
| 859 | lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ | ||
| 860 | } | ||
| 861 | |||
| 844 | static struct nvmet_fc_target_template lpfc_tgttemplate = { | 862 | static struct nvmet_fc_target_template lpfc_tgttemplate = { |
| 845 | .targetport_delete = lpfc_nvmet_targetport_delete, | 863 | .targetport_delete = lpfc_nvmet_targetport_delete, |
| 846 | .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, | 864 | .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, |
| 847 | .fcp_op = lpfc_nvmet_xmt_fcp_op, | 865 | .fcp_op = lpfc_nvmet_xmt_fcp_op, |
| 848 | .fcp_abort = lpfc_nvmet_xmt_fcp_abort, | 866 | .fcp_abort = lpfc_nvmet_xmt_fcp_abort, |
| 849 | .fcp_req_release = lpfc_nvmet_xmt_fcp_release, | 867 | .fcp_req_release = lpfc_nvmet_xmt_fcp_release, |
| 868 | .defer_rcv = lpfc_nvmet_defer_rcv, | ||
| 850 | 869 | ||
| 851 | .max_hw_queues = 1, | 870 | .max_hw_queues = 1, |
| 852 | .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, | 871 | .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, |
| @@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, | |||
| 1504 | return; | 1523 | return; |
| 1505 | } | 1524 | } |
| 1506 | 1525 | ||
| 1526 | /* Processing of FCP command is deferred */ | ||
| 1527 | if (rc == -EOVERFLOW) { | ||
| 1528 | lpfc_nvmeio_data(phba, | ||
| 1529 | "NVMET RCV BUSY: xri x%x sz %d from %06x\n", | ||
| 1530 | oxid, size, sid); | ||
| 1531 | /* defer reposting rcv buffer till .defer_rcv callback */ | ||
| 1532 | ctxp->rqb_buffer = nvmebuf; | ||
| 1533 | atomic_inc(&tgtp->rcv_fcp_cmd_out); | ||
| 1534 | return; | ||
| 1535 | } | ||
| 1536 | |||
| 1507 | atomic_inc(&tgtp->rcv_fcp_cmd_drop); | 1537 | atomic_inc(&tgtp->rcv_fcp_cmd_drop); |
| 1508 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | 1538 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
| 1509 | "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", | 1539 | "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", |
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index e675ef17be08..48a76788b003 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h | |||
| @@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport { | |||
| 49 | atomic_t rcv_fcp_cmd_in; | 49 | atomic_t rcv_fcp_cmd_in; |
| 50 | atomic_t rcv_fcp_cmd_out; | 50 | atomic_t rcv_fcp_cmd_out; |
| 51 | atomic_t rcv_fcp_cmd_drop; | 51 | atomic_t rcv_fcp_cmd_drop; |
| 52 | atomic_t rcv_fcp_cmd_defer; | ||
| 52 | atomic_t xmt_fcp_release; | 53 | atomic_t xmt_fcp_release; |
| 53 | 54 | ||
| 54 | /* Stats counters - lpfc_nvmet_xmt_fcp_op */ | 55 | /* Stats counters - lpfc_nvmet_xmt_fcp_op */ |
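The lpfc hunks above wire up the new defer_rcv() entry of nvmet_fc_target_template: when nvmet_fc_rcv_fcp_req() returns -EOVERFLOW the transport has accepted the command but still references the command IU, so the LLD must keep the hardware receive buffer until the transport calls defer_rcv() for that request. A rough sketch of how an LLD receive path might cooperate with the callback; the foo_* types and the buffer-repost helper are placeholders, not lpfc code:

```c
#include <linux/kernel.h>
#include <linux/nvme-fc-driver.h>
#include <linux/types.h>

struct foo_hba {
	struct nvmet_fc_target_port *targetport;
};

struct foo_rcv_ctx {
	struct foo_hba *hba;
	struct nvmefc_tgt_fcp_req fcp_req;
	void *rx_buf;		/* hardware receive buffer for this command */
	void *deferred_buf;	/* held back until .defer_rcv fires */
};

static void foo_repost_rx_buffer(struct foo_hba *hba, void *buf)
{
	/* hand the DMA buffer back to the adapter's receive queue */
}

static void foo_recv_fcp_cmd(struct foo_hba *hba, struct foo_rcv_ctx *ctx,
			     void *cmd_iu, u32 len)
{
	int rc;

	rc = nvmet_fc_rcv_fcp_req(hba->targetport, &ctx->fcp_req,
				  cmd_iu, len);
	if (rc == -EOVERFLOW) {
		/* Command queued, but the transport still needs the IU:
		 * keep the buffer until .defer_rcv() is invoked. */
		ctx->deferred_buf = ctx->rx_buf;
		return;
	}

	/* Success or hard failure: the buffer can be recycled now. */
	foo_repost_rx_buffer(hba, ctx->rx_buf);
}

static void foo_defer_rcv(struct nvmet_fc_target_port *tgtport,
			  struct nvmefc_tgt_fcp_req *req)
{
	struct foo_rcv_ctx *ctx = container_of(req, struct foo_rcv_ctx,
					       fcp_req);

	foo_repost_rx_buffer(ctx->hba, ctx->deferred_buf);
}

static struct nvmet_fc_target_template foo_tgt_template = {
	/* mandatory ops and sizing fields elided for brevity */
	.defer_rcv	= foo_defer_rcv,
};
```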
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 316c3df0c3fd..71c4746341ea 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
| @@ -6228,8 +6228,8 @@ static int megasas_probe_one(struct pci_dev *pdev, | |||
| 6228 | fail_start_aen: | 6228 | fail_start_aen: |
| 6229 | fail_io_attach: | 6229 | fail_io_attach: |
| 6230 | megasas_mgmt_info.count--; | 6230 | megasas_mgmt_info.count--; |
| 6231 | megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; | ||
| 6232 | megasas_mgmt_info.max_index--; | 6231 | megasas_mgmt_info.max_index--; |
| 6232 | megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; | ||
| 6233 | 6233 | ||
| 6234 | instance->instancet->disable_intr(instance); | 6234 | instance->instancet->disable_intr(instance); |
| 6235 | megasas_destroy_irqs(instance); | 6235 | megasas_destroy_irqs(instance); |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index f990ab4d45e1..985510628f56 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
| @@ -425,7 +425,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) | |||
| 425 | int | 425 | int |
| 426 | megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) | 426 | megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) |
| 427 | { | 427 | { |
| 428 | u32 max_mpt_cmd, i; | 428 | u32 max_mpt_cmd, i, j; |
| 429 | struct fusion_context *fusion; | 429 | struct fusion_context *fusion; |
| 430 | 430 | ||
| 431 | fusion = instance->ctrl_context; | 431 | fusion = instance->ctrl_context; |
| @@ -450,11 +450,15 @@ megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) | |||
| 450 | fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion), | 450 | fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion), |
| 451 | GFP_KERNEL); | 451 | GFP_KERNEL); |
| 452 | if (!fusion->cmd_list[i]) { | 452 | if (!fusion->cmd_list[i]) { |
| 453 | for (j = 0; j < i; j++) | ||
| 454 | kfree(fusion->cmd_list[j]); | ||
| 455 | kfree(fusion->cmd_list); | ||
| 453 | dev_err(&instance->pdev->dev, | 456 | dev_err(&instance->pdev->dev, |
| 454 | "Failed from %s %d\n", __func__, __LINE__); | 457 | "Failed from %s %d\n", __func__, __LINE__); |
| 455 | return -ENOMEM; | 458 | return -ENOMEM; |
| 456 | } | 459 | } |
| 457 | } | 460 | } |
| 461 | |||
| 458 | return 0; | 462 | return 0; |
| 459 | } | 463 | } |
| 460 | int | 464 | int |
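The megaraid_sas_fusion hunk above frees the cmd_list entries that were already allocated once a later kzalloc fails, instead of leaking them. A minimal userspace sketch of that unwind-on-partial-failure pattern follows; the function and variable names are invented for illustration and plain malloc/free stand in for the kernel allocators.

    #include <stdio.h>
    #include <stdlib.h>

    /* Allocate `count` elements; on failure free everything already allocated. */
    static int alloc_table(int ***table, size_t count)
    {
        size_t i, j;

        *table = calloc(count, sizeof(**table));
        if (!*table)
            return -1;

        for (i = 0; i < count; i++) {
            (*table)[i] = malloc(sizeof(int));
            if (!(*table)[i]) {
                for (j = 0; j < i; j++)     /* unwind the earlier allocations */
                    free((*table)[j]);
                free(*table);
                *table = NULL;
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        int **table;
        size_t i;

        if (alloc_table(&table, 8) == 0) {
            printf("allocated 8 entries\n");
            for (i = 0; i < 8; i++)
                free(table[i]);
            free(table);
        }
        return 0;
    }

Keeping the unwind in the same function that did the allocation keeps ownership obvious: the caller only ever sees a fully built table or a clean failure.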
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index 4d038926a455..351f06dfc5a0 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h | |||
| @@ -528,7 +528,8 @@ struct fip_vlan { | |||
| 528 | #define QEDF_WRITE (1 << 0) | 528 | #define QEDF_WRITE (1 << 0) |
| 529 | #define MAX_FIBRE_LUNS 0xffffffff | 529 | #define MAX_FIBRE_LUNS 0xffffffff |
| 530 | 530 | ||
| 531 | #define QEDF_MAX_NUM_CQS 8 | 531 | #define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \ |
| 532 | num_online_cpus()) | ||
| 532 | 533 | ||
| 533 | /* | 534 | /* |
| 534 | * PCI function probe defines | 535 | * PCI function probe defines |
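The new MIN_NUM_CPUS_MSIX() macro above caps the queue count at the smaller of the CQ count reported by the device and the number of online CPUs. The sketch below shows the same clamp in standalone C under invented names; MIN_U32 is a simplified stand-in for the kernel's min_t() and, unlike it, evaluates its arguments twice.

    #include <stdio.h>

    /* Simplified stand-in for min_t(u32, ...) -- arguments are evaluated twice. */
    #define MIN_U32(a, b) ((unsigned int)(a) < (unsigned int)(b) ? \
                           (unsigned int)(a) : (unsigned int)(b))

    struct dev_info {
        unsigned int num_cqs;      /* completion queues the device exposes */
    };

    static unsigned int pick_num_queues(const struct dev_info *info,
                                        unsigned int online_cpus)
    {
        /* More queues than CPUs, or than the device supports, buys nothing. */
        return MIN_U32(info->num_cqs, online_cpus);
    }

    int main(void)
    {
        struct dev_info info = { .num_cqs = 16 };

        printf("queues: %u\n", pick_num_queues(&info, 8));   /* -> 8 */
        printf("queues: %u\n", pick_num_queues(&info, 64));  /* -> 16 */
        return 0;
    }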
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c index eb07f1de8afa..59c18ca4cda9 100644 --- a/drivers/scsi/qedf/qedf_els.c +++ b/drivers/scsi/qedf/qedf_els.c | |||
| @@ -489,7 +489,7 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg) | |||
| 489 | 489 | ||
| 490 | /* If a SRR times out, simply free resources */ | 490 | /* If a SRR times out, simply free resources */ |
| 491 | if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) | 491 | if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) |
| 492 | goto out_free; | 492 | goto out_put; |
| 493 | 493 | ||
| 494 | /* Normalize response data into struct fc_frame */ | 494 | /* Normalize response data into struct fc_frame */ |
| 495 | mp_req = &(srr_req->mp_req); | 495 | mp_req = &(srr_req->mp_req); |
| @@ -501,7 +501,7 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg) | |||
| 501 | if (!fp) { | 501 | if (!fp) { |
| 502 | QEDF_ERR(&(qedf->dbg_ctx), | 502 | QEDF_ERR(&(qedf->dbg_ctx), |
| 503 | "fc_frame_alloc failure.\n"); | 503 | "fc_frame_alloc failure.\n"); |
| 504 | goto out_free; | 504 | goto out_put; |
| 505 | } | 505 | } |
| 506 | 506 | ||
| 507 | /* Copy frame header from firmware into fp */ | 507 | /* Copy frame header from firmware into fp */ |
| @@ -526,9 +526,10 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg) | |||
| 526 | } | 526 | } |
| 527 | 527 | ||
| 528 | fc_frame_free(fp); | 528 | fc_frame_free(fp); |
| 529 | out_free: | 529 | out_put: |
| 530 | /* Put reference for original command since SRR completed */ | 530 | /* Put reference for original command since SRR completed */ |
| 531 | kref_put(&orig_io_req->refcount, qedf_release_cmd); | 531 | kref_put(&orig_io_req->refcount, qedf_release_cmd); |
| 532 | out_free: | ||
| 532 | kfree(cb_arg); | 533 | kfree(cb_arg); |
| 533 | } | 534 | } |
| 534 | 535 | ||
| @@ -780,7 +781,7 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg) | |||
| 780 | 781 | ||
| 781 | /* If a REC times out, free resources */ | 782 | /* If a REC times out, free resources */ |
| 782 | if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) | 783 | if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) |
| 783 | goto out_free; | 784 | goto out_put; |
| 784 | 785 | ||
| 785 | /* Normalize response data into struct fc_frame */ | 786 | /* Normalize response data into struct fc_frame */ |
| 786 | mp_req = &(rec_req->mp_req); | 787 | mp_req = &(rec_req->mp_req); |
| @@ -792,7 +793,7 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg) | |||
| 792 | if (!fp) { | 793 | if (!fp) { |
| 793 | QEDF_ERR(&(qedf->dbg_ctx), | 794 | QEDF_ERR(&(qedf->dbg_ctx), |
| 794 | "fc_frame_alloc failure.\n"); | 795 | "fc_frame_alloc failure.\n"); |
| 795 | goto out_free; | 796 | goto out_put; |
| 796 | } | 797 | } |
| 797 | 798 | ||
| 798 | /* Copy frame header from firmware into fp */ | 799 | /* Copy frame header from firmware into fp */ |
| @@ -884,9 +885,10 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg) | |||
| 884 | 885 | ||
| 885 | out_free_frame: | 886 | out_free_frame: |
| 886 | fc_frame_free(fp); | 887 | fc_frame_free(fp); |
| 887 | out_free: | 888 | out_put: |
| 888 | /* Put reference for original command since REC completed */ | 889 | /* Put reference for original command since REC completed */ |
| 889 | kref_put(&orig_io_req->refcount, qedf_release_cmd); | 890 | kref_put(&orig_io_req->refcount, qedf_release_cmd); |
| 891 | out_free: | ||
| 890 | kfree(cb_arg); | 892 | kfree(cb_arg); |
| 891 | } | 893 | } |
| 892 | 894 | ||
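The qedf_els hunks above split the old out_free label in two: out_put drops the reference on the original request and falls through, while out_free only frees the callback argument, so a path that never resolved the original request can bail out without touching its refcount. The following sketch shows that label layout with hypothetical names and a hand-rolled refcount; it is an illustration, not the qedf code.

    #include <stdio.h>
    #include <stdlib.h>

    struct io_req { int refcount; };

    static void put_ref(struct io_req *req)
    {
        if (req && --req->refcount == 0)
            printf("io_req released\n");
    }

    /* orig may be NULL if the lookup failed before a reference was taken. */
    static void complete_els(struct io_req *orig, void *cb_arg, int failed)
    {
        if (!orig)
            goto out_free;   /* nothing to put a reference on */

        if (failed)
            goto out_put;    /* skip the completion work, still drop the ref */

        /* ... normal completion work using orig ... */

    out_put:
        put_ref(orig);       /* drop the reference taken when the request was sent */
    out_free:
        free(cb_arg);        /* every path frees the callback argument */
    }

    int main(void)
    {
        struct io_req req = { .refcount = 1 };

        complete_els(&req, malloc(16), 0);   /* normal completion */
        complete_els(NULL, malloc(16), 0);   /* lookup failed: free only */
        return 0;
    }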
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 7786c97e033f..1d13c9ca517d 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c | |||
| @@ -2760,11 +2760,9 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf) | |||
| 2760 | * we allocation is the minimum off: | 2760 | * we allocation is the minimum off: |
| 2761 | * | 2761 | * |
| 2762 | * Number of CPUs | 2762 | * Number of CPUs |
| 2763 | * Number of MSI-X vectors | 2763 | * Number allocated by qed for our PCI function |
| 2764 | * Max number allocated in hardware (QEDF_MAX_NUM_CQS) | ||
| 2765 | */ | 2764 | */ |
| 2766 | qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS, | 2765 | qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf); |
| 2767 | num_online_cpus()); | ||
| 2768 | 2766 | ||
| 2769 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", | 2767 | QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", |
| 2770 | qedf->num_queues); | 2768 | qedf->num_queues); |
| @@ -2962,6 +2960,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) | |||
| 2962 | goto err1; | 2960 | goto err1; |
| 2963 | } | 2961 | } |
| 2964 | 2962 | ||
| 2963 | /* Learn information crucial for qedf to progress */ | ||
| 2964 | rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); | ||
| 2965 | if (rc) { | ||
| 2966 | QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); | ||
| 2967 | goto err1; | ||
| 2968 | } | ||
| 2969 | |||
| 2965 | /* queue allocation code should come here | 2970 | /* queue allocation code should come here |
| 2966 | * order should be | 2971 | * order should be |
| 2967 | * slowpath_start | 2972 | * slowpath_start |
| @@ -2977,13 +2982,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) | |||
| 2977 | } | 2982 | } |
| 2978 | qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); | 2983 | qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); |
| 2979 | 2984 | ||
| 2980 | /* Learn information crucial for qedf to progress */ | ||
| 2981 | rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); | ||
| 2982 | if (rc) { | ||
| 2983 | QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); | ||
| 2984 | goto err1; | ||
| 2985 | } | ||
| 2986 | |||
| 2987 | /* Record BDQ producer doorbell addresses */ | 2985 | /* Record BDQ producer doorbell addresses */ |
| 2988 | qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; | 2986 | qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; |
| 2989 | qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; | 2987 | qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; |
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig index 21331453db7b..2ff753ce6e27 100644 --- a/drivers/scsi/qedi/Kconfig +++ b/drivers/scsi/qedi/Kconfig | |||
| @@ -5,6 +5,7 @@ config QEDI | |||
| 5 | select SCSI_ISCSI_ATTRS | 5 | select SCSI_ISCSI_ATTRS |
| 6 | select QED_LL2 | 6 | select QED_LL2 |
| 7 | select QED_ISCSI | 7 | select QED_ISCSI |
| 8 | select ISCSI_BOOT_SYSFS | ||
| 8 | ---help--- | 9 | ---help--- |
| 9 | This driver supports iSCSI offload for the QLogic FastLinQ | 10 | This driver supports iSCSI offload for the QLogic FastLinQ |
| 10 | 41000 Series Converged Network Adapters. | 11 | 41000 Series Converged Network Adapters. |
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 80edd28b635f..37da9a8b43b1 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c | |||
| @@ -824,7 +824,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, | |||
| 824 | u32 iscsi_cid = QEDI_CID_RESERVED; | 824 | u32 iscsi_cid = QEDI_CID_RESERVED; |
| 825 | u16 len = 0; | 825 | u16 len = 0; |
| 826 | char *buf = NULL; | 826 | char *buf = NULL; |
| 827 | int ret; | 827 | int ret, tmp; |
| 828 | 828 | ||
| 829 | if (!shost) { | 829 | if (!shost) { |
| 830 | ret = -ENXIO; | 830 | ret = -ENXIO; |
| @@ -940,10 +940,10 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, | |||
| 940 | 940 | ||
| 941 | ep_rel_conn: | 941 | ep_rel_conn: |
| 942 | qedi->ep_tbl[iscsi_cid] = NULL; | 942 | qedi->ep_tbl[iscsi_cid] = NULL; |
| 943 | ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle); | 943 | tmp = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle); |
| 944 | if (ret) | 944 | if (tmp) |
| 945 | QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n", | 945 | QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n", |
| 946 | ret); | 946 | tmp); |
| 947 | ep_free_sq: | 947 | ep_free_sq: |
| 948 | qedi_free_sq(qedi, qedi_ep); | 948 | qedi_free_sq(qedi, qedi_ep); |
| 949 | ep_conn_exit: | 949 | ep_conn_exit: |
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 33142610882f..b18646d6057f 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c | |||
| @@ -401,9 +401,6 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, | |||
| 401 | for (i = 0; i < vha->hw->max_req_queues; i++) { | 401 | for (i = 0; i < vha->hw->max_req_queues; i++) { |
| 402 | struct req_que *req = vha->hw->req_q_map[i]; | 402 | struct req_que *req = vha->hw->req_q_map[i]; |
| 403 | 403 | ||
| 404 | if (!test_bit(i, vha->hw->req_qid_map)) | ||
| 405 | continue; | ||
| 406 | |||
| 407 | if (req || !buf) { | 404 | if (req || !buf) { |
| 408 | length = req ? | 405 | length = req ? |
| 409 | req->length : REQUEST_ENTRY_CNT_24XX; | 406 | req->length : REQUEST_ENTRY_CNT_24XX; |
| @@ -418,9 +415,6 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, | |||
| 418 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { | 415 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { |
| 419 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; | 416 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; |
| 420 | 417 | ||
| 421 | if (!test_bit(i, vha->hw->rsp_qid_map)) | ||
| 422 | continue; | ||
| 423 | |||
| 424 | if (rsp || !buf) { | 418 | if (rsp || !buf) { |
| 425 | length = rsp ? | 419 | length = rsp ? |
| 426 | rsp->length : RESPONSE_ENTRY_CNT_MQ; | 420 | rsp->length : RESPONSE_ENTRY_CNT_MQ; |
| @@ -660,9 +654,6 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, | |||
| 660 | for (i = 0; i < vha->hw->max_req_queues; i++) { | 654 | for (i = 0; i < vha->hw->max_req_queues; i++) { |
| 661 | struct req_que *req = vha->hw->req_q_map[i]; | 655 | struct req_que *req = vha->hw->req_q_map[i]; |
| 662 | 656 | ||
| 663 | if (!test_bit(i, vha->hw->req_qid_map)) | ||
| 664 | continue; | ||
| 665 | |||
| 666 | if (req || !buf) { | 657 | if (req || !buf) { |
| 667 | qla27xx_insert16(i, buf, len); | 658 | qla27xx_insert16(i, buf, len); |
| 668 | qla27xx_insert16(1, buf, len); | 659 | qla27xx_insert16(1, buf, len); |
| @@ -675,9 +666,6 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, | |||
| 675 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { | 666 | for (i = 0; i < vha->hw->max_rsp_queues; i++) { |
| 676 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; | 667 | struct rsp_que *rsp = vha->hw->rsp_q_map[i]; |
| 677 | 668 | ||
| 678 | if (!test_bit(i, vha->hw->rsp_qid_map)) | ||
| 679 | continue; | ||
| 680 | |||
| 681 | if (rsp || !buf) { | 669 | if (rsp || !buf) { |
| 682 | qla27xx_insert16(i, buf, len); | 670 | qla27xx_insert16(i, buf, len); |
| 683 | qla27xx_insert16(1, buf, len); | 671 | qla27xx_insert16(1, buf, len); |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index b20da0d27ad7..3f82ea1b72dc 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
| @@ -500,7 +500,6 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, | |||
| 500 | static void tcm_qla2xxx_handle_data_work(struct work_struct *work) | 500 | static void tcm_qla2xxx_handle_data_work(struct work_struct *work) |
| 501 | { | 501 | { |
| 502 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); | 502 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); |
| 503 | unsigned long flags; | ||
| 504 | 503 | ||
| 505 | /* | 504 | /* |
| 506 | * Ensure that the complete FCP WRITE payload has been received. | 505 | * Ensure that the complete FCP WRITE payload has been received. |
| @@ -508,17 +507,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) | |||
| 508 | */ | 507 | */ |
| 509 | cmd->cmd_in_wq = 0; | 508 | cmd->cmd_in_wq = 0; |
| 510 | 509 | ||
| 511 | spin_lock_irqsave(&cmd->cmd_lock, flags); | ||
| 512 | cmd->data_work = 1; | ||
| 513 | if (cmd->aborted) { | ||
| 514 | cmd->data_work_free = 1; | ||
| 515 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 516 | |||
| 517 | tcm_qla2xxx_free_cmd(cmd); | ||
| 518 | return; | ||
| 519 | } | ||
| 520 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 521 | |||
| 522 | cmd->qpair->tgt_counters.qla_core_ret_ctio++; | 510 | cmd->qpair->tgt_counters.qla_core_ret_ctio++; |
| 523 | if (!cmd->write_data_transferred) { | 511 | if (!cmd->write_data_transferred) { |
| 524 | /* | 512 | /* |
| @@ -765,31 +753,13 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) | |||
| 765 | qlt_xmit_tm_rsp(mcmd); | 753 | qlt_xmit_tm_rsp(mcmd); |
| 766 | } | 754 | } |
| 767 | 755 | ||
| 768 | #define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free) | ||
| 769 | static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) | 756 | static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) |
| 770 | { | 757 | { |
| 771 | struct qla_tgt_cmd *cmd = container_of(se_cmd, | 758 | struct qla_tgt_cmd *cmd = container_of(se_cmd, |
| 772 | struct qla_tgt_cmd, se_cmd); | 759 | struct qla_tgt_cmd, se_cmd); |
| 773 | unsigned long flags; | ||
| 774 | 760 | ||
| 775 | if (qlt_abort_cmd(cmd)) | 761 | if (qlt_abort_cmd(cmd)) |
| 776 | return; | 762 | return; |
| 777 | |||
| 778 | spin_lock_irqsave(&cmd->cmd_lock, flags); | ||
| 779 | if ((cmd->state == QLA_TGT_STATE_NEW)|| | ||
| 780 | ((cmd->state == QLA_TGT_STATE_DATA_IN) && | ||
| 781 | DATA_WORK_NOT_FREE(cmd))) { | ||
| 782 | cmd->data_work_free = 1; | ||
| 783 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 784 | /* | ||
| 785 | * cmd has not reached fw, Use this trigger to free it. | ||
| 786 | */ | ||
| 787 | tcm_qla2xxx_free_cmd(cmd); | ||
| 788 | return; | ||
| 789 | } | ||
| 790 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 791 | return; | ||
| 792 | |||
| 793 | } | 763 | } |
| 794 | 764 | ||
| 795 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, | 765 | static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3d38c6d463b8..1bf274e3b2b6 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
| @@ -800,7 +800,11 @@ MODULE_LICENSE("GPL"); | |||
| 800 | module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); | 800 | module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); |
| 801 | MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); | 801 | MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); |
| 802 | 802 | ||
| 803 | #ifdef CONFIG_SCSI_MQ_DEFAULT | ||
| 803 | bool scsi_use_blk_mq = true; | 804 | bool scsi_use_blk_mq = true; |
| 805 | #else | ||
| 806 | bool scsi_use_blk_mq = false; | ||
| 807 | #endif | ||
| 804 | module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); | 808 | module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); |
| 805 | 809 | ||
| 806 | static int __init init_scsi(void) | 810 | static int __init init_scsi(void) |
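The scsi.c hunk above makes the built-in default of scsi_use_blk_mq follow CONFIG_SCSI_MQ_DEFAULT while the use_blk_mq module parameter still overrides it at runtime. The small sketch below shows the same compile-time-default-plus-runtime-override idea in userspace C; the USE_MQ_DEFAULT macro and the command-line option are invented for the example.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Build with -DUSE_MQ_DEFAULT to flip the built-in default. */
    #ifdef USE_MQ_DEFAULT
    static bool use_mq = true;
    #else
    static bool use_mq = false;
    #endif

    int main(int argc, char **argv)
    {
        /* Runtime override, analogous to a module parameter. */
        if (argc > 1 && strcmp(argv[1], "use_mq=1") == 0)
            use_mq = true;
        else if (argc > 1 && strcmp(argv[1], "use_mq=0") == 0)
            use_mq = false;

        printf("multiqueue path: %s\n", use_mq ? "on" : "off");
        return 0;
    }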
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 7e24aa30c3b0..892fbd9800d9 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
| @@ -1286,7 +1286,7 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr, | |||
| 1286 | unsigned long flags; | 1286 | unsigned long flags; |
| 1287 | 1287 | ||
| 1288 | spin_lock_irqsave(shost->host_lock, flags); | 1288 | spin_lock_irqsave(shost->host_lock, flags); |
| 1289 | if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) { | 1289 | if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING | FC_VPORT_DELETING)) { |
| 1290 | spin_unlock_irqrestore(shost->host_lock, flags); | 1290 | spin_unlock_irqrestore(shost->host_lock, flags); |
| 1291 | return -EBUSY; | 1291 | return -EBUSY; |
| 1292 | } | 1292 | } |
| @@ -2430,8 +2430,10 @@ fc_remove_host(struct Scsi_Host *shost) | |||
| 2430 | spin_lock_irqsave(shost->host_lock, flags); | 2430 | spin_lock_irqsave(shost->host_lock, flags); |
| 2431 | 2431 | ||
| 2432 | /* Remove any vports */ | 2432 | /* Remove any vports */ |
| 2433 | list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) | 2433 | list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) { |
| 2434 | vport->flags |= FC_VPORT_DELETING; | ||
| 2434 | fc_queue_work(shost, &vport->vport_delete_work); | 2435 | fc_queue_work(shost, &vport->vport_delete_work); |
| 2436 | } | ||
| 2435 | 2437 | ||
| 2436 | /* Remove any remote ports */ | 2438 | /* Remove any remote ports */ |
| 2437 | list_for_each_entry_safe(rport, next_rport, | 2439 | list_for_each_entry_safe(rport, next_rport, |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bea36adeee17..e2647f2d4430 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -1277,6 +1277,9 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt) | |||
| 1277 | { | 1277 | { |
| 1278 | struct request *rq = SCpnt->request; | 1278 | struct request *rq = SCpnt->request; |
| 1279 | 1279 | ||
| 1280 | if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK) | ||
| 1281 | sd_zbc_write_unlock_zone(SCpnt); | ||
| 1282 | |||
| 1280 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) | 1283 | if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) |
| 1281 | __free_page(rq->special_vec.bv_page); | 1284 | __free_page(rq->special_vec.bv_page); |
| 1282 | 1285 | ||
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 96855df9f49d..8aa54779aac1 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c | |||
| @@ -294,6 +294,9 @@ int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd) | |||
| 294 | test_and_set_bit(zno, sdkp->zones_wlock)) | 294 | test_and_set_bit(zno, sdkp->zones_wlock)) |
| 295 | return BLKPREP_DEFER; | 295 | return BLKPREP_DEFER; |
| 296 | 296 | ||
| 297 | WARN_ON_ONCE(cmd->flags & SCMD_ZONE_WRITE_LOCK); | ||
| 298 | cmd->flags |= SCMD_ZONE_WRITE_LOCK; | ||
| 299 | |||
| 297 | return BLKPREP_OK; | 300 | return BLKPREP_OK; |
| 298 | } | 301 | } |
| 299 | 302 | ||
| @@ -302,9 +305,10 @@ void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd) | |||
| 302 | struct request *rq = cmd->request; | 305 | struct request *rq = cmd->request; |
| 303 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); | 306 | struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); |
| 304 | 307 | ||
| 305 | if (sdkp->zones_wlock) { | 308 | if (sdkp->zones_wlock && cmd->flags & SCMD_ZONE_WRITE_LOCK) { |
| 306 | unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq)); | 309 | unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq)); |
| 307 | WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock)); | 310 | WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock)); |
| 311 | cmd->flags &= ~SCMD_ZONE_WRITE_LOCK; | ||
| 308 | clear_bit_unlock(zno, sdkp->zones_wlock); | 312 | clear_bit_unlock(zno, sdkp->zones_wlock); |
| 309 | smp_mb__after_atomic(); | 313 | smp_mb__after_atomic(); |
| 310 | } | 314 | } |
| @@ -335,9 +339,6 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, | |||
| 335 | case REQ_OP_WRITE_ZEROES: | 339 | case REQ_OP_WRITE_ZEROES: |
| 336 | case REQ_OP_WRITE_SAME: | 340 | case REQ_OP_WRITE_SAME: |
| 337 | 341 | ||
| 338 | /* Unlock the zone */ | ||
| 339 | sd_zbc_write_unlock_zone(cmd); | ||
| 340 | |||
| 341 | if (result && | 342 | if (result && |
| 342 | sshdr->sense_key == ILLEGAL_REQUEST && | 343 | sshdr->sense_key == ILLEGAL_REQUEST && |
| 343 | sshdr->asc == 0x21) | 344 | sshdr->asc == 0x21) |
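The sd.c and sd_zbc.c hunks above move the zone unlock from the completion path into command teardown and gate both lock and unlock on a SCMD_ZONE_WRITE_LOCK flag, so a command releases the zone only if it actually took the lock, and only once. A hedged sketch of that ownership-flag pattern follows, with invented names and a single boolean standing in for the per-zone bitmap.

    #include <stdbool.h>
    #include <stdio.h>

    #define CMD_HOLDS_ZONE_LOCK 0x1

    struct zoned_disk { bool zone_locked; };
    struct command    { unsigned int flags; };

    static bool lock_zone(struct zoned_disk *d, struct command *c)
    {
        if (d->zone_locked)
            return false;                  /* another write owns the zone: defer */
        d->zone_locked = true;
        c->flags |= CMD_HOLDS_ZONE_LOCK;   /* remember that *this* command took it */
        return true;
    }

    static void unlock_zone(struct zoned_disk *d, struct command *c)
    {
        /* Only the owner unlocks, and only once. */
        if (c->flags & CMD_HOLDS_ZONE_LOCK) {
            c->flags &= ~CMD_HOLDS_ZONE_LOCK;
            d->zone_locked = false;
        }
    }

    int main(void)
    {
        struct zoned_disk d = { 0 };
        struct command a = { 0 }, b = { 0 };

        lock_zone(&d, &a);
        unlock_zone(&d, &b);   /* no effect: b never took the lock */
        unlock_zone(&d, &a);   /* releases the zone */
        unlock_zone(&d, &a);   /* second call is a harmless no-op */
        printf("zone locked: %d\n", d.zone_locked);
        return 0;
    }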
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index f1cdf32d7514..8927f9f54ad9 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
| @@ -99,7 +99,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code, | |||
| 99 | 99 | ||
| 100 | ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, | 100 | ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, |
| 101 | NULL, SES_TIMEOUT, SES_RETRIES, NULL); | 101 | NULL, SES_TIMEOUT, SES_RETRIES, NULL); |
| 102 | if (unlikely(!ret)) | 102 | if (unlikely(ret)) |
| 103 | return ret; | 103 | return ret; |
| 104 | 104 | ||
| 105 | recv_page_code = ((unsigned char *)buf)[0]; | 105 | recv_page_code = ((unsigned char *)buf)[0]; |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 1e82d4128a84..84e782d8e7c3 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
| @@ -751,32 +751,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, | |||
| 751 | return count; | 751 | return count; |
| 752 | } | 752 | } |
| 753 | 753 | ||
| 754 | static bool sg_is_valid_dxfer(sg_io_hdr_t *hp) | ||
| 755 | { | ||
| 756 | switch (hp->dxfer_direction) { | ||
| 757 | case SG_DXFER_NONE: | ||
| 758 | if (hp->dxferp || hp->dxfer_len > 0) | ||
| 759 | return false; | ||
| 760 | return true; | ||
| 761 | case SG_DXFER_FROM_DEV: | ||
| 762 | if (hp->dxfer_len < 0) | ||
| 763 | return false; | ||
| 764 | return true; | ||
| 765 | case SG_DXFER_TO_DEV: | ||
| 766 | case SG_DXFER_TO_FROM_DEV: | ||
| 767 | if (!hp->dxferp || hp->dxfer_len == 0) | ||
| 768 | return false; | ||
| 769 | return true; | ||
| 770 | case SG_DXFER_UNKNOWN: | ||
| 771 | if ((!hp->dxferp && hp->dxfer_len) || | ||
| 772 | (hp->dxferp && hp->dxfer_len == 0)) | ||
| 773 | return false; | ||
| 774 | return true; | ||
| 775 | default: | ||
| 776 | return false; | ||
| 777 | } | ||
| 778 | } | ||
| 779 | |||
| 780 | static int | 754 | static int |
| 781 | sg_common_write(Sg_fd * sfp, Sg_request * srp, | 755 | sg_common_write(Sg_fd * sfp, Sg_request * srp, |
| 782 | unsigned char *cmnd, int timeout, int blocking) | 756 | unsigned char *cmnd, int timeout, int blocking) |
| @@ -797,7 +771,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, | |||
| 797 | "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", | 771 | "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", |
| 798 | (int) cmnd[0], (int) hp->cmd_len)); | 772 | (int) cmnd[0], (int) hp->cmd_len)); |
| 799 | 773 | ||
| 800 | if (!sg_is_valid_dxfer(hp)) | 774 | if (hp->dxfer_len >= SZ_256M) |
| 801 | return -EINVAL; | 775 | return -EINVAL; |
| 802 | 776 | ||
| 803 | k = sg_start_req(srp, cmnd); | 777 | k = sg_start_req(srp, cmnd); |
| @@ -1047,7 +1021,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) | |||
| 1047 | read_lock_irqsave(&sfp->rq_list_lock, iflags); | 1021 | read_lock_irqsave(&sfp->rq_list_lock, iflags); |
| 1048 | val = 0; | 1022 | val = 0; |
| 1049 | list_for_each_entry(srp, &sfp->rq_list, entry) { | 1023 | list_for_each_entry(srp, &sfp->rq_list, entry) { |
| 1050 | if (val > SG_MAX_QUEUE) | 1024 | if (val >= SG_MAX_QUEUE) |
| 1051 | break; | 1025 | break; |
| 1052 | memset(&rinfo[val], 0, SZ_SG_REQ_INFO); | 1026 | memset(&rinfo[val], 0, SZ_SG_REQ_INFO); |
| 1053 | rinfo[val].req_state = srp->done + 1; | 1027 | rinfo[val].req_state = srp->done + 1; |
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 07ec8a8877de..e164ffade38a 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h | |||
| @@ -690,7 +690,7 @@ struct pqi_config_table_heartbeat { | |||
| 690 | 690 | ||
| 691 | #define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0) | 691 | #define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0) |
| 692 | #define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32 | 692 | #define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32 |
| 693 | #define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U) | 693 | #define PQI_MAX_TRANSFER_SIZE (1024U * 1024U) |
| 694 | #define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U) | 694 | #define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U) |
| 695 | 695 | ||
| 696 | #define RAID_MAP_MAX_ENTRIES 1024 | 696 | #define RAID_MAP_MAX_ENTRIES 1024 |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 8e5013d9cad4..94e402ed30f6 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
| @@ -4299,11 +4299,11 @@ static int st_probe(struct device *dev) | |||
| 4299 | kref_init(&tpnt->kref); | 4299 | kref_init(&tpnt->kref); |
| 4300 | tpnt->disk = disk; | 4300 | tpnt->disk = disk; |
| 4301 | disk->private_data = &tpnt->driver; | 4301 | disk->private_data = &tpnt->driver; |
| 4302 | disk->queue = SDp->request_queue; | ||
| 4303 | /* SCSI tape doesn't register this gendisk via add_disk(). Manually | 4302 | /* SCSI tape doesn't register this gendisk via add_disk(). Manually |
| 4304 | * take queue reference that release_disk() expects. */ | 4303 | * take queue reference that release_disk() expects. */ |
| 4305 | if (!blk_get_queue(disk->queue)) | 4304 | if (!blk_get_queue(SDp->request_queue)) |
| 4306 | goto out_put_disk; | 4305 | goto out_put_disk; |
| 4306 | disk->queue = SDp->request_queue; | ||
| 4307 | tpnt->driver = &st_template; | 4307 | tpnt->driver = &st_template; |
| 4308 | 4308 | ||
| 4309 | tpnt->device = SDp; | 4309 | tpnt->device = SDp; |
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index 3039072911a5..afc7ecc3c187 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c | |||
| @@ -200,16 +200,11 @@ static int imx7_pgc_domain_probe(struct platform_device *pdev) | |||
| 200 | 200 | ||
| 201 | domain->dev = &pdev->dev; | 201 | domain->dev = &pdev->dev; |
| 202 | 202 | ||
| 203 | ret = pm_genpd_init(&domain->genpd, NULL, true); | ||
| 204 | if (ret) { | ||
| 205 | dev_err(domain->dev, "Failed to init power domain\n"); | ||
| 206 | return ret; | ||
| 207 | } | ||
| 208 | |||
| 209 | domain->regulator = devm_regulator_get_optional(domain->dev, "power"); | 203 | domain->regulator = devm_regulator_get_optional(domain->dev, "power"); |
| 210 | if (IS_ERR(domain->regulator)) { | 204 | if (IS_ERR(domain->regulator)) { |
| 211 | if (PTR_ERR(domain->regulator) != -ENODEV) { | 205 | if (PTR_ERR(domain->regulator) != -ENODEV) { |
| 212 | dev_err(domain->dev, "Failed to get domain's regulator\n"); | 206 | if (PTR_ERR(domain->regulator) != -EPROBE_DEFER) |
| 207 | dev_err(domain->dev, "Failed to get domain's regulator\n"); | ||
| 213 | return PTR_ERR(domain->regulator); | 208 | return PTR_ERR(domain->regulator); |
| 214 | } | 209 | } |
| 215 | } else { | 210 | } else { |
| @@ -217,6 +212,12 @@ static int imx7_pgc_domain_probe(struct platform_device *pdev) | |||
| 217 | domain->voltage, domain->voltage); | 212 | domain->voltage, domain->voltage); |
| 218 | } | 213 | } |
| 219 | 214 | ||
| 215 | ret = pm_genpd_init(&domain->genpd, NULL, true); | ||
| 216 | if (ret) { | ||
| 217 | dev_err(domain->dev, "Failed to init power domain\n"); | ||
| 218 | return ret; | ||
| 219 | } | ||
| 220 | |||
| 220 | ret = of_genpd_add_provider_simple(domain->dev->of_node, | 221 | ret = of_genpd_add_provider_simple(domain->dev->of_node, |
| 221 | &domain->genpd); | 222 | &domain->genpd); |
| 222 | if (ret) { | 223 | if (ret) { |
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 279e7c5551dd..39225de9d7f1 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c | |||
| @@ -745,6 +745,9 @@ void *knav_pool_create(const char *name, | |||
| 745 | bool slot_found; | 745 | bool slot_found; |
| 746 | int ret; | 746 | int ret; |
| 747 | 747 | ||
| 748 | if (!kdev) | ||
| 749 | return ERR_PTR(-EPROBE_DEFER); | ||
| 750 | |||
| 748 | if (!kdev->dev) | 751 | if (!kdev->dev) |
| 749 | return ERR_PTR(-ENODEV); | 752 | return ERR_PTR(-ENODEV); |
| 750 | 753 | ||
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c index b0b283810e72..de31b9389e2e 100644 --- a/drivers/soc/ti/ti_sci_pm_domains.c +++ b/drivers/soc/ti/ti_sci_pm_domains.c | |||
| @@ -176,6 +176,8 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev) | |||
| 176 | 176 | ||
| 177 | ti_sci_pd->dev = dev; | 177 | ti_sci_pd->dev = dev; |
| 178 | 178 | ||
| 179 | ti_sci_pd->pd.name = "ti_sci_pd"; | ||
| 180 | |||
| 179 | ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev; | 181 | ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev; |
| 180 | ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev; | 182 | ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev; |
| 181 | 183 | ||
diff --git a/drivers/soc/zte/Kconfig b/drivers/soc/zte/Kconfig index 20bde38ce2f9..e9d750c510cd 100644 --- a/drivers/soc/zte/Kconfig +++ b/drivers/soc/zte/Kconfig | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | # ZTE SoC drivers | 2 | # ZTE SoC drivers |
| 3 | # | 3 | # |
| 4 | menuconfig SOC_ZTE | 4 | menuconfig SOC_ZTE |
| 5 | depends on ARCH_ZX || COMPILE_TEST | ||
| 5 | bool "ZTE SoC driver support" | 6 | bool "ZTE SoC driver support" |
| 6 | 7 | ||
| 7 | if SOC_ZTE | 8 | if SOC_ZTE |
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index ca11be21f64b..34ca7823255d 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c | |||
| @@ -2396,6 +2396,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, | |||
| 2396 | continue; | 2396 | continue; |
| 2397 | } | 2397 | } |
| 2398 | 2398 | ||
| 2399 | set_current_state(TASK_RUNNING); | ||
| 2399 | wp = async->buf_write_ptr; | 2400 | wp = async->buf_write_ptr; |
| 2400 | n1 = min(n, async->prealloc_bufsz - wp); | 2401 | n1 = min(n, async->prealloc_bufsz - wp); |
| 2401 | n2 = n - n1; | 2402 | n2 = n - n1; |
| @@ -2528,6 +2529,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, | |||
| 2528 | } | 2529 | } |
| 2529 | continue; | 2530 | continue; |
| 2530 | } | 2531 | } |
| 2532 | |||
| 2533 | set_current_state(TASK_RUNNING); | ||
| 2531 | rp = async->buf_read_ptr; | 2534 | rp = async->buf_read_ptr; |
| 2532 | n1 = min(n, async->prealloc_bufsz - rp); | 2535 | n1 = min(n, async->prealloc_bufsz - rp); |
| 2533 | n2 = n - n1; | 2536 | n2 = n - n1; |
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c index b37a6f48225f..8ea3920400a0 100644 --- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c +++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c | |||
| @@ -16,9 +16,9 @@ | |||
| 16 | 16 | ||
| 17 | static bool __must_check fsl_mc_is_allocatable(const char *obj_type) | 17 | static bool __must_check fsl_mc_is_allocatable(const char *obj_type) |
| 18 | { | 18 | { |
| 19 | return strcmp(obj_type, "dpbp") || | 19 | return strcmp(obj_type, "dpbp") == 0 || |
| 20 | strcmp(obj_type, "dpmcp") || | 20 | strcmp(obj_type, "dpmcp") == 0 || |
| 21 | strcmp(obj_type, "dpcon"); | 21 | strcmp(obj_type, "dpcon") == 0; |
| 22 | } | 22 | } |
| 23 | 23 | ||
| 24 | /** | 24 | /** |
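The fsl-mc hunk above fixes an inverted test: strcmp() returns 0 on a match, so ORing raw strcmp() results made fsl_mc_is_allocatable() report nearly every object type as allocatable. The tiny sketch below shows the corrected comparison with a generic function name.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool is_allocatable(const char *type)
    {
        /* strcmp() == 0 means "equal"; without the comparison, the OR of
         * nonzero return values is true for every unknown type. */
        return strcmp(type, "dpbp") == 0 ||
               strcmp(type, "dpmcp") == 0 ||
               strcmp(type, "dpcon") == 0;
    }

    int main(void)
    {
        printf("dpbp: %d\n", is_allocatable("dpbp"));   /* 1 */
        printf("dpio: %d\n", is_allocatable("dpio"));   /* 0 */
        return 0;
    }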
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c index a6a8393d6664..3e00df74b18c 100644 --- a/drivers/staging/iio/resolver/ad2s1210.c +++ b/drivers/staging/iio/resolver/ad2s1210.c | |||
| @@ -472,7 +472,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev, | |||
| 472 | long m) | 472 | long m) |
| 473 | { | 473 | { |
| 474 | struct ad2s1210_state *st = iio_priv(indio_dev); | 474 | struct ad2s1210_state *st = iio_priv(indio_dev); |
| 475 | bool negative; | 475 | u16 negative; |
| 476 | int ret = 0; | 476 | int ret = 0; |
| 477 | u16 pos; | 477 | u16 pos; |
| 478 | s16 vel; | 478 | s16 vel; |
diff --git a/drivers/staging/media/atomisp/i2c/ap1302.h b/drivers/staging/media/atomisp/i2c/ap1302.h index 9341232c580d..4d0b181a9671 100644 --- a/drivers/staging/media/atomisp/i2c/ap1302.h +++ b/drivers/staging/media/atomisp/i2c/ap1302.h | |||
| @@ -158,8 +158,8 @@ struct ap1302_res_struct { | |||
| 158 | }; | 158 | }; |
| 159 | 159 | ||
| 160 | struct ap1302_context_res { | 160 | struct ap1302_context_res { |
| 161 | s32 res_num; | 161 | u32 res_num; |
| 162 | s32 cur_res; | 162 | u32 cur_res; |
| 163 | struct ap1302_res_struct *res_table; | 163 | struct ap1302_res_struct *res_table; |
| 164 | }; | 164 | }; |
| 165 | 165 | ||
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.h b/drivers/staging/media/atomisp/i2c/gc0310.h index f31eb277f542..7d8a0aeecb6c 100644 --- a/drivers/staging/media/atomisp/i2c/gc0310.h +++ b/drivers/staging/media/atomisp/i2c/gc0310.h | |||
| @@ -454,6 +454,6 @@ struct gc0310_resolution gc0310_res_video[] = { | |||
| 454 | #define N_RES_VIDEO (ARRAY_SIZE(gc0310_res_video)) | 454 | #define N_RES_VIDEO (ARRAY_SIZE(gc0310_res_video)) |
| 455 | 455 | ||
| 456 | static struct gc0310_resolution *gc0310_res = gc0310_res_preview; | 456 | static struct gc0310_resolution *gc0310_res = gc0310_res_preview; |
| 457 | static int N_RES = N_RES_PREVIEW; | 457 | static unsigned long N_RES = N_RES_PREVIEW; |
| 458 | #endif | 458 | #endif |
| 459 | 459 | ||
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h index ccbc757045a5..7c3d994180cc 100644 --- a/drivers/staging/media/atomisp/i2c/gc2235.h +++ b/drivers/staging/media/atomisp/i2c/gc2235.h | |||
| @@ -668,5 +668,5 @@ struct gc2235_resolution gc2235_res_video[] = { | |||
| 668 | #define N_RES_VIDEO (ARRAY_SIZE(gc2235_res_video)) | 668 | #define N_RES_VIDEO (ARRAY_SIZE(gc2235_res_video)) |
| 669 | 669 | ||
| 670 | static struct gc2235_resolution *gc2235_res = gc2235_res_preview; | 670 | static struct gc2235_resolution *gc2235_res = gc2235_res_preview; |
| 671 | static int N_RES = N_RES_PREVIEW; | 671 | static unsigned long N_RES = N_RES_PREVIEW; |
| 672 | #endif | 672 | #endif |
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.h b/drivers/staging/media/atomisp/i2c/imx/imx.h index 36b3f3a5a41f..41b4133ca995 100644 --- a/drivers/staging/media/atomisp/i2c/imx/imx.h +++ b/drivers/staging/media/atomisp/i2c/imx/imx.h | |||
| @@ -480,7 +480,7 @@ struct imx_device { | |||
| 480 | struct imx_vcm *vcm_driver; | 480 | struct imx_vcm *vcm_driver; |
| 481 | struct imx_otp *otp_driver; | 481 | struct imx_otp *otp_driver; |
| 482 | const struct imx_resolution *curr_res_table; | 482 | const struct imx_resolution *curr_res_table; |
| 483 | int entries_curr_table; | 483 | unsigned long entries_curr_table; |
| 484 | const struct firmware *fw; | 484 | const struct firmware *fw; |
| 485 | struct imx_reg_addr *reg_addr; | 485 | struct imx_reg_addr *reg_addr; |
| 486 | const struct imx_reg *param_hold; | 486 | const struct imx_reg *param_hold; |
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h index 944fe8e3bcbf..ab8907e6c9ef 100644 --- a/drivers/staging/media/atomisp/i2c/ov2680.h +++ b/drivers/staging/media/atomisp/i2c/ov2680.h | |||
| @@ -934,7 +934,6 @@ static struct ov2680_resolution ov2680_res_video[] = { | |||
| 934 | #define N_RES_VIDEO (ARRAY_SIZE(ov2680_res_video)) | 934 | #define N_RES_VIDEO (ARRAY_SIZE(ov2680_res_video)) |
| 935 | 935 | ||
| 936 | static struct ov2680_resolution *ov2680_res = ov2680_res_preview; | 936 | static struct ov2680_resolution *ov2680_res = ov2680_res_preview; |
| 937 | static int N_RES = N_RES_PREVIEW; | 937 | static unsigned long N_RES = N_RES_PREVIEW; |
| 938 | |||
| 939 | 938 | ||
| 940 | #endif | 939 | #endif |
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.h b/drivers/staging/media/atomisp/i2c/ov2722.h index b0d40965d89e..73ecb1679718 100644 --- a/drivers/staging/media/atomisp/i2c/ov2722.h +++ b/drivers/staging/media/atomisp/i2c/ov2722.h | |||
| @@ -1263,5 +1263,5 @@ struct ov2722_resolution ov2722_res_video[] = { | |||
| 1263 | #define N_RES_VIDEO (ARRAY_SIZE(ov2722_res_video)) | 1263 | #define N_RES_VIDEO (ARRAY_SIZE(ov2722_res_video)) |
| 1264 | 1264 | ||
| 1265 | static struct ov2722_resolution *ov2722_res = ov2722_res_preview; | 1265 | static struct ov2722_resolution *ov2722_res = ov2722_res_preview; |
| 1266 | static int N_RES = N_RES_PREVIEW; | 1266 | static unsigned long N_RES = N_RES_PREVIEW; |
| 1267 | #endif | 1267 | #endif |
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h index d88ac1777d86..8c2e6794463b 100644 --- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h +++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h | |||
| @@ -1377,5 +1377,5 @@ struct ov5693_resolution ov5693_res_video[] = { | |||
| 1377 | #define N_RES_VIDEO (ARRAY_SIZE(ov5693_res_video)) | 1377 | #define N_RES_VIDEO (ARRAY_SIZE(ov5693_res_video)) |
| 1378 | 1378 | ||
| 1379 | static struct ov5693_resolution *ov5693_res = ov5693_res_preview; | 1379 | static struct ov5693_resolution *ov5693_res = ov5693_res_preview; |
| 1380 | static int N_RES = N_RES_PREVIEW; | 1380 | static unsigned long N_RES = N_RES_PREVIEW; |
| 1381 | #endif | 1381 | #endif |
diff --git a/drivers/staging/media/atomisp/i2c/ov8858.h b/drivers/staging/media/atomisp/i2c/ov8858.h index 9be6a0e63861..d3fde200c013 100644 --- a/drivers/staging/media/atomisp/i2c/ov8858.h +++ b/drivers/staging/media/atomisp/i2c/ov8858.h | |||
| @@ -266,7 +266,7 @@ struct ov8858_device { | |||
| 266 | const struct ov8858_reg *regs; | 266 | const struct ov8858_reg *regs; |
| 267 | struct ov8858_vcm *vcm_driver; | 267 | struct ov8858_vcm *vcm_driver; |
| 268 | const struct ov8858_resolution *curr_res_table; | 268 | const struct ov8858_resolution *curr_res_table; |
| 269 | int entries_curr_table; | 269 | unsigned long entries_curr_table; |
| 270 | 270 | ||
| 271 | struct v4l2_ctrl_handler ctrl_handler; | 271 | struct v4l2_ctrl_handler ctrl_handler; |
| 272 | struct v4l2_ctrl *run_mode; | 272 | struct v4l2_ctrl *run_mode; |
diff --git a/drivers/staging/media/atomisp/i2c/ov8858_btns.h b/drivers/staging/media/atomisp/i2c/ov8858_btns.h index 09e3cdc1a394..f9a3cf8fbf1a 100644 --- a/drivers/staging/media/atomisp/i2c/ov8858_btns.h +++ b/drivers/staging/media/atomisp/i2c/ov8858_btns.h | |||
| @@ -266,7 +266,7 @@ struct ov8858_device { | |||
| 266 | const struct ov8858_reg *regs; | 266 | const struct ov8858_reg *regs; |
| 267 | struct ov8858_vcm *vcm_driver; | 267 | struct ov8858_vcm *vcm_driver; |
| 268 | const struct ov8858_resolution *curr_res_table; | 268 | const struct ov8858_resolution *curr_res_table; |
| 269 | int entries_curr_table; | 269 | unsigned long entries_curr_table; |
| 270 | 270 | ||
| 271 | struct v4l2_ctrl_handler ctrl_handler; | 271 | struct v4l2_ctrl_handler ctrl_handler; |
| 272 | struct v4l2_ctrl *run_mode; | 272 | struct v4l2_ctrl *run_mode; |
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile index 726eaa293c55..2bd98f0667ec 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile +++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile | |||
| @@ -354,7 +354,9 @@ ccflags-y += $(INCLUDES) $(DEFINES) -fno-common | |||
| 354 | 354 | ||
| 355 | # HACK! While this driver is in bad shape, don't enable several warnings | 355 | # HACK! While this driver is in bad shape, don't enable several warnings |
| 356 | # that would be otherwise enabled with W=1 | 356 | # that would be otherwise enabled with W=1 |
| 357 | ccflags-y += -Wno-unused-const-variable -Wno-missing-prototypes \ | 357 | ccflags-y += $(call cc-disable-warning, implicit-fallthrough) |
| 358 | -Wno-unused-but-set-variable -Wno-missing-declarations \ | 358 | ccflags-y += $(call cc-disable-warning, missing-prototypes) |
| 359 | -Wno-suggest-attribute=format -Wno-missing-prototypes \ | 359 | ccflags-y += $(call cc-disable-warning, missing-declarations) |
| 360 | -Wno-implicit-fallthrough | 360 | ccflags-y += $(call cc-disable-warning, suggest-attribute=format) |
| 361 | ccflags-y += $(call cc-disable-warning, unused-const-variable) | ||
| 362 | ccflags-y += $(call cc-disable-warning, unused-but-set-variable) | ||
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h index d3667132851b..c8e0c4fe3717 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h | |||
| @@ -275,7 +275,7 @@ struct atomisp_device { | |||
| 275 | */ | 275 | */ |
| 276 | struct mutex streamoff_mutex; | 276 | struct mutex streamoff_mutex; |
| 277 | 277 | ||
| 278 | int input_cnt; | 278 | unsigned int input_cnt; |
| 279 | struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS]; | 279 | struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS]; |
| 280 | struct v4l2_subdev *flash; | 280 | struct v4l2_subdev *flash; |
| 281 | struct v4l2_subdev *motor; | 281 | struct v4l2_subdev *motor; |
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c index 370ecb959543..f28916ea69f1 100644 --- a/drivers/staging/media/cxd2099/cxd2099.c +++ b/drivers/staging/media/cxd2099/cxd2099.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * cxd2099.c: Driver for the CXD2099AR Common Interface Controller | 2 | * cxd2099.c: Driver for the CXD2099AR Common Interface Controller |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2010-2011 Digital Devices GmbH | 4 | * Copyright (C) 2010-2013 Digital Devices GmbH |
| 5 | * | 5 | * |
| 6 | * | 6 | * |
| 7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
| @@ -33,7 +33,10 @@ | |||
| 33 | 33 | ||
| 34 | #include "cxd2099.h" | 34 | #include "cxd2099.h" |
| 35 | 35 | ||
| 36 | #define MAX_BUFFER_SIZE 248 | 36 | /* comment this line to deactivate the cxd2099ar buffer mode */ |
| 37 | #define BUFFER_MODE 1 | ||
| 38 | |||
| 39 | static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount); | ||
| 37 | 40 | ||
| 38 | struct cxd { | 41 | struct cxd { |
| 39 | struct dvb_ca_en50221 en; | 42 | struct dvb_ca_en50221 en; |
| @@ -48,6 +51,7 @@ struct cxd { | |||
| 48 | int mode; | 51 | int mode; |
| 49 | int ready; | 52 | int ready; |
| 50 | int dr; | 53 | int dr; |
| 54 | int write_busy; | ||
| 51 | int slot_stat; | 55 | int slot_stat; |
| 52 | 56 | ||
| 53 | u8 amem[1024]; | 57 | u8 amem[1024]; |
| @@ -55,6 +59,9 @@ struct cxd { | |||
| 55 | 59 | ||
| 56 | int cammode; | 60 | int cammode; |
| 57 | struct mutex lock; | 61 | struct mutex lock; |
| 62 | |||
| 63 | u8 rbuf[1028]; | ||
| 64 | u8 wbuf[1028]; | ||
| 58 | }; | 65 | }; |
| 59 | 66 | ||
| 60 | static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr, | 67 | static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr, |
| @@ -73,7 +80,7 @@ static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr, | |||
| 73 | } | 80 | } |
| 74 | 81 | ||
| 75 | static int i2c_write(struct i2c_adapter *adapter, u8 adr, | 82 | static int i2c_write(struct i2c_adapter *adapter, u8 adr, |
| 76 | u8 *data, u8 len) | 83 | u8 *data, u16 len) |
| 77 | { | 84 | { |
| 78 | struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = data, .len = len}; | 85 | struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = data, .len = len}; |
| 79 | 86 | ||
| @@ -100,12 +107,12 @@ static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr, | |||
| 100 | } | 107 | } |
| 101 | 108 | ||
| 102 | static int i2c_read(struct i2c_adapter *adapter, u8 adr, | 109 | static int i2c_read(struct i2c_adapter *adapter, u8 adr, |
| 103 | u8 reg, u8 *data, u8 n) | 110 | u8 reg, u8 *data, u16 n) |
| 104 | { | 111 | { |
| 105 | struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0, | 112 | struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0, |
| 106 | .buf = ®, .len = 1}, | 113 | .buf = ®, .len = 1}, |
| 107 | {.addr = adr, .flags = I2C_M_RD, | 114 | {.addr = adr, .flags = I2C_M_RD, |
| 108 | .buf = data, .len = n} }; | 115 | .buf = data, .len = n} }; |
| 109 | 116 | ||
| 110 | if (i2c_transfer(adapter, msgs, 2) != 2) { | 117 | if (i2c_transfer(adapter, msgs, 2) != 2) { |
| 111 | dev_err(&adapter->dev, "error in i2c_read\n"); | 118 | dev_err(&adapter->dev, "error in i2c_read\n"); |
| @@ -114,14 +121,26 @@ static int i2c_read(struct i2c_adapter *adapter, u8 adr, | |||
| 114 | return 0; | 121 | return 0; |
| 115 | } | 122 | } |
| 116 | 123 | ||
| 117 | static int read_block(struct cxd *ci, u8 adr, u8 *data, u8 n) | 124 | static int read_block(struct cxd *ci, u8 adr, u8 *data, u16 n) |
| 118 | { | 125 | { |
| 119 | int status; | 126 | int status = 0; |
| 120 | 127 | ||
| 121 | status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr); | 128 | if (ci->lastaddress != adr) |
| 129 | status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr); | ||
| 122 | if (!status) { | 130 | if (!status) { |
| 123 | ci->lastaddress = adr; | 131 | ci->lastaddress = adr; |
| 124 | status = i2c_read(ci->i2c, ci->cfg.adr, 1, data, n); | 132 | |
| 133 | while (n) { | ||
| 134 | int len = n; | ||
| 135 | |||
| 136 | if (ci->cfg.max_i2c && (len > ci->cfg.max_i2c)) | ||
| 137 | len = ci->cfg.max_i2c; | ||
| 138 | status = i2c_read(ci->i2c, ci->cfg.adr, 1, data, len); | ||
| 139 | if (status) | ||
| 140 | return status; | ||
| 141 | data += len; | ||
| 142 | n -= len; | ||
| 143 | } | ||
| 125 | } | 144 | } |
| 126 | return status; | 145 | return status; |
| 127 | } | 146 | } |
| @@ -182,16 +201,16 @@ static int write_io(struct cxd *ci, u16 address, u8 val) | |||
| 182 | 201 | ||
| 183 | static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask) | 202 | static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask) |
| 184 | { | 203 | { |
| 185 | int status; | 204 | int status = 0; |
| 186 | 205 | ||
| 187 | status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, reg); | 206 | if (ci->lastaddress != reg) |
| 207 | status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, reg); | ||
| 188 | if (!status && reg >= 6 && reg <= 8 && mask != 0xff) | 208 | if (!status && reg >= 6 && reg <= 8 && mask != 0xff) |
| 189 | status = i2c_read_reg(ci->i2c, ci->cfg.adr, 1, &ci->regs[reg]); | 209 | status = i2c_read_reg(ci->i2c, ci->cfg.adr, 1, &ci->regs[reg]); |
| 210 | ci->lastaddress = reg; | ||
| 190 | ci->regs[reg] = (ci->regs[reg] & (~mask)) | val; | 211 | ci->regs[reg] = (ci->regs[reg] & (~mask)) | val; |
| 191 | if (!status) { | 212 | if (!status) |
| 192 | ci->lastaddress = reg; | ||
| 193 | status = i2c_write_reg(ci->i2c, ci->cfg.adr, 1, ci->regs[reg]); | 213 | status = i2c_write_reg(ci->i2c, ci->cfg.adr, 1, ci->regs[reg]); |
| 194 | } | ||
| 195 | if (reg == 0x20) | 214 | if (reg == 0x20) |
| 196 | ci->regs[reg] &= 0x7f; | 215 | ci->regs[reg] &= 0x7f; |
| 197 | return status; | 216 | return status; |
| @@ -203,16 +222,29 @@ static int write_reg(struct cxd *ci, u8 reg, u8 val) | |||
| 203 | } | 222 | } |
| 204 | 223 | ||
| 205 | #ifdef BUFFER_MODE | 224 | #ifdef BUFFER_MODE |
| 206 | static int write_block(struct cxd *ci, u8 adr, u8 *data, int n) | 225 | static int write_block(struct cxd *ci, u8 adr, u8 *data, u16 n) |
| 207 | { | 226 | { |
| 208 | int status; | 227 | int status = 0; |
| 209 | u8 buf[256] = {1}; | 228 | u8 *buf = ci->wbuf; |
| 210 | 229 | ||
| 211 | status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr); | 230 | if (ci->lastaddress != adr) |
| 212 | if (!status) { | 231 | status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr); |
| 213 | ci->lastaddress = adr; | 232 | if (status) |
| 214 | memcpy(buf + 1, data, n); | 233 | return status; |
| 215 | status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1); | 234 | |
| 235 | ci->lastaddress = adr; | ||
| 236 | buf[0] = 1; | ||
| 237 | while (n) { | ||
| 238 | int len = n; | ||
| 239 | |||
| 240 | if (ci->cfg.max_i2c && (len + 1 > ci->cfg.max_i2c)) | ||
| 241 | len = ci->cfg.max_i2c - 1; | ||
| 242 | memcpy(buf + 1, data, len); | ||
| 243 | status = i2c_write(ci->i2c, ci->cfg.adr, buf, len + 1); | ||
| 244 | if (status) | ||
| 245 | return status; | ||
| 246 | n -= len; | ||
| 247 | data += len; | ||
| 216 | } | 248 | } |
| 217 | return status; | 249 | return status; |
| 218 | } | 250 | } |
| @@ -238,6 +270,8 @@ static void set_mode(struct cxd *ci, int mode) | |||
| 238 | 270 | ||
| 239 | static void cam_mode(struct cxd *ci, int mode) | 271 | static void cam_mode(struct cxd *ci, int mode) |
| 240 | { | 272 | { |
| 273 | u8 dummy; | ||
| 274 | |||
| 241 | if (mode == ci->cammode) | 275 | if (mode == ci->cammode) |
| 242 | return; | 276 | return; |
| 243 | 277 | ||
| @@ -246,16 +280,15 @@ static void cam_mode(struct cxd *ci, int mode) | |||
| 246 | write_regm(ci, 0x20, 0x80, 0x80); | 280 | write_regm(ci, 0x20, 0x80, 0x80); |
| 247 | break; | 281 | break; |
| 248 | case 0x01: | 282 | case 0x01: |
| 249 | #ifdef BUFFER_MODE | ||
| 250 | if (!ci->en.read_data) | 283 | if (!ci->en.read_data) |
| 251 | return; | 284 | return; |
| 285 | ci->write_busy = 0; | ||
| 252 | dev_info(&ci->i2c->dev, "enable cam buffer mode\n"); | 286 | dev_info(&ci->i2c->dev, "enable cam buffer mode\n"); |
| 253 | /* write_reg(ci, 0x0d, 0x00); */ | 287 | write_reg(ci, 0x0d, 0x00); |
| 254 | /* write_reg(ci, 0x0e, 0x01); */ | 288 | write_reg(ci, 0x0e, 0x01); |
| 255 | write_regm(ci, 0x08, 0x40, 0x40); | 289 | write_regm(ci, 0x08, 0x40, 0x40); |
| 256 | /* read_reg(ci, 0x12, &dummy); */ | 290 | read_reg(ci, 0x12, &dummy); |
| 257 | write_regm(ci, 0x08, 0x80, 0x80); | 291 | write_regm(ci, 0x08, 0x80, 0x80); |
| 258 | #endif | ||
| 259 | break; | 292 | break; |
| 260 | default: | 293 | default: |
| 261 | break; | 294 | break; |
| @@ -325,7 +358,10 @@ static int init(struct cxd *ci) | |||
| 325 | if (status < 0) | 358 | if (status < 0) |
| 326 | break; | 359 | break; |
| 327 | 360 | ||
| 328 | if (ci->cfg.clock_mode) { | 361 | if (ci->cfg.clock_mode == 2) { |
| 362 | /* bitrate*2^13/ 72000 */ | ||
| 363 | u32 reg = ((ci->cfg.bitrate << 13) + 71999) / 72000; | ||
| 364 | |||
| 329 | if (ci->cfg.polarity) { | 365 | if (ci->cfg.polarity) { |
| 330 | status = write_reg(ci, 0x09, 0x6f); | 366 | status = write_reg(ci, 0x09, 0x6f); |
| 331 | if (status < 0) | 367 | if (status < 0) |
| @@ -335,6 +371,25 @@ static int init(struct cxd *ci) | |||
| 335 | if (status < 0) | 371 | if (status < 0) |
| 336 | break; | 372 | break; |
| 337 | } | 373 | } |
| 374 | status = write_reg(ci, 0x20, 0x08); | ||
| 375 | if (status < 0) | ||
| 376 | break; | ||
| 377 | status = write_reg(ci, 0x21, (reg >> 8) & 0xff); | ||
| 378 | if (status < 0) | ||
| 379 | break; | ||
| 380 | status = write_reg(ci, 0x22, reg & 0xff); | ||
| 381 | if (status < 0) | ||
| 382 | break; | ||
| 383 | } else if (ci->cfg.clock_mode == 1) { | ||
| 384 | if (ci->cfg.polarity) { | ||
| 385 | status = write_reg(ci, 0x09, 0x6f); /* D */ | ||
| 386 | if (status < 0) | ||
| 387 | break; | ||
| 388 | } else { | ||
| 389 | status = write_reg(ci, 0x09, 0x6d); | ||
| 390 | if (status < 0) | ||
| 391 | break; | ||
| 392 | } | ||
| 338 | status = write_reg(ci, 0x20, 0x68); | 393 | status = write_reg(ci, 0x20, 0x68); |
| 339 | if (status < 0) | 394 | if (status < 0) |
| 340 | break; | 395 | break; |
| @@ -346,7 +401,7 @@ static int init(struct cxd *ci) | |||
| 346 | break; | 401 | break; |
| 347 | } else { | 402 | } else { |
| 348 | if (ci->cfg.polarity) { | 403 | if (ci->cfg.polarity) { |
| 349 | status = write_reg(ci, 0x09, 0x4f); | 404 | status = write_reg(ci, 0x09, 0x4f); /* C */ |
| 350 | if (status < 0) | 405 | if (status < 0) |
| 351 | break; | 406 | break; |
| 352 | } else { | 407 | } else { |
| @@ -354,7 +409,6 @@ static int init(struct cxd *ci) | |||
| 354 | if (status < 0) | 409 | if (status < 0) |
| 355 | break; | 410 | break; |
| 356 | } | 411 | } |
| 357 | |||
| 358 | status = write_reg(ci, 0x20, 0x28); | 412 | status = write_reg(ci, 0x20, 0x28); |
| 359 | if (status < 0) | 413 | if (status < 0) |
| 360 | break; | 414 | break; |
| @@ -401,7 +455,6 @@ static int read_attribute_mem(struct dvb_ca_en50221 *ca, | |||
| 401 | set_mode(ci, 1); | 455 | set_mode(ci, 1); |
| 402 | read_pccard(ci, address, &val, 1); | 456 | read_pccard(ci, address, &val, 1); |
| 403 | mutex_unlock(&ci->lock); | 457 | mutex_unlock(&ci->lock); |
| 404 | /* printk(KERN_INFO "%02x:%02x\n", address,val); */ | ||
| 405 | return val; | 458 | return val; |
| 406 | } | 459 | } |
| 407 | 460 | ||
| @@ -446,6 +499,9 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot) | |||
| 446 | { | 499 | { |
| 447 | struct cxd *ci = ca->data; | 500 | struct cxd *ci = ca->data; |
| 448 | 501 | ||
| 502 | if (ci->cammode) | ||
| 503 | read_data(ca, slot, ci->rbuf, 0); | ||
| 504 | |||
| 449 | mutex_lock(&ci->lock); | 505 | mutex_lock(&ci->lock); |
| 450 | cam_mode(ci, 0); | 506 | cam_mode(ci, 0); |
| 451 | write_reg(ci, 0x00, 0x21); | 507 | write_reg(ci, 0x00, 0x21); |
| @@ -465,7 +521,6 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot) | |||
| 465 | } | 521 | } |
| 466 | } | 522 | } |
| 467 | mutex_unlock(&ci->lock); | 523 | mutex_unlock(&ci->lock); |
| 468 | /* msleep(500); */ | ||
| 469 | return 0; | 524 | return 0; |
| 470 | } | 525 | } |
| 471 | 526 | ||
| @@ -474,11 +529,19 @@ static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot) | |||
| 474 | struct cxd *ci = ca->data; | 529 | struct cxd *ci = ca->data; |
| 475 | 530 | ||
| 476 | dev_info(&ci->i2c->dev, "%s\n", __func__); | 531 | dev_info(&ci->i2c->dev, "%s\n", __func__); |
| 532 | if (ci->cammode) | ||
| 533 | read_data(ca, slot, ci->rbuf, 0); | ||
| 477 | mutex_lock(&ci->lock); | 534 | mutex_lock(&ci->lock); |
| 535 | write_reg(ci, 0x00, 0x21); | ||
| 536 | write_reg(ci, 0x06, 0x1F); | ||
| 537 | msleep(300); | ||
| 538 | |||
| 478 | write_regm(ci, 0x09, 0x08, 0x08); | 539 | write_regm(ci, 0x09, 0x08, 0x08); |
| 479 | write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */ | 540 | write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */ |
| 480 | write_regm(ci, 0x06, 0x07, 0x07); /* Clear IO Mode */ | 541 | write_regm(ci, 0x06, 0x07, 0x07); /* Clear IO Mode */ |
| 542 | |||
| 481 | ci->mode = -1; | 543 | ci->mode = -1; |
| 544 | ci->write_busy = 0; | ||
| 482 | mutex_unlock(&ci->lock); | 545 | mutex_unlock(&ci->lock); |
| 483 | return 0; | 546 | return 0; |
| 484 | } | 547 | } |
| @@ -490,9 +553,7 @@ static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) | |||
| 490 | mutex_lock(&ci->lock); | 553 | mutex_lock(&ci->lock); |
| 491 | write_regm(ci, 0x09, 0x00, 0x08); | 554 | write_regm(ci, 0x09, 0x00, 0x08); |
| 492 | set_mode(ci, 0); | 555 | set_mode(ci, 0); |
| 493 | #ifdef BUFFER_MODE | ||
| 494 | cam_mode(ci, 1); | 556 | cam_mode(ci, 1); |
| 495 | #endif | ||
| 496 | mutex_unlock(&ci->lock); | 557 | mutex_unlock(&ci->lock); |
| 497 | return 0; | 558 | return 0; |
| 498 | } | 559 | } |
| @@ -506,12 +567,10 @@ static int campoll(struct cxd *ci) | |||
| 506 | return 0; | 567 | return 0; |
| 507 | write_reg(ci, 0x05, istat); | 568 | write_reg(ci, 0x05, istat); |
| 508 | 569 | ||
| 509 | if (istat & 0x40) { | 570 | if (istat & 0x40) |
| 510 | ci->dr = 1; | 571 | ci->dr = 1; |
| 511 | dev_info(&ci->i2c->dev, "DR\n"); | ||
| 512 | } | ||
| 513 | if (istat & 0x20) | 572 | if (istat & 0x20) |
| 514 | dev_info(&ci->i2c->dev, "WC\n"); | 573 | ci->write_busy = 0; |
| 515 | 574 | ||
| 516 | if (istat & 2) { | 575 | if (istat & 2) { |
| 517 | u8 slotstat; | 576 | u8 slotstat; |
| @@ -519,7 +578,8 @@ static int campoll(struct cxd *ci) | |||
| 519 | read_reg(ci, 0x01, &slotstat); | 578 | read_reg(ci, 0x01, &slotstat); |
| 520 | if (!(2 & slotstat)) { | 579 | if (!(2 & slotstat)) { |
| 521 | if (!ci->slot_stat) { | 580 | if (!ci->slot_stat) { |
| 522 | ci->slot_stat = DVB_CA_EN50221_POLL_CAM_PRESENT; | 581 | ci->slot_stat |= |
| 582 | DVB_CA_EN50221_POLL_CAM_PRESENT; | ||
| 523 | write_regm(ci, 0x03, 0x08, 0x08); | 583 | write_regm(ci, 0x03, 0x08, 0x08); |
| 524 | } | 584 | } |
| 525 | 585 | ||
| @@ -531,8 +591,8 @@ static int campoll(struct cxd *ci) | |||
| 531 | ci->ready = 0; | 591 | ci->ready = 0; |
| 532 | } | 592 | } |
| 533 | } | 593 | } |
| 534 | if (istat & 8 && | 594 | if ((istat & 8) && |
| 535 | ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) { | 595 | (ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT)) { |
| 536 | ci->ready = 1; | 596 | ci->ready = 1; |
| 537 | ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY; | 597 | ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY; |
| 538 | } | 598 | } |
| @@ -553,7 +613,6 @@ static int poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) | |||
| 553 | return ci->slot_stat; | 613 | return ci->slot_stat; |
| 554 | } | 614 | } |
| 555 | 615 | ||
| 556 | #ifdef BUFFER_MODE | ||
| 557 | static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount) | 616 | static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount) |
| 558 | { | 617 | { |
| 559 | struct cxd *ci = ca->data; | 618 | struct cxd *ci = ca->data; |
| @@ -564,30 +623,38 @@ static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount) | |||
| 564 | campoll(ci); | 623 | campoll(ci); |
| 565 | mutex_unlock(&ci->lock); | 624 | mutex_unlock(&ci->lock); |
| 566 | 625 | ||
| 567 | dev_info(&ci->i2c->dev, "%s\n", __func__); | ||
| 568 | if (!ci->dr) | 626 | if (!ci->dr) |
| 569 | return 0; | 627 | return 0; |
| 570 | 628 | ||
| 571 | mutex_lock(&ci->lock); | 629 | mutex_lock(&ci->lock); |
| 572 | read_reg(ci, 0x0f, &msb); | 630 | read_reg(ci, 0x0f, &msb); |
| 573 | read_reg(ci, 0x10, &lsb); | 631 | read_reg(ci, 0x10, &lsb); |
| 574 | len = (msb << 8) | lsb; | 632 | len = ((u16)msb << 8) | lsb; |
| 633 | if (len > ecount || len < 2) { | ||
| 634 | /* read it anyway or cxd may hang */ | ||
| 635 | read_block(ci, 0x12, ci->rbuf, len); | ||
| 636 | mutex_unlock(&ci->lock); | ||
| 637 | return -EIO; | ||
| 638 | } | ||
| 575 | read_block(ci, 0x12, ebuf, len); | 639 | read_block(ci, 0x12, ebuf, len); |
| 576 | ci->dr = 0; | 640 | ci->dr = 0; |
| 577 | mutex_unlock(&ci->lock); | 641 | mutex_unlock(&ci->lock); |
| 578 | |||
| 579 | return len; | 642 | return len; |
| 580 | } | 643 | } |
| 581 | 644 | ||
| 645 | #ifdef BUFFER_MODE | ||
| 646 | |||
| 582 | static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount) | 647 | static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount) |
| 583 | { | 648 | { |
| 584 | struct cxd *ci = ca->data; | 649 | struct cxd *ci = ca->data; |
| 585 | 650 | ||
| 651 | if (ci->write_busy) | ||
| 652 | return -EAGAIN; | ||
| 586 | mutex_lock(&ci->lock); | 653 | mutex_lock(&ci->lock); |
| 587 | dev_info(&ci->i2c->dev, "%s %d\n", __func__, ecount); | ||
| 588 | write_reg(ci, 0x0d, ecount >> 8); | 654 | write_reg(ci, 0x0d, ecount >> 8); |
| 589 | write_reg(ci, 0x0e, ecount & 0xff); | 655 | write_reg(ci, 0x0e, ecount & 0xff); |
| 590 | write_block(ci, 0x11, ebuf, ecount); | 656 | write_block(ci, 0x11, ebuf, ecount); |
| 657 | ci->write_busy = 1; | ||
| 591 | mutex_unlock(&ci->lock); | 658 | mutex_unlock(&ci->lock); |
| 592 | return ecount; | 659 | return ecount; |
| 593 | } | 660 | } |
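write_data() above now refuses a new block transfer with -EAGAIN while the previous one is still pending (write_busy is only cleared once campoll() sees the write-complete interrupt bit). A self-contained sketch of the retry pattern a caller of such an interface might use — fake_write() and the retry limit are invented for illustration, not taken from the dvb_ca_en50221 core:

/* Illustration of retrying a write that may return -EAGAIN while the
 * hardware is still busy with the previous block. fake_write() is a
 * stand-in for a write_data()-style callback, not real driver code.
 */
#include <errno.h>
#include <stdio.h>

static int busy_countdown = 2;	/* pretend the device is busy twice */

static int fake_write(const unsigned char *buf, int len)
{
	if (busy_countdown-- > 0)
		return -EAGAIN;	/* previous transfer still in flight */
	return len;		/* whole block accepted */
}

static int write_retry(const unsigned char *buf, int len, int max_tries)
{
	int i, ret;

	for (i = 0; i < max_tries; i++) {
		ret = fake_write(buf, len);
		if (ret != -EAGAIN)
			return ret;	/* success or a hard error */
		/* a real caller would back off / wait for the WC interrupt here */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	unsigned char buf[16] = { 0 };

	printf("wrote %d bytes\n", write_retry(buf, sizeof(buf), 5));
	return 0;
}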
diff --git a/drivers/staging/media/cxd2099/cxd2099.h b/drivers/staging/media/cxd2099/cxd2099.h index 0eb607c5b423..f4b29b1d6eb8 100644 --- a/drivers/staging/media/cxd2099/cxd2099.h +++ b/drivers/staging/media/cxd2099/cxd2099.h | |||
| @@ -30,8 +30,10 @@ | |||
| 30 | struct cxd2099_cfg { | 30 | struct cxd2099_cfg { |
| 31 | u32 bitrate; | 31 | u32 bitrate; |
| 32 | u8 adr; | 32 | u8 adr; |
| 33 | u8 polarity:1; | 33 | u8 polarity; |
| 34 | u8 clock_mode:1; | 34 | u8 clock_mode; |
| 35 | |||
| 36 | u32 max_i2c; | ||
| 35 | }; | 37 | }; |
| 36 | 38 | ||
| 37 | #if defined(CONFIG_DVB_CXD2099) || \ | 39 | #if defined(CONFIG_DVB_CXD2099) || \ |
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index d283341cfe43..56cd4e5e51b2 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c | |||
| @@ -45,6 +45,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { | |||
| 45 | {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ | 45 | {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ |
| 46 | {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ | 46 | {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ |
| 47 | {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ | 47 | {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ |
| 48 | {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ | ||
| 48 | {} /* Terminating entry */ | 49 | {} /* Terminating entry */ |
| 49 | }; | 50 | }; |
| 50 | 51 | ||
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c index e583dd8a418b..d4fa41be80f9 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c | |||
| @@ -1510,11 +1510,13 @@ cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) | |||
| 1510 | 1510 | ||
| 1511 | if (!cnp) { | 1511 | if (!cnp) { |
| 1512 | pr_info("%s stid %d lookup failure\n", __func__, stid); | 1512 | pr_info("%s stid %d lookup failure\n", __func__, stid); |
| 1513 | return; | 1513 | goto rel_skb; |
| 1514 | } | 1514 | } |
| 1515 | 1515 | ||
| 1516 | cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); | 1516 | cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); |
| 1517 | cxgbit_put_cnp(cnp); | 1517 | cxgbit_put_cnp(cnp); |
| 1518 | rel_skb: | ||
| 1519 | __kfree_skb(skb); | ||
| 1518 | } | 1520 | } |
| 1519 | 1521 | ||
| 1520 | static void | 1522 | static void |
| @@ -1530,11 +1532,13 @@ cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) | |||
| 1530 | 1532 | ||
| 1531 | if (!cnp) { | 1533 | if (!cnp) { |
| 1532 | pr_info("%s stid %d lookup failure\n", __func__, stid); | 1534 | pr_info("%s stid %d lookup failure\n", __func__, stid); |
| 1533 | return; | 1535 | goto rel_skb; |
| 1534 | } | 1536 | } |
| 1535 | 1537 | ||
| 1536 | cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); | 1538 | cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); |
| 1537 | cxgbit_put_cnp(cnp); | 1539 | cxgbit_put_cnp(cnp); |
| 1540 | rel_skb: | ||
| 1541 | __kfree_skb(skb); | ||
| 1538 | } | 1542 | } |
| 1539 | 1543 | ||
| 1540 | static void | 1544 | static void |
| @@ -1819,12 +1823,16 @@ static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb) | |||
| 1819 | struct tid_info *t = lldi->tids; | 1823 | struct tid_info *t = lldi->tids; |
| 1820 | 1824 | ||
| 1821 | csk = lookup_tid(t, tid); | 1825 | csk = lookup_tid(t, tid); |
| 1822 | if (unlikely(!csk)) | 1826 | if (unlikely(!csk)) { |
| 1823 | pr_err("can't find connection for tid %u.\n", tid); | 1827 | pr_err("can't find connection for tid %u.\n", tid); |
| 1824 | else | 1828 | goto rel_skb; |
| 1829 | } else { | ||
| 1825 | cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status); | 1830 | cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status); |
| 1831 | } | ||
| 1826 | 1832 | ||
| 1827 | cxgbit_put_csk(csk); | 1833 | cxgbit_put_csk(csk); |
| 1834 | rel_skb: | ||
| 1835 | __kfree_skb(skb); | ||
| 1828 | } | 1836 | } |
| 1829 | 1837 | ||
| 1830 | static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb) | 1838 | static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb) |
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c index dda13f1af38e..514986b57c2d 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_target.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c | |||
| @@ -827,7 +827,7 @@ cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login, | |||
| 827 | 827 | ||
| 828 | static void | 828 | static void |
| 829 | cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg, | 829 | cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg, |
| 830 | unsigned int nents) | 830 | unsigned int nents, u32 skip) |
| 831 | { | 831 | { |
| 832 | struct skb_seq_state st; | 832 | struct skb_seq_state st; |
| 833 | const u8 *buf; | 833 | const u8 *buf; |
| @@ -846,7 +846,7 @@ cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg, | |||
| 846 | } | 846 | } |
| 847 | 847 | ||
| 848 | consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf, | 848 | consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf, |
| 849 | buf_len, consumed); | 849 | buf_len, skip + consumed); |
| 850 | } | 850 | } |
| 851 | } | 851 | } |
| 852 | 852 | ||
| @@ -912,7 +912,7 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, | |||
| 912 | struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0]; | 912 | struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0]; |
| 913 | u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE)); | 913 | u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE)); |
| 914 | 914 | ||
| 915 | cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents); | 915 | cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0); |
| 916 | } | 916 | } |
| 917 | 917 | ||
| 918 | cmd->write_data_done += pdu_cb->dlen; | 918 | cmd->write_data_done += pdu_cb->dlen; |
| @@ -1069,11 +1069,13 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk) | |||
| 1069 | cmd->se_cmd.data_length); | 1069 | cmd->se_cmd.data_length); |
| 1070 | 1070 | ||
| 1071 | if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) { | 1071 | if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) { |
| 1072 | u32 skip = data_offset % PAGE_SIZE; | ||
| 1073 | |||
| 1072 | sg_off = data_offset / PAGE_SIZE; | 1074 | sg_off = data_offset / PAGE_SIZE; |
| 1073 | sg_start = &cmd->se_cmd.t_data_sg[sg_off]; | 1075 | sg_start = &cmd->se_cmd.t_data_sg[sg_off]; |
| 1074 | sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE)); | 1076 | sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE)); |
| 1075 | 1077 | ||
| 1076 | cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents); | 1078 | cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip); |
| 1077 | } | 1079 | } |
| 1078 | 1080 | ||
| 1079 | check_payload: | 1081 | check_payload: |
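The skip/sg_nents change above matters whenever the Data-Out offset is not page aligned: the copy then starts partway into the first scatterlist entry and can spill into one more entry than DIV_ROUND_UP(data_len, PAGE_SIZE) suggests. A small self-contained check of the arithmetic, assuming a 4 KiB PAGE_SIZE and an example offset of 6144 bytes:

/* Worked example for the skip/sg_nents computation above, assuming
 * PAGE_SIZE == 4096. A Data-Out PDU at data_offset 6144 with
 * data_len 4096 starts 2048 bytes into the second page, so the copy
 * touches two scatterlist entries; the old DIV_ROUND_UP(data_len,
 * PAGE_SIZE) would have allowed only one.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long data_offset = 6144, data_len = 4096;
	unsigned long sg_off = data_offset / PAGE_SIZE;	/* 1 */
	unsigned long skip = data_offset % PAGE_SIZE;	/* 2048 */
	unsigned long sg_nents = DIV_ROUND_UP(skip + data_len, PAGE_SIZE); /* 2 */

	printf("sg_off=%lu skip=%lu sg_nents=%lu\n", sg_off, skip, sg_nents);
	return 0;
}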
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 74e4975dd1b1..5001261f5d69 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
| @@ -418,6 +418,7 @@ int iscsit_reset_np_thread( | |||
| 418 | return 0; | 418 | return 0; |
| 419 | } | 419 | } |
| 420 | np->np_thread_state = ISCSI_NP_THREAD_RESET; | 420 | np->np_thread_state = ISCSI_NP_THREAD_RESET; |
| 421 | atomic_inc(&np->np_reset_count); | ||
| 421 | 422 | ||
| 422 | if (np->np_thread) { | 423 | if (np->np_thread) { |
| 423 | spin_unlock_bh(&np->np_thread_lock); | 424 | spin_unlock_bh(&np->np_thread_lock); |
| @@ -2167,6 +2168,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
| 2167 | cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); | 2168 | cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); |
| 2168 | cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); | 2169 | cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); |
| 2169 | cmd->data_direction = DMA_NONE; | 2170 | cmd->data_direction = DMA_NONE; |
| 2171 | kfree(cmd->text_in_ptr); | ||
| 2170 | cmd->text_in_ptr = NULL; | 2172 | cmd->text_in_ptr = NULL; |
| 2171 | 2173 | ||
| 2172 | return 0; | 2174 | return 0; |
| @@ -3487,9 +3489,9 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, | |||
| 3487 | return text_length; | 3489 | return text_length; |
| 3488 | 3490 | ||
| 3489 | if (completed) { | 3491 | if (completed) { |
| 3490 | hdr->flags |= ISCSI_FLAG_CMD_FINAL; | 3492 | hdr->flags = ISCSI_FLAG_CMD_FINAL; |
| 3491 | } else { | 3493 | } else { |
| 3492 | hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE; | 3494 | hdr->flags = ISCSI_FLAG_TEXT_CONTINUE; |
| 3493 | cmd->read_data_done += text_length; | 3495 | cmd->read_data_done += text_length; |
| 3494 | if (cmd->targ_xfer_tag == 0xFFFFFFFF) | 3496 | if (cmd->targ_xfer_tag == 0xFFFFFFFF) |
| 3495 | cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); | 3497 | cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index e9bdc8b86e7d..dc13afbd4c88 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
| @@ -1243,9 +1243,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) | |||
| 1243 | flush_signals(current); | 1243 | flush_signals(current); |
| 1244 | 1244 | ||
| 1245 | spin_lock_bh(&np->np_thread_lock); | 1245 | spin_lock_bh(&np->np_thread_lock); |
| 1246 | if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { | 1246 | if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { |
| 1247 | np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; | 1247 | np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; |
| 1248 | spin_unlock_bh(&np->np_thread_lock); | ||
| 1248 | complete(&np->np_restart_comp); | 1249 | complete(&np->np_restart_comp); |
| 1250 | return 1; | ||
| 1249 | } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { | 1251 | } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { |
| 1250 | spin_unlock_bh(&np->np_thread_lock); | 1252 | spin_unlock_bh(&np->np_thread_lock); |
| 1251 | goto exit; | 1253 | goto exit; |
| @@ -1278,7 +1280,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) | |||
| 1278 | goto exit; | 1280 | goto exit; |
| 1279 | } else if (rc < 0) { | 1281 | } else if (rc < 0) { |
| 1280 | spin_lock_bh(&np->np_thread_lock); | 1282 | spin_lock_bh(&np->np_thread_lock); |
| 1281 | if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { | 1283 | if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { |
| 1284 | np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; | ||
| 1282 | spin_unlock_bh(&np->np_thread_lock); | 1285 | spin_unlock_bh(&np->np_thread_lock); |
| 1283 | complete(&np->np_restart_comp); | 1286 | complete(&np->np_restart_comp); |
| 1284 | iscsit_put_transport(conn->conn_transport); | 1287 | iscsit_put_transport(conn->conn_transport); |
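The iscsi_target.c and iscsi_target_login.c hunks above replace a one-shot ISCSI_NP_THREAD_RESET state test with the np_reset_count counter incremented in iscsit_reset_np_thread(), so a reset requested while the login thread is busy elsewhere is not lost: the thread consumes exactly one pending reset per pass with atomic_dec_if_positive(). A stand-alone sketch of that consume-one-pending-request idiom, using C11 atomics in place of the kernel's atomic_t; it is purely illustrative:

/* Counted reset requests: the requester records one pending reset,
 * the consumer takes at most one per pass. consume_reset() mirrors
 * atomic_dec_if_positive(): it returns the new count (>= 0) if a
 * pending reset was consumed, or a negative value otherwise.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int reset_count;

static void request_reset(void)
{
	atomic_fetch_add(&reset_count, 1);
}

static int consume_reset(void)
{
	int old = atomic_load(&reset_count);

	while (old > 0 &&
	       !atomic_compare_exchange_weak(&reset_count, &old, old - 1))
		;	/* old is reloaded by the failed CAS */
	return old > 0 ? old - 1 : -1;
}

int main(void)
{
	request_reset();
	printf("%d\n", consume_reset());	/* 0: one reset handled  */
	printf("%d\n", consume_reset());	/* -1: nothing pending   */
	return 0;
}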
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 36913734c6bc..02e8a5d86658 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
| @@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) | |||
| 364 | mutex_lock(&tpg->acl_node_mutex); | 364 | mutex_lock(&tpg->acl_node_mutex); |
| 365 | if (acl->dynamic_node_acl) | 365 | if (acl->dynamic_node_acl) |
| 366 | acl->dynamic_node_acl = 0; | 366 | acl->dynamic_node_acl = 0; |
| 367 | list_del(&acl->acl_list); | 367 | list_del_init(&acl->acl_list); |
| 368 | mutex_unlock(&tpg->acl_node_mutex); | 368 | mutex_unlock(&tpg->acl_node_mutex); |
| 369 | 369 | ||
| 370 | target_shutdown_sessions(acl); | 370 | target_shutdown_sessions(acl); |
| @@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
| 548 | * in transport_deregister_session(). | 548 | * in transport_deregister_session(). |
| 549 | */ | 549 | */ |
| 550 | list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { | 550 | list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { |
| 551 | list_del(&nacl->acl_list); | 551 | list_del_init(&nacl->acl_list); |
| 552 | 552 | ||
| 553 | core_tpg_wait_for_nacl_pr_ref(nacl); | 553 | core_tpg_wait_for_nacl_pr_ref(nacl); |
| 554 | core_free_device_list_for_node(nacl, se_tpg); | 554 | core_free_device_list_for_node(nacl, se_tpg); |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 97fed9a298bd..836d552b0385 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
| @@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref) | |||
| 466 | } | 466 | } |
| 467 | 467 | ||
| 468 | mutex_lock(&se_tpg->acl_node_mutex); | 468 | mutex_lock(&se_tpg->acl_node_mutex); |
| 469 | list_del(&nacl->acl_list); | 469 | list_del_init(&nacl->acl_list); |
| 470 | mutex_unlock(&se_tpg->acl_node_mutex); | 470 | mutex_unlock(&se_tpg->acl_node_mutex); |
| 471 | 471 | ||
| 472 | core_tpg_wait_for_nacl_pr_ref(nacl); | 472 | core_tpg_wait_for_nacl_pr_ref(nacl); |
| @@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess) | |||
| 538 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); | 538 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); |
| 539 | 539 | ||
| 540 | if (se_nacl->dynamic_stop) | 540 | if (se_nacl->dynamic_stop) |
| 541 | list_del(&se_nacl->acl_list); | 541 | list_del_init(&se_nacl->acl_list); |
| 542 | } | 542 | } |
| 543 | mutex_unlock(&se_tpg->acl_node_mutex); | 543 | mutex_unlock(&se_tpg->acl_node_mutex); |
| 544 | 544 | ||
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 80ee130f8253..942d094269fb 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
| @@ -563,8 +563,6 @@ static int scatter_data_area(struct tcmu_dev *udev, | |||
| 563 | block_remaining); | 563 | block_remaining); |
| 564 | to_offset = get_block_offset_user(udev, dbi, | 564 | to_offset = get_block_offset_user(udev, dbi, |
| 565 | block_remaining); | 565 | block_remaining); |
| 566 | offset = DATA_BLOCK_SIZE - block_remaining; | ||
| 567 | to += offset; | ||
| 568 | 566 | ||
| 569 | if (*iov_cnt != 0 && | 567 | if (*iov_cnt != 0 && |
| 570 | to_offset == iov_tail(*iov)) { | 568 | to_offset == iov_tail(*iov)) { |
| @@ -575,8 +573,10 @@ static int scatter_data_area(struct tcmu_dev *udev, | |||
| 575 | (*iov)->iov_len = copy_bytes; | 573 | (*iov)->iov_len = copy_bytes; |
| 576 | } | 574 | } |
| 577 | if (copy_data) { | 575 | if (copy_data) { |
| 578 | memcpy(to, from + sg->length - sg_remaining, | 576 | offset = DATA_BLOCK_SIZE - block_remaining; |
| 579 | copy_bytes); | 577 | memcpy(to + offset, |
| 578 | from + sg->length - sg_remaining, | ||
| 579 | copy_bytes); | ||
| 580 | tcmu_flush_dcache_range(to, copy_bytes); | 580 | tcmu_flush_dcache_range(to, copy_bytes); |
| 581 | } | 581 | } |
| 582 | sg_remaining -= copy_bytes; | 582 | sg_remaining -= copy_bytes; |
| @@ -637,9 +637,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, | |||
| 637 | copy_bytes = min_t(size_t, sg_remaining, | 637 | copy_bytes = min_t(size_t, sg_remaining, |
| 638 | block_remaining); | 638 | block_remaining); |
| 639 | offset = DATA_BLOCK_SIZE - block_remaining; | 639 | offset = DATA_BLOCK_SIZE - block_remaining; |
| 640 | from += offset; | ||
| 641 | tcmu_flush_dcache_range(from, copy_bytes); | 640 | tcmu_flush_dcache_range(from, copy_bytes); |
| 642 | memcpy(to + sg->length - sg_remaining, from, | 641 | memcpy(to + sg->length - sg_remaining, from + offset, |
| 643 | copy_bytes); | 642 | copy_bytes); |
| 644 | 643 | ||
| 645 | sg_remaining -= copy_bytes; | 644 | sg_remaining -= copy_bytes; |
| @@ -1433,6 +1432,8 @@ static int tcmu_update_uio_info(struct tcmu_dev *udev) | |||
| 1433 | if (udev->dev_config[0]) | 1432 | if (udev->dev_config[0]) |
| 1434 | snprintf(str + used, size - used, "/%s", udev->dev_config); | 1433 | snprintf(str + used, size - used, "/%s", udev->dev_config); |
| 1435 | 1434 | ||
| 1435 | /* If the old string exists, free it */ | ||
| 1436 | kfree(info->name); | ||
| 1436 | info->name = str; | 1437 | info->name = str; |
| 1437 | 1438 | ||
| 1438 | return 0; | 1439 | return 0; |
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c index 308b6e17c88a..fe2f00ceafc5 100644 --- a/drivers/thunderbolt/eeprom.c +++ b/drivers/thunderbolt/eeprom.c | |||
| @@ -333,6 +333,15 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw, | |||
| 333 | int res; | 333 | int res; |
| 334 | enum tb_port_type type; | 334 | enum tb_port_type type; |
| 335 | 335 | ||
| 336 | /* | ||
| 337 | * Some DROMs list more ports than the controller actually has | ||
| 338 | * so we skip those but allow the parser to continue. | ||
| 339 | */ | ||
| 340 | if (header->index > sw->config.max_port_number) { | ||
| 341 | dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n"); | ||
| 342 | return 0; | ||
| 343 | } | ||
| 344 | |||
| 336 | port = &sw->ports[header->index]; | 345 | port = &sw->ports[header->index]; |
| 337 | port->disabled = header->port_disabled; | 346 | port->disabled = header->port_disabled; |
| 338 | if (port->disabled) | 347 | if (port->disabled) |
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 8ee340290219..bdaac1ff00a5 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c | |||
| @@ -904,7 +904,14 @@ static int icm_driver_ready(struct tb *tb) | |||
| 904 | 904 | ||
| 905 | static int icm_suspend(struct tb *tb) | 905 | static int icm_suspend(struct tb *tb) |
| 906 | { | 906 | { |
| 907 | return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0); | 907 | int ret; |
| 908 | |||
| 909 | ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0); | ||
| 910 | if (ret) | ||
| 911 | tb_info(tb, "Ignoring mailbox command error (%d) in %s\n", | ||
| 912 | ret, __func__); | ||
| 913 | |||
| 914 | return 0; | ||
| 908 | } | 915 | } |
| 909 | 916 | ||
| 910 | /* | 917 | /* |
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 40219a706309..e9391bbd4036 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c | |||
| @@ -30,7 +30,7 @@ static DEFINE_IDA(nvm_ida); | |||
| 30 | 30 | ||
| 31 | struct nvm_auth_status { | 31 | struct nvm_auth_status { |
| 32 | struct list_head list; | 32 | struct list_head list; |
| 33 | uuid_be uuid; | 33 | uuid_t uuid; |
| 34 | u32 status; | 34 | u32 status; |
| 35 | }; | 35 | }; |
| 36 | 36 | ||
| @@ -47,7 +47,7 @@ static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw) | |||
| 47 | struct nvm_auth_status *st; | 47 | struct nvm_auth_status *st; |
| 48 | 48 | ||
| 49 | list_for_each_entry(st, &nvm_auth_status_cache, list) { | 49 | list_for_each_entry(st, &nvm_auth_status_cache, list) { |
| 50 | if (!uuid_be_cmp(st->uuid, *sw->uuid)) | 50 | if (uuid_equal(&st->uuid, sw->uuid)) |
| 51 | return st; | 51 | return st; |
| 52 | } | 52 | } |
| 53 | 53 | ||
| @@ -1461,7 +1461,7 @@ struct tb_sw_lookup { | |||
| 1461 | struct tb *tb; | 1461 | struct tb *tb; |
| 1462 | u8 link; | 1462 | u8 link; |
| 1463 | u8 depth; | 1463 | u8 depth; |
| 1464 | const uuid_be *uuid; | 1464 | const uuid_t *uuid; |
| 1465 | }; | 1465 | }; |
| 1466 | 1466 | ||
| 1467 | static int tb_switch_match(struct device *dev, void *data) | 1467 | static int tb_switch_match(struct device *dev, void *data) |
| @@ -1518,7 +1518,7 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth) | |||
| 1518 | * Returned switch has reference count increased so the caller needs to | 1518 | * Returned switch has reference count increased so the caller needs to |
| 1519 | * call tb_switch_put() when done with the switch. | 1519 | * call tb_switch_put() when done with the switch. |
| 1520 | */ | 1520 | */ |
| 1521 | struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid) | 1521 | struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid) |
| 1522 | { | 1522 | { |
| 1523 | struct tb_sw_lookup lookup; | 1523 | struct tb_sw_lookup lookup; |
| 1524 | struct device *dev; | 1524 | struct device *dev; |
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 3d9f64676e58..e0deee4f1eb0 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h | |||
| @@ -101,7 +101,7 @@ struct tb_switch { | |||
| 101 | struct tb_dma_port *dma_port; | 101 | struct tb_dma_port *dma_port; |
| 102 | struct tb *tb; | 102 | struct tb *tb; |
| 103 | u64 uid; | 103 | u64 uid; |
| 104 | uuid_be *uuid; | 104 | uuid_t *uuid; |
| 105 | u16 vendor; | 105 | u16 vendor; |
| 106 | u16 device; | 106 | u16 device; |
| 107 | const char *vendor_name; | 107 | const char *vendor_name; |
| @@ -407,7 +407,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw); | |||
| 407 | struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route); | 407 | struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route); |
| 408 | struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, | 408 | struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, |
| 409 | u8 depth); | 409 | u8 depth); |
| 410 | struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid); | 410 | struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid); |
| 411 | 411 | ||
| 412 | static inline unsigned int tb_switch_phy_port_from_link(unsigned int link) | 412 | static inline unsigned int tb_switch_phy_port_from_link(unsigned int link) |
| 413 | { | 413 | { |
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h index 85b6d33c0919..de6441e4a060 100644 --- a/drivers/thunderbolt/tb_msgs.h +++ b/drivers/thunderbolt/tb_msgs.h | |||
| @@ -179,7 +179,7 @@ struct icm_fr_pkg_get_topology_response { | |||
| 179 | 179 | ||
| 180 | struct icm_fr_event_device_connected { | 180 | struct icm_fr_event_device_connected { |
| 181 | struct icm_pkg_header hdr; | 181 | struct icm_pkg_header hdr; |
| 182 | uuid_be ep_uuid; | 182 | uuid_t ep_uuid; |
| 183 | u8 connection_key; | 183 | u8 connection_key; |
| 184 | u8 connection_id; | 184 | u8 connection_id; |
| 185 | u16 link_info; | 185 | u16 link_info; |
| @@ -193,7 +193,7 @@ struct icm_fr_event_device_connected { | |||
| 193 | 193 | ||
| 194 | struct icm_fr_pkg_approve_device { | 194 | struct icm_fr_pkg_approve_device { |
| 195 | struct icm_pkg_header hdr; | 195 | struct icm_pkg_header hdr; |
| 196 | uuid_be ep_uuid; | 196 | uuid_t ep_uuid; |
| 197 | u8 connection_key; | 197 | u8 connection_key; |
| 198 | u8 connection_id; | 198 | u8 connection_id; |
| 199 | u16 reserved; | 199 | u16 reserved; |
| @@ -207,7 +207,7 @@ struct icm_fr_event_device_disconnected { | |||
| 207 | 207 | ||
| 208 | struct icm_fr_pkg_add_device_key { | 208 | struct icm_fr_pkg_add_device_key { |
| 209 | struct icm_pkg_header hdr; | 209 | struct icm_pkg_header hdr; |
| 210 | uuid_be ep_uuid; | 210 | uuid_t ep_uuid; |
| 211 | u8 connection_key; | 211 | u8 connection_key; |
| 212 | u8 connection_id; | 212 | u8 connection_id; |
| 213 | u16 reserved; | 213 | u16 reserved; |
| @@ -216,7 +216,7 @@ struct icm_fr_pkg_add_device_key { | |||
| 216 | 216 | ||
| 217 | struct icm_fr_pkg_add_device_key_response { | 217 | struct icm_fr_pkg_add_device_key_response { |
| 218 | struct icm_pkg_header hdr; | 218 | struct icm_pkg_header hdr; |
| 219 | uuid_be ep_uuid; | 219 | uuid_t ep_uuid; |
| 220 | u8 connection_key; | 220 | u8 connection_key; |
| 221 | u8 connection_id; | 221 | u8 connection_id; |
| 222 | u16 reserved; | 222 | u16 reserved; |
| @@ -224,7 +224,7 @@ struct icm_fr_pkg_add_device_key_response { | |||
| 224 | 224 | ||
| 225 | struct icm_fr_pkg_challenge_device { | 225 | struct icm_fr_pkg_challenge_device { |
| 226 | struct icm_pkg_header hdr; | 226 | struct icm_pkg_header hdr; |
| 227 | uuid_be ep_uuid; | 227 | uuid_t ep_uuid; |
| 228 | u8 connection_key; | 228 | u8 connection_key; |
| 229 | u8 connection_id; | 229 | u8 connection_id; |
| 230 | u16 reserved; | 230 | u16 reserved; |
| @@ -233,7 +233,7 @@ struct icm_fr_pkg_challenge_device { | |||
| 233 | 233 | ||
| 234 | struct icm_fr_pkg_challenge_device_response { | 234 | struct icm_fr_pkg_challenge_device_response { |
| 235 | struct icm_pkg_header hdr; | 235 | struct icm_pkg_header hdr; |
| 236 | uuid_be ep_uuid; | 236 | uuid_t ep_uuid; |
| 237 | u8 connection_key; | 237 | u8 connection_key; |
| 238 | u8 connection_id; | 238 | u8 connection_id; |
| 239 | u16 reserved; | 239 | u16 reserved; |
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 284749fb0f6b..a6d5164c33a9 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c | |||
| @@ -69,13 +69,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp) | |||
| 69 | #ifdef CONFIG_UNIX98_PTYS | 69 | #ifdef CONFIG_UNIX98_PTYS |
| 70 | if (tty->driver == ptm_driver) { | 70 | if (tty->driver == ptm_driver) { |
| 71 | mutex_lock(&devpts_mutex); | 71 | mutex_lock(&devpts_mutex); |
| 72 | if (tty->link->driver_data) { | 72 | if (tty->link->driver_data) |
| 73 | struct path *path = tty->link->driver_data; | 73 | devpts_pty_kill(tty->link->driver_data); |
| 74 | |||
| 75 | devpts_pty_kill(path->dentry); | ||
| 76 | path_put(path); | ||
| 77 | kfree(path); | ||
| 78 | } | ||
| 79 | mutex_unlock(&devpts_mutex); | 74 | mutex_unlock(&devpts_mutex); |
| 80 | } | 75 | } |
| 81 | #endif | 76 | #endif |
| @@ -607,25 +602,24 @@ static inline void legacy_pty_init(void) { } | |||
| 607 | static struct cdev ptmx_cdev; | 602 | static struct cdev ptmx_cdev; |
| 608 | 603 | ||
| 609 | /** | 604 | /** |
| 610 | * pty_open_peer - open the peer of a pty | 605 | * ptm_open_peer - open the peer of a pty |
| 611 | * @tty: the peer of the pty being opened | 606 | * @master: the open struct file of the ptmx device node |
| 607 | * @tty: the master of the pty being opened | ||
| 608 | * @flags: the flags for open | ||
| 612 | * | 609 | * |
| 613 | * Open the cached dentry in tty->link, providing a safe way for userspace | 610 | * Provide a race free way for userspace to open the slave end of a pty |
| 614 | * to get the slave end of a pty (where they have the master fd and cannot | 611 | * (where they have the master fd and cannot access or trust the mount |
| 615 | * access or trust the mount namespace /dev/pts was mounted inside). | 612 | * namespace /dev/pts was mounted inside). |
| 616 | */ | 613 | */ |
| 617 | static struct file *pty_open_peer(struct tty_struct *tty, int flags) | 614 | int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags) |
| 618 | { | ||
| 619 | if (tty->driver->subtype != PTY_TYPE_MASTER) | ||
| 620 | return ERR_PTR(-EIO); | ||
| 621 | return dentry_open(tty->link->driver_data, flags, current_cred()); | ||
| 622 | } | ||
| 623 | |||
| 624 | static int pty_get_peer(struct tty_struct *tty, int flags) | ||
| 625 | { | 615 | { |
| 626 | int fd = -1; | 616 | int fd = -1; |
| 627 | struct file *filp = NULL; | 617 | struct file *filp; |
| 628 | int retval = -EINVAL; | 618 | int retval = -EINVAL; |
| 619 | struct path path; | ||
| 620 | |||
| 621 | if (tty->driver != ptm_driver) | ||
| 622 | return -EIO; | ||
| 629 | 623 | ||
| 630 | fd = get_unused_fd_flags(0); | 624 | fd = get_unused_fd_flags(0); |
| 631 | if (fd < 0) { | 625 | if (fd < 0) { |
| @@ -633,7 +627,16 @@ static int pty_get_peer(struct tty_struct *tty, int flags) | |||
| 633 | goto err; | 627 | goto err; |
| 634 | } | 628 | } |
| 635 | 629 | ||
| 636 | filp = pty_open_peer(tty, flags); | 630 | /* Compute the slave's path */ |
| 631 | path.mnt = devpts_mntget(master, tty->driver_data); | ||
| 632 | if (IS_ERR(path.mnt)) { | ||
| 633 | retval = PTR_ERR(path.mnt); | ||
| 634 | goto err_put; | ||
| 635 | } | ||
| 636 | path.dentry = tty->link->driver_data; | ||
| 637 | |||
| 638 | filp = dentry_open(&path, flags, current_cred()); | ||
| 639 | mntput(path.mnt); | ||
| 637 | if (IS_ERR(filp)) { | 640 | if (IS_ERR(filp)) { |
| 638 | retval = PTR_ERR(filp); | 641 | retval = PTR_ERR(filp); |
| 639 | goto err_put; | 642 | goto err_put; |
| @@ -662,8 +665,6 @@ static int pty_unix98_ioctl(struct tty_struct *tty, | |||
| 662 | return pty_get_pktmode(tty, (int __user *)arg); | 665 | return pty_get_pktmode(tty, (int __user *)arg); |
| 663 | case TIOCGPTN: /* Get PT Number */ | 666 | case TIOCGPTN: /* Get PT Number */ |
| 664 | return put_user(tty->index, (unsigned int __user *)arg); | 667 | return put_user(tty->index, (unsigned int __user *)arg); |
| 665 | case TIOCGPTPEER: /* Open the other end */ | ||
| 666 | return pty_get_peer(tty, (int) arg); | ||
| 667 | case TIOCSIG: /* Send signal to other side of pty */ | 668 | case TIOCSIG: /* Send signal to other side of pty */ |
| 668 | return pty_signal(tty, (int) arg); | 669 | return pty_signal(tty, (int) arg); |
| 669 | } | 670 | } |
| @@ -791,7 +792,6 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
| 791 | { | 792 | { |
| 792 | struct pts_fs_info *fsi; | 793 | struct pts_fs_info *fsi; |
| 793 | struct tty_struct *tty; | 794 | struct tty_struct *tty; |
| 794 | struct path *pts_path; | ||
| 795 | struct dentry *dentry; | 795 | struct dentry *dentry; |
| 796 | int retval; | 796 | int retval; |
| 797 | int index; | 797 | int index; |
| @@ -845,26 +845,16 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
| 845 | retval = PTR_ERR(dentry); | 845 | retval = PTR_ERR(dentry); |
| 846 | goto err_release; | 846 | goto err_release; |
| 847 | } | 847 | } |
| 848 | /* We need to cache a fake path for TIOCGPTPEER. */ | 848 | tty->link->driver_data = dentry; |
| 849 | pts_path = kmalloc(sizeof(struct path), GFP_KERNEL); | ||
| 850 | if (!pts_path) | ||
| 851 | goto err_release; | ||
| 852 | pts_path->mnt = filp->f_path.mnt; | ||
| 853 | pts_path->dentry = dentry; | ||
| 854 | path_get(pts_path); | ||
| 855 | tty->link->driver_data = pts_path; | ||
| 856 | 849 | ||
| 857 | retval = ptm_driver->ops->open(tty, filp); | 850 | retval = ptm_driver->ops->open(tty, filp); |
| 858 | if (retval) | 851 | if (retval) |
| 859 | goto err_path_put; | 852 | goto err_release; |
| 860 | 853 | ||
| 861 | tty_debug_hangup(tty, "opening (count=%d)\n", tty->count); | 854 | tty_debug_hangup(tty, "opening (count=%d)\n", tty->count); |
| 862 | 855 | ||
| 863 | tty_unlock(tty); | 856 | tty_unlock(tty); |
| 864 | return 0; | 857 | return 0; |
| 865 | err_path_put: | ||
| 866 | path_put(pts_path); | ||
| 867 | kfree(pts_path); | ||
| 868 | err_release: | 858 | err_release: |
| 869 | tty_unlock(tty); | 859 | tty_unlock(tty); |
| 870 | // This will also put-ref the fsi | 860 | // This will also put-ref the fsi |
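ptm_open_peer() above is reached through the new TIOCGPTPEER ioctl (wired into tty_ioctl() further down), which hands userspace a slave fd derived from the master without resolving any /dev/pts path in the caller's mount namespace. A rough userspace sketch, assuming the toolchain's ioctl headers already define TIOCGPTPEER; error handling is minimal:

/* Open the pty master via /dev/ptmx, then ask the kernel for a slave
 * fd directly instead of opening /dev/pts/N. The ioctl argument is
 * the open(2) flags for the slave end.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int master, slave;

	master = posix_openpt(O_RDWR | O_NOCTTY);
	if (master < 0 || grantpt(master) < 0 || unlockpt(master) < 0) {
		perror("ptmx setup");
		return EXIT_FAILURE;
	}

	slave = ioctl(master, TIOCGPTPEER, O_RDWR | O_NOCTTY);
	if (slave < 0) {
		perror("TIOCGPTPEER");
		return EXIT_FAILURE;
	}

	printf("master fd %d, slave fd %d\n", master, slave);
	close(slave);
	close(master);
	return 0;
}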
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index b5def356af63..1aab3010fbfa 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c | |||
| @@ -1043,13 +1043,24 @@ int serial8250_register_8250_port(struct uart_8250_port *up) | |||
| 1043 | if (up->dl_write) | 1043 | if (up->dl_write) |
| 1044 | uart->dl_write = up->dl_write; | 1044 | uart->dl_write = up->dl_write; |
| 1045 | 1045 | ||
| 1046 | if (serial8250_isa_config != NULL) | 1046 | if (uart->port.type != PORT_8250_CIR) { |
| 1047 | serial8250_isa_config(0, &uart->port, | 1047 | if (serial8250_isa_config != NULL) |
| 1048 | &uart->capabilities); | 1048 | serial8250_isa_config(0, &uart->port, |
| 1049 | &uart->capabilities); | ||
| 1050 | |||
| 1051 | ret = uart_add_one_port(&serial8250_reg, | ||
| 1052 | &uart->port); | ||
| 1053 | if (ret == 0) | ||
| 1054 | ret = uart->port.line; | ||
| 1055 | } else { | ||
| 1056 | dev_info(uart->port.dev, | ||
| 1057 | "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", | ||
| 1058 | uart->port.iobase, | ||
| 1059 | (unsigned long long)uart->port.mapbase, | ||
| 1060 | uart->port.irq); | ||
| 1049 | 1061 | ||
| 1050 | ret = uart_add_one_port(&serial8250_reg, &uart->port); | 1062 | ret = 0; |
| 1051 | if (ret == 0) | 1063 | } |
| 1052 | ret = uart->port.line; | ||
| 1053 | } | 1064 | } |
| 1054 | mutex_unlock(&serial_mutex); | 1065 | mutex_unlock(&serial_mutex); |
| 1055 | 1066 | ||
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index b5c98e5bf524..c6360fbdf808 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c | |||
| @@ -261,7 +261,7 @@ __xr17v35x_register_gpio(struct pci_dev *pcidev, | |||
| 261 | } | 261 | } |
| 262 | 262 | ||
| 263 | static const struct property_entry exar_gpio_properties[] = { | 263 | static const struct property_entry exar_gpio_properties[] = { |
| 264 | PROPERTY_ENTRY_U32("linux,first-pin", 0), | 264 | PROPERTY_ENTRY_U32("exar,first-pin", 0), |
| 265 | PROPERTY_ENTRY_U32("ngpios", 16), | 265 | PROPERTY_ENTRY_U32("ngpios", 16), |
| 266 | { } | 266 | { } |
| 267 | }; | 267 | }; |
| @@ -326,7 +326,7 @@ static int iot2040_rs485_config(struct uart_port *port, | |||
| 326 | } | 326 | } |
| 327 | 327 | ||
| 328 | static const struct property_entry iot2040_gpio_properties[] = { | 328 | static const struct property_entry iot2040_gpio_properties[] = { |
| 329 | PROPERTY_ENTRY_U32("linux,first-pin", 10), | 329 | PROPERTY_ENTRY_U32("exar,first-pin", 10), |
| 330 | PROPERTY_ENTRY_U32("ngpios", 1), | 330 | PROPERTY_ENTRY_U32("ngpios", 1), |
| 331 | { } | 331 | { } |
| 332 | }; | 332 | }; |
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 8a857bb34fbb..1888d168a41c 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c | |||
| @@ -142,15 +142,7 @@ static struct vendor_data vendor_sbsa = { | |||
| 142 | .fixed_options = true, | 142 | .fixed_options = true, |
| 143 | }; | 143 | }; |
| 144 | 144 | ||
| 145 | /* | 145 | #ifdef CONFIG_ACPI_SPCR_TABLE |
| 146 | * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as | ||
| 147 | * occasionally getting stuck as 1. To avoid the potential for a hang, check | ||
| 148 | * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART | ||
| 149 | * implementations, so only do so if an affected platform is detected in | ||
| 150 | * parse_spcr(). | ||
| 151 | */ | ||
| 152 | static bool qdf2400_e44_present = false; | ||
| 153 | |||
| 154 | static struct vendor_data vendor_qdt_qdf2400_e44 = { | 146 | static struct vendor_data vendor_qdt_qdf2400_e44 = { |
| 155 | .reg_offset = pl011_std_offsets, | 147 | .reg_offset = pl011_std_offsets, |
| 156 | .fr_busy = UART011_FR_TXFE, | 148 | .fr_busy = UART011_FR_TXFE, |
| @@ -165,6 +157,7 @@ static struct vendor_data vendor_qdt_qdf2400_e44 = { | |||
| 165 | .always_enabled = true, | 157 | .always_enabled = true, |
| 166 | .fixed_options = true, | 158 | .fixed_options = true, |
| 167 | }; | 159 | }; |
| 160 | #endif | ||
| 168 | 161 | ||
| 169 | static u16 pl011_st_offsets[REG_ARRAY_SIZE] = { | 162 | static u16 pl011_st_offsets[REG_ARRAY_SIZE] = { |
| 170 | [REG_DR] = UART01x_DR, | 163 | [REG_DR] = UART01x_DR, |
| @@ -2375,12 +2368,14 @@ static int __init pl011_console_match(struct console *co, char *name, int idx, | |||
| 2375 | resource_size_t addr; | 2368 | resource_size_t addr; |
| 2376 | int i; | 2369 | int i; |
| 2377 | 2370 | ||
| 2378 | if (strcmp(name, "qdf2400_e44") == 0) { | 2371 | /* |
| 2379 | pr_info_once("UART: Working around QDF2400 SoC erratum 44"); | 2372 | * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum |
| 2380 | qdf2400_e44_present = true; | 2373 | * have a distinct console name, so make sure we check for that. |
| 2381 | } else if (strcmp(name, "pl011") != 0) { | 2374 | * The actual implementation of the erratum occurs in the probe |
| 2375 | * function. | ||
| 2376 | */ | ||
| 2377 | if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0)) | ||
| 2382 | return -ENODEV; | 2378 | return -ENODEV; |
| 2383 | } | ||
| 2384 | 2379 | ||
| 2385 | if (uart_parse_earlycon(options, &iotype, &addr, &options)) | 2380 | if (uart_parse_earlycon(options, &iotype, &addr, &options)) |
| 2386 | return -ENODEV; | 2381 | return -ENODEV; |
| @@ -2734,11 +2729,17 @@ static int sbsa_uart_probe(struct platform_device *pdev) | |||
| 2734 | } | 2729 | } |
| 2735 | uap->port.irq = ret; | 2730 | uap->port.irq = ret; |
| 2736 | 2731 | ||
| 2737 | uap->reg_offset = vendor_sbsa.reg_offset; | 2732 | #ifdef CONFIG_ACPI_SPCR_TABLE |
| 2738 | uap->vendor = qdf2400_e44_present ? | 2733 | if (qdf2400_e44_present) { |
| 2739 | &vendor_qdt_qdf2400_e44 : &vendor_sbsa; | 2734 | dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n"); |
| 2735 | uap->vendor = &vendor_qdt_qdf2400_e44; | ||
| 2736 | } else | ||
| 2737 | #endif | ||
| 2738 | uap->vendor = &vendor_sbsa; | ||
| 2739 | |||
| 2740 | uap->reg_offset = uap->vendor->reg_offset; | ||
| 2740 | uap->fifosize = 32; | 2741 | uap->fifosize = 32; |
| 2741 | uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM; | 2742 | uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; |
| 2742 | uap->port.ops = &sbsa_uart_pops; | 2743 | uap->port.ops = &sbsa_uart_pops; |
| 2743 | uap->fixed_baud = baudrate; | 2744 | uap->fixed_baud = baudrate; |
| 2744 | 2745 | ||
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 974b13d24401..10c4038c0e8d 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
| @@ -2518,6 +2518,9 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 2518 | case TIOCSSERIAL: | 2518 | case TIOCSSERIAL: |
| 2519 | tty_warn_deprecated_flags(p); | 2519 | tty_warn_deprecated_flags(p); |
| 2520 | break; | 2520 | break; |
| 2521 | case TIOCGPTPEER: | ||
| 2522 | /* Special because the struct file is needed */ | ||
| 2523 | return ptm_open_peer(file, tty, (int)arg); | ||
| 2521 | default: | 2524 | default: |
| 2522 | retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg); | 2525 | retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg); |
| 2523 | if (retval != -ENOIOCTLCMD) | 2526 | if (retval != -ENOIOCTLCMD) |
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index ab1bb3b538ac..7f277b092b5b 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
| @@ -1888,7 +1888,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev, | |||
| 1888 | /* No more submits can occur */ | 1888 | /* No more submits can occur */ |
| 1889 | spin_lock_irq(&hcd_urb_list_lock); | 1889 | spin_lock_irq(&hcd_urb_list_lock); |
| 1890 | rescan: | 1890 | rescan: |
| 1891 | list_for_each_entry (urb, &ep->urb_list, urb_list) { | 1891 | list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) { |
| 1892 | int is_in; | 1892 | int is_in; |
| 1893 | 1893 | ||
| 1894 | if (urb->unlinked) | 1894 | if (urb->unlinked) |
| @@ -2485,6 +2485,8 @@ void usb_hc_died (struct usb_hcd *hcd) | |||
| 2485 | } | 2485 | } |
| 2486 | if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) { | 2486 | if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) { |
| 2487 | hcd = hcd->shared_hcd; | 2487 | hcd = hcd->shared_hcd; |
| 2488 | clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); | ||
| 2489 | set_bit(HCD_FLAG_DEAD, &hcd->flags); | ||
| 2488 | if (hcd->rh_registered) { | 2490 | if (hcd->rh_registered) { |
| 2489 | clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); | 2491 | clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); |
| 2490 | 2492 | ||
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 6e6797d145dd..822f8c50e423 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
| @@ -4725,7 +4725,8 @@ hub_power_remaining(struct usb_hub *hub) | |||
| 4725 | static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, | 4725 | static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, |
| 4726 | u16 portchange) | 4726 | u16 portchange) |
| 4727 | { | 4727 | { |
| 4728 | int status, i; | 4728 | int status = -ENODEV; |
| 4729 | int i; | ||
| 4729 | unsigned unit_load; | 4730 | unsigned unit_load; |
| 4730 | struct usb_device *hdev = hub->hdev; | 4731 | struct usb_device *hdev = hub->hdev; |
| 4731 | struct usb_hcd *hcd = bus_to_hcd(hdev->bus); | 4732 | struct usb_hcd *hcd = bus_to_hcd(hdev->bus); |
| @@ -4929,9 +4930,10 @@ loop: | |||
| 4929 | 4930 | ||
| 4930 | done: | 4931 | done: |
| 4931 | hub_port_disable(hub, port1, 1); | 4932 | hub_port_disable(hub, port1, 1); |
| 4932 | if (hcd->driver->relinquish_port && !hub->hdev->parent) | 4933 | if (hcd->driver->relinquish_port && !hub->hdev->parent) { |
| 4933 | hcd->driver->relinquish_port(hcd, port1); | 4934 | if (status != -ENOTCONN && status != -ENODEV) |
| 4934 | 4935 | hcd->driver->relinquish_port(hcd, port1); | |
| 4936 | } | ||
| 4935 | } | 4937 | } |
| 4936 | 4938 | ||
| 4937 | /* Handle physical or logical connection change events. | 4939 | /* Handle physical or logical connection change events. |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 3116edfcdc18..574da2b4529c 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
| @@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 150 | /* appletouch */ | 150 | /* appletouch */ |
| 151 | { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, | 151 | { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 152 | 152 | ||
| 153 | /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ | ||
| 154 | { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, | ||
| 155 | |||
| 153 | /* Avision AV600U */ | 156 | /* Avision AV600U */ |
| 154 | { USB_DEVICE(0x0638, 0x0a13), .driver_info = | 157 | { USB_DEVICE(0x0638, 0x0a13), .driver_info = |
| 155 | USB_QUIRK_STRING_FETCH_255 }, | 158 | USB_QUIRK_STRING_FETCH_255 }, |
| @@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { | |||
| 249 | { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME }, | 252 | { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 250 | { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME }, | 253 | { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 251 | { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME }, | 254 | { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 255 | { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
| 252 | 256 | ||
| 253 | /* Logitech Optical Mouse M90/M100 */ | 257 | /* Logitech Optical Mouse M90/M100 */ |
| 254 | { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME }, | 258 | { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME }, |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 6b299c7b7656..f064f1549333 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -896,9 +896,40 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, | |||
| 896 | if (!node) { | 896 | if (!node) { |
| 897 | trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; | 897 | trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; |
| 898 | 898 | ||
| 899 | /* | ||
| 900 | * USB Specification 2.0 Section 5.9.2 states that: "If | ||
| 901 | * there is only a single transaction in the microframe, | ||
| 902 | * only a DATA0 data packet PID is used. If there are | ||
| 903 | * two transactions per microframe, DATA1 is used for | ||
| 904 | * the first transaction data packet and DATA0 is used | ||
| 905 | * for the second transaction data packet. If there are | ||
| 906 | * three transactions per microframe, DATA2 is used for | ||
| 907 | * the first transaction data packet, DATA1 is used for | ||
| 908 | * the second, and DATA0 is used for the third." | ||
| 909 | * | ||
| 910 | * IOW, we should satisfy the following cases: | ||
| 911 | * | ||
| 912 | * 1) length <= maxpacket | ||
| 913 | * - DATA0 | ||
| 914 | * | ||
| 915 | * 2) maxpacket < length <= (2 * maxpacket) | ||
| 916 | * - DATA1, DATA0 | ||
| 917 | * | ||
| 918 | * 3) (2 * maxpacket) < length <= (3 * maxpacket) | ||
| 919 | * - DATA2, DATA1, DATA0 | ||
| 920 | */ | ||
| 899 | if (speed == USB_SPEED_HIGH) { | 921 | if (speed == USB_SPEED_HIGH) { |
| 900 | struct usb_ep *ep = &dep->endpoint; | 922 | struct usb_ep *ep = &dep->endpoint; |
| 901 | trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1); | 923 | unsigned int mult = ep->mult - 1; |
| 924 | unsigned int maxp = usb_endpoint_maxp(ep->desc); | ||
| 925 | |||
| 926 | if (length <= (2 * maxp)) | ||
| 927 | mult--; | ||
| 928 | |||
| 929 | if (length <= maxp) | ||
| 930 | mult--; | ||
| 931 | |||
| 932 | trb->size |= DWC3_TRB_SIZE_PCM1(mult); | ||
| 902 | } | 933 | } |
| 903 | } else { | 934 | } else { |
| 904 | trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; | 935 | trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; |
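The comment block above reduces to a small arithmetic rule: start from ep->mult - 1 and knock one off for each maxpacket boundary the request does not cross. A self-contained check of the three cases, assuming usb_endpoint_maxp() reports 1024 bytes and ep->mult is 3 (values chosen only for illustration):

/* Numeric check of the PCM1 computation above for a high-speed
 * isochronous endpoint with ep->mult == 3 and an assumed maxpacket
 * of 1024 bytes.
 */
#include <stdio.h>

static unsigned int pcm1(unsigned int length, unsigned int maxp,
			 unsigned int ep_mult)
{
	unsigned int mult = ep_mult - 1;

	if (length <= (2 * maxp))
		mult--;
	if (length <= maxp)
		mult--;
	return mult;	/* value programmed via DWC3_TRB_SIZE_PCM1() */
}

int main(void)
{
	printf("%u\n", pcm1(512,  1024, 3));	/* 0 -> DATA0               */
	printf("%u\n", pcm1(1536, 1024, 3));	/* 1 -> DATA1, DATA0        */
	printf("%u\n", pcm1(2560, 1024, 3));	/* 2 -> DATA2, DATA1, DATA0 */
	return 0;
}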
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 62dc9c7798e7..e1de8fe599a3 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c | |||
| @@ -838,21 +838,32 @@ static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep | |||
| 838 | return usb3_req; | 838 | return usb3_req; |
| 839 | } | 839 | } |
| 840 | 840 | ||
| 841 | static void usb3_request_done(struct renesas_usb3_ep *usb3_ep, | 841 | static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep, |
| 842 | struct renesas_usb3_request *usb3_req, int status) | 842 | struct renesas_usb3_request *usb3_req, |
| 843 | int status) | ||
| 843 | { | 844 | { |
| 844 | struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); | 845 | struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); |
| 845 | unsigned long flags; | ||
| 846 | 846 | ||
| 847 | dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n", | 847 | dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n", |
| 848 | usb3_ep->num, usb3_req->req.length, usb3_req->req.actual, | 848 | usb3_ep->num, usb3_req->req.length, usb3_req->req.actual, |
| 849 | status); | 849 | status); |
| 850 | usb3_req->req.status = status; | 850 | usb3_req->req.status = status; |
| 851 | spin_lock_irqsave(&usb3->lock, flags); | ||
| 852 | usb3_ep->started = false; | 851 | usb3_ep->started = false; |
| 853 | list_del_init(&usb3_req->queue); | 852 | list_del_init(&usb3_req->queue); |
| 854 | spin_unlock_irqrestore(&usb3->lock, flags); | 853 | spin_unlock(&usb3->lock); |
| 855 | usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req); | 854 | usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req); |
| 855 | spin_lock(&usb3->lock); | ||
| 856 | } | ||
| 857 | |||
| 858 | static void usb3_request_done(struct renesas_usb3_ep *usb3_ep, | ||
| 859 | struct renesas_usb3_request *usb3_req, int status) | ||
| 860 | { | ||
| 861 | struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); | ||
| 862 | unsigned long flags; | ||
| 863 | |||
| 864 | spin_lock_irqsave(&usb3->lock, flags); | ||
| 865 | __usb3_request_done(usb3_ep, usb3_req, status); | ||
| 866 | spin_unlock_irqrestore(&usb3->lock, flags); | ||
| 856 | } | 867 | } |
| 857 | 868 | ||
| 858 | static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3) | 869 | static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3) |
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index c8989c62a262..c8f38649f749 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
| @@ -98,6 +98,7 @@ enum amd_chipset_gen { | |||
| 98 | AMD_CHIPSET_HUDSON2, | 98 | AMD_CHIPSET_HUDSON2, |
| 99 | AMD_CHIPSET_BOLTON, | 99 | AMD_CHIPSET_BOLTON, |
| 100 | AMD_CHIPSET_YANGTZE, | 100 | AMD_CHIPSET_YANGTZE, |
| 101 | AMD_CHIPSET_TAISHAN, | ||
| 101 | AMD_CHIPSET_UNKNOWN, | 102 | AMD_CHIPSET_UNKNOWN, |
| 102 | }; | 103 | }; |
| 103 | 104 | ||
| @@ -141,6 +142,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo) | |||
| 141 | pinfo->sb_type.gen = AMD_CHIPSET_SB700; | 142 | pinfo->sb_type.gen = AMD_CHIPSET_SB700; |
| 142 | else if (rev >= 0x40 && rev <= 0x4f) | 143 | else if (rev >= 0x40 && rev <= 0x4f) |
| 143 | pinfo->sb_type.gen = AMD_CHIPSET_SB800; | 144 | pinfo->sb_type.gen = AMD_CHIPSET_SB800; |
| 145 | } | ||
| 146 | pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, | ||
| 147 | 0x145c, NULL); | ||
| 148 | if (pinfo->smbus_dev) { | ||
| 149 | pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN; | ||
| 144 | } else { | 150 | } else { |
| 145 | pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, | 151 | pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, |
| 146 | PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); | 152 | PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); |
| @@ -260,11 +266,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev) | |||
| 260 | { | 266 | { |
| 261 | /* Make sure amd chipset type has already been initialized */ | 267 | /* Make sure amd chipset type has already been initialized */ |
| 262 | usb_amd_find_chipset_info(); | 268 | usb_amd_find_chipset_info(); |
| 263 | if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE) | 269 | if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE || |
| 264 | return 0; | 270 | amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) { |
| 265 | 271 | dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); | |
| 266 | dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); | 272 | return 1; |
| 267 | return 1; | 273 | } |
| 274 | return 0; | ||
| 268 | } | 275 | } |
| 269 | EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk); | 276 | EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk); |
| 270 | 277 | ||
| @@ -1150,3 +1157,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev) | |||
| 1150 | } | 1157 | } |
| 1151 | DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, | 1158 | DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, |
| 1152 | PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); | 1159 | PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); |
| 1160 | |||
| 1161 | bool usb_xhci_needs_pci_reset(struct pci_dev *pdev) | ||
| 1162 | { | ||
| 1163 | /* | ||
| 1164 | * Our dear uPD72020{1,2} friend only partially resets when | ||
| 1165 | * asked to via the XHCI interface, and may end up doing DMA | ||
| 1166 | * at the wrong addresses, as it keeps the top 32bit of some | ||
| 1167 | * addresses from its previous programming under obscure | ||
| 1168 | * circumstances. | ||
| 1169 | * Give it a good wack at probe time. Unfortunately, this | ||
| 1170 | * needs to happen before we've had a chance to discover any | ||
| 1171 | * quirk, or the system will be in a rather bad state. | ||
| 1172 | */ | ||
| 1173 | if (pdev->vendor == PCI_VENDOR_ID_RENESAS && | ||
| 1174 | (pdev->device == 0x0014 || pdev->device == 0x0015)) | ||
| 1175 | return true; | ||
| 1176 | |||
| 1177 | return false; | ||
| 1178 | } | ||
| 1179 | EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset); | ||
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index 655994480198..5582cbafecd4 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h | |||
| @@ -15,6 +15,7 @@ void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev); | |||
| 15 | void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); | 15 | void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); |
| 16 | void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); | 16 | void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); |
| 17 | void sb800_prefetch(struct device *dev, int on); | 17 | void sb800_prefetch(struct device *dev, int on); |
| 18 | bool usb_xhci_needs_pci_reset(struct pci_dev *pdev); | ||
| 18 | #else | 19 | #else |
| 19 | struct pci_dev; | 20 | struct pci_dev; |
| 20 | static inline void usb_amd_quirk_pll_disable(void) {} | 21 | static inline void usb_amd_quirk_pll_disable(void) {} |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 5b0fa553c8bc..8071c8fdd15e 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
| @@ -284,6 +284,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 284 | 284 | ||
| 285 | driver = (struct hc_driver *)id->driver_data; | 285 | driver = (struct hc_driver *)id->driver_data; |
| 286 | 286 | ||
| 287 | /* For some HW implementation, a XHCI reset is just not enough... */ | ||
| 288 | if (usb_xhci_needs_pci_reset(dev)) { | ||
| 289 | dev_info(&dev->dev, "Resetting\n"); | ||
| 290 | if (pci_reset_function_locked(dev)) | ||
| 291 | dev_warn(&dev->dev, "Reset failed"); | ||
| 292 | } | ||
| 293 | |||
| 287 | /* Prevent runtime suspending between USB-2 and USB-3 initialization */ | 294 | /* Prevent runtime suspending between USB-2 and USB-3 initialization */ |
| 288 | pm_runtime_get_noresume(&dev->dev); | 295 | pm_runtime_get_noresume(&dev->dev); |
| 289 | 296 | ||
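The pci-quirks.c and xhci-pci.c hunks above add and then consume a device-ID-gated reset helper for the Renesas uPD720201/uPD720202 parts. As a minimal, self-contained sketch of that check-then-reset shape — the device IDs are taken from the diff, while the function names and probe scaffolding are purely illustrative:

#include <linux/pci.h>

/* Sketch only: same shape as the hunks above, not the patch itself. */
static bool needs_full_pci_reset(struct pci_dev *pdev)
{
        /* uPD720201 (0x0014) and uPD720202 (0x0015) */
        return pdev->vendor == PCI_VENDOR_ID_RENESAS &&
               (pdev->device == 0x0014 || pdev->device == 0x0015);
}

static void maybe_reset_at_probe(struct pci_dev *pdev)
{
        if (!needs_full_pci_reset(pdev))
                return;
        dev_info(&pdev->dev, "resetting before first register access\n");
        if (pci_reset_function_locked(pdev))
                dev_warn(&pdev->dev, "PCI reset failed\n");
}

The locked variant is used presumably because the probe path already holds the PCI device lock, where a plain pci_reset_function() would try to take it again.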
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 76decb8011eb..3344ffd5bb13 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
| @@ -139,6 +139,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) | |||
| 139 | "Could not flush host TX%d fifo: csr: %04x\n", | 139 | "Could not flush host TX%d fifo: csr: %04x\n", |
| 140 | ep->epnum, csr)) | 140 | ep->epnum, csr)) |
| 141 | return; | 141 | return; |
| 142 | mdelay(1); | ||
| 142 | } | 143 | } |
| 143 | } | 144 | } |
| 144 | 145 | ||
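The musb hunk adds an mdelay(1) inside the TX-FIFO flush retry loop so the controller gets time to drain between attempts. Below is a minimal sketch of that bounded poll-and-back-off pattern; the status bit, register access and retry budget are illustrative stand-ins, not the musb driver's real values:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define FIFO_BUSY       0x0008          /* illustrative status bit */

static int flush_fifo_sketch(void __iomem *csr_reg, int retries)
{
        while (retries--) {
                if (!(readw(csr_reg) & FIFO_BUSY))
                        return 0;       /* FIFO drained */
                mdelay(1);              /* give the core a moment before retrying */
        }
        return -ETIMEDOUT;              /* still busy after all attempts */
}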
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index 8fb86a5f458e..3d0dd2f97415 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c | |||
| @@ -197,6 +197,7 @@ struct msm_otg { | |||
| 197 | struct regulator *v3p3; | 197 | struct regulator *v3p3; |
| 198 | struct regulator *v1p8; | 198 | struct regulator *v1p8; |
| 199 | struct regulator *vddcx; | 199 | struct regulator *vddcx; |
| 200 | struct regulator_bulk_data supplies[3]; | ||
| 200 | 201 | ||
| 201 | struct reset_control *phy_rst; | 202 | struct reset_control *phy_rst; |
| 202 | struct reset_control *link_rst; | 203 | struct reset_control *link_rst; |
| @@ -1731,7 +1732,6 @@ static int msm_otg_reboot_notify(struct notifier_block *this, | |||
| 1731 | 1732 | ||
| 1732 | static int msm_otg_probe(struct platform_device *pdev) | 1733 | static int msm_otg_probe(struct platform_device *pdev) |
| 1733 | { | 1734 | { |
| 1734 | struct regulator_bulk_data regs[3]; | ||
| 1735 | int ret = 0; | 1735 | int ret = 0; |
| 1736 | struct device_node *np = pdev->dev.of_node; | 1736 | struct device_node *np = pdev->dev.of_node; |
| 1737 | struct msm_otg_platform_data *pdata; | 1737 | struct msm_otg_platform_data *pdata; |
| @@ -1817,17 +1817,18 @@ static int msm_otg_probe(struct platform_device *pdev) | |||
| 1817 | return motg->irq; | 1817 | return motg->irq; |
| 1818 | } | 1818 | } |
| 1819 | 1819 | ||
| 1820 | regs[0].supply = "vddcx"; | 1820 | motg->supplies[0].supply = "vddcx"; |
| 1821 | regs[1].supply = "v3p3"; | 1821 | motg->supplies[1].supply = "v3p3"; |
| 1822 | regs[2].supply = "v1p8"; | 1822 | motg->supplies[2].supply = "v1p8"; |
| 1823 | 1823 | ||
| 1824 | ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs); | 1824 | ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies), |
| 1825 | motg->supplies); | ||
| 1825 | if (ret) | 1826 | if (ret) |
| 1826 | return ret; | 1827 | return ret; |
| 1827 | 1828 | ||
| 1828 | motg->vddcx = regs[0].consumer; | 1829 | motg->vddcx = motg->supplies[0].consumer; |
| 1829 | motg->v3p3 = regs[1].consumer; | 1830 | motg->v3p3 = motg->supplies[1].consumer; |
| 1830 | motg->v1p8 = regs[2].consumer; | 1831 | motg->v1p8 = motg->supplies[2].consumer; |
| 1831 | 1832 | ||
| 1832 | clk_set_rate(motg->clk, 60000000); | 1833 | clk_set_rate(motg->clk, 60000000); |
| 1833 | 1834 | ||
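The phy-msm-usb change moves the regulator_bulk_data array off the probe stack and into struct msm_otg, so the supplies stay usable after probe returns. A minimal sketch of that devm bulk-regulator pattern, assuming a hypothetical container struct and the same three supply names as the diff:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

struct my_phy {                                 /* illustrative container */
        struct device *dev;
        struct regulator_bulk_data supplies[3];
};

static int my_phy_get_supplies(struct my_phy *phy)
{
        int ret;

        phy->supplies[0].supply = "vddcx";
        phy->supplies[1].supply = "v3p3";
        phy->supplies[2].supply = "v1p8";

        ret = devm_regulator_bulk_get(phy->dev, ARRAY_SIZE(phy->supplies),
                                      phy->supplies);
        if (ret)
                return ret;

        /* Array is owned by the device structure, so later paths can keep
         * using the consumers without re-fetching them. */
        return regulator_bulk_enable(ARRAY_SIZE(phy->supplies), phy->supplies);
}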
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 93fba9033b00..2c8161bcf5b5 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c | |||
| @@ -639,14 +639,11 @@ static int usbhsg_ep_disable(struct usb_ep *ep) | |||
| 639 | struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); | 639 | struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); |
| 640 | struct usbhs_pipe *pipe; | 640 | struct usbhs_pipe *pipe; |
| 641 | unsigned long flags; | 641 | unsigned long flags; |
| 642 | int ret = 0; | ||
| 643 | 642 | ||
| 644 | spin_lock_irqsave(&uep->lock, flags); | 643 | spin_lock_irqsave(&uep->lock, flags); |
| 645 | pipe = usbhsg_uep_to_pipe(uep); | 644 | pipe = usbhsg_uep_to_pipe(uep); |
| 646 | if (!pipe) { | 645 | if (!pipe) |
| 647 | ret = -EINVAL; | ||
| 648 | goto out; | 646 | goto out; |
| 649 | } | ||
| 650 | 647 | ||
| 651 | usbhsg_pipe_disable(uep); | 648 | usbhsg_pipe_disable(uep); |
| 652 | usbhs_pipe_free(pipe); | 649 | usbhs_pipe_free(pipe); |
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c index d544b331c9f2..02b67abfc2a1 100644 --- a/drivers/usb/renesas_usbhs/rcar3.c +++ b/drivers/usb/renesas_usbhs/rcar3.c | |||
| @@ -20,9 +20,13 @@ | |||
| 20 | /* Low Power Status register (LPSTS) */ | 20 | /* Low Power Status register (LPSTS) */ |
| 21 | #define LPSTS_SUSPM 0x4000 | 21 | #define LPSTS_SUSPM 0x4000 |
| 22 | 22 | ||
| 23 | /* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */ | 23 | /* |
| 24 | * USB General control register 2 (UGCTRL2) | ||
| 25 | * Remarks: bit[31:11] and bit[9:6] should be 0 | ||
| 26 | */ | ||
| 24 | #define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */ | 27 | #define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */ |
| 25 | #define UGCTRL2_USB0SEL_OTG 0x00000030 | 28 | #define UGCTRL2_USB0SEL_OTG 0x00000030 |
| 29 | #define UGCTRL2_VBUSSEL 0x00000400 | ||
| 26 | 30 | ||
| 27 | static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data) | 31 | static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data) |
| 28 | { | 32 | { |
| @@ -34,7 +38,8 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev, | |||
| 34 | { | 38 | { |
| 35 | struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev); | 39 | struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev); |
| 36 | 40 | ||
| 37 | usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG); | 41 | usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG | |
| 42 | UGCTRL2_VBUSSEL); | ||
| 38 | 43 | ||
| 39 | if (enable) { | 44 | if (enable) { |
| 40 | usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM); | 45 | usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM); |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f64e914a8985..2d945c9f975c 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
| @@ -142,6 +142,7 @@ static const struct usb_device_id id_table[] = { | |||
| 142 | { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ | 142 | { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ |
| 143 | { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ | 143 | { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ |
| 144 | { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ | 144 | { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ |
| 145 | { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ | ||
| 145 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ | 146 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
| 146 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ | 147 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
| 147 | { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ | 148 | { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index ebe51f11105d..fe123153b1a5 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = { | |||
| 2025 | { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ | 2025 | { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ |
| 2026 | { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ | 2026 | { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ |
| 2027 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 2027 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
| 2028 | { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ | ||
| 2029 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | ||
| 2028 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ | 2030 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ |
| 2029 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ | 2031 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ |
| 2030 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ | 2032 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ |
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index c9ebefd8f35f..a585b477415d 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
| @@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = { | |||
| 52 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, | 52 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, |
| 53 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), | 53 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), |
| 54 | .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, | 54 | .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, |
| 55 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485), | ||
| 56 | .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, | ||
| 55 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, | 57 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, |
| 56 | { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, | 58 | { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, |
| 57 | { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, | 59 | { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, |
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 09d9be88209e..3b5a15d1dc0d 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #define ATEN_VENDOR_ID 0x0557 | 27 | #define ATEN_VENDOR_ID 0x0557 |
| 28 | #define ATEN_VENDOR_ID2 0x0547 | 28 | #define ATEN_VENDOR_ID2 0x0547 |
| 29 | #define ATEN_PRODUCT_ID 0x2008 | 29 | #define ATEN_PRODUCT_ID 0x2008 |
| 30 | #define ATEN_PRODUCT_UC485 0x2021 | ||
| 30 | #define ATEN_PRODUCT_ID2 0x2118 | 31 | #define ATEN_PRODUCT_ID2 0x2118 |
| 31 | 32 | ||
| 32 | #define IODATA_VENDOR_ID 0x04bb | 33 | #define IODATA_VENDOR_ID 0x04bb |
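The cp210x, option and pl2303 hunks are all the same idiom: add a VID/PID match to the driver's usb_device_id table, optionally attaching a per-device quirk through .driver_info. A sketch of that table shape — the ATEN IDs mirror the pl2303 hunk, while the quirk flag value and table name are illustrative:

#include <linux/bits.h>
#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VID             0x0557  /* ATEN, as in the pl2303 hunk */
#define EXAMPLE_PID_UC485       0x2021
#define MY_QUIRK_ENDPOINT_HACK  BIT(0)  /* illustrative flag value */

static const struct usb_device_id example_id_table[] = {
        /* Plain match: no quirks needed */
        { USB_DEVICE(EXAMPLE_VID, 0x2008) },
        /* Match plus per-device quirk carried in driver_info */
        { USB_DEVICE(EXAMPLE_VID, EXAMPLE_PID_UC485),
          .driver_info = MY_QUIRK_ENDPOINT_HACK },
        { }     /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);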
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index cbea9f329e71..cde115359793 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h | |||
| @@ -124,9 +124,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999, | |||
| 124 | /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ | 124 | /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ |
| 125 | UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, | 125 | UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, |
| 126 | "Initio Corporation", | 126 | "Initio Corporation", |
| 127 | "", | 127 | "INIC-3069", |
| 128 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 128 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
| 129 | US_FL_NO_ATA_1X), | 129 | US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE), |
| 130 | 130 | ||
| 131 | /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ | 131 | /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */ |
| 132 | UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, | 132 | UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, |
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 06615934fed1..0dceb9fa3a06 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
| @@ -315,6 +315,7 @@ static int usb_stor_control_thread(void * __us) | |||
| 315 | { | 315 | { |
| 316 | struct us_data *us = (struct us_data *)__us; | 316 | struct us_data *us = (struct us_data *)__us; |
| 317 | struct Scsi_Host *host = us_to_host(us); | 317 | struct Scsi_Host *host = us_to_host(us); |
| 318 | struct scsi_cmnd *srb; | ||
| 318 | 319 | ||
| 319 | for (;;) { | 320 | for (;;) { |
| 320 | usb_stor_dbg(us, "*** thread sleeping\n"); | 321 | usb_stor_dbg(us, "*** thread sleeping\n"); |
| @@ -330,6 +331,7 @@ static int usb_stor_control_thread(void * __us) | |||
| 330 | scsi_lock(host); | 331 | scsi_lock(host); |
| 331 | 332 | ||
| 332 | /* When we are called with no command pending, we're done */ | 333 | /* When we are called with no command pending, we're done */ |
| 334 | srb = us->srb; | ||
| 333 | if (us->srb == NULL) { | 335 | if (us->srb == NULL) { |
| 334 | scsi_unlock(host); | 336 | scsi_unlock(host); |
| 335 | mutex_unlock(&us->dev_mutex); | 337 | mutex_unlock(&us->dev_mutex); |
| @@ -398,14 +400,11 @@ static int usb_stor_control_thread(void * __us) | |||
| 398 | /* lock access to the state */ | 400 | /* lock access to the state */ |
| 399 | scsi_lock(host); | 401 | scsi_lock(host); |
| 400 | 402 | ||
| 401 | /* indicate that the command is done */ | 403 | /* was the command aborted? */ |
| 402 | if (us->srb->result != DID_ABORT << 16) { | 404 | if (us->srb->result == DID_ABORT << 16) { |
| 403 | usb_stor_dbg(us, "scsi cmd done, result=0x%x\n", | ||
| 404 | us->srb->result); | ||
| 405 | us->srb->scsi_done(us->srb); | ||
| 406 | } else { | ||
| 407 | SkipForAbort: | 405 | SkipForAbort: |
| 408 | usb_stor_dbg(us, "scsi command aborted\n"); | 406 | usb_stor_dbg(us, "scsi command aborted\n"); |
| 407 | srb = NULL; /* Don't call srb->scsi_done() */ | ||
| 409 | } | 408 | } |
| 410 | 409 | ||
| 411 | /* | 410 | /* |
| @@ -429,6 +428,13 @@ SkipForAbort: | |||
| 429 | 428 | ||
| 430 | /* unlock the device pointers */ | 429 | /* unlock the device pointers */ |
| 431 | mutex_unlock(&us->dev_mutex); | 430 | mutex_unlock(&us->dev_mutex); |
| 431 | |||
| 432 | /* now that the locks are released, notify the SCSI core */ | ||
| 433 | if (srb) { | ||
| 434 | usb_stor_dbg(us, "scsi cmd done, result=0x%x\n", | ||
| 435 | srb->result); | ||
| 436 | srb->scsi_done(srb); | ||
| 437 | } | ||
| 432 | } /* for (;;) */ | 438 | } /* for (;;) */ |
| 433 | 439 | ||
| 434 | /* Wait until we are told to stop */ | 440 | /* Wait until we are told to stop */ |
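The usb-storage change snapshots us->srb under the host lock and defers the scsi_done() callback until both the host lock and dev_mutex have been dropped. A generic sketch of that capture-under-lock, complete-after-unlock pattern; the structure and callback names are illustrative, not usb-storage's:

#include <linux/spinlock.h>
#include <linux/types.h>

struct request_ctx {
        spinlock_t lock;
        void (*complete_fn)(void *arg);
        void *arg;
        bool aborted;
};

static void finish_request(struct request_ctx *ctx)
{
        void (*done)(void *) = NULL;
        void *arg = NULL;

        spin_lock(&ctx->lock);
        if (!ctx->aborted) {            /* aborted requests complete elsewhere */
                done = ctx->complete_fn;
                arg = ctx->arg;
        }
        spin_unlock(&ctx->lock);

        if (done)                       /* locks released: safe to call back */
                done(arg);
}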
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 063c1ce6fa42..f041b1a6cf66 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c | |||
| @@ -226,7 +226,14 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev) | |||
| 226 | if (ret) | 226 | if (ret) |
| 227 | return ret; | 227 | return ret; |
| 228 | 228 | ||
| 229 | vdev->reset_works = (pci_reset_function(pdev) == 0); | 229 | /* If reset fails because of the device lock, fail this path entirely */ |
| 230 | ret = pci_try_reset_function(pdev); | ||
| 231 | if (ret == -EAGAIN) { | ||
| 232 | pci_disable_device(pdev); | ||
| 233 | return ret; | ||
| 234 | } | ||
| 235 | |||
| 236 | vdev->reset_works = !ret; | ||
| 230 | pci_save_state(pdev); | 237 | pci_save_state(pdev); |
| 231 | vdev->pci_saved_state = pci_store_saved_state(pdev); | 238 | vdev->pci_saved_state = pci_store_saved_state(pdev); |
| 232 | if (!vdev->pci_saved_state) | 239 | if (!vdev->pci_saved_state) |
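vfio-pci now uses pci_try_reset_function(), which returns -EAGAIN when it cannot take the device lock; that case fails the enable path outright, while any other non-zero result only records that reset is unavailable. A condensed sketch of that decision flow (the helper name and the reset_works out-parameter are illustrative):

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/types.h>

static int enable_device_sketch(struct pci_dev *pdev, bool *reset_works)
{
        int ret = pci_enable_device(pdev);

        if (ret)
                return ret;

        ret = pci_try_reset_function(pdev);
        if (ret == -EAGAIN) {           /* someone else holds the device lock */
                pci_disable_device(pdev);
                return ret;
        }
        *reset_works = !ret;            /* 0 means the function reset succeeded */

        pci_save_state(pdev);
        return 0;
}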
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 330a57024cbc..5628fe114347 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c | |||
| @@ -839,7 +839,7 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos, | |||
| 839 | /* Permissions for PCI Express capability */ | 839 | /* Permissions for PCI Express capability */ |
| 840 | static int __init init_pci_cap_exp_perm(struct perm_bits *perm) | 840 | static int __init init_pci_cap_exp_perm(struct perm_bits *perm) |
| 841 | { | 841 | { |
| 842 | /* Alloc larger of two possible sizes */ | 842 | /* Alloc largest of possible sizes */ |
| 843 | if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2)) | 843 | if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2)) |
| 844 | return -ENOMEM; | 844 | return -ENOMEM; |
| 845 | 845 | ||
| @@ -1243,11 +1243,16 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos) | |||
| 1243 | vdev->extended_caps = (dword != 0); | 1243 | vdev->extended_caps = (dword != 0); |
| 1244 | } | 1244 | } |
| 1245 | 1245 | ||
| 1246 | /* length based on version */ | 1246 | /* length based on version and type */ |
| 1247 | if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) | 1247 | if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) { |
| 1248 | if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END) | ||
| 1249 | return 0xc; /* "All Devices" only, no link */ | ||
| 1248 | return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1; | 1250 | return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1; |
| 1249 | else | 1251 | } else { |
| 1252 | if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END) | ||
| 1253 | return 0x2c; /* No link */ | ||
| 1250 | return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2; | 1254 | return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2; |
| 1255 | } | ||
| 1251 | case PCI_CAP_ID_HT: | 1256 | case PCI_CAP_ID_HT: |
| 1252 | ret = pci_read_config_byte(pdev, pos + 3, &byte); | 1257 | ret = pci_read_config_byte(pdev, pos + 3, &byte); |
| 1253 | if (ret) | 1258 | if (ret) |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index e4613a3c362d..9cb3f722dce1 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
| @@ -308,7 +308,6 @@ static void vhost_vq_reset(struct vhost_dev *dev, | |||
| 308 | vq->avail = NULL; | 308 | vq->avail = NULL; |
| 309 | vq->used = NULL; | 309 | vq->used = NULL; |
| 310 | vq->last_avail_idx = 0; | 310 | vq->last_avail_idx = 0; |
| 311 | vq->last_used_event = 0; | ||
| 312 | vq->avail_idx = 0; | 311 | vq->avail_idx = 0; |
| 313 | vq->last_used_idx = 0; | 312 | vq->last_used_idx = 0; |
| 314 | vq->signalled_used = 0; | 313 | vq->signalled_used = 0; |
| @@ -1402,7 +1401,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp) | |||
| 1402 | r = -EINVAL; | 1401 | r = -EINVAL; |
| 1403 | break; | 1402 | break; |
| 1404 | } | 1403 | } |
| 1405 | vq->last_avail_idx = vq->last_used_event = s.num; | 1404 | vq->last_avail_idx = s.num; |
| 1406 | /* Forget the cached index value. */ | 1405 | /* Forget the cached index value. */ |
| 1407 | vq->avail_idx = vq->last_avail_idx; | 1406 | vq->avail_idx = vq->last_avail_idx; |
| 1408 | break; | 1407 | break; |
| @@ -2241,6 +2240,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) | |||
| 2241 | __u16 old, new; | 2240 | __u16 old, new; |
| 2242 | __virtio16 event; | 2241 | __virtio16 event; |
| 2243 | bool v; | 2242 | bool v; |
| 2243 | /* Flush out used index updates. This is paired | ||
| 2244 | * with the barrier that the Guest executes when enabling | ||
| 2245 | * interrupts. */ | ||
| 2246 | smp_mb(); | ||
| 2244 | 2247 | ||
| 2245 | if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && | 2248 | if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && |
| 2246 | unlikely(vq->avail_idx == vq->last_avail_idx)) | 2249 | unlikely(vq->avail_idx == vq->last_avail_idx)) |
| @@ -2248,10 +2251,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) | |||
| 2248 | 2251 | ||
| 2249 | if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { | 2252 | if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { |
| 2250 | __virtio16 flags; | 2253 | __virtio16 flags; |
| 2251 | /* Flush out used index updates. This is paired | ||
| 2252 | * with the barrier that the Guest executes when enabling | ||
| 2253 | * interrupts. */ | ||
| 2254 | smp_mb(); | ||
| 2255 | if (vhost_get_avail(vq, flags, &vq->avail->flags)) { | 2254 | if (vhost_get_avail(vq, flags, &vq->avail->flags)) { |
| 2256 | vq_err(vq, "Failed to get flags"); | 2255 | vq_err(vq, "Failed to get flags"); |
| 2257 | return true; | 2256 | return true; |
| @@ -2266,26 +2265,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) | |||
| 2266 | if (unlikely(!v)) | 2265 | if (unlikely(!v)) |
| 2267 | return true; | 2266 | return true; |
| 2268 | 2267 | ||
| 2269 | /* We're sure if the following conditions are met, there's no | ||
| 2270 | * need to notify guest: | ||
| 2271 | * 1) cached used event is ahead of new | ||
| 2272 | * 2) old to new updating does not cross cached used event. */ | ||
| 2273 | if (vring_need_event(vq->last_used_event, new + vq->num, new) && | ||
| 2274 | !vring_need_event(vq->last_used_event, new, old)) | ||
| 2275 | return false; | ||
| 2276 | |||
| 2277 | /* Flush out used index updates. This is paired | ||
| 2278 | * with the barrier that the Guest executes when enabling | ||
| 2279 | * interrupts. */ | ||
| 2280 | smp_mb(); | ||
| 2281 | |||
| 2282 | if (vhost_get_avail(vq, event, vhost_used_event(vq))) { | 2268 | if (vhost_get_avail(vq, event, vhost_used_event(vq))) { |
| 2283 | vq_err(vq, "Failed to get used event idx"); | 2269 | vq_err(vq, "Failed to get used event idx"); |
| 2284 | return true; | 2270 | return true; |
| 2285 | } | 2271 | } |
| 2286 | vq->last_used_event = vhost16_to_cpu(vq, event); | 2272 | return vring_need_event(vhost16_to_cpu(vq, event), new, old); |
| 2287 | |||
| 2288 | return vring_need_event(vq->last_used_event, new, old); | ||
| 2289 | } | 2273 | } |
| 2290 | 2274 | ||
| 2291 | /* This actually signals the guest, using eventfd. */ | 2275 | /* This actually signals the guest, using eventfd. */ |
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index f72095868b93..bb7c29b8b9fc 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h | |||
| @@ -115,9 +115,6 @@ struct vhost_virtqueue { | |||
| 115 | /* Last index we used. */ | 115 | /* Last index we used. */ |
| 116 | u16 last_used_idx; | 116 | u16 last_used_idx; |
| 117 | 117 | ||
| 118 | /* Last used evet we've seen */ | ||
| 119 | u16 last_used_event; | ||
| 120 | |||
| 121 | /* Used flags */ | 118 | /* Used flags */ |
| 122 | u16 used_flags; | 119 | u16 used_flags; |
| 123 | 120 | ||
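With the cached last_used_event gone, vhost_notify() reads the guest's used_event directly and hands it to vring_need_event(). For reference, the wrap-safe comparison that helper performs looks like the sketch below; this is reproduced from memory of the virtio ring UAPI, so treat include/uapi/linux/virtio_ring.h as authoritative:

#include <linux/types.h>

/* Wrap-safe check: does the range (old, new] cross event_idx + 1? */
static inline int need_event_sketch(__u16 event_idx, __u16 new_idx, __u16 old)
{
        return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}

The smp_mb() hoisted to the top of vhost_notify() keeps the used-index store visible before either the avail flags or the used_event index is sampled, for both the EVENT_IDX and non-EVENT_IDX paths.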
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index ff01bed7112f..1e784adb89b1 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <asm/efi.h> | 17 | #include <asm/efi.h> |
| 18 | 18 | ||
| 19 | static bool request_mem_succeeded = false; | 19 | static bool request_mem_succeeded = false; |
| 20 | static bool nowc = false; | ||
| 20 | 21 | ||
| 21 | static struct fb_var_screeninfo efifb_defined = { | 22 | static struct fb_var_screeninfo efifb_defined = { |
| 22 | .activate = FB_ACTIVATE_NOW, | 23 | .activate = FB_ACTIVATE_NOW, |
| @@ -99,6 +100,8 @@ static int efifb_setup(char *options) | |||
| 99 | screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0); | 100 | screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0); |
| 100 | else if (!strncmp(this_opt, "width:", 6)) | 101 | else if (!strncmp(this_opt, "width:", 6)) |
| 101 | screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0); | 102 | screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0); |
| 103 | else if (!strcmp(this_opt, "nowc")) | ||
| 104 | nowc = true; | ||
| 102 | } | 105 | } |
| 103 | } | 106 | } |
| 104 | 107 | ||
| @@ -255,7 +258,10 @@ static int efifb_probe(struct platform_device *dev) | |||
| 255 | info->apertures->ranges[0].base = efifb_fix.smem_start; | 258 | info->apertures->ranges[0].base = efifb_fix.smem_start; |
| 256 | info->apertures->ranges[0].size = size_remap; | 259 | info->apertures->ranges[0].size = size_remap; |
| 257 | 260 | ||
| 258 | info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len); | 261 | if (nowc) |
| 262 | info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); | ||
| 263 | else | ||
| 264 | info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len); | ||
| 259 | if (!info->screen_base) { | 265 | if (!info->screen_base) { |
| 260 | pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n", | 266 | pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n", |
| 261 | efifb_fix.smem_len, efifb_fix.smem_start); | 267 | efifb_fix.smem_len, efifb_fix.smem_start); |
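efifb gains a 'nowc' option that switches the framebuffer mapping from write-combined to plain uncached ioremap(). The sketch below shows the resulting choice; the option would normally arrive via the kernel command line, typically as video=efifb:nowc (exact syntax depends on how fbdev passes options to efifb_setup(), so treat that as an assumption):

#include <linux/io.h>
#include <linux/types.h>

static void __iomem *map_framebuffer(unsigned long start, unsigned long len,
                                     bool nowc)
{
        if (nowc)
                return ioremap(start, len);     /* uncached: safer on some firmware */
        return ioremap_wc(start, len);          /* write-combined: faster updates */
}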
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c index c166e0725be5..ba82f97fb42b 100644 --- a/drivers/video/fbdev/imxfb.c +++ b/drivers/video/fbdev/imxfb.c | |||
| @@ -1073,20 +1073,16 @@ static int imxfb_remove(struct platform_device *pdev) | |||
| 1073 | imxfb_disable_controller(fbi); | 1073 | imxfb_disable_controller(fbi); |
| 1074 | 1074 | ||
| 1075 | unregister_framebuffer(info); | 1075 | unregister_framebuffer(info); |
| 1076 | 1076 | fb_dealloc_cmap(&info->cmap); | |
| 1077 | pdata = dev_get_platdata(&pdev->dev); | 1077 | pdata = dev_get_platdata(&pdev->dev); |
| 1078 | if (pdata && pdata->exit) | 1078 | if (pdata && pdata->exit) |
| 1079 | pdata->exit(fbi->pdev); | 1079 | pdata->exit(fbi->pdev); |
| 1080 | |||
| 1081 | fb_dealloc_cmap(&info->cmap); | ||
| 1082 | kfree(info->pseudo_palette); | ||
| 1083 | framebuffer_release(info); | ||
| 1084 | |||
| 1085 | dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base, | 1080 | dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base, |
| 1086 | fbi->map_dma); | 1081 | fbi->map_dma); |
| 1087 | |||
| 1088 | iounmap(fbi->regs); | 1082 | iounmap(fbi->regs); |
| 1089 | release_mem_region(res->start, resource_size(res)); | 1083 | release_mem_region(res->start, resource_size(res)); |
| 1084 | kfree(info->pseudo_palette); | ||
| 1085 | framebuffer_release(info); | ||
| 1090 | 1086 | ||
| 1091 | return 0; | 1087 | return 0; |
| 1092 | } | 1088 | } |
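The imxfb remove path is reordered so that everything which still dereferences 'info' runs before the final kfree()/framebuffer_release(). A compressed sketch of that teardown ordering, with the DMA, iounmap and memory-region steps elided:

#include <linux/fb.h>
#include <linux/slab.h>

static void remove_sketch(struct fb_info *info)
{
        unregister_framebuffer(info);
        fb_dealloc_cmap(&info->cmap);           /* still needs a live 'info' */
        /* ... dma_free_wc(), iounmap(), release_mem_region() ... */
        kfree(info->pseudo_palette);
        framebuffer_release(info);              /* frees 'info': must be last */
}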
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c index eecf695c16f4..09e5bb013d28 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c | |||
| @@ -193,7 +193,6 @@ static struct notifier_block omap_dss_pm_notif_block = { | |||
| 193 | 193 | ||
| 194 | static int __init omap_dss_probe(struct platform_device *pdev) | 194 | static int __init omap_dss_probe(struct platform_device *pdev) |
| 195 | { | 195 | { |
| 196 | struct omap_dss_board_info *pdata = pdev->dev.platform_data; | ||
| 197 | int r; | 196 | int r; |
| 198 | 197 | ||
| 199 | core.pdev = pdev; | 198 | core.pdev = pdev; |
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 22caf808bfab..f0b3a0b9d42f 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
| @@ -104,12 +104,6 @@ static u32 page_to_balloon_pfn(struct page *page) | |||
| 104 | return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE; | 104 | return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE; |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | static struct page *balloon_pfn_to_page(u32 pfn) | ||
| 108 | { | ||
| 109 | BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE); | ||
| 110 | return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE); | ||
| 111 | } | ||
| 112 | |||
| 113 | static void balloon_ack(struct virtqueue *vq) | 107 | static void balloon_ack(struct virtqueue *vq) |
| 114 | { | 108 | { |
| 115 | struct virtio_balloon *vb = vq->vdev->priv; | 109 | struct virtio_balloon *vb = vq->vdev->priv; |
| @@ -138,8 +132,10 @@ static void set_page_pfns(struct virtio_balloon *vb, | |||
| 138 | { | 132 | { |
| 139 | unsigned int i; | 133 | unsigned int i; |
| 140 | 134 | ||
| 141 | /* Set balloon pfns pointing at this page. | 135 | /* |
| 142 | * Note that the first pfn points at start of the page. */ | 136 | * Set balloon pfns pointing at this page. |
| 137 | * Note that the first pfn points at start of the page. | ||
| 138 | */ | ||
| 143 | for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++) | 139 | for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++) |
| 144 | pfns[i] = cpu_to_virtio32(vb->vdev, | 140 | pfns[i] = cpu_to_virtio32(vb->vdev, |
| 145 | page_to_balloon_pfn(page) + i); | 141 | page_to_balloon_pfn(page) + i); |
| @@ -182,18 +178,16 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num) | |||
| 182 | return num_allocated_pages; | 178 | return num_allocated_pages; |
| 183 | } | 179 | } |
| 184 | 180 | ||
| 185 | static void release_pages_balloon(struct virtio_balloon *vb) | 181 | static void release_pages_balloon(struct virtio_balloon *vb, |
| 182 | struct list_head *pages) | ||
| 186 | { | 183 | { |
| 187 | unsigned int i; | 184 | struct page *page, *next; |
| 188 | struct page *page; | ||
| 189 | 185 | ||
| 190 | /* Find pfns pointing at start of each page, get pages and free them. */ | 186 | list_for_each_entry_safe(page, next, pages, lru) { |
| 191 | for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) { | ||
| 192 | page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev, | ||
| 193 | vb->pfns[i])); | ||
| 194 | if (!virtio_has_feature(vb->vdev, | 187 | if (!virtio_has_feature(vb->vdev, |
| 195 | VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) | 188 | VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) |
| 196 | adjust_managed_page_count(page, 1); | 189 | adjust_managed_page_count(page, 1); |
| 190 | list_del(&page->lru); | ||
| 197 | put_page(page); /* balloon reference */ | 191 | put_page(page); /* balloon reference */ |
| 198 | } | 192 | } |
| 199 | } | 193 | } |
| @@ -203,6 +197,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num) | |||
| 203 | unsigned num_freed_pages; | 197 | unsigned num_freed_pages; |
| 204 | struct page *page; | 198 | struct page *page; |
| 205 | struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info; | 199 | struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info; |
| 200 | LIST_HEAD(pages); | ||
| 206 | 201 | ||
| 207 | /* We can only do one array worth at a time. */ | 202 | /* We can only do one array worth at a time. */ |
| 208 | num = min(num, ARRAY_SIZE(vb->pfns)); | 203 | num = min(num, ARRAY_SIZE(vb->pfns)); |
| @@ -216,6 +211,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num) | |||
| 216 | if (!page) | 211 | if (!page) |
| 217 | break; | 212 | break; |
| 218 | set_page_pfns(vb, vb->pfns + vb->num_pfns, page); | 213 | set_page_pfns(vb, vb->pfns + vb->num_pfns, page); |
| 214 | list_add(&page->lru, &pages); | ||
| 219 | vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE; | 215 | vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE; |
| 220 | } | 216 | } |
| 221 | 217 | ||
| @@ -227,7 +223,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num) | |||
| 227 | */ | 223 | */ |
| 228 | if (vb->num_pfns != 0) | 224 | if (vb->num_pfns != 0) |
| 229 | tell_host(vb, vb->deflate_vq); | 225 | tell_host(vb, vb->deflate_vq); |
| 230 | release_pages_balloon(vb); | 226 | release_pages_balloon(vb, &pages); |
| 231 | mutex_unlock(&vb->balloon_lock); | 227 | mutex_unlock(&vb->balloon_lock); |
| 232 | return num_freed_pages; | 228 | return num_freed_pages; |
| 233 | } | 229 | } |
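Instead of converting pfns back to pages, leak_balloon() now keeps the pages it pulled from the balloon on a local list, and release_pages_balloon() walks that list. A minimal sketch of the safe-iteration idiom this relies on (the managed-page accounting from the real function is omitted):

#include <linux/list.h>
#include <linux/mm.h>

static void release_collected_pages(struct list_head *pages)
{
        struct page *page, *next;

        /* _safe iterator: entries may be unlinked while walking the list */
        list_for_each_entry_safe(page, next, pages, lru) {
                list_del(&page->lru);   /* unlink before dropping the reference */
                put_page(page);
        }
}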
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 007a4f366086..1c4797e53f68 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c | |||
| @@ -107,6 +107,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, | |||
| 107 | { | 107 | { |
| 108 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 108 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
| 109 | const char *name = dev_name(&vp_dev->vdev.dev); | 109 | const char *name = dev_name(&vp_dev->vdev.dev); |
| 110 | unsigned flags = PCI_IRQ_MSIX; | ||
| 110 | unsigned i, v; | 111 | unsigned i, v; |
| 111 | int err = -ENOMEM; | 112 | int err = -ENOMEM; |
| 112 | 113 | ||
| @@ -126,10 +127,13 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, | |||
| 126 | GFP_KERNEL)) | 127 | GFP_KERNEL)) |
| 127 | goto error; | 128 | goto error; |
| 128 | 129 | ||
| 130 | if (desc) { | ||
| 131 | flags |= PCI_IRQ_AFFINITY; | ||
| 132 | desc->pre_vectors++; /* virtio config vector */ | ||
| 133 | } | ||
| 134 | |||
| 129 | err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, | 135 | err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, |
| 130 | nvectors, PCI_IRQ_MSIX | | 136 | nvectors, flags, desc); |
| 131 | (desc ? PCI_IRQ_AFFINITY : 0), | ||
| 132 | desc); | ||
| 133 | if (err < 0) | 137 | if (err < 0) |
| 134 | goto error; | 138 | goto error; |
| 135 | vp_dev->msix_enabled = 1; | 139 | vp_dev->msix_enabled = 1; |
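The virtio-pci change marks the config interrupt as a pre-vector so that automatic affinity spreading only applies to the queue vectors. A minimal sketch of that call, assuming nvectors already counts the config vector:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int alloc_queue_vectors(struct pci_dev *pdev, int nvectors)
{
        /* Reserve the first vector (config interrupt) from spreading */
        struct irq_affinity desc = { .pre_vectors = 1 };

        return pci_alloc_irq_vectors_affinity(pdev, nvectors, nvectors,
                                              PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                              &desc);
}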
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 8feab810aed9..7f188b8d0c67 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
| @@ -7,9 +7,6 @@ obj-y += xenbus/ | |||
| 7 | nostackp := $(call cc-option, -fno-stack-protector) | 7 | nostackp := $(call cc-option, -fno-stack-protector) |
| 8 | CFLAGS_features.o := $(nostackp) | 8 | CFLAGS_features.o := $(nostackp) |
| 9 | 9 | ||
| 10 | CFLAGS_efi.o += -fshort-wchar | ||
| 11 | LDFLAGS += $(call ld-option, --no-wchar-size-warning) | ||
| 12 | |||
| 13 | dom0-$(CONFIG_ARM64) += arm-device.o | 10 | dom0-$(CONFIG_ARM64) += arm-device.o |
| 14 | dom0-$(CONFIG_PCI) += pci.o | 11 | dom0-$(CONFIG_PCI) += pci.o |
| 15 | dom0-$(CONFIG_USB_SUPPORT) += dbgp.o | 12 | dom0-$(CONFIG_USB_SUPPORT) += dbgp.o |
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c index 4da69dbf7dca..1bdd02a6d6ac 100644 --- a/drivers/xen/biomerge.c +++ b/drivers/xen/biomerge.c | |||
| @@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, | |||
| 10 | unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page)); | 10 | unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page)); |
| 11 | unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page)); | 11 | unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page)); |
| 12 | 12 | ||
| 13 | return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) && | 13 | return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2; |
| 14 | ((bfn1 == bfn2) || ((bfn1+1) == bfn2)); | ||
| 15 | #else | 14 | #else |
| 16 | /* | 15 | /* |
| 17 | * XXX: Add support for merging bio_vec when using different page | 16 | * XXX: Add support for merging bio_vec when using different page |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index b241bfa529ce..2d43118077e4 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
| @@ -343,14 +343,6 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) | |||
| 343 | info->cpu = cpu; | 343 | info->cpu = cpu; |
| 344 | } | 344 | } |
| 345 | 345 | ||
| 346 | static void xen_evtchn_mask_all(void) | ||
| 347 | { | ||
| 348 | unsigned int evtchn; | ||
| 349 | |||
| 350 | for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++) | ||
| 351 | mask_evtchn(evtchn); | ||
| 352 | } | ||
| 353 | |||
| 354 | /** | 346 | /** |
| 355 | * notify_remote_via_irq - send event to remote end of event channel via irq | 347 | * notify_remote_via_irq - send event to remote end of event channel via irq |
| 356 | * @irq: irq of event channel to send event to | 348 | * @irq: irq of event channel to send event to |
| @@ -582,7 +574,7 @@ static void shutdown_pirq(struct irq_data *data) | |||
| 582 | 574 | ||
| 583 | static void enable_pirq(struct irq_data *data) | 575 | static void enable_pirq(struct irq_data *data) |
| 584 | { | 576 | { |
| 585 | startup_pirq(data); | 577 | enable_dynirq(data); |
| 586 | } | 578 | } |
| 587 | 579 | ||
| 588 | static void disable_pirq(struct irq_data *data) | 580 | static void disable_pirq(struct irq_data *data) |
| @@ -1573,7 +1565,6 @@ void xen_irq_resume(void) | |||
| 1573 | struct irq_info *info; | 1565 | struct irq_info *info; |
| 1574 | 1566 | ||
| 1575 | /* New event-channel space is not 'live' yet. */ | 1567 | /* New event-channel space is not 'live' yet. */ |
| 1576 | xen_evtchn_mask_all(); | ||
| 1577 | xen_evtchn_resume(); | 1568 | xen_evtchn_resume(); |
| 1578 | 1569 | ||
| 1579 | /* No IRQ <-> event-channel mappings. */ | 1570 | /* No IRQ <-> event-channel mappings. */ |
| @@ -1681,6 +1672,7 @@ module_param(fifo_events, bool, 0); | |||
| 1681 | void __init xen_init_IRQ(void) | 1672 | void __init xen_init_IRQ(void) |
| 1682 | { | 1673 | { |
| 1683 | int ret = -EINVAL; | 1674 | int ret = -EINVAL; |
| 1675 | unsigned int evtchn; | ||
| 1684 | 1676 | ||
| 1685 | if (fifo_events) | 1677 | if (fifo_events) |
| 1686 | ret = xen_evtchn_fifo_init(); | 1678 | ret = xen_evtchn_fifo_init(); |
| @@ -1692,7 +1684,8 @@ void __init xen_init_IRQ(void) | |||
| 1692 | BUG_ON(!evtchn_to_irq); | 1684 | BUG_ON(!evtchn_to_irq); |
| 1693 | 1685 | ||
| 1694 | /* No event channels are 'live' right now. */ | 1686 | /* No event channels are 'live' right now. */ |
| 1695 | xen_evtchn_mask_all(); | 1687 | for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++) |
| 1688 | mask_evtchn(evtchn); | ||
| 1696 | 1689 | ||
| 1697 | pirq_needs_eoi = pirq_needs_eoi_flag; | 1690 | pirq_needs_eoi = pirq_needs_eoi_flag; |
| 1698 | 1691 | ||
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index f3bf8f4e2d6c..82360594fa8e 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
| @@ -484,13 +484,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn, | |||
| 484 | mutex_unlock(&priv->lock); | 484 | mutex_unlock(&priv->lock); |
| 485 | } | 485 | } |
| 486 | 486 | ||
| 487 | static void mn_invl_page(struct mmu_notifier *mn, | ||
| 488 | struct mm_struct *mm, | ||
| 489 | unsigned long address) | ||
| 490 | { | ||
| 491 | mn_invl_range_start(mn, mm, address, address + PAGE_SIZE); | ||
| 492 | } | ||
| 493 | |||
| 494 | static void mn_release(struct mmu_notifier *mn, | 487 | static void mn_release(struct mmu_notifier *mn, |
| 495 | struct mm_struct *mm) | 488 | struct mm_struct *mm) |
| 496 | { | 489 | { |
| @@ -522,7 +515,6 @@ static void mn_release(struct mmu_notifier *mn, | |||
| 522 | 515 | ||
| 523 | static const struct mmu_notifier_ops gntdev_mmu_ops = { | 516 | static const struct mmu_notifier_ops gntdev_mmu_ops = { |
| 524 | .release = mn_release, | 517 | .release = mn_release, |
| 525 | .invalidate_page = mn_invl_page, | ||
| 526 | .invalidate_range_start = mn_invl_range_start, | 518 | .invalidate_range_start = mn_invl_range_start, |
| 527 | }; | 519 | }; |
| 528 | 520 | ||
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index 66620713242a..a67e955cacd1 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c | |||
| @@ -151,8 +151,8 @@ static unsigned long frontswap_inertia_counter; | |||
| 151 | static void frontswap_selfshrink(void) | 151 | static void frontswap_selfshrink(void) |
| 152 | { | 152 | { |
| 153 | static unsigned long cur_frontswap_pages; | 153 | static unsigned long cur_frontswap_pages; |
| 154 | static unsigned long last_frontswap_pages; | 154 | unsigned long last_frontswap_pages; |
| 155 | static unsigned long tgt_frontswap_pages; | 155 | unsigned long tgt_frontswap_pages; |
| 156 | 156 | ||
| 157 | last_frontswap_pages = cur_frontswap_pages; | 157 | last_frontswap_pages = cur_frontswap_pages; |
| 158 | cur_frontswap_pages = frontswap_curr_pages(); | 158 | cur_frontswap_pages = frontswap_curr_pages(); |
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index e46080214955..3e59590c7254 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c | |||
| @@ -857,6 +857,8 @@ static int xenwatch_thread(void *unused) | |||
| 857 | struct list_head *ent; | 857 | struct list_head *ent; |
| 858 | struct xs_watch_event *event; | 858 | struct xs_watch_event *event; |
| 859 | 859 | ||
| 860 | xenwatch_pid = current->pid; | ||
| 861 | |||
| 860 | for (;;) { | 862 | for (;;) { |
| 861 | wait_event_interruptible(watch_events_waitq, | 863 | wait_event_interruptible(watch_events_waitq, |
| 862 | !list_empty(&watch_events)); | 864 | !list_empty(&watch_events)); |
| @@ -925,7 +927,6 @@ int xs_init(void) | |||
| 925 | task = kthread_run(xenwatch_thread, NULL, "xenwatch"); | 927 | task = kthread_run(xenwatch_thread, NULL, "xenwatch"); |
| 926 | if (IS_ERR(task)) | 928 | if (IS_ERR(task)) |
| 927 | return PTR_ERR(task); | 929 | return PTR_ERR(task); |
| 928 | xenwatch_pid = task->pid; | ||
| 929 | 930 | ||
| 930 | /* shutdown watches for kexec boot */ | 931 | /* shutdown watches for kexec boot */ |
| 931 | xs_reset_watches(); | 932 | xs_reset_watches(); |
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c index 967f069385d0..71ddfb4cf61c 100644 --- a/drivers/xen/xenfs/super.c +++ b/drivers/xen/xenfs/super.c | |||
| @@ -87,7 +87,6 @@ static int __init xenfs_init(void) | |||
| 87 | if (xen_domain()) | 87 | if (xen_domain()) |
| 88 | return register_filesystem(&xenfs_type); | 88 | return register_filesystem(&xenfs_type); |
| 89 | 89 | ||
| 90 | pr_info("not registering filesystem on non-xen platform\n"); | ||
| 91 | return 0; | 90 | return 0; |
| 92 | } | 91 | } |
| 93 | 92 | ||
