diff options
author | Dave Airlie <airlied@redhat.com> | 2018-03-28 00:30:41 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2018-03-28 00:30:41 -0400 |
commit | 2b4f44eec2be2688511c2b617d0e1b4f94c45ba4 (patch) | |
tree | 533c03602f4ae6d6404db6fa56c88e6f83e1bebe /drivers | |
parent | 33d009cd889490838c5db9b9339856c9e3d3facc (diff) | |
parent | 3eb2ce825ea1ad89d20f7a3b5780df850e4be274 (diff) |
Backmerge tag 'v4.16-rc7' into drm-next
Linux 4.16-rc7
This was requested by Daniel, and things were getting
a bit hard to reconcile; most of the conflicts were
trivial, though.
Diffstat (limited to 'drivers')
607 files changed, 6184 insertions, 4593 deletions
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c index 11b113f8e367..ebb626ffb5fa 100644 --- a/drivers/acpi/acpi_watchdog.c +++ b/drivers/acpi/acpi_watchdog.c | |||
@@ -74,10 +74,10 @@ void __init acpi_watchdog_init(void) | |||
74 | res.start = gas->address; | 74 | res.start = gas->address; |
75 | if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { | 75 | if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { |
76 | res.flags = IORESOURCE_MEM; | 76 | res.flags = IORESOURCE_MEM; |
77 | res.end = res.start + ALIGN(gas->access_width, 4); | 77 | res.end = res.start + ALIGN(gas->access_width, 4) - 1; |
78 | } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { | 78 | } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { |
79 | res.flags = IORESOURCE_IO; | 79 | res.flags = IORESOURCE_IO; |
80 | res.end = res.start + gas->access_width; | 80 | res.end = res.start + gas->access_width - 1; |
81 | } else { | 81 | } else { |
82 | pr_warn("Unsupported address space: %u\n", | 82 | pr_warn("Unsupported address space: %u\n", |
83 | gas->space_id); | 83 | gas->space_id); |
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 7128488a3a72..f2eb6c37ea0a 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -70,7 +70,6 @@ static async_cookie_t async_cookie; | |||
70 | static bool battery_driver_registered; | 70 | static bool battery_driver_registered; |
71 | static int battery_bix_broken_package; | 71 | static int battery_bix_broken_package; |
72 | static int battery_notification_delay_ms; | 72 | static int battery_notification_delay_ms; |
73 | static int battery_full_discharging; | ||
74 | static unsigned int cache_time = 1000; | 73 | static unsigned int cache_time = 1000; |
75 | module_param(cache_time, uint, 0644); | 74 | module_param(cache_time, uint, 0644); |
76 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); | 75 | MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); |
@@ -215,12 +214,9 @@ static int acpi_battery_get_property(struct power_supply *psy, | |||
215 | return -ENODEV; | 214 | return -ENODEV; |
216 | switch (psp) { | 215 | switch (psp) { |
217 | case POWER_SUPPLY_PROP_STATUS: | 216 | case POWER_SUPPLY_PROP_STATUS: |
218 | if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) { | 217 | if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) |
219 | if (battery_full_discharging && battery->rate_now == 0) | 218 | val->intval = POWER_SUPPLY_STATUS_DISCHARGING; |
220 | val->intval = POWER_SUPPLY_STATUS_FULL; | 219 | else if (battery->state & ACPI_BATTERY_STATE_CHARGING) |
221 | else | ||
222 | val->intval = POWER_SUPPLY_STATUS_DISCHARGING; | ||
223 | } else if (battery->state & ACPI_BATTERY_STATE_CHARGING) | ||
224 | val->intval = POWER_SUPPLY_STATUS_CHARGING; | 220 | val->intval = POWER_SUPPLY_STATUS_CHARGING; |
225 | else if (acpi_battery_is_charged(battery)) | 221 | else if (acpi_battery_is_charged(battery)) |
226 | val->intval = POWER_SUPPLY_STATUS_FULL; | 222 | val->intval = POWER_SUPPLY_STATUS_FULL; |
@@ -1170,12 +1166,6 @@ battery_notification_delay_quirk(const struct dmi_system_id *d) | |||
1170 | return 0; | 1166 | return 0; |
1171 | } | 1167 | } |
1172 | 1168 | ||
1173 | static int __init battery_full_discharging_quirk(const struct dmi_system_id *d) | ||
1174 | { | ||
1175 | battery_full_discharging = 1; | ||
1176 | return 0; | ||
1177 | } | ||
1178 | |||
1179 | static const struct dmi_system_id bat_dmi_table[] __initconst = { | 1169 | static const struct dmi_system_id bat_dmi_table[] __initconst = { |
1180 | { | 1170 | { |
1181 | .callback = battery_bix_broken_package_quirk, | 1171 | .callback = battery_bix_broken_package_quirk, |
@@ -1193,38 +1183,6 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = { | |||
1193 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"), | 1183 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"), |
1194 | }, | 1184 | }, |
1195 | }, | 1185 | }, |
1196 | { | ||
1197 | .callback = battery_full_discharging_quirk, | ||
1198 | .ident = "ASUS GL502VSK", | ||
1199 | .matches = { | ||
1200 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), | ||
1201 | DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"), | ||
1202 | }, | ||
1203 | }, | ||
1204 | { | ||
1205 | .callback = battery_full_discharging_quirk, | ||
1206 | .ident = "ASUS UX305LA", | ||
1207 | .matches = { | ||
1208 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), | ||
1209 | DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"), | ||
1210 | }, | ||
1211 | }, | ||
1212 | { | ||
1213 | .callback = battery_full_discharging_quirk, | ||
1214 | .ident = "ASUS UX360UA", | ||
1215 | .matches = { | ||
1216 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), | ||
1217 | DMI_MATCH(DMI_PRODUCT_NAME, "UX360UA"), | ||
1218 | }, | ||
1219 | }, | ||
1220 | { | ||
1221 | .callback = battery_full_discharging_quirk, | ||
1222 | .ident = "ASUS UX410UAK", | ||
1223 | .matches = { | ||
1224 | DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), | ||
1225 | DMI_MATCH(DMI_PRODUCT_NAME, "UX410UAK"), | ||
1226 | }, | ||
1227 | }, | ||
1228 | {}, | 1186 | {}, |
1229 | }; | 1187 | }; |
1230 | 1188 | ||
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 676c9788e1c8..0dad0bd9327b 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -660,13 +660,15 @@ struct acpi_device *acpi_companion_match(const struct device *dev) | |||
660 | * acpi_of_match_device - Match device object using the "compatible" property. | 660 | * acpi_of_match_device - Match device object using the "compatible" property. |
661 | * @adev: ACPI device object to match. | 661 | * @adev: ACPI device object to match. |
662 | * @of_match_table: List of device IDs to match against. | 662 | * @of_match_table: List of device IDs to match against. |
663 | * @of_id: OF ID if matched | ||
663 | * | 664 | * |
664 | * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of | 665 | * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of |
665 | * identifiers and a _DSD object with the "compatible" property, use that | 666 | * identifiers and a _DSD object with the "compatible" property, use that |
666 | * property to match against the given list of identifiers. | 667 | * property to match against the given list of identifiers. |
667 | */ | 668 | */ |
668 | static bool acpi_of_match_device(struct acpi_device *adev, | 669 | static bool acpi_of_match_device(struct acpi_device *adev, |
669 | const struct of_device_id *of_match_table) | 670 | const struct of_device_id *of_match_table, |
671 | const struct of_device_id **of_id) | ||
670 | { | 672 | { |
671 | const union acpi_object *of_compatible, *obj; | 673 | const union acpi_object *of_compatible, *obj; |
672 | int i, nval; | 674 | int i, nval; |
@@ -690,8 +692,11 @@ static bool acpi_of_match_device(struct acpi_device *adev, | |||
690 | const struct of_device_id *id; | 692 | const struct of_device_id *id; |
691 | 693 | ||
692 | for (id = of_match_table; id->compatible[0]; id++) | 694 | for (id = of_match_table; id->compatible[0]; id++) |
693 | if (!strcasecmp(obj->string.pointer, id->compatible)) | 695 | if (!strcasecmp(obj->string.pointer, id->compatible)) { |
696 | if (of_id) | ||
697 | *of_id = id; | ||
694 | return true; | 698 | return true; |
699 | } | ||
695 | } | 700 | } |
696 | 701 | ||
697 | return false; | 702 | return false; |
@@ -762,10 +767,11 @@ static bool __acpi_match_device_cls(const struct acpi_device_id *id, | |||
762 | return true; | 767 | return true; |
763 | } | 768 | } |
764 | 769 | ||
765 | static const struct acpi_device_id *__acpi_match_device( | 770 | static bool __acpi_match_device(struct acpi_device *device, |
766 | struct acpi_device *device, | 771 | const struct acpi_device_id *acpi_ids, |
767 | const struct acpi_device_id *ids, | 772 | const struct of_device_id *of_ids, |
768 | const struct of_device_id *of_ids) | 773 | const struct acpi_device_id **acpi_id, |
774 | const struct of_device_id **of_id) | ||
769 | { | 775 | { |
770 | const struct acpi_device_id *id; | 776 | const struct acpi_device_id *id; |
771 | struct acpi_hardware_id *hwid; | 777 | struct acpi_hardware_id *hwid; |
@@ -775,30 +781,32 @@ static const struct acpi_device_id *__acpi_match_device( | |||
775 | * driver for it. | 781 | * driver for it. |
776 | */ | 782 | */ |
777 | if (!device || !device->status.present) | 783 | if (!device || !device->status.present) |
778 | return NULL; | 784 | return false; |
779 | 785 | ||
780 | list_for_each_entry(hwid, &device->pnp.ids, list) { | 786 | list_for_each_entry(hwid, &device->pnp.ids, list) { |
781 | /* First, check the ACPI/PNP IDs provided by the caller. */ | 787 | /* First, check the ACPI/PNP IDs provided by the caller. */ |
782 | for (id = ids; id->id[0] || id->cls; id++) { | 788 | if (acpi_ids) { |
783 | if (id->id[0] && !strcmp((char *) id->id, hwid->id)) | 789 | for (id = acpi_ids; id->id[0] || id->cls; id++) { |
784 | return id; | 790 | if (id->id[0] && !strcmp((char *)id->id, hwid->id)) |
785 | else if (id->cls && __acpi_match_device_cls(id, hwid)) | 791 | goto out_acpi_match; |
786 | return id; | 792 | if (id->cls && __acpi_match_device_cls(id, hwid)) |
793 | goto out_acpi_match; | ||
794 | } | ||
787 | } | 795 | } |
788 | 796 | ||
789 | /* | 797 | /* |
790 | * Next, check ACPI_DT_NAMESPACE_HID and try to match the | 798 | * Next, check ACPI_DT_NAMESPACE_HID and try to match the |
791 | * "compatible" property if found. | 799 | * "compatible" property if found. |
792 | * | ||
793 | * The id returned by the below is not valid, but the only | ||
794 | * caller passing non-NULL of_ids here is only interested in | ||
795 | * whether or not the return value is NULL. | ||
796 | */ | 800 | */ |
797 | if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id) | 801 | if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)) |
798 | && acpi_of_match_device(device, of_ids)) | 802 | return acpi_of_match_device(device, of_ids, of_id); |
799 | return id; | ||
800 | } | 803 | } |
801 | return NULL; | 804 | return false; |
805 | |||
806 | out_acpi_match: | ||
807 | if (acpi_id) | ||
808 | *acpi_id = id; | ||
809 | return true; | ||
802 | } | 810 | } |
803 | 811 | ||
804 | /** | 812 | /** |
@@ -815,32 +823,29 @@ static const struct acpi_device_id *__acpi_match_device( | |||
815 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, | 823 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, |
816 | const struct device *dev) | 824 | const struct device *dev) |
817 | { | 825 | { |
818 | return __acpi_match_device(acpi_companion_match(dev), ids, NULL); | 826 | const struct acpi_device_id *id = NULL; |
827 | |||
828 | __acpi_match_device(acpi_companion_match(dev), ids, NULL, &id, NULL); | ||
829 | return id; | ||
819 | } | 830 | } |
820 | EXPORT_SYMBOL_GPL(acpi_match_device); | 831 | EXPORT_SYMBOL_GPL(acpi_match_device); |
821 | 832 | ||
822 | void *acpi_get_match_data(const struct device *dev) | 833 | const void *acpi_device_get_match_data(const struct device *dev) |
823 | { | 834 | { |
824 | const struct acpi_device_id *match; | 835 | const struct acpi_device_id *match; |
825 | 836 | ||
826 | if (!dev->driver) | ||
827 | return NULL; | ||
828 | |||
829 | if (!dev->driver->acpi_match_table) | ||
830 | return NULL; | ||
831 | |||
832 | match = acpi_match_device(dev->driver->acpi_match_table, dev); | 837 | match = acpi_match_device(dev->driver->acpi_match_table, dev); |
833 | if (!match) | 838 | if (!match) |
834 | return NULL; | 839 | return NULL; |
835 | 840 | ||
836 | return (void *)match->driver_data; | 841 | return (const void *)match->driver_data; |
837 | } | 842 | } |
838 | EXPORT_SYMBOL_GPL(acpi_get_match_data); | 843 | EXPORT_SYMBOL_GPL(acpi_device_get_match_data); |
839 | 844 | ||
840 | int acpi_match_device_ids(struct acpi_device *device, | 845 | int acpi_match_device_ids(struct acpi_device *device, |
841 | const struct acpi_device_id *ids) | 846 | const struct acpi_device_id *ids) |
842 | { | 847 | { |
843 | return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT; | 848 | return __acpi_match_device(device, ids, NULL, NULL, NULL) ? 0 : -ENOENT; |
844 | } | 849 | } |
845 | EXPORT_SYMBOL(acpi_match_device_ids); | 850 | EXPORT_SYMBOL(acpi_match_device_ids); |
846 | 851 | ||
@@ -849,10 +854,12 @@ bool acpi_driver_match_device(struct device *dev, | |||
849 | { | 854 | { |
850 | if (!drv->acpi_match_table) | 855 | if (!drv->acpi_match_table) |
851 | return acpi_of_match_device(ACPI_COMPANION(dev), | 856 | return acpi_of_match_device(ACPI_COMPANION(dev), |
852 | drv->of_match_table); | 857 | drv->of_match_table, |
858 | NULL); | ||
853 | 859 | ||
854 | return !!__acpi_match_device(acpi_companion_match(dev), | 860 | return __acpi_match_device(acpi_companion_match(dev), |
855 | drv->acpi_match_table, drv->of_match_table); | 861 | drv->acpi_match_table, drv->of_match_table, |
862 | NULL, NULL); | ||
856 | } | 863 | } |
857 | EXPORT_SYMBOL_GPL(acpi_driver_match_device); | 864 | EXPORT_SYMBOL_GPL(acpi_driver_match_device); |
858 | 865 | ||
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d9f38c645e4a..30a572956557 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -1927,6 +1927,9 @@ static int acpi_ec_suspend_noirq(struct device *dev) | |||
1927 | ec->reference_count >= 1) | 1927 | ec->reference_count >= 1) |
1928 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); | 1928 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); |
1929 | 1929 | ||
1930 | if (acpi_sleep_no_ec_events()) | ||
1931 | acpi_ec_enter_noirq(ec); | ||
1932 | |||
1930 | return 0; | 1933 | return 0; |
1931 | } | 1934 | } |
1932 | 1935 | ||
@@ -1934,6 +1937,9 @@ static int acpi_ec_resume_noirq(struct device *dev) | |||
1934 | { | 1937 | { |
1935 | struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); | 1938 | struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev)); |
1936 | 1939 | ||
1940 | if (acpi_sleep_no_ec_events()) | ||
1941 | acpi_ec_leave_noirq(ec); | ||
1942 | |||
1937 | if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && | 1943 | if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && |
1938 | ec->reference_count >= 1) | 1944 | ec->reference_count >= 1) |
1939 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); | 1945 | acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); |
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index bbe48ad20886..eb09ef55c38a 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c | |||
@@ -2675,10 +2675,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, | |||
2675 | else | 2675 | else |
2676 | ndr_desc->numa_node = NUMA_NO_NODE; | 2676 | ndr_desc->numa_node = NUMA_NO_NODE; |
2677 | 2677 | ||
2678 | if(acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) | 2678 | /* |
2679 | * Persistence domain bits are hierarchical, if | ||
2680 | * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then | ||
2681 | * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied. | ||
2682 | */ | ||
2683 | if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH) | ||
2679 | set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); | 2684 | set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags); |
2680 | 2685 | else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) | |
2681 | if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH) | ||
2682 | set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags); | 2686 | set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags); |
2683 | 2687 | ||
2684 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { | 2688 | list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { |
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 8ccaae3550d2..85167603b9c9 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c | |||
@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm) | |||
103 | */ | 103 | */ |
104 | int acpi_map_pxm_to_online_node(int pxm) | 104 | int acpi_map_pxm_to_online_node(int pxm) |
105 | { | 105 | { |
106 | int node, n, dist, min_dist; | 106 | int node, min_node; |
107 | 107 | ||
108 | node = acpi_map_pxm_to_node(pxm); | 108 | node = acpi_map_pxm_to_node(pxm); |
109 | 109 | ||
110 | if (node == NUMA_NO_NODE) | 110 | if (node == NUMA_NO_NODE) |
111 | node = 0; | 111 | node = 0; |
112 | 112 | ||
113 | min_node = node; | ||
113 | if (!node_online(node)) { | 114 | if (!node_online(node)) { |
114 | min_dist = INT_MAX; | 115 | int min_dist = INT_MAX, dist, n; |
116 | |||
115 | for_each_online_node(n) { | 117 | for_each_online_node(n) { |
116 | dist = node_distance(node, n); | 118 | dist = node_distance(node, n); |
117 | if (dist < min_dist) { | 119 | if (dist < min_dist) { |
118 | min_dist = dist; | 120 | min_dist = dist; |
119 | node = n; | 121 | min_node = n; |
120 | } | 122 | } |
121 | } | 123 | } |
122 | } | 124 | } |
123 | 125 | ||
124 | return node; | 126 | return min_node; |
125 | } | 127 | } |
126 | EXPORT_SYMBOL(acpi_map_pxm_to_online_node); | 128 | EXPORT_SYMBOL(acpi_map_pxm_to_online_node); |
127 | 129 | ||
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 466d1503aba0..5815356ea6ad 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c | |||
@@ -1271,11 +1271,11 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, | |||
1271 | return 0; | 1271 | return 0; |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | static void * | 1274 | static const void * |
1275 | acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, | 1275 | acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, |
1276 | const struct device *dev) | 1276 | const struct device *dev) |
1277 | { | 1277 | { |
1278 | return acpi_get_match_data(dev); | 1278 | return acpi_device_get_match_data(dev); |
1279 | } | 1279 | } |
1280 | 1280 | ||
1281 | #define DECLARE_ACPI_FWNODE_OPS(ops) \ | 1281 | #define DECLARE_ACPI_FWNODE_OPS(ops) \ |
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 89e97d21a89c..9d52743080a4 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c | |||
@@ -115,6 +115,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console) | |||
115 | table->serial_port.access_width))) { | 115 | table->serial_port.access_width))) { |
116 | default: | 116 | default: |
117 | pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n"); | 117 | pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n"); |
118 | /* fall through */ | ||
118 | case 8: | 119 | case 8: |
119 | iotype = "mmio"; | 120 | iotype = "mmio"; |
120 | break; | 121 | break; |
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 15e3d3c2260d..764b63a5aade 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
@@ -1991,8 +1991,14 @@ static void binder_send_failed_reply(struct binder_transaction *t, | |||
1991 | &target_thread->reply_error.work); | 1991 | &target_thread->reply_error.work); |
1992 | wake_up_interruptible(&target_thread->wait); | 1992 | wake_up_interruptible(&target_thread->wait); |
1993 | } else { | 1993 | } else { |
1994 | WARN(1, "Unexpected reply error: %u\n", | 1994 | /* |
1995 | target_thread->reply_error.cmd); | 1995 | * Cannot get here for normal operation, but |
1996 | * we can if multiple synchronous transactions | ||
1997 | * are sent without blocking for responses. | ||
1998 | * Just ignore the 2nd error in this case. | ||
1999 | */ | ||
2000 | pr_warn("Unexpected reply error: %u\n", | ||
2001 | target_thread->reply_error.cmd); | ||
1996 | } | 2002 | } |
1997 | binder_inner_proc_unlock(target_thread->proc); | 2003 | binder_inner_proc_unlock(target_thread->proc); |
1998 | binder_thread_dec_tmpref(target_thread); | 2004 | binder_thread_dec_tmpref(target_thread); |
@@ -2193,7 +2199,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, | |||
2193 | int debug_id = buffer->debug_id; | 2199 | int debug_id = buffer->debug_id; |
2194 | 2200 | ||
2195 | binder_debug(BINDER_DEBUG_TRANSACTION, | 2201 | binder_debug(BINDER_DEBUG_TRANSACTION, |
2196 | "%d buffer release %d, size %zd-%zd, failed at %p\n", | 2202 | "%d buffer release %d, size %zd-%zd, failed at %pK\n", |
2197 | proc->pid, buffer->debug_id, | 2203 | proc->pid, buffer->debug_id, |
2198 | buffer->data_size, buffer->offsets_size, failed_at); | 2204 | buffer->data_size, buffer->offsets_size, failed_at); |
2199 | 2205 | ||
@@ -3705,7 +3711,7 @@ static int binder_thread_write(struct binder_proc *proc, | |||
3705 | } | 3711 | } |
3706 | } | 3712 | } |
3707 | binder_debug(BINDER_DEBUG_DEAD_BINDER, | 3713 | binder_debug(BINDER_DEBUG_DEAD_BINDER, |
3708 | "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", | 3714 | "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", |
3709 | proc->pid, thread->pid, (u64)cookie, | 3715 | proc->pid, thread->pid, (u64)cookie, |
3710 | death); | 3716 | death); |
3711 | if (death == NULL) { | 3717 | if (death == NULL) { |
@@ -4376,6 +4382,15 @@ static int binder_thread_release(struct binder_proc *proc, | |||
4376 | 4382 | ||
4377 | binder_inner_proc_unlock(thread->proc); | 4383 | binder_inner_proc_unlock(thread->proc); |
4378 | 4384 | ||
4385 | /* | ||
4386 | * This is needed to avoid races between wake_up_poll() above and | ||
4387 | * and ep_remove_waitqueue() called for other reasons (eg the epoll file | ||
4388 | * descriptor being closed); ep_remove_waitqueue() holds an RCU read | ||
4389 | * lock, so we can be sure it's done after calling synchronize_rcu(). | ||
4390 | */ | ||
4391 | if (thread->looper & BINDER_LOOPER_STATE_POLL) | ||
4392 | synchronize_rcu(); | ||
4393 | |||
4379 | if (send_reply) | 4394 | if (send_reply) |
4380 | binder_send_failed_reply(send_reply, BR_DEAD_REPLY); | 4395 | binder_send_failed_reply(send_reply, BR_DEAD_REPLY); |
4381 | binder_release_work(proc, &thread->todo); | 4396 | binder_release_work(proc, &thread->todo); |
@@ -4391,6 +4406,8 @@ static __poll_t binder_poll(struct file *filp, | |||
4391 | bool wait_for_proc_work; | 4406 | bool wait_for_proc_work; |
4392 | 4407 | ||
4393 | thread = binder_get_thread(proc); | 4408 | thread = binder_get_thread(proc); |
4409 | if (!thread) | ||
4410 | return POLLERR; | ||
4394 | 4411 | ||
4395 | binder_inner_proc_lock(thread->proc); | 4412 | binder_inner_proc_lock(thread->proc); |
4396 | thread->looper |= BINDER_LOOPER_STATE_POLL; | 4413 | thread->looper |= BINDER_LOOPER_STATE_POLL; |
@@ -5034,7 +5051,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, | |||
5034 | spin_lock(&t->lock); | 5051 | spin_lock(&t->lock); |
5035 | to_proc = t->to_proc; | 5052 | to_proc = t->to_proc; |
5036 | seq_printf(m, | 5053 | seq_printf(m, |
5037 | "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", | 5054 | "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", |
5038 | prefix, t->debug_id, t, | 5055 | prefix, t->debug_id, t, |
5039 | t->from ? t->from->proc->pid : 0, | 5056 | t->from ? t->from->proc->pid : 0, |
5040 | t->from ? t->from->pid : 0, | 5057 | t->from ? t->from->pid : 0, |
@@ -5058,7 +5075,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, | |||
5058 | } | 5075 | } |
5059 | if (buffer->target_node) | 5076 | if (buffer->target_node) |
5060 | seq_printf(m, " node %d", buffer->target_node->debug_id); | 5077 | seq_printf(m, " node %d", buffer->target_node->debug_id); |
5061 | seq_printf(m, " size %zd:%zd data %p\n", | 5078 | seq_printf(m, " size %zd:%zd data %pK\n", |
5062 | buffer->data_size, buffer->offsets_size, | 5079 | buffer->data_size, buffer->offsets_size, |
5063 | buffer->data); | 5080 | buffer->data); |
5064 | } | 5081 | } |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 355a95a83a34..1ff17799769d 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -550,7 +550,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
550 | .driver_data = board_ahci_yes_fbs }, | 550 | .driver_data = board_ahci_yes_fbs }, |
551 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), | 551 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), |
552 | .driver_data = board_ahci_yes_fbs }, | 552 | .driver_data = board_ahci_yes_fbs }, |
553 | { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), | 553 | { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */ |
554 | .driver_data = board_ahci_yes_fbs }, | ||
555 | { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */ | ||
554 | .driver_data = board_ahci_yes_fbs }, | 556 | .driver_data = board_ahci_yes_fbs }, |
555 | 557 | ||
556 | /* Promise */ | 558 | /* Promise */ |
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index a0de7a38430c..7adcf3caabd0 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
@@ -665,6 +665,16 @@ int ahci_stop_engine(struct ata_port *ap) | |||
665 | if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) | 665 | if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) |
666 | return 0; | 666 | return 0; |
667 | 667 | ||
668 | /* | ||
669 | * Don't try to issue commands but return with ENODEV if the | ||
670 | * AHCI controller not available anymore (e.g. due to PCIe hot | ||
671 | * unplugging). Otherwise a 500ms delay for each port is added. | ||
672 | */ | ||
673 | if (tmp == 0xffffffff) { | ||
674 | dev_err(ap->host->dev, "AHCI controller unavailable!\n"); | ||
675 | return -ENODEV; | ||
676 | } | ||
677 | |||
668 | /* setting HBA to idle */ | 678 | /* setting HBA to idle */ |
669 | tmp &= ~PORT_CMD_START; | 679 | tmp &= ~PORT_CMD_START; |
670 | writel(tmp, port_mmio + PORT_CMD); | 680 | writel(tmp, port_mmio + PORT_CMD); |
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 341d0ef82cbd..30cc8f1a31e1 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c | |||
@@ -340,7 +340,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port, | |||
340 | * 2) regulator for controlling the targets power (optional) | 340 | * 2) regulator for controlling the targets power (optional) |
341 | * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node, | 341 | * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node, |
342 | * or for non devicetree enabled platforms a single clock | 342 | * or for non devicetree enabled platforms a single clock |
343 | * 4) phys (optional) | 343 | * 4) phys (optional) |
344 | * | 344 | * |
345 | * RETURNS: | 345 | * RETURNS: |
346 | * The allocated ahci_host_priv on success, otherwise an ERR_PTR value | 346 | * The allocated ahci_host_priv on success, otherwise an ERR_PTR value |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 3c09122bf038..7431ccd03316 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4530,6 +4530,25 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4530 | { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, | 4530 | { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, |
4531 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, | 4531 | { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, |
4532 | 4532 | ||
4533 | /* Crucial BX100 SSD 500GB has broken LPM support */ | ||
4534 | { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM }, | ||
4535 | |||
4536 | /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */ | ||
4537 | { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | ||
4538 | ATA_HORKAGE_ZERO_AFTER_TRIM | | ||
4539 | ATA_HORKAGE_NOLPM, }, | ||
4540 | /* 512GB MX100 with newer firmware has only LPM issues */ | ||
4541 | { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM | | ||
4542 | ATA_HORKAGE_NOLPM, }, | ||
4543 | |||
4544 | /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */ | ||
4545 | { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | ||
4546 | ATA_HORKAGE_ZERO_AFTER_TRIM | | ||
4547 | ATA_HORKAGE_NOLPM, }, | ||
4548 | { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | ||
4549 | ATA_HORKAGE_ZERO_AFTER_TRIM | | ||
4550 | ATA_HORKAGE_NOLPM, }, | ||
4551 | |||
4533 | /* devices that don't properly handle queued TRIM commands */ | 4552 | /* devices that don't properly handle queued TRIM commands */ |
4534 | { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | 4553 | { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
4535 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4554 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
@@ -4541,7 +4560,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4541 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4560 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4542 | { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | 4561 | { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | |
4543 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4562 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4544 | { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | 4563 | { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
4564 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | ||
4565 | { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | ||
4545 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4566 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4546 | { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | 4567 | { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
4547 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4568 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
@@ -5401,8 +5422,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc) | |||
5401 | * We guarantee to LLDs that they will have at least one | 5422 | * We guarantee to LLDs that they will have at least one |
5402 | * non-zero sg if the command is a data command. | 5423 | * non-zero sg if the command is a data command. |
5403 | */ | 5424 | */ |
5404 | if (WARN_ON_ONCE(ata_is_data(prot) && | 5425 | if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)) |
5405 | (!qc->sg || !qc->n_elem || !qc->nbytes))) | ||
5406 | goto sys_err; | 5426 | goto sys_err; |
5407 | 5427 | ||
5408 | if (ata_is_dma(prot) || (ata_is_pio(prot) && | 5428 | if (ata_is_dma(prot) || (ata_is_pio(prot) && |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 11c3137d7b0a..c016829a38fd 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -815,7 +815,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap) | |||
815 | 815 | ||
816 | if (ap->pflags & ATA_PFLAG_LOADING) | 816 | if (ap->pflags & ATA_PFLAG_LOADING) |
817 | ap->pflags &= ~ATA_PFLAG_LOADING; | 817 | ap->pflags &= ~ATA_PFLAG_LOADING; |
818 | else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) | 818 | else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) && |
819 | !(ap->flags & ATA_FLAG_SAS_HOST)) | ||
819 | schedule_delayed_work(&ap->hotplug_task, 0); | 820 | schedule_delayed_work(&ap->hotplug_task, 0); |
820 | 821 | ||
821 | if (ap->pflags & ATA_PFLAG_RECOVERED) | 822 | if (ap->pflags & ATA_PFLAG_RECOVERED) |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 66be961c93a4..89a9d4a2efc8 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -3316,6 +3316,12 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
3316 | goto invalid_fld; | 3316 | goto invalid_fld; |
3317 | } | 3317 | } |
3318 | 3318 | ||
3319 | /* We may not issue NCQ commands to devices not supporting NCQ */ | ||
3320 | if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) { | ||
3321 | fp = 1; | ||
3322 | goto invalid_fld; | ||
3323 | } | ||
3324 | |||
3319 | /* sanity check for pio multi commands */ | 3325 | /* sanity check for pio multi commands */ |
3320 | if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) { | 3326 | if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) { |
3321 | fp = 1; | 3327 | fp = 1; |
@@ -4282,7 +4288,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap, | |||
4282 | #ifdef ATA_DEBUG | 4288 | #ifdef ATA_DEBUG |
4283 | struct scsi_device *scsidev = cmd->device; | 4289 | struct scsi_device *scsidev = cmd->device; |
4284 | 4290 | ||
4285 | DPRINTK("CDB (%u:%d,%d,%d) %9ph\n", | 4291 | DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n", |
4286 | ap->print_id, | 4292 | ap->print_id, |
4287 | scsidev->channel, scsidev->id, scsidev->lun, | 4293 | scsidev->channel, scsidev->id, scsidev->lun, |
4288 | cmd->cmnd); | 4294 | cmd->cmnd); |
@@ -4309,7 +4315,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, | |||
4309 | if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { | 4315 | if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { |
4310 | /* relay SCSI command to ATAPI device */ | 4316 | /* relay SCSI command to ATAPI device */ |
4311 | int len = COMMAND_SIZE(scsi_op); | 4317 | int len = COMMAND_SIZE(scsi_op); |
4312 | if (unlikely(len > scmd->cmd_len || len > dev->cdb_len)) | 4318 | if (unlikely(len > scmd->cmd_len || |
4319 | len > dev->cdb_len || | ||
4320 | scmd->cmd_len > ATAPI_CDB_LEN)) | ||
4313 | goto bad_cdb_len; | 4321 | goto bad_cdb_len; |
4314 | 4322 | ||
4315 | xlat_func = atapi_xlat; | 4323 | xlat_func = atapi_xlat; |
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 80ee2f2a50d0..6456e07db72a 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c | |||
@@ -146,6 +146,7 @@ | |||
146 | enum sata_rcar_type { | 146 | enum sata_rcar_type { |
147 | RCAR_GEN1_SATA, | 147 | RCAR_GEN1_SATA, |
148 | RCAR_GEN2_SATA, | 148 | RCAR_GEN2_SATA, |
149 | RCAR_GEN3_SATA, | ||
149 | RCAR_R8A7790_ES1_SATA, | 150 | RCAR_R8A7790_ES1_SATA, |
150 | }; | 151 | }; |
151 | 152 | ||
@@ -784,26 +785,11 @@ static void sata_rcar_setup_port(struct ata_host *host) | |||
784 | ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2); | 785 | ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2); |
785 | } | 786 | } |
786 | 787 | ||
787 | static void sata_rcar_init_controller(struct ata_host *host) | 788 | static void sata_rcar_init_module(struct sata_rcar_priv *priv) |
788 | { | 789 | { |
789 | struct sata_rcar_priv *priv = host->private_data; | ||
790 | void __iomem *base = priv->base; | 790 | void __iomem *base = priv->base; |
791 | u32 val; | 791 | u32 val; |
792 | 792 | ||
793 | /* reset and setup phy */ | ||
794 | switch (priv->type) { | ||
795 | case RCAR_GEN1_SATA: | ||
796 | sata_rcar_gen1_phy_init(priv); | ||
797 | break; | ||
798 | case RCAR_GEN2_SATA: | ||
799 | case RCAR_R8A7790_ES1_SATA: | ||
800 | sata_rcar_gen2_phy_init(priv); | ||
801 | break; | ||
802 | default: | ||
803 | dev_warn(host->dev, "SATA phy is not initialized\n"); | ||
804 | break; | ||
805 | } | ||
806 | |||
807 | /* SATA-IP reset state */ | 793 | /* SATA-IP reset state */ |
808 | val = ioread32(base + ATAPI_CONTROL1_REG); | 794 | val = ioread32(base + ATAPI_CONTROL1_REG); |
809 | val |= ATAPI_CONTROL1_RESET; | 795 | val |= ATAPI_CONTROL1_RESET; |
@@ -824,10 +810,33 @@ static void sata_rcar_init_controller(struct ata_host *host) | |||
824 | /* ack and mask */ | 810 | /* ack and mask */ |
825 | iowrite32(0, base + SATAINTSTAT_REG); | 811 | iowrite32(0, base + SATAINTSTAT_REG); |
826 | iowrite32(0x7ff, base + SATAINTMASK_REG); | 812 | iowrite32(0x7ff, base + SATAINTMASK_REG); |
813 | |||
827 | /* enable interrupts */ | 814 | /* enable interrupts */ |
828 | iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG); | 815 | iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG); |
829 | } | 816 | } |
830 | 817 | ||
818 | static void sata_rcar_init_controller(struct ata_host *host) | ||
819 | { | ||
820 | struct sata_rcar_priv *priv = host->private_data; | ||
821 | |||
822 | /* reset and setup phy */ | ||
823 | switch (priv->type) { | ||
824 | case RCAR_GEN1_SATA: | ||
825 | sata_rcar_gen1_phy_init(priv); | ||
826 | break; | ||
827 | case RCAR_GEN2_SATA: | ||
828 | case RCAR_GEN3_SATA: | ||
829 | case RCAR_R8A7790_ES1_SATA: | ||
830 | sata_rcar_gen2_phy_init(priv); | ||
831 | break; | ||
832 | default: | ||
833 | dev_warn(host->dev, "SATA phy is not initialized\n"); | ||
834 | break; | ||
835 | } | ||
836 | |||
837 | sata_rcar_init_module(priv); | ||
838 | } | ||
839 | |||
831 | static const struct of_device_id sata_rcar_match[] = { | 840 | static const struct of_device_id sata_rcar_match[] = { |
832 | { | 841 | { |
833 | /* Deprecated by "renesas,sata-r8a7779" */ | 842 | /* Deprecated by "renesas,sata-r8a7779" */ |
@@ -856,7 +865,7 @@ static const struct of_device_id sata_rcar_match[] = { | |||
856 | }, | 865 | }, |
857 | { | 866 | { |
858 | .compatible = "renesas,sata-r8a7795", | 867 | .compatible = "renesas,sata-r8a7795", |
859 | .data = (void *)RCAR_GEN2_SATA | 868 | .data = (void *)RCAR_GEN3_SATA |
860 | }, | 869 | }, |
861 | { | 870 | { |
862 | .compatible = "renesas,rcar-gen2-sata", | 871 | .compatible = "renesas,rcar-gen2-sata", |
@@ -864,7 +873,7 @@ static const struct of_device_id sata_rcar_match[] = { | |||
864 | }, | 873 | }, |
865 | { | 874 | { |
866 | .compatible = "renesas,rcar-gen3-sata", | 875 | .compatible = "renesas,rcar-gen3-sata", |
867 | .data = (void *)RCAR_GEN2_SATA | 876 | .data = (void *)RCAR_GEN3_SATA |
868 | }, | 877 | }, |
869 | { }, | 878 | { }, |
870 | }; | 879 | }; |
@@ -982,11 +991,18 @@ static int sata_rcar_resume(struct device *dev) | |||
982 | if (ret) | 991 | if (ret) |
983 | return ret; | 992 | return ret; |
984 | 993 | ||
985 | /* ack and mask */ | 994 | if (priv->type == RCAR_GEN3_SATA) { |
986 | iowrite32(0, base + SATAINTSTAT_REG); | 995 | sata_rcar_gen2_phy_init(priv); |
987 | iowrite32(0x7ff, base + SATAINTMASK_REG); | 996 | sata_rcar_init_module(priv); |
988 | /* enable interrupts */ | 997 | } else { |
989 | iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG); | 998 | /* ack and mask */ |
999 | iowrite32(0, base + SATAINTSTAT_REG); | ||
1000 | iowrite32(0x7ff, base + SATAINTMASK_REG); | ||
1001 | |||
1002 | /* enable interrupts */ | ||
1003 | iowrite32(ATAPI_INT_ENABLE_SATAINT, | ||
1004 | base + ATAPI_INT_ENABLE_REG); | ||
1005 | } | ||
990 | 1006 | ||
991 | ata_host_resume(host); | 1007 | ata_host_resume(host); |
992 | 1008 | ||
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c index 9180b9bd5821..834509506ef6 100644 --- a/drivers/auxdisplay/img-ascii-lcd.c +++ b/drivers/auxdisplay/img-ascii-lcd.c | |||
@@ -97,7 +97,7 @@ static struct img_ascii_lcd_config boston_config = { | |||
97 | static void malta_update(struct img_ascii_lcd_ctx *ctx) | 97 | static void malta_update(struct img_ascii_lcd_ctx *ctx) |
98 | { | 98 | { |
99 | unsigned int i; | 99 | unsigned int i; |
100 | int err; | 100 | int err = 0; |
101 | 101 | ||
102 | for (i = 0; i < ctx->cfg->num_chars; i++) { | 102 | for (i = 0; i < ctx->cfg->num_chars; i++) { |
103 | err = regmap_write(ctx->regmap, | 103 | err = regmap_write(ctx->regmap, |
@@ -180,7 +180,7 @@ static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx) | |||
180 | static void sead3_update(struct img_ascii_lcd_ctx *ctx) | 180 | static void sead3_update(struct img_ascii_lcd_ctx *ctx) |
181 | { | 181 | { |
182 | unsigned int i; | 182 | unsigned int i; |
183 | int err; | 183 | int err = 0; |
184 | 184 | ||
185 | for (i = 0; i < ctx->cfg->num_chars; i++) { | 185 | for (i = 0; i < ctx->cfg->num_chars; i++) { |
186 | err = sead3_wait_lcd_idle(ctx); | 186 | err = sead3_wait_lcd_idle(ctx); |
@@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches); | |||
224 | 224 | ||
225 | /** | 225 | /** |
226 | * img_ascii_lcd_scroll() - scroll the display by a character | 226 | * img_ascii_lcd_scroll() - scroll the display by a character |
227 | * @arg: really a pointer to the private data structure | 227 | * @t: really a pointer to the private data structure |
228 | * | 228 | * |
229 | * Scroll the current message along the LCD by one character, rearming the | 229 | * Scroll the current message along the LCD by one character, rearming the |
230 | * timer if required. | 230 | * timer if required. |
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index ea7869c0d7f9..ec5e8800f8ad 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c | |||
@@ -1372,7 +1372,7 @@ static void panel_process_inputs(void) | |||
1372 | break; | 1372 | break; |
1373 | input->rise_timer = 0; | 1373 | input->rise_timer = 0; |
1374 | input->state = INPUT_ST_RISING; | 1374 | input->state = INPUT_ST_RISING; |
1375 | /* no break here, fall through */ | 1375 | /* fall through */ |
1376 | case INPUT_ST_RISING: | 1376 | case INPUT_ST_RISING: |
1377 | if ((phys_curr & input->mask) != input->value) { | 1377 | if ((phys_curr & input->mask) != input->value) { |
1378 | input->state = INPUT_ST_LOW; | 1378 | input->state = INPUT_ST_LOW; |
@@ -1385,11 +1385,11 @@ static void panel_process_inputs(void) | |||
1385 | } | 1385 | } |
1386 | input->high_timer = 0; | 1386 | input->high_timer = 0; |
1387 | input->state = INPUT_ST_HIGH; | 1387 | input->state = INPUT_ST_HIGH; |
1388 | /* no break here, fall through */ | 1388 | /* fall through */ |
1389 | case INPUT_ST_HIGH: | 1389 | case INPUT_ST_HIGH: |
1390 | if (input_state_high(input)) | 1390 | if (input_state_high(input)) |
1391 | break; | 1391 | break; |
1392 | /* no break here, fall through */ | 1392 | /* fall through */ |
1393 | case INPUT_ST_FALLING: | 1393 | case INPUT_ST_FALLING: |
1394 | input_state_falling(input); | 1394 | input_state_falling(input); |
1395 | } | 1395 | } |
diff --git a/drivers/base/core.c b/drivers/base/core.c index b2261f92f2f1..5847364f25d9 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -310,6 +310,9 @@ static void __device_link_del(struct device_link *link) | |||
310 | dev_info(link->consumer, "Dropping the link to %s\n", | 310 | dev_info(link->consumer, "Dropping the link to %s\n", |
311 | dev_name(link->supplier)); | 311 | dev_name(link->supplier)); |
312 | 312 | ||
313 | if (link->flags & DL_FLAG_PM_RUNTIME) | ||
314 | pm_runtime_drop_link(link->consumer); | ||
315 | |||
313 | list_del(&link->s_node); | 316 | list_del(&link->s_node); |
314 | list_del(&link->c_node); | 317 | list_del(&link->c_node); |
315 | device_link_free(link); | 318 | device_link_free(link); |
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index a8ac86e4d79e..6637fc319269 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c | |||
@@ -321,7 +321,8 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq) | |||
321 | return; | 321 | return; |
322 | 322 | ||
323 | if (device_may_wakeup(wirq->dev)) { | 323 | if (device_may_wakeup(wirq->dev)) { |
324 | if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) | 324 | if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && |
325 | !pm_runtime_status_suspended(wirq->dev)) | ||
325 | enable_irq(wirq->irq); | 326 | enable_irq(wirq->irq); |
326 | 327 | ||
327 | enable_irq_wake(wirq->irq); | 328 | enable_irq_wake(wirq->irq); |
@@ -343,7 +344,8 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq) | |||
343 | if (device_may_wakeup(wirq->dev)) { | 344 | if (device_may_wakeup(wirq->dev)) { |
344 | disable_irq_wake(wirq->irq); | 345 | disable_irq_wake(wirq->irq); |
345 | 346 | ||
346 | if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) | 347 | if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && |
348 | !pm_runtime_status_suspended(wirq->dev)) | ||
347 | disable_irq_nosync(wirq->irq); | 349 | disable_irq_nosync(wirq->irq); |
348 | } | 350 | } |
349 | } | 351 | } |
diff --git a/drivers/base/property.c b/drivers/base/property.c index 302236281d83..8f205f6461ed 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c | |||
@@ -1410,9 +1410,8 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, | |||
1410 | } | 1410 | } |
1411 | EXPORT_SYMBOL(fwnode_graph_parse_endpoint); | 1411 | EXPORT_SYMBOL(fwnode_graph_parse_endpoint); |
1412 | 1412 | ||
1413 | void *device_get_match_data(struct device *dev) | 1413 | const void *device_get_match_data(struct device *dev) |
1414 | { | 1414 | { |
1415 | return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, | 1415 | return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev); |
1416 | dev); | ||
1417 | } | 1416 | } |
1418 | EXPORT_SYMBOL_GPL(device_get_match_data); | 1417 | EXPORT_SYMBOL_GPL(device_get_match_data); |
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index e5aa62fcf5a8..3aaf6af3ec23 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c | |||
@@ -1758,7 +1758,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
1758 | if (unit[drive].type->code == FD_NODRIVE) | 1758 | if (unit[drive].type->code == FD_NODRIVE) |
1759 | return NULL; | 1759 | return NULL; |
1760 | *part = 0; | 1760 | *part = 0; |
1761 | return get_disk(unit[drive].gendisk); | 1761 | return get_disk_and_module(unit[drive].gendisk); |
1762 | } | 1762 | } |
1763 | 1763 | ||
1764 | static int __init amiga_floppy_probe(struct platform_device *pdev) | 1764 | static int __init amiga_floppy_probe(struct platform_device *pdev) |
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 8bc3b9fd8dd2..dfb2c2622e5a 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c | |||
@@ -1917,7 +1917,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
1917 | if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS) | 1917 | if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS) |
1918 | return NULL; | 1918 | return NULL; |
1919 | *part = 0; | 1919 | *part = 0; |
1920 | return get_disk(unit[drive].disk); | 1920 | return get_disk_and_module(unit[drive].disk); |
1921 | } | 1921 | } |
1922 | 1922 | ||
1923 | static int __init atari_floppy_init (void) | 1923 | static int __init atari_floppy_init (void) |
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 8028a3a7e7fd..deea78e485da 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
@@ -456,7 +456,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data) | |||
456 | 456 | ||
457 | mutex_lock(&brd_devices_mutex); | 457 | mutex_lock(&brd_devices_mutex); |
458 | brd = brd_init_one(MINOR(dev) / max_part, &new); | 458 | brd = brd_init_one(MINOR(dev) / max_part, &new); |
459 | kobj = brd ? get_disk(brd->brd_disk) : NULL; | 459 | kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL; |
460 | mutex_unlock(&brd_devices_mutex); | 460 | mutex_unlock(&brd_devices_mutex); |
461 | 461 | ||
462 | if (new) | 462 | if (new) |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index eae484acfbbc..8ec7235fc93b 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -4505,7 +4505,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
4505 | if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type)) | 4505 | if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type)) |
4506 | return NULL; | 4506 | return NULL; |
4507 | *part = 0; | 4507 | *part = 0; |
4508 | return get_disk(disks[drive]); | 4508 | return get_disk_and_module(disks[drive]); |
4509 | } | 4509 | } |
4510 | 4510 | ||
4511 | static int __init do_floppy_init(void) | 4511 | static int __init do_floppy_init(void) |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index d5fe720cf149..ee62d2d517bf 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -266,7 +266,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) | |||
266 | struct iov_iter i; | 266 | struct iov_iter i; |
267 | ssize_t bw; | 267 | ssize_t bw; |
268 | 268 | ||
269 | iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); | 269 | iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len); |
270 | 270 | ||
271 | file_start_write(file); | 271 | file_start_write(file); |
272 | bw = vfs_iter_write(file, &i, ppos, 0); | 272 | bw = vfs_iter_write(file, &i, ppos, 0); |
@@ -1922,7 +1922,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) | |||
1922 | if (err < 0) | 1922 | if (err < 0) |
1923 | kobj = NULL; | 1923 | kobj = NULL; |
1924 | else | 1924 | else |
1925 | kobj = get_disk(lo->lo_disk); | 1925 | kobj = get_disk_and_module(lo->lo_disk); |
1926 | mutex_unlock(&loop_index_mutex); | 1926 | mutex_unlock(&loop_index_mutex); |
1927 | 1927 | ||
1928 | *part = 0; | 1928 | *part = 0; |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 5f2a4240a204..86258b00a1d4 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -1591,7 +1591,7 @@ again: | |||
1591 | if (new_index < 0) { | 1591 | if (new_index < 0) { |
1592 | mutex_unlock(&nbd_index_mutex); | 1592 | mutex_unlock(&nbd_index_mutex); |
1593 | printk(KERN_ERR "nbd: failed to add new device\n"); | 1593 | printk(KERN_ERR "nbd: failed to add new device\n"); |
1594 | return ret; | 1594 | return new_index; |
1595 | } | 1595 | } |
1596 | nbd = idr_find(&nbd_index_idr, new_index); | 1596 | nbd = idr_find(&nbd_index_idr, new_index); |
1597 | } | 1597 | } |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 531a0915066b..c61d20c9f3f8 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt) | |||
1122 | pkt->sector = new_sector; | 1122 | pkt->sector = new_sector; |
1123 | 1123 | ||
1124 | bio_reset(pkt->bio); | 1124 | bio_reset(pkt->bio); |
1125 | bio_set_set(pkt->bio, pd->bdev); | 1125 | bio_set_dev(pkt->bio, pd->bdev); |
1126 | bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0); | 1126 | bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0); |
1127 | pkt->bio->bi_iter.bi_sector = new_sector; | 1127 | pkt->bio->bi_iter.bi_sector = new_sector; |
1128 | pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; | 1128 | pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; |
diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 84434d3ea19b..64e066eba72e 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c | |||
@@ -799,7 +799,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) | |||
799 | return NULL; | 799 | return NULL; |
800 | 800 | ||
801 | *part = 0; | 801 | *part = 0; |
802 | return get_disk(swd->unit[drive].disk); | 802 | return get_disk_and_module(swd->unit[drive].disk); |
803 | } | 803 | } |
804 | 804 | ||
805 | static int swim_add_floppy(struct swim_priv *swd, enum drive_location location) | 805 | static int swim_add_floppy(struct swim_priv *swd, enum drive_location location) |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index e126e4cac2ca..92ec1bbece51 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -262,6 +262,7 @@ static DEFINE_SPINLOCK(minor_lock); | |||
262 | 262 | ||
263 | static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); | 263 | static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); |
264 | static void blkfront_gather_backend_features(struct blkfront_info *info); | 264 | static void blkfront_gather_backend_features(struct blkfront_info *info); |
265 | static int negotiate_mq(struct blkfront_info *info); | ||
265 | 266 | ||
266 | static int get_id_from_freelist(struct blkfront_ring_info *rinfo) | 267 | static int get_id_from_freelist(struct blkfront_ring_info *rinfo) |
267 | { | 268 | { |
@@ -1774,11 +1775,18 @@ static int talk_to_blkback(struct xenbus_device *dev, | |||
1774 | unsigned int i, max_page_order; | 1775 | unsigned int i, max_page_order; |
1775 | unsigned int ring_page_order; | 1776 | unsigned int ring_page_order; |
1776 | 1777 | ||
1778 | if (!info) | ||
1779 | return -ENODEV; | ||
1780 | |||
1777 | max_page_order = xenbus_read_unsigned(info->xbdev->otherend, | 1781 | max_page_order = xenbus_read_unsigned(info->xbdev->otherend, |
1778 | "max-ring-page-order", 0); | 1782 | "max-ring-page-order", 0); |
1779 | ring_page_order = min(xen_blkif_max_ring_order, max_page_order); | 1783 | ring_page_order = min(xen_blkif_max_ring_order, max_page_order); |
1780 | info->nr_ring_pages = 1 << ring_page_order; | 1784 | info->nr_ring_pages = 1 << ring_page_order; |
1781 | 1785 | ||
1786 | err = negotiate_mq(info); | ||
1787 | if (err) | ||
1788 | goto destroy_blkring; | ||
1789 | |||
1782 | for (i = 0; i < info->nr_rings; i++) { | 1790 | for (i = 0; i < info->nr_rings; i++) { |
1783 | struct blkfront_ring_info *rinfo = &info->rinfo[i]; | 1791 | struct blkfront_ring_info *rinfo = &info->rinfo[i]; |
1784 | 1792 | ||
@@ -1978,11 +1986,6 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
1978 | } | 1986 | } |
1979 | 1987 | ||
1980 | info->xbdev = dev; | 1988 | info->xbdev = dev; |
1981 | err = negotiate_mq(info); | ||
1982 | if (err) { | ||
1983 | kfree(info); | ||
1984 | return err; | ||
1985 | } | ||
1986 | 1989 | ||
1987 | mutex_init(&info->mutex); | 1990 | mutex_init(&info->mutex); |
1988 | info->vdevice = vdevice; | 1991 | info->vdevice = vdevice; |
@@ -2099,10 +2102,6 @@ static int blkfront_resume(struct xenbus_device *dev) | |||
2099 | 2102 | ||
2100 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); | 2103 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); |
2101 | 2104 | ||
2102 | err = negotiate_mq(info); | ||
2103 | if (err) | ||
2104 | return err; | ||
2105 | |||
2106 | err = talk_to_blkback(dev, info); | 2105 | err = talk_to_blkback(dev, info); |
2107 | if (!err) | 2106 | if (!err) |
2108 | blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); | 2107 | blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); |
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 41c95c9b2ab4..8f9130ab5887 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c | |||
@@ -332,7 +332,7 @@ static const struct block_device_operations z2_fops = | |||
332 | static struct kobject *z2_find(dev_t dev, int *part, void *data) | 332 | static struct kobject *z2_find(dev_t dev, int *part, void *data) |
333 | { | 333 | { |
334 | *part = 0; | 334 | *part = 0; |
335 | return get_disk(z2ram_gendisk); | 335 | return get_disk_and_module(z2ram_gendisk); |
336 | } | 336 | } |
337 | 337 | ||
338 | static struct request_queue *z2_queue; | 338 | static struct request_queue *z2_queue; |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 2a55380ad730..366a49c7c08f 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -21,6 +21,7 @@ | |||
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/dmi.h> | ||
24 | #include <linux/module.h> | 25 | #include <linux/module.h> |
25 | #include <linux/usb.h> | 26 | #include <linux/usb.h> |
26 | #include <linux/usb/quirks.h> | 27 | #include <linux/usb/quirks.h> |
@@ -230,7 +231,6 @@ static const struct usb_device_id blacklist_table[] = { | |||
230 | { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, | 231 | { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, |
231 | { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, | 232 | { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, |
232 | { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, | 233 | { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, |
233 | { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, | ||
234 | { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, | 234 | { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, |
235 | { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, | 235 | { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, |
236 | { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, | 236 | { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, |
@@ -263,6 +263,7 @@ static const struct usb_device_id blacklist_table[] = { | |||
263 | { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, | 263 | { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, |
264 | 264 | ||
265 | /* QCA ROME chipset */ | 265 | /* QCA ROME chipset */ |
266 | { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME }, | ||
266 | { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, | 267 | { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, |
267 | { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME }, | 268 | { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME }, |
268 | { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME }, | 269 | { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME }, |
@@ -379,6 +380,21 @@ static const struct usb_device_id blacklist_table[] = { | |||
379 | { } /* Terminating entry */ | 380 | { } /* Terminating entry */ |
380 | }; | 381 | }; |
381 | 382 | ||
383 | /* The Bluetooth USB module build into some devices needs to be reset on resume, | ||
384 | * this is a problem with the platform (likely shutting off all power) not with | ||
385 | * the module itself. So we use a DMI list to match known broken platforms. | ||
386 | */ | ||
387 | static const struct dmi_system_id btusb_needs_reset_resume_table[] = { | ||
388 | { | ||
389 | /* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */ | ||
390 | .matches = { | ||
391 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
392 | DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"), | ||
393 | }, | ||
394 | }, | ||
395 | {} | ||
396 | }; | ||
397 | |||
382 | #define BTUSB_MAX_ISOC_FRAMES 10 | 398 | #define BTUSB_MAX_ISOC_FRAMES 10 |
383 | 399 | ||
384 | #define BTUSB_INTR_RUNNING 0 | 400 | #define BTUSB_INTR_RUNNING 0 |
@@ -2945,6 +2961,9 @@ static int btusb_probe(struct usb_interface *intf, | |||
2945 | hdev->send = btusb_send_frame; | 2961 | hdev->send = btusb_send_frame; |
2946 | hdev->notify = btusb_notify; | 2962 | hdev->notify = btusb_notify; |
2947 | 2963 | ||
2964 | if (dmi_check_system(btusb_needs_reset_resume_table)) | ||
2965 | interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; | ||
2966 | |||
2948 | #ifdef CONFIG_PM | 2967 | #ifdef CONFIG_PM |
2949 | err = btusb_config_oob_wake(hdev); | 2968 | err = btusb_config_oob_wake(hdev); |
2950 | if (err) | 2969 | if (err) |
@@ -3031,12 +3050,6 @@ static int btusb_probe(struct usb_interface *intf, | |||
3031 | if (id->driver_info & BTUSB_QCA_ROME) { | 3050 | if (id->driver_info & BTUSB_QCA_ROME) { |
3032 | data->setup_on_usb = btusb_setup_qca; | 3051 | data->setup_on_usb = btusb_setup_qca; |
3033 | hdev->set_bdaddr = btusb_set_bdaddr_ath3012; | 3052 | hdev->set_bdaddr = btusb_set_bdaddr_ath3012; |
3034 | |||
3035 | /* QCA Rome devices lose their updated firmware over suspend, | ||
3036 | * but the USB hub doesn't notice any status change. | ||
3037 | * explicitly request a device reset on resume. | ||
3038 | */ | ||
3039 | interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; | ||
3040 | } | 3053 | } |
3041 | 3054 | ||
3042 | #ifdef CONFIG_BT_HCIBTUSB_RTL | 3055 | #ifdef CONFIG_BT_HCIBTUSB_RTL |
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 0438a64b8185..40b9fb247010 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c | |||
@@ -244,7 +244,9 @@ static irqreturn_t bcm_host_wake(int irq, void *data) | |||
244 | 244 | ||
245 | bt_dev_dbg(bdev, "Host wake IRQ"); | 245 | bt_dev_dbg(bdev, "Host wake IRQ"); |
246 | 246 | ||
247 | pm_request_resume(bdev->dev); | 247 | pm_runtime_get(bdev->dev); |
248 | pm_runtime_mark_last_busy(bdev->dev); | ||
249 | pm_runtime_put_autosuspend(bdev->dev); | ||
248 | 250 | ||
249 | return IRQ_HANDLED; | 251 | return IRQ_HANDLED; |
250 | } | 252 | } |
@@ -301,7 +303,7 @@ static const struct bcm_set_sleep_mode default_sleep_params = { | |||
301 | .usb_auto_sleep = 0, | 303 | .usb_auto_sleep = 0, |
302 | .usb_resume_timeout = 0, | 304 | .usb_resume_timeout = 0, |
303 | .break_to_host = 0, | 305 | .break_to_host = 0, |
304 | .pulsed_host_wake = 0, | 306 | .pulsed_host_wake = 1, |
305 | }; | 307 | }; |
306 | 308 | ||
307 | static int bcm_setup_sleep(struct hci_uart *hu) | 309 | static int bcm_setup_sleep(struct hci_uart *hu) |
@@ -586,8 +588,11 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count) | |||
586 | } else if (!bcm->rx_skb) { | 588 | } else if (!bcm->rx_skb) { |
587 | /* Delay auto-suspend when receiving completed packet */ | 589 | /* Delay auto-suspend when receiving completed packet */ |
588 | mutex_lock(&bcm_device_lock); | 590 | mutex_lock(&bcm_device_lock); |
589 | if (bcm->dev && bcm_device_exists(bcm->dev)) | 591 | if (bcm->dev && bcm_device_exists(bcm->dev)) { |
590 | pm_request_resume(bcm->dev->dev); | 592 | pm_runtime_get(bcm->dev->dev); |
593 | pm_runtime_mark_last_busy(bcm->dev->dev); | ||
594 | pm_runtime_put_autosuspend(bcm->dev->dev); | ||
595 | } | ||
591 | mutex_unlock(&bcm_device_lock); | 596 | mutex_unlock(&bcm_device_lock); |
592 | } | 597 | } |
593 | 598 | ||
@@ -922,12 +927,13 @@ static int bcm_get_resources(struct bcm_device *dev) | |||
922 | 927 | ||
923 | dev->clk = devm_clk_get(dev->dev, NULL); | 928 | dev->clk = devm_clk_get(dev->dev, NULL); |
924 | 929 | ||
925 | dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup", | 930 | dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup", |
926 | GPIOD_OUT_LOW); | 931 | GPIOD_OUT_LOW); |
927 | if (IS_ERR(dev->device_wakeup)) | 932 | if (IS_ERR(dev->device_wakeup)) |
928 | return PTR_ERR(dev->device_wakeup); | 933 | return PTR_ERR(dev->device_wakeup); |
929 | 934 | ||
930 | dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW); | 935 | dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown", |
936 | GPIOD_OUT_LOW); | ||
931 | if (IS_ERR(dev->shutdown)) | 937 | if (IS_ERR(dev->shutdown)) |
932 | return PTR_ERR(dev->shutdown); | 938 | return PTR_ERR(dev->shutdown); |
933 | 939 | ||
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 4d46003c46cf..cdaeeea7999c 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c | |||
@@ -630,7 +630,7 @@ static int sysc_init_dts_quirks(struct sysc *ddata) | |||
630 | for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) { | 630 | for (i = 0; i < ARRAY_SIZE(sysc_dts_quirks); i++) { |
631 | prop = of_get_property(np, sysc_dts_quirks[i].name, &len); | 631 | prop = of_get_property(np, sysc_dts_quirks[i].name, &len); |
632 | if (!prop) | 632 | if (!prop) |
633 | break; | 633 | continue; |
634 | 634 | ||
635 | ddata->cfg.quirks |= sysc_dts_quirks[i].mask; | 635 | ddata->cfg.quirks |= sysc_dts_quirks[i].mask; |
636 | } | 636 | } |
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index d1f5bb534e0e..6e9df558325b 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c | |||
@@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng) | |||
162 | /* Enable secondary noise source on CPUs where it is present. */ | 162 | /* Enable secondary noise source on CPUs where it is present. */ |
163 | 163 | ||
164 | /* Nehemiah stepping 8 and higher */ | 164 | /* Nehemiah stepping 8 and higher */ |
165 | if ((c->x86_model == 9) && (c->x86_mask > 7)) | 165 | if ((c->x86_model == 9) && (c->x86_stepping > 7)) |
166 | lo |= VIA_NOISESRC2; | 166 | lo |= VIA_NOISESRC2; |
167 | 167 | ||
168 | /* Esther */ | 168 | /* Esther */ |
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index 4d1dc8b46877..f95b9c75175b 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c | |||
@@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, | |||
457 | size_t count) | 457 | size_t count) |
458 | { | 458 | { |
459 | int size = 0; | 459 | int size = 0; |
460 | int expected; | 460 | u32 expected; |
461 | 461 | ||
462 | if (!chip) | 462 | if (!chip) |
463 | return -EBUSY; | 463 | return -EBUSY; |
@@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, | |||
474 | } | 474 | } |
475 | 475 | ||
476 | expected = be32_to_cpu(*(__be32 *)(buf + 2)); | 476 | expected = be32_to_cpu(*(__be32 *)(buf + 2)); |
477 | if (expected > count) { | 477 | if (expected > count || expected < TPM_HEADER_SIZE) { |
478 | size = -EIO; | 478 | size = -EIO; |
479 | goto out; | 479 | goto out; |
480 | } | 480 | } |
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 76df4fbcf089..9e80a953d693 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c | |||
@@ -1190,6 +1190,10 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max) | |||
1190 | break; | 1190 | break; |
1191 | 1191 | ||
1192 | recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); | 1192 | recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); |
1193 | if (recd > num_bytes) { | ||
1194 | total = -EFAULT; | ||
1195 | break; | ||
1196 | } | ||
1193 | 1197 | ||
1194 | rlength = be32_to_cpu(tpm_cmd.header.out.length); | 1198 | rlength = be32_to_cpu(tpm_cmd.header.out.length); |
1195 | if (rlength < offsetof(struct tpm_getrandom_out, rng_data) + | 1199 | if (rlength < offsetof(struct tpm_getrandom_out, rng_data) + |
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index c17e75348a99..a700f8f9ead7 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c | |||
@@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, | |||
683 | if (!rc) { | 683 | if (!rc) { |
684 | data_len = be16_to_cpup( | 684 | data_len = be16_to_cpup( |
685 | (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); | 685 | (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); |
686 | if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) { | ||
687 | rc = -EFAULT; | ||
688 | goto out; | ||
689 | } | ||
686 | 690 | ||
687 | rlength = be32_to_cpu(((struct tpm2_cmd *)&buf) | 691 | rlength = be32_to_cpu(((struct tpm2_cmd *)&buf) |
688 | ->header.out.length); | 692 | ->header.out.length); |
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index c1dd39eaaeeb..6116cd05e228 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c | |||
@@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) | |||
473 | static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) | 473 | static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) |
474 | { | 474 | { |
475 | int size = 0; | 475 | int size = 0; |
476 | int expected, status; | 476 | int status; |
477 | u32 expected; | ||
477 | 478 | ||
478 | if (count < TPM_HEADER_SIZE) { | 479 | if (count < TPM_HEADER_SIZE) { |
479 | size = -EIO; | 480 | size = -EIO; |
@@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
488 | } | 489 | } |
489 | 490 | ||
490 | expected = be32_to_cpu(*(__be32 *)(buf + 2)); | 491 | expected = be32_to_cpu(*(__be32 *)(buf + 2)); |
491 | if ((size_t) expected > count) { | 492 | if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) { |
492 | size = -EIO; | 493 | size = -EIO; |
493 | goto out; | 494 | goto out; |
494 | } | 495 | } |
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index c6428771841f..caa86b19c76d 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c | |||
@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
281 | struct device *dev = chip->dev.parent; | 281 | struct device *dev = chip->dev.parent; |
282 | struct i2c_client *client = to_i2c_client(dev); | 282 | struct i2c_client *client = to_i2c_client(dev); |
283 | s32 rc; | 283 | s32 rc; |
284 | int expected, status, burst_count, retries, size = 0; | 284 | int status; |
285 | int burst_count; | ||
286 | int retries; | ||
287 | int size = 0; | ||
288 | u32 expected; | ||
285 | 289 | ||
286 | if (count < TPM_HEADER_SIZE) { | 290 | if (count < TPM_HEADER_SIZE) { |
287 | i2c_nuvoton_ready(chip); /* return to idle */ | 291 | i2c_nuvoton_ready(chip); /* return to idle */ |
@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
323 | * to machine native | 327 | * to machine native |
324 | */ | 328 | */ |
325 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); | 329 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); |
326 | if (expected > count) { | 330 | if (expected > count || expected < size) { |
327 | dev_err(dev, "%s() expected > count\n", __func__); | 331 | dev_err(dev, "%s() expected > count\n", __func__); |
328 | size = -EIO; | 332 | size = -EIO; |
329 | continue; | 333 | continue; |
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 183a5f54d875..da074e3db19b 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c | |||
@@ -270,7 +270,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
270 | { | 270 | { |
271 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); | 271 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); |
272 | int size = 0; | 272 | int size = 0; |
273 | int expected, status; | 273 | int status; |
274 | u32 expected; | ||
274 | 275 | ||
275 | if (count < TPM_HEADER_SIZE) { | 276 | if (count < TPM_HEADER_SIZE) { |
276 | size = -EIO; | 277 | size = -EIO; |
@@ -285,7 +286,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) | |||
285 | } | 286 | } |
286 | 287 | ||
287 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); | 288 | expected = be32_to_cpu(*(__be32 *) (buf + 2)); |
288 | if (expected > count) { | 289 | if (expected > count || expected < TPM_HEADER_SIZE) { |
289 | size = -EIO; | 290 | size = -EIO; |
290 | goto out; | 291 | goto out; |
291 | } | 292 | } |
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 44301a3d9963..a07f6451694a 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c | |||
@@ -449,17 +449,17 @@ struct bcm2835_pll_ana_bits { | |||
449 | static const struct bcm2835_pll_ana_bits bcm2835_ana_default = { | 449 | static const struct bcm2835_pll_ana_bits bcm2835_ana_default = { |
450 | .mask0 = 0, | 450 | .mask0 = 0, |
451 | .set0 = 0, | 451 | .set0 = 0, |
452 | .mask1 = (u32)~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK), | 452 | .mask1 = A2W_PLL_KI_MASK | A2W_PLL_KP_MASK, |
453 | .set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT), | 453 | .set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT), |
454 | .mask3 = (u32)~A2W_PLL_KA_MASK, | 454 | .mask3 = A2W_PLL_KA_MASK, |
455 | .set3 = (2 << A2W_PLL_KA_SHIFT), | 455 | .set3 = (2 << A2W_PLL_KA_SHIFT), |
456 | .fb_prediv_mask = BIT(14), | 456 | .fb_prediv_mask = BIT(14), |
457 | }; | 457 | }; |
458 | 458 | ||
459 | static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = { | 459 | static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = { |
460 | .mask0 = (u32)~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK), | 460 | .mask0 = A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK, |
461 | .set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT), | 461 | .set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT), |
462 | .mask1 = (u32)~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK), | 462 | .mask1 = A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK, |
463 | .set1 = (6 << A2W_PLLH_KP_SHIFT), | 463 | .set1 = (6 << A2W_PLLH_KP_SHIFT), |
464 | .mask3 = 0, | 464 | .mask3 = 0, |
465 | .set3 = 0, | 465 | .set3 = 0, |
@@ -623,8 +623,10 @@ static int bcm2835_pll_on(struct clk_hw *hw) | |||
623 | ~A2W_PLL_CTRL_PWRDN); | 623 | ~A2W_PLL_CTRL_PWRDN); |
624 | 624 | ||
625 | /* Take the PLL out of reset. */ | 625 | /* Take the PLL out of reset. */ |
626 | spin_lock(&cprman->regs_lock); | ||
626 | cprman_write(cprman, data->cm_ctrl_reg, | 627 | cprman_write(cprman, data->cm_ctrl_reg, |
627 | cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST); | 628 | cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST); |
629 | spin_unlock(&cprman->regs_lock); | ||
628 | 630 | ||
629 | /* Wait for the PLL to lock. */ | 631 | /* Wait for the PLL to lock. */ |
630 | timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS); | 632 | timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS); |
@@ -701,9 +703,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, | |||
701 | } | 703 | } |
702 | 704 | ||
703 | /* Unmask the reference clock from the oscillator. */ | 705 | /* Unmask the reference clock from the oscillator. */ |
706 | spin_lock(&cprman->regs_lock); | ||
704 | cprman_write(cprman, A2W_XOSC_CTRL, | 707 | cprman_write(cprman, A2W_XOSC_CTRL, |
705 | cprman_read(cprman, A2W_XOSC_CTRL) | | 708 | cprman_read(cprman, A2W_XOSC_CTRL) | |
706 | data->reference_enable_mask); | 709 | data->reference_enable_mask); |
710 | spin_unlock(&cprman->regs_lock); | ||
707 | 711 | ||
708 | if (do_ana_setup_first) | 712 | if (do_ana_setup_first) |
709 | bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana); | 713 | bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana); |
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c index 9f7f931d6b2f..5eb50c31e455 100644 --- a/drivers/clk/clk-aspeed.c +++ b/drivers/clk/clk-aspeed.c | |||
@@ -205,6 +205,18 @@ static const struct aspeed_clk_soc_data ast2400_data = { | |||
205 | .calc_pll = aspeed_ast2400_calc_pll, | 205 | .calc_pll = aspeed_ast2400_calc_pll, |
206 | }; | 206 | }; |
207 | 207 | ||
208 | static int aspeed_clk_is_enabled(struct clk_hw *hw) | ||
209 | { | ||
210 | struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); | ||
211 | u32 clk = BIT(gate->clock_idx); | ||
212 | u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk; | ||
213 | u32 reg; | ||
214 | |||
215 | regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, ®); | ||
216 | |||
217 | return ((reg & clk) == enval) ? 1 : 0; | ||
218 | } | ||
219 | |||
208 | static int aspeed_clk_enable(struct clk_hw *hw) | 220 | static int aspeed_clk_enable(struct clk_hw *hw) |
209 | { | 221 | { |
210 | struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); | 222 | struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); |
@@ -215,6 +227,11 @@ static int aspeed_clk_enable(struct clk_hw *hw) | |||
215 | 227 | ||
216 | spin_lock_irqsave(gate->lock, flags); | 228 | spin_lock_irqsave(gate->lock, flags); |
217 | 229 | ||
230 | if (aspeed_clk_is_enabled(hw)) { | ||
231 | spin_unlock_irqrestore(gate->lock, flags); | ||
232 | return 0; | ||
233 | } | ||
234 | |||
218 | if (gate->reset_idx >= 0) { | 235 | if (gate->reset_idx >= 0) { |
219 | /* Put IP in reset */ | 236 | /* Put IP in reset */ |
220 | regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, rst); | 237 | regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, rst); |
@@ -255,17 +272,6 @@ static void aspeed_clk_disable(struct clk_hw *hw) | |||
255 | spin_unlock_irqrestore(gate->lock, flags); | 272 | spin_unlock_irqrestore(gate->lock, flags); |
256 | } | 273 | } |
257 | 274 | ||
258 | static int aspeed_clk_is_enabled(struct clk_hw *hw) | ||
259 | { | ||
260 | struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); | ||
261 | u32 clk = BIT(gate->clock_idx); | ||
262 | u32 reg; | ||
263 | |||
264 | regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, ®); | ||
265 | |||
266 | return (reg & clk) ? 0 : 1; | ||
267 | } | ||
268 | |||
269 | static const struct clk_ops aspeed_clk_gate_ops = { | 275 | static const struct clk_ops aspeed_clk_gate_ops = { |
270 | .enable = aspeed_clk_enable, | 276 | .enable = aspeed_clk_enable, |
271 | .disable = aspeed_clk_disable, | 277 | .disable = aspeed_clk_disable, |
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 0f686a9dac3e..076d4244d672 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
@@ -1125,8 +1125,10 @@ static int clk_core_round_rate_nolock(struct clk_core *core, | |||
1125 | { | 1125 | { |
1126 | lockdep_assert_held(&prepare_lock); | 1126 | lockdep_assert_held(&prepare_lock); |
1127 | 1127 | ||
1128 | if (!core) | 1128 | if (!core) { |
1129 | req->rate = 0; | ||
1129 | return 0; | 1130 | return 0; |
1131 | } | ||
1130 | 1132 | ||
1131 | clk_core_init_rate_req(core, req); | 1133 | clk_core_init_rate_req(core, req); |
1132 | 1134 | ||
@@ -2309,8 +2311,11 @@ static int clk_core_set_phase_nolock(struct clk_core *core, int degrees) | |||
2309 | 2311 | ||
2310 | trace_clk_set_phase(core, degrees); | 2312 | trace_clk_set_phase(core, degrees); |
2311 | 2313 | ||
2312 | if (core->ops->set_phase) | 2314 | if (core->ops->set_phase) { |
2313 | ret = core->ops->set_phase(core->hw, degrees); | 2315 | ret = core->ops->set_phase(core->hw, degrees); |
2316 | if (!ret) | ||
2317 | core->phase = degrees; | ||
2318 | } | ||
2314 | 2319 | ||
2315 | trace_clk_set_phase_complete(core, degrees); | 2320 | trace_clk_set_phase_complete(core, degrees); |
2316 | 2321 | ||
@@ -2968,22 +2973,37 @@ static int __clk_core_init(struct clk_core *core) | |||
2968 | core->rate = core->req_rate = rate; | 2973 | core->rate = core->req_rate = rate; |
2969 | 2974 | ||
2970 | /* | 2975 | /* |
2976 | * Enable CLK_IS_CRITICAL clocks so newly added critical clocks | ||
2977 | * don't get accidentally disabled when walking the orphan tree and | ||
2978 | * reparenting clocks | ||
2979 | */ | ||
2980 | if (core->flags & CLK_IS_CRITICAL) { | ||
2981 | unsigned long flags; | ||
2982 | |||
2983 | clk_core_prepare(core); | ||
2984 | |||
2985 | flags = clk_enable_lock(); | ||
2986 | clk_core_enable(core); | ||
2987 | clk_enable_unlock(flags); | ||
2988 | } | ||
2989 | |||
2990 | /* | ||
2971 | * walk the list of orphan clocks and reparent any that newly finds a | 2991 | * walk the list of orphan clocks and reparent any that newly finds a |
2972 | * parent. | 2992 | * parent. |
2973 | */ | 2993 | */ |
2974 | hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { | 2994 | hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { |
2975 | struct clk_core *parent = __clk_init_parent(orphan); | 2995 | struct clk_core *parent = __clk_init_parent(orphan); |
2976 | unsigned long flags; | ||
2977 | 2996 | ||
2978 | /* | 2997 | /* |
2979 | * we could call __clk_set_parent, but that would result in a | 2998 | * We need to use __clk_set_parent_before() and _after() to |
2980 | * redundant call to the .set_rate op, if it exists | 2999 | * to properly migrate any prepare/enable count of the orphan |
3000 | * clock. This is important for CLK_IS_CRITICAL clocks, which | ||
3001 | * are enabled during init but might not have a parent yet. | ||
2981 | */ | 3002 | */ |
2982 | if (parent) { | 3003 | if (parent) { |
2983 | /* update the clk tree topology */ | 3004 | /* update the clk tree topology */ |
2984 | flags = clk_enable_lock(); | 3005 | __clk_set_parent_before(orphan, parent); |
2985 | clk_reparent(orphan, parent); | 3006 | __clk_set_parent_after(orphan, parent, NULL); |
2986 | clk_enable_unlock(flags); | ||
2987 | __clk_recalc_accuracies(orphan); | 3007 | __clk_recalc_accuracies(orphan); |
2988 | __clk_recalc_rates(orphan, 0); | 3008 | __clk_recalc_rates(orphan, 0); |
2989 | } | 3009 | } |
@@ -3000,16 +3020,6 @@ static int __clk_core_init(struct clk_core *core) | |||
3000 | if (core->ops->init) | 3020 | if (core->ops->init) |
3001 | core->ops->init(core->hw); | 3021 | core->ops->init(core->hw); |
3002 | 3022 | ||
3003 | if (core->flags & CLK_IS_CRITICAL) { | ||
3004 | unsigned long flags; | ||
3005 | |||
3006 | clk_core_prepare(core); | ||
3007 | |||
3008 | flags = clk_enable_lock(); | ||
3009 | clk_core_enable(core); | ||
3010 | clk_enable_unlock(flags); | ||
3011 | } | ||
3012 | |||
3013 | kref_init(&core->ref); | 3023 | kref_init(&core->ref); |
3014 | out: | 3024 | out: |
3015 | clk_pm_runtime_put(core); | 3025 | clk_pm_runtime_put(core); |
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c index 9b6c72bbddf9..e8b2c43b1bb8 100644 --- a/drivers/clk/hisilicon/clk-hi3660-stub.c +++ b/drivers/clk/hisilicon/clk-hi3660-stub.c | |||
@@ -149,6 +149,8 @@ static int hi3660_stub_clk_probe(struct platform_device *pdev) | |||
149 | return PTR_ERR(stub_clk_chan.mbox); | 149 | return PTR_ERR(stub_clk_chan.mbox); |
150 | 150 | ||
151 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 151 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
152 | if (!res) | ||
153 | return -EINVAL; | ||
152 | freq_reg = devm_ioremap(dev, res->start, resource_size(res)); | 154 | freq_reg = devm_ioremap(dev, res->start, resource_size(res)); |
153 | if (!freq_reg) | 155 | if (!freq_reg) |
154 | return -ENOMEM; | 156 | return -ENOMEM; |
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c index c864992e6983..caa8bd40692c 100644 --- a/drivers/clk/imx/clk-imx51-imx53.c +++ b/drivers/clk/imx/clk-imx51-imx53.c | |||
@@ -131,7 +131,17 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_ | |||
131 | static struct clk *clk[IMX5_CLK_END]; | 131 | static struct clk *clk[IMX5_CLK_END]; |
132 | static struct clk_onecell_data clk_data; | 132 | static struct clk_onecell_data clk_data; |
133 | 133 | ||
134 | static struct clk ** const uart_clks[] __initconst = { | 134 | static struct clk ** const uart_clks_mx51[] __initconst = { |
135 | &clk[IMX5_CLK_UART1_IPG_GATE], | ||
136 | &clk[IMX5_CLK_UART1_PER_GATE], | ||
137 | &clk[IMX5_CLK_UART2_IPG_GATE], | ||
138 | &clk[IMX5_CLK_UART2_PER_GATE], | ||
139 | &clk[IMX5_CLK_UART3_IPG_GATE], | ||
140 | &clk[IMX5_CLK_UART3_PER_GATE], | ||
141 | NULL | ||
142 | }; | ||
143 | |||
144 | static struct clk ** const uart_clks_mx50_mx53[] __initconst = { | ||
135 | &clk[IMX5_CLK_UART1_IPG_GATE], | 145 | &clk[IMX5_CLK_UART1_IPG_GATE], |
136 | &clk[IMX5_CLK_UART1_PER_GATE], | 146 | &clk[IMX5_CLK_UART1_PER_GATE], |
137 | &clk[IMX5_CLK_UART2_IPG_GATE], | 147 | &clk[IMX5_CLK_UART2_IPG_GATE], |
@@ -321,8 +331,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base) | |||
321 | clk_prepare_enable(clk[IMX5_CLK_TMAX1]); | 331 | clk_prepare_enable(clk[IMX5_CLK_TMAX1]); |
322 | clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */ | 332 | clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */ |
323 | clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */ | 333 | clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */ |
324 | |||
325 | imx_register_uart_clocks(uart_clks); | ||
326 | } | 334 | } |
327 | 335 | ||
328 | static void __init mx50_clocks_init(struct device_node *np) | 336 | static void __init mx50_clocks_init(struct device_node *np) |
@@ -388,6 +396,8 @@ static void __init mx50_clocks_init(struct device_node *np) | |||
388 | 396 | ||
389 | r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000); | 397 | r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000); |
390 | clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r); | 398 | clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r); |
399 | |||
400 | imx_register_uart_clocks(uart_clks_mx50_mx53); | ||
391 | } | 401 | } |
392 | CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init); | 402 | CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init); |
393 | 403 | ||
@@ -477,6 +487,8 @@ static void __init mx51_clocks_init(struct device_node *np) | |||
477 | val = readl(MXC_CCM_CLPCR); | 487 | val = readl(MXC_CCM_CLPCR); |
478 | val |= 1 << 23; | 488 | val |= 1 << 23; |
479 | writel(val, MXC_CCM_CLPCR); | 489 | writel(val, MXC_CCM_CLPCR); |
490 | |||
491 | imx_register_uart_clocks(uart_clks_mx51); | ||
480 | } | 492 | } |
481 | CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init); | 493 | CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init); |
482 | 494 | ||
@@ -606,5 +618,7 @@ static void __init mx53_clocks_init(struct device_node *np) | |||
606 | 618 | ||
607 | r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000); | 619 | r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000); |
608 | clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r); | 620 | clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r); |
621 | |||
622 | imx_register_uart_clocks(uart_clks_mx50_mx53); | ||
609 | } | 623 | } |
610 | CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init); | 624 | CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init); |
diff --git a/drivers/clk/qcom/apcs-msm8916.c b/drivers/clk/qcom/apcs-msm8916.c index 246957f1a413..b1cc8dbcd327 100644 --- a/drivers/clk/qcom/apcs-msm8916.c +++ b/drivers/clk/qcom/apcs-msm8916.c | |||
@@ -49,11 +49,10 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev) | |||
49 | struct clk_regmap_mux_div *a53cc; | 49 | struct clk_regmap_mux_div *a53cc; |
50 | struct regmap *regmap; | 50 | struct regmap *regmap; |
51 | struct clk_init_data init = { }; | 51 | struct clk_init_data init = { }; |
52 | int ret; | 52 | int ret = -ENODEV; |
53 | 53 | ||
54 | regmap = dev_get_regmap(parent, NULL); | 54 | regmap = dev_get_regmap(parent, NULL); |
55 | if (IS_ERR(regmap)) { | 55 | if (!regmap) { |
56 | ret = PTR_ERR(regmap); | ||
57 | dev_err(dev, "failed to get regmap: %d\n", ret); | 56 | dev_err(dev, "failed to get regmap: %d\n", ret); |
58 | return ret; | 57 | return ret; |
59 | } | 58 | } |
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 72b16ed1012b..3b97f60540ad 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c | |||
@@ -762,7 +762,7 @@ static struct ccu_mp out_a_clk = { | |||
762 | .features = CCU_FEATURE_FIXED_PREDIV, | 762 | .features = CCU_FEATURE_FIXED_PREDIV, |
763 | .hw.init = CLK_HW_INIT_PARENTS("out-a", | 763 | .hw.init = CLK_HW_INIT_PARENTS("out-a", |
764 | clk_out_parents, | 764 | clk_out_parents, |
765 | &ccu_div_ops, | 765 | &ccu_mp_ops, |
766 | 0), | 766 | 0), |
767 | }, | 767 | }, |
768 | }; | 768 | }; |
@@ -783,7 +783,7 @@ static struct ccu_mp out_b_clk = { | |||
783 | .features = CCU_FEATURE_FIXED_PREDIV, | 783 | .features = CCU_FEATURE_FIXED_PREDIV, |
784 | .hw.init = CLK_HW_INIT_PARENTS("out-b", | 784 | .hw.init = CLK_HW_INIT_PARENTS("out-b", |
785 | clk_out_parents, | 785 | clk_out_parents, |
786 | &ccu_div_ops, | 786 | &ccu_mp_ops, |
787 | 0), | 787 | 0), |
788 | }, | 788 | }, |
789 | }; | 789 | }; |
@@ -804,7 +804,7 @@ static struct ccu_mp out_c_clk = { | |||
804 | .features = CCU_FEATURE_FIXED_PREDIV, | 804 | .features = CCU_FEATURE_FIXED_PREDIV, |
805 | .hw.init = CLK_HW_INIT_PARENTS("out-c", | 805 | .hw.init = CLK_HW_INIT_PARENTS("out-c", |
806 | clk_out_parents, | 806 | clk_out_parents, |
807 | &ccu_div_ops, | 807 | &ccu_mp_ops, |
808 | 0), | 808 | 0), |
809 | }, | 809 | }, |
810 | }; | 810 | }; |
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c index 612491a26070..12e0a2d19911 100644 --- a/drivers/clk/ti/clk-33xx.c +++ b/drivers/clk/ti/clk-33xx.c | |||
@@ -45,7 +45,7 @@ static const struct omap_clkctrl_bit_data am3_gpio4_bit_data[] __initconst = { | |||
45 | 45 | ||
46 | static const struct omap_clkctrl_reg_data am3_l4_per_clkctrl_regs[] __initconst = { | 46 | static const struct omap_clkctrl_reg_data am3_l4_per_clkctrl_regs[] __initconst = { |
47 | { AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" }, | 47 | { AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" }, |
48 | { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP, "lcd_gclk", "lcdc_clkdm" }, | 48 | { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk", "lcdc_clkdm" }, |
49 | { AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" }, | 49 | { AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" }, |
50 | { AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, | 50 | { AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" }, |
51 | { AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" }, | 51 | { AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" }, |
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c index 2b7c2e017665..63c5ddb50187 100644 --- a/drivers/clk/ti/clk-43xx.c +++ b/drivers/clk/ti/clk-43xx.c | |||
@@ -187,7 +187,7 @@ static const struct omap_clkctrl_reg_data am4_l4_per_clkctrl_regs[] __initconst | |||
187 | { AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, | 187 | { AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, |
188 | { AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, | 188 | { AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" }, |
189 | { AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" }, | 189 | { AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" }, |
190 | { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "disp_clk", "dss_clkdm" }, | 190 | { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk", "dss_clkdm" }, |
191 | { AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" }, | 191 | { AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" }, |
192 | { 0 }, | 192 | { 0 }, |
193 | }; | 193 | }; |
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c index afa0d6bfc5c1..421b05392220 100644 --- a/drivers/clk/ti/clkctrl.c +++ b/drivers/clk/ti/clkctrl.c | |||
@@ -537,6 +537,8 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node) | |||
537 | init.parent_names = ®_data->parent; | 537 | init.parent_names = ®_data->parent; |
538 | init.num_parents = 1; | 538 | init.num_parents = 1; |
539 | init.flags = 0; | 539 | init.flags = 0; |
540 | if (reg_data->flags & CLKF_SET_RATE_PARENT) | ||
541 | init.flags |= CLK_SET_RATE_PARENT; | ||
540 | init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d", | 542 | init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d", |
541 | node->parent->name, node->name, | 543 | node->parent->name, node->name, |
542 | reg_data->offset, 0); | 544 | reg_data->offset, 0); |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index b3b4ed9b6874..d2e5382821a4 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
@@ -386,6 +386,7 @@ config ATMEL_PIT | |||
386 | 386 | ||
387 | config ATMEL_ST | 387 | config ATMEL_ST |
388 | bool "Atmel ST timer support" if COMPILE_TEST | 388 | bool "Atmel ST timer support" if COMPILE_TEST |
389 | depends on HAS_IOMEM | ||
389 | select TIMER_OF | 390 | select TIMER_OF |
390 | select MFD_SYSCON | 391 | select MFD_SYSCON |
391 | help | 392 | help |
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c index 4927355f9cbe..471b428d8034 100644 --- a/drivers/clocksource/arc_timer.c +++ b/drivers/clocksource/arc_timer.c | |||
@@ -251,9 +251,14 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id) | |||
251 | int irq_reenable = clockevent_state_periodic(evt); | 251 | int irq_reenable = clockevent_state_periodic(evt); |
252 | 252 | ||
253 | /* | 253 | /* |
254 | * Any write to CTRL reg ACks the interrupt, we rewrite the | 254 | * 1. ACK the interrupt |
255 | * Count when [N]ot [H]alted bit. | 255 | * - For ARC700, any write to CTRL reg ACKs it, so just rewrite |
256 | * And re-arm it if perioid by [I]nterrupt [E]nable bit | 256 | * Count when [N]ot [H]alted bit. |
257 | * - For HS3x, it is a bit subtle. On taken count-down interrupt, | ||
258 | * IP bit [3] is set, which needs to be cleared for ACK'ing. | ||
259 | * The write below can only update the other two bits, hence | ||
260 | * explicitly clears IP bit | ||
261 | * 2. Re-arm interrupt if periodic by writing to IE bit [0] | ||
257 | */ | 262 | */ |
258 | write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH); | 263 | write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH); |
259 | 264 | ||
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c index 3ee7e6fea621..846d18daf893 100644 --- a/drivers/clocksource/fsl_ftm_timer.c +++ b/drivers/clocksource/fsl_ftm_timer.c | |||
@@ -281,7 +281,7 @@ static int __init __ftm_clk_init(struct device_node *np, char *cnt_name, | |||
281 | 281 | ||
282 | static unsigned long __init ftm_clk_init(struct device_node *np) | 282 | static unsigned long __init ftm_clk_init(struct device_node *np) |
283 | { | 283 | { |
284 | unsigned long freq; | 284 | long freq; |
285 | 285 | ||
286 | freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); | 286 | freq = __ftm_clk_init(np, "ftm-evt-counter-en", "ftm-evt"); |
287 | if (freq <= 0) | 287 | if (freq <= 0) |
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index a04808a21d4e..986b6796b631 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c | |||
@@ -166,7 +166,7 @@ static int __init __gic_clocksource_init(void) | |||
166 | 166 | ||
167 | /* Set clocksource mask. */ | 167 | /* Set clocksource mask. */ |
168 | count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; | 168 | count_width = read_gic_config() & GIC_CONFIG_COUNTBITS; |
169 | count_width >>= __fls(GIC_CONFIG_COUNTBITS); | 169 | count_width >>= __ffs(GIC_CONFIG_COUNTBITS); |
170 | count_width *= 4; | 170 | count_width *= 4; |
171 | count_width += 32; | 171 | count_width += 32; |
172 | gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); | 172 | gic_clocksource.mask = CLOCKSOURCE_MASK(count_width); |
@@ -205,12 +205,12 @@ static int __init gic_clocksource_of_init(struct device_node *node) | |||
205 | } else if (of_property_read_u32(node, "clock-frequency", | 205 | } else if (of_property_read_u32(node, "clock-frequency", |
206 | &gic_frequency)) { | 206 | &gic_frequency)) { |
207 | pr_err("GIC frequency not specified.\n"); | 207 | pr_err("GIC frequency not specified.\n"); |
208 | return -EINVAL;; | 208 | return -EINVAL; |
209 | } | 209 | } |
210 | gic_timer_irq = irq_of_parse_and_map(node, 0); | 210 | gic_timer_irq = irq_of_parse_and_map(node, 0); |
211 | if (!gic_timer_irq) { | 211 | if (!gic_timer_irq) { |
212 | pr_err("GIC timer IRQ not specified.\n"); | 212 | pr_err("GIC timer IRQ not specified.\n"); |
213 | return -EINVAL;; | 213 | return -EINVAL; |
214 | } | 214 | } |
215 | 215 | ||
216 | ret = __gic_clocksource_init(); | 216 | ret = __gic_clocksource_init(); |
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 2a3fe83ec337..3b56ea3f52af 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c | |||
@@ -334,7 +334,7 @@ static int __init sun5i_timer_init(struct device_node *node) | |||
334 | timer_base = of_io_request_and_map(node, 0, of_node_full_name(node)); | 334 | timer_base = of_io_request_and_map(node, 0, of_node_full_name(node)); |
335 | if (IS_ERR(timer_base)) { | 335 | if (IS_ERR(timer_base)) { |
336 | pr_err("Can't map registers\n"); | 336 | pr_err("Can't map registers\n"); |
337 | return PTR_ERR(timer_base);; | 337 | return PTR_ERR(timer_base); |
338 | } | 338 | } |
339 | 339 | ||
340 | irq = irq_of_parse_and_map(node, 0); | 340 | irq = irq_of_parse_and_map(node, 0); |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 3a88e33b0cfe..fb586e09682d 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
@@ -44,10 +44,10 @@ config ARM_DT_BL_CPUFREQ | |||
44 | 44 | ||
45 | config ARM_SCPI_CPUFREQ | 45 | config ARM_SCPI_CPUFREQ |
46 | tristate "SCPI based CPUfreq driver" | 46 | tristate "SCPI based CPUfreq driver" |
47 | depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI | 47 | depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI |
48 | help | 48 | help |
49 | This adds the CPUfreq driver support for ARM big.LITTLE platforms | 49 | This adds the CPUfreq driver support for ARM platforms using SCPI |
50 | using SCPI protocol for CPU power management. | 50 | protocol for CPU power management. |
51 | 51 | ||
52 | This driver uses SCPI Message Protocol driver to interact with the | 52 | This driver uses SCPI Message Protocol driver to interact with the |
53 | firmware providing the CPU DVFS functionality. | 53 | firmware providing the CPU DVFS functionality. |
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 3a2ca0f79daf..d0c34df0529c 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c | |||
@@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) | |||
629 | if (c->x86_vendor == X86_VENDOR_INTEL) { | 629 | if (c->x86_vendor == X86_VENDOR_INTEL) { |
630 | if ((c->x86 == 15) && | 630 | if ((c->x86 == 15) && |
631 | (c->x86_model == 6) && | 631 | (c->x86_model == 6) && |
632 | (c->x86_mask == 8)) { | 632 | (c->x86_stepping == 8)) { |
633 | pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n"); | 633 | pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n"); |
634 | return -ENODEV; | 634 | return -ENODEV; |
635 | } | 635 | } |
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 942632a27b50..f730b6528c18 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c | |||
@@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) | |||
775 | break; | 775 | break; |
776 | 776 | ||
777 | case 7: | 777 | case 7: |
778 | switch (c->x86_mask) { | 778 | switch (c->x86_stepping) { |
779 | case 0: | 779 | case 0: |
780 | longhaul_version = TYPE_LONGHAUL_V1; | 780 | longhaul_version = TYPE_LONGHAUL_V1; |
781 | cpu_model = CPU_SAMUEL2; | 781 | cpu_model = CPU_SAMUEL2; |
@@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) | |||
787 | break; | 787 | break; |
788 | case 1 ... 15: | 788 | case 1 ... 15: |
789 | longhaul_version = TYPE_LONGHAUL_V2; | 789 | longhaul_version = TYPE_LONGHAUL_V2; |
790 | if (c->x86_mask < 8) { | 790 | if (c->x86_stepping < 8) { |
791 | cpu_model = CPU_SAMUEL2; | 791 | cpu_model = CPU_SAMUEL2; |
792 | cpuname = "C3 'Samuel 2' [C5B]"; | 792 | cpuname = "C3 'Samuel 2' [C5B]"; |
793 | } else { | 793 | } else { |
@@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) | |||
814 | numscales = 32; | 814 | numscales = 32; |
815 | memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); | 815 | memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); |
816 | memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); | 816 | memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); |
817 | switch (c->x86_mask) { | 817 | switch (c->x86_stepping) { |
818 | case 0 ... 1: | 818 | case 0 ... 1: |
819 | cpu_model = CPU_NEHEMIAH; | 819 | cpu_model = CPU_NEHEMIAH; |
820 | cpuname = "C3 'Nehemiah A' [C5XLOE]"; | 820 | cpuname = "C3 'Nehemiah A' [C5XLOE]"; |
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c index fd77812313f3..a25741b1281b 100644 --- a/drivers/cpufreq/p4-clockmod.c +++ b/drivers/cpufreq/p4-clockmod.c | |||
@@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) | |||
168 | #endif | 168 | #endif |
169 | 169 | ||
170 | /* Errata workaround */ | 170 | /* Errata workaround */ |
171 | cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask; | 171 | cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping; |
172 | switch (cpuid) { | 172 | switch (cpuid) { |
173 | case 0x0f07: | 173 | case 0x0f07: |
174 | case 0x0f0a: | 174 | case 0x0f0a: |
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index 80ac313e6c59..302e9ce793a0 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c | |||
@@ -131,7 +131,7 @@ static int check_powernow(void) | |||
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
133 | 133 | ||
134 | if ((c->x86_model == 6) && (c->x86_mask == 0)) { | 134 | if ((c->x86_model == 6) && (c->x86_stepping == 0)) { |
135 | pr_info("K7 660[A0] core detected, enabling errata workarounds\n"); | 135 | pr_info("K7 660[A0] core detected, enabling errata workarounds\n"); |
136 | have_a0 = 1; | 136 | have_a0 = 1; |
137 | } | 137 | } |
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index 7b596fa38ad2..6bebc1f9f55a 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c | |||
@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) | |||
351 | static int s3c_cpufreq_init(struct cpufreq_policy *policy) | 351 | static int s3c_cpufreq_init(struct cpufreq_policy *policy) |
352 | { | 352 | { |
353 | policy->clk = clk_arm; | 353 | policy->clk = clk_arm; |
354 | return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); | 354 | |
355 | policy->cpuinfo.transition_latency = cpu_cur.info->latency; | ||
356 | |||
357 | if (ftab) | ||
358 | return cpufreq_table_validate_and_show(policy, ftab); | ||
359 | |||
360 | return 0; | ||
355 | } | 361 | } |
356 | 362 | ||
357 | static int __init s3c_cpufreq_initclks(void) | 363 | static int __init s3c_cpufreq_initclks(void) |
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index c32a833e1b00..d300a163945f 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c | |||
@@ -51,15 +51,23 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) | |||
51 | static int | 51 | static int |
52 | scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) | 52 | scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) |
53 | { | 53 | { |
54 | unsigned long freq = policy->freq_table[index].frequency; | ||
54 | struct scpi_data *priv = policy->driver_data; | 55 | struct scpi_data *priv = policy->driver_data; |
55 | u64 rate = policy->freq_table[index].frequency * 1000; | 56 | u64 rate = freq * 1000; |
56 | int ret; | 57 | int ret; |
57 | 58 | ||
58 | ret = clk_set_rate(priv->clk, rate); | 59 | ret = clk_set_rate(priv->clk, rate); |
59 | if (!ret && (clk_get_rate(priv->clk) != rate)) | ||
60 | ret = -EIO; | ||
61 | 60 | ||
62 | return ret; | 61 | if (ret) |
62 | return ret; | ||
63 | |||
64 | if (clk_get_rate(priv->clk) != rate) | ||
65 | return -EIO; | ||
66 | |||
67 | arch_set_freq_scale(policy->related_cpus, freq, | ||
68 | policy->cpuinfo.max_freq); | ||
69 | |||
70 | return 0; | ||
63 | } | 71 | } |
64 | 72 | ||
65 | static int | 73 | static int |
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c index 41bc5397f4bb..4fa5adf16c70 100644 --- a/drivers/cpufreq/speedstep-centrino.c +++ b/drivers/cpufreq/speedstep-centrino.c | |||
@@ -37,7 +37,7 @@ struct cpu_id | |||
37 | { | 37 | { |
38 | __u8 x86; /* CPU family */ | 38 | __u8 x86; /* CPU family */ |
39 | __u8 x86_model; /* model */ | 39 | __u8 x86_model; /* model */ |
40 | __u8 x86_mask; /* stepping */ | 40 | __u8 x86_stepping; /* stepping */ |
41 | }; | 41 | }; |
42 | 42 | ||
43 | enum { | 43 | enum { |
@@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, | |||
277 | { | 277 | { |
278 | if ((c->x86 == x->x86) && | 278 | if ((c->x86 == x->x86) && |
279 | (c->x86_model == x->x86_model) && | 279 | (c->x86_model == x->x86_model) && |
280 | (c->x86_mask == x->x86_mask)) | 280 | (c->x86_stepping == x->x86_stepping)) |
281 | return 1; | 281 | return 1; |
282 | return 0; | 282 | return 0; |
283 | } | 283 | } |
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c index 8085ec9000d1..e3a9962ee410 100644 --- a/drivers/cpufreq/speedstep-lib.c +++ b/drivers/cpufreq/speedstep-lib.c | |||
@@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void) | |||
272 | ebx = cpuid_ebx(0x00000001); | 272 | ebx = cpuid_ebx(0x00000001); |
273 | ebx &= 0x000000FF; | 273 | ebx &= 0x000000FF; |
274 | 274 | ||
275 | pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); | 275 | pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping); |
276 | 276 | ||
277 | switch (c->x86_mask) { | 277 | switch (c->x86_stepping) { |
278 | case 4: | 278 | case 4: |
279 | /* | 279 | /* |
280 | * B-stepping [M-P4-M] | 280 | * B-stepping [M-P4-M] |
@@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void) | |||
361 | msr_lo, msr_hi); | 361 | msr_lo, msr_hi); |
362 | if ((msr_hi & (1<<18)) && | 362 | if ((msr_hi & (1<<18)) && |
363 | (relaxed_check ? 1 : (msr_hi & (3<<24)))) { | 363 | (relaxed_check ? 1 : (msr_hi & (3<<24)))) { |
364 | if (c->x86_mask == 0x01) { | 364 | if (c->x86_stepping == 0x01) { |
365 | pr_debug("early PIII version\n"); | 365 | pr_debug("early PIII version\n"); |
366 | return SPEEDSTEP_CPU_PIII_C_EARLY; | 366 | return SPEEDSTEP_CPU_PIII_C_EARLY; |
367 | } else | 367 | } else |
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 75d280cb2dc0..e843cf410373 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c | |||
@@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, | |||
228 | * without any error (HW optimizations for later | 228 | * without any error (HW optimizations for later |
229 | * CAAM eras), then try again. | 229 | * CAAM eras), then try again. |
230 | */ | 230 | */ |
231 | if (ret) | ||
232 | break; | ||
233 | |||
231 | rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; | 234 | rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK; |
232 | if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || | 235 | if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || |
233 | !(rdsta_val & (1 << sh_idx))) | 236 | !(rdsta_val & (1 << sh_idx))) { |
234 | ret = -EAGAIN; | 237 | ret = -EAGAIN; |
235 | if (ret) | ||
236 | break; | 238 | break; |
239 | } | ||
240 | |||
237 | dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); | 241 | dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); |
238 | /* Clear the contents before recreating the descriptor */ | 242 | /* Clear the contents before recreating the descriptor */ |
239 | memset(desc, 0x00, CAAM_CMD_SZ * 7); | 243 | memset(desc, 0x00, CAAM_CMD_SZ * 7); |
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index fcfa5b1eae61..b3afb6cc9d72 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c | |||
@@ -211,7 +211,7 @@ static int __sev_platform_shutdown_locked(int *error) | |||
211 | { | 211 | { |
212 | int ret; | 212 | int ret; |
213 | 213 | ||
214 | ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, 0, error); | 214 | ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); |
215 | if (ret) | 215 | if (ret) |
216 | return ret; | 216 | return ret; |
217 | 217 | ||
@@ -271,7 +271,7 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp) | |||
271 | return rc; | 271 | return rc; |
272 | } | 272 | } |
273 | 273 | ||
274 | return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, 0, &argp->error); | 274 | return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error); |
275 | } | 275 | } |
276 | 276 | ||
277 | static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) | 277 | static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) |
@@ -299,7 +299,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp) | |||
299 | return rc; | 299 | return rc; |
300 | } | 300 | } |
301 | 301 | ||
302 | return __sev_do_cmd_locked(cmd, 0, &argp->error); | 302 | return __sev_do_cmd_locked(cmd, NULL, &argp->error); |
303 | } | 303 | } |
304 | 304 | ||
305 | static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) | 305 | static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp) |
@@ -624,7 +624,7 @@ EXPORT_SYMBOL_GPL(sev_guest_decommission); | |||
624 | 624 | ||
625 | int sev_guest_df_flush(int *error) | 625 | int sev_guest_df_flush(int *error) |
626 | { | 626 | { |
627 | return sev_do_cmd(SEV_CMD_DF_FLUSH, 0, error); | 627 | return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error); |
628 | } | 628 | } |
629 | EXPORT_SYMBOL_GPL(sev_guest_df_flush); | 629 | EXPORT_SYMBOL_GPL(sev_guest_df_flush); |
630 | 630 | ||
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 4b6642a25df5..1c6cbda56afe 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c | |||
@@ -512,7 +512,7 @@ static int __init padlock_init(void) | |||
512 | 512 | ||
513 | printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); | 513 | printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); |
514 | 514 | ||
515 | if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) { | 515 | if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) { |
516 | ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS; | 516 | ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS; |
517 | cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS; | 517 | cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS; |
518 | printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n"); | 518 | printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n"); |
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 188f44b7eb27..5d64c08b7f47 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c | |||
@@ -1922,15 +1922,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) | |||
1922 | uint32_t aes_control; | 1922 | uint32_t aes_control; |
1923 | unsigned long flags; | 1923 | unsigned long flags; |
1924 | int err; | 1924 | int err; |
1925 | u8 *iv; | ||
1925 | 1926 | ||
1926 | aes_control = SSS_AES_KEY_CHANGE_MODE; | 1927 | aes_control = SSS_AES_KEY_CHANGE_MODE; |
1927 | if (mode & FLAGS_AES_DECRYPT) | 1928 | if (mode & FLAGS_AES_DECRYPT) |
1928 | aes_control |= SSS_AES_MODE_DECRYPT; | 1929 | aes_control |= SSS_AES_MODE_DECRYPT; |
1929 | 1930 | ||
1930 | if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) | 1931 | if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) { |
1931 | aes_control |= SSS_AES_CHAIN_MODE_CBC; | 1932 | aes_control |= SSS_AES_CHAIN_MODE_CBC; |
1932 | else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) | 1933 | iv = req->info; |
1934 | } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) { | ||
1933 | aes_control |= SSS_AES_CHAIN_MODE_CTR; | 1935 | aes_control |= SSS_AES_CHAIN_MODE_CTR; |
1936 | iv = req->info; | ||
1937 | } else { | ||
1938 | iv = NULL; /* AES_ECB */ | ||
1939 | } | ||
1934 | 1940 | ||
1935 | if (dev->ctx->keylen == AES_KEYSIZE_192) | 1941 | if (dev->ctx->keylen == AES_KEYSIZE_192) |
1936 | aes_control |= SSS_AES_KEY_SIZE_192; | 1942 | aes_control |= SSS_AES_KEY_SIZE_192; |
@@ -1961,7 +1967,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode) | |||
1961 | goto outdata_error; | 1967 | goto outdata_error; |
1962 | 1968 | ||
1963 | SSS_AES_WRITE(dev, AES_CONTROL, aes_control); | 1969 | SSS_AES_WRITE(dev, AES_CONTROL, aes_control); |
1964 | s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen); | 1970 | s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen); |
1965 | 1971 | ||
1966 | s5p_set_dma_indata(dev, dev->sg_src); | 1972 | s5p_set_dma_indata(dev, dev->sg_src); |
1967 | s5p_set_dma_outdata(dev, dev->sg_dst); | 1973 | s5p_set_dma_outdata(dev, dev->sg_dst); |
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c index 0d01d1624252..63d636424161 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c | |||
@@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, | |||
28 | algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); | 28 | algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); |
29 | ss = algt->ss; | 29 | ss = algt->ss; |
30 | 30 | ||
31 | spin_lock(&ss->slock); | 31 | spin_lock_bh(&ss->slock); |
32 | 32 | ||
33 | writel(mode, ss->base + SS_CTL); | 33 | writel(mode, ss->base + SS_CTL); |
34 | 34 | ||
@@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, | |||
51 | } | 51 | } |
52 | 52 | ||
53 | writel(0, ss->base + SS_CTL); | 53 | writel(0, ss->base + SS_CTL); |
54 | spin_unlock(&ss->slock); | 54 | spin_unlock_bh(&ss->slock); |
55 | return dlen; | 55 | return 0; |
56 | } | 56 | } |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 9c80e0cb1664..6882fa2f8bad 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -1138,6 +1138,10 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src, | |||
1138 | struct talitos_private *priv = dev_get_drvdata(dev); | 1138 | struct talitos_private *priv = dev_get_drvdata(dev); |
1139 | bool is_sec1 = has_ftr_sec1(priv); | 1139 | bool is_sec1 = has_ftr_sec1(priv); |
1140 | 1140 | ||
1141 | if (!src) { | ||
1142 | to_talitos_ptr(ptr, 0, 0, is_sec1); | ||
1143 | return 1; | ||
1144 | } | ||
1141 | if (sg_count == 1) { | 1145 | if (sg_count == 1) { |
1142 | to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); | 1146 | to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); |
1143 | return sg_count; | 1147 | return sg_count; |
diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 473af694ad1c..ecdc292aa4e4 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c | |||
@@ -246,12 +246,6 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, | |||
246 | { | 246 | { |
247 | long avail; | 247 | long avail; |
248 | 248 | ||
249 | /* | ||
250 | * The device driver is allowed to sleep, in order to make the | ||
251 | * memory directly accessible. | ||
252 | */ | ||
253 | might_sleep(); | ||
254 | |||
255 | if (!dax_dev) | 249 | if (!dax_dev) |
256 | return -EOPNOTSUPP; | 250 | return -EOPNOTSUPP; |
257 | 251 | ||
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index f652a0e0f5a2..3548caa9e933 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c | |||
@@ -163,6 +163,7 @@ struct mv_xor_v2_device { | |||
163 | void __iomem *dma_base; | 163 | void __iomem *dma_base; |
164 | void __iomem *glob_base; | 164 | void __iomem *glob_base; |
165 | struct clk *clk; | 165 | struct clk *clk; |
166 | struct clk *reg_clk; | ||
166 | struct tasklet_struct irq_tasklet; | 167 | struct tasklet_struct irq_tasklet; |
167 | struct list_head free_sw_desc; | 168 | struct list_head free_sw_desc; |
168 | struct dma_device dmadev; | 169 | struct dma_device dmadev; |
@@ -749,13 +750,26 @@ static int mv_xor_v2_probe(struct platform_device *pdev) | |||
749 | if (ret) | 750 | if (ret) |
750 | return ret; | 751 | return ret; |
751 | 752 | ||
753 | xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg"); | ||
754 | if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) { | ||
755 | if (!IS_ERR(xor_dev->reg_clk)) { | ||
756 | ret = clk_prepare_enable(xor_dev->reg_clk); | ||
757 | if (ret) | ||
758 | return ret; | ||
759 | } else { | ||
760 | return PTR_ERR(xor_dev->reg_clk); | ||
761 | } | ||
762 | } | ||
763 | |||
752 | xor_dev->clk = devm_clk_get(&pdev->dev, NULL); | 764 | xor_dev->clk = devm_clk_get(&pdev->dev, NULL); |
753 | if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) | 765 | if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) { |
754 | return -EPROBE_DEFER; | 766 | ret = EPROBE_DEFER; |
767 | goto disable_reg_clk; | ||
768 | } | ||
755 | if (!IS_ERR(xor_dev->clk)) { | 769 | if (!IS_ERR(xor_dev->clk)) { |
756 | ret = clk_prepare_enable(xor_dev->clk); | 770 | ret = clk_prepare_enable(xor_dev->clk); |
757 | if (ret) | 771 | if (ret) |
758 | return ret; | 772 | goto disable_reg_clk; |
759 | } | 773 | } |
760 | 774 | ||
761 | ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, | 775 | ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, |
@@ -866,8 +880,9 @@ free_hw_desq: | |||
866 | free_msi_irqs: | 880 | free_msi_irqs: |
867 | platform_msi_domain_free_irqs(&pdev->dev); | 881 | platform_msi_domain_free_irqs(&pdev->dev); |
868 | disable_clk: | 882 | disable_clk: |
869 | if (!IS_ERR(xor_dev->clk)) | 883 | clk_disable_unprepare(xor_dev->clk); |
870 | clk_disable_unprepare(xor_dev->clk); | 884 | disable_reg_clk: |
885 | clk_disable_unprepare(xor_dev->reg_clk); | ||
871 | return ret; | 886 | return ret; |
872 | } | 887 | } |
873 | 888 | ||
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index e3ff162c03fc..d0cacdb0713e 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c | |||
@@ -917,7 +917,7 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, | |||
917 | 917 | ||
918 | rcar_dmac_chan_configure_desc(chan, desc); | 918 | rcar_dmac_chan_configure_desc(chan, desc); |
919 | 919 | ||
920 | max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift; | 920 | max_chunk_size = RCAR_DMATCR_MASK << desc->xfer_shift; |
921 | 921 | ||
922 | /* | 922 | /* |
923 | * Allocate and fill the transfer chunk descriptors. We own the only | 923 | * Allocate and fill the transfer chunk descriptors. We own the only |
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index 4dbb30cf94ac..b922db90939a 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c | |||
@@ -118,14 +118,15 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec, | |||
118 | spin_lock_irqsave(&dmamux->lock, flags); | 118 | spin_lock_irqsave(&dmamux->lock, flags); |
119 | mux->chan_id = find_first_zero_bit(dmamux->dma_inuse, | 119 | mux->chan_id = find_first_zero_bit(dmamux->dma_inuse, |
120 | dmamux->dma_requests); | 120 | dmamux->dma_requests); |
121 | set_bit(mux->chan_id, dmamux->dma_inuse); | ||
122 | spin_unlock_irqrestore(&dmamux->lock, flags); | ||
123 | 121 | ||
124 | if (mux->chan_id == dmamux->dma_requests) { | 122 | if (mux->chan_id == dmamux->dma_requests) { |
123 | spin_unlock_irqrestore(&dmamux->lock, flags); | ||
125 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); | 124 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); |
126 | ret = -ENOMEM; | 125 | ret = -ENOMEM; |
127 | goto error; | 126 | goto error_chan_id; |
128 | } | 127 | } |
128 | set_bit(mux->chan_id, dmamux->dma_inuse); | ||
129 | spin_unlock_irqrestore(&dmamux->lock, flags); | ||
129 | 130 | ||
130 | /* Look for DMA Master */ | 131 | /* Look for DMA Master */ |
131 | for (i = 1, min = 0, max = dmamux->dma_reqs[i]; | 132 | for (i = 1, min = 0, max = dmamux->dma_reqs[i]; |
@@ -173,6 +174,8 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec, | |||
173 | 174 | ||
174 | error: | 175 | error: |
175 | clear_bit(mux->chan_id, dmamux->dma_inuse); | 176 | clear_bit(mux->chan_id, dmamux->dma_inuse); |
177 | |||
178 | error_chan_id: | ||
176 | kfree(mux); | 179 | kfree(mux); |
177 | return ERR_PTR(ret); | 180 | return ERR_PTR(ret); |
178 | } | 181 | } |
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 8b16ec595fa7..329cb96f886f 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) | |||
3147 | struct amd64_family_type *fam_type = NULL; | 3147 | struct amd64_family_type *fam_type = NULL; |
3148 | 3148 | ||
3149 | pvt->ext_model = boot_cpu_data.x86_model >> 4; | 3149 | pvt->ext_model = boot_cpu_data.x86_model >> 4; |
3150 | pvt->stepping = boot_cpu_data.x86_mask; | 3150 | pvt->stepping = boot_cpu_data.x86_stepping; |
3151 | pvt->model = boot_cpu_data.x86_model; | 3151 | pvt->model = boot_cpu_data.x86_model; |
3152 | pvt->fam = boot_cpu_data.x86; | 3152 | pvt->fam = boot_cpu_data.x86; |
3153 | 3153 | ||
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index f34430f99fd8..872100215ca0 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c | |||
@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = { | |||
279 | * sbridge structs | 279 | * sbridge structs |
280 | */ | 280 | */ |
281 | 281 | ||
282 | #define NUM_CHANNELS 4 /* Max channels per MC */ | 282 | #define NUM_CHANNELS 6 /* Max channels per MC */ |
283 | #define MAX_DIMMS 3 /* Max DIMMS per channel */ | 283 | #define MAX_DIMMS 3 /* Max DIMMS per channel */ |
284 | #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */ | 284 | #define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */ |
285 | #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */ | 285 | #define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */ |
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c index 0a44d43802fe..3ec4c715e240 100644 --- a/drivers/extcon/extcon-axp288.c +++ b/drivers/extcon/extcon-axp288.c | |||
@@ -1,7 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver | 2 | * extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver |
3 | * | 3 | * |
4 | * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com> | ||
5 | * Copyright (C) 2015 Intel Corporation | 4 | * Copyright (C) 2015 Intel Corporation |
6 | * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com> | 5 | * Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com> |
7 | * | 6 | * |
@@ -98,15 +97,13 @@ struct axp288_extcon_info { | |||
98 | struct device *dev; | 97 | struct device *dev; |
99 | struct regmap *regmap; | 98 | struct regmap *regmap; |
100 | struct regmap_irq_chip_data *regmap_irqc; | 99 | struct regmap_irq_chip_data *regmap_irqc; |
101 | struct delayed_work det_work; | ||
102 | int irq[EXTCON_IRQ_END]; | 100 | int irq[EXTCON_IRQ_END]; |
103 | struct extcon_dev *edev; | 101 | struct extcon_dev *edev; |
104 | unsigned int previous_cable; | 102 | unsigned int previous_cable; |
105 | bool first_detect_done; | ||
106 | }; | 103 | }; |
107 | 104 | ||
108 | /* Power up/down reason string array */ | 105 | /* Power up/down reason string array */ |
109 | static char *axp288_pwr_up_down_info[] = { | 106 | static const char * const axp288_pwr_up_down_info[] = { |
110 | "Last wake caused by user pressing the power button", | 107 | "Last wake caused by user pressing the power button", |
111 | "Last wake caused by a charger insertion", | 108 | "Last wake caused by a charger insertion", |
112 | "Last wake caused by a battery insertion", | 109 | "Last wake caused by a battery insertion", |
@@ -124,7 +121,7 @@ static char *axp288_pwr_up_down_info[] = { | |||
124 | */ | 121 | */ |
125 | static void axp288_extcon_log_rsi(struct axp288_extcon_info *info) | 122 | static void axp288_extcon_log_rsi(struct axp288_extcon_info *info) |
126 | { | 123 | { |
127 | char **rsi; | 124 | const char * const *rsi; |
128 | unsigned int val, i, clear_mask = 0; | 125 | unsigned int val, i, clear_mask = 0; |
129 | int ret; | 126 | int ret; |
130 | 127 | ||
@@ -140,25 +137,6 @@ static void axp288_extcon_log_rsi(struct axp288_extcon_info *info) | |||
140 | regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask); | 137 | regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask); |
141 | } | 138 | } |
142 | 139 | ||
143 | static void axp288_chrg_detect_complete(struct axp288_extcon_info *info) | ||
144 | { | ||
145 | /* | ||
146 | * We depend on other drivers to do things like mux the data lines, | ||
147 | * enable/disable vbus based on the id-pin, etc. Sometimes the BIOS has | ||
148 | * not set these things up correctly resulting in the initial charger | ||
149 | * cable type detection giving a wrong result and we end up not charging | ||
150 | * or charging at only 0.5A. | ||
151 | * | ||
152 | * So we schedule a second cable type detection after 2 seconds to | ||
153 | * give the other drivers time to load and do their thing. | ||
154 | */ | ||
155 | if (!info->first_detect_done) { | ||
156 | queue_delayed_work(system_wq, &info->det_work, | ||
157 | msecs_to_jiffies(2000)); | ||
158 | info->first_detect_done = true; | ||
159 | } | ||
160 | } | ||
161 | |||
162 | static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info) | 140 | static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info) |
163 | { | 141 | { |
164 | int ret, stat, cfg, pwr_stat; | 142 | int ret, stat, cfg, pwr_stat; |
@@ -223,8 +201,6 @@ no_vbus: | |||
223 | info->previous_cable = cable; | 201 | info->previous_cable = cable; |
224 | } | 202 | } |
225 | 203 | ||
226 | axp288_chrg_detect_complete(info); | ||
227 | |||
228 | return 0; | 204 | return 0; |
229 | 205 | ||
230 | dev_det_ret: | 206 | dev_det_ret: |
@@ -246,11 +222,8 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data) | |||
246 | return IRQ_HANDLED; | 222 | return IRQ_HANDLED; |
247 | } | 223 | } |
248 | 224 | ||
249 | static void axp288_extcon_det_work(struct work_struct *work) | 225 | static void axp288_extcon_enable(struct axp288_extcon_info *info) |
250 | { | 226 | { |
251 | struct axp288_extcon_info *info = | ||
252 | container_of(work, struct axp288_extcon_info, det_work.work); | ||
253 | |||
254 | regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG, | 227 | regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG, |
255 | BC_GLOBAL_RUN, 0); | 228 | BC_GLOBAL_RUN, 0); |
256 | /* Enable the charger detection logic */ | 229 | /* Enable the charger detection logic */ |
@@ -272,7 +245,6 @@ static int axp288_extcon_probe(struct platform_device *pdev) | |||
272 | info->regmap = axp20x->regmap; | 245 | info->regmap = axp20x->regmap; |
273 | info->regmap_irqc = axp20x->regmap_irqc; | 246 | info->regmap_irqc = axp20x->regmap_irqc; |
274 | info->previous_cable = EXTCON_NONE; | 247 | info->previous_cable = EXTCON_NONE; |
275 | INIT_DELAYED_WORK(&info->det_work, axp288_extcon_det_work); | ||
276 | 248 | ||
277 | platform_set_drvdata(pdev, info); | 249 | platform_set_drvdata(pdev, info); |
278 | 250 | ||
@@ -318,7 +290,7 @@ static int axp288_extcon_probe(struct platform_device *pdev) | |||
318 | } | 290 | } |
319 | 291 | ||
320 | /* Start charger cable type detection */ | 292 | /* Start charger cable type detection */ |
321 | queue_delayed_work(system_wq, &info->det_work, 0); | 293 | axp288_extcon_enable(info); |
322 | 294 | ||
323 | return 0; | 295 | return 0; |
324 | } | 296 | } |
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index c8691b5a9cb0..191e99f06a9a 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c | |||
@@ -153,8 +153,9 @@ static int int3496_probe(struct platform_device *pdev) | |||
153 | return ret; | 153 | return ret; |
154 | } | 154 | } |
155 | 155 | ||
156 | /* queue initial processing of id-pin */ | 156 | /* process id-pin so that we start with the right status */ |
157 | queue_delayed_work(system_wq, &data->work, 0); | 157 | queue_delayed_work(system_wq, &data->work, 0); |
158 | flush_delayed_work(&data->work); | ||
158 | 159 | ||
159 | platform_set_drvdata(pdev, data); | 160 | platform_set_drvdata(pdev, data); |
160 | 161 | ||
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index c16600f30611..0bdea60c65dd 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c | |||
@@ -639,7 +639,7 @@ static void __exit dcdbas_exit(void) | |||
639 | platform_driver_unregister(&dcdbas_driver); | 639 | platform_driver_unregister(&dcdbas_driver); |
640 | } | 640 | } |
641 | 641 | ||
642 | module_init(dcdbas_init); | 642 | subsys_initcall_sync(dcdbas_init); |
643 | module_exit(dcdbas_exit); | 643 | module_exit(dcdbas_exit); |
644 | 644 | ||
645 | MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")"); | 645 | MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")"); |
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index da661bf8cb96..13c1edd37e96 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ b/drivers/firmware/efi/libstub/tpm.c | |||
@@ -68,11 +68,11 @@ void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg) | |||
68 | efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID; | 68 | efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID; |
69 | efi_status_t status; | 69 | efi_status_t status; |
70 | efi_physical_addr_t log_location, log_last_entry; | 70 | efi_physical_addr_t log_location, log_last_entry; |
71 | struct linux_efi_tpm_eventlog *log_tbl; | 71 | struct linux_efi_tpm_eventlog *log_tbl = NULL; |
72 | unsigned long first_entry_addr, last_entry_addr; | 72 | unsigned long first_entry_addr, last_entry_addr; |
73 | size_t log_size, last_entry_size; | 73 | size_t log_size, last_entry_size; |
74 | efi_bool_t truncated; | 74 | efi_bool_t truncated; |
75 | void *tcg2_protocol; | 75 | void *tcg2_protocol = NULL; |
76 | 76 | ||
77 | status = efi_call_early(locate_protocol, &tcg2_guid, NULL, | 77 | status = efi_call_early(locate_protocol, &tcg2_guid, NULL, |
78 | &tcg2_protocol); | 78 | &tcg2_protocol); |
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index e76de57dd617..ebaea8b1594b 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c | |||
@@ -14,7 +14,6 @@ | |||
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/clk.h> | ||
18 | #include <linux/err.h> | 17 | #include <linux/err.h> |
19 | #include <linux/gpio.h> | 18 | #include <linux/gpio.h> |
20 | #include <linux/init.h> | 19 | #include <linux/init.h> |
@@ -37,10 +36,9 @@ struct gpio_rcar_priv { | |||
37 | struct platform_device *pdev; | 36 | struct platform_device *pdev; |
38 | struct gpio_chip gpio_chip; | 37 | struct gpio_chip gpio_chip; |
39 | struct irq_chip irq_chip; | 38 | struct irq_chip irq_chip; |
40 | struct clk *clk; | ||
41 | unsigned int irq_parent; | 39 | unsigned int irq_parent; |
40 | atomic_t wakeup_path; | ||
42 | bool has_both_edge_trigger; | 41 | bool has_both_edge_trigger; |
43 | bool needs_clk; | ||
44 | }; | 42 | }; |
45 | 43 | ||
46 | #define IOINTSEL 0x00 /* General IO/Interrupt Switching Register */ | 44 | #define IOINTSEL 0x00 /* General IO/Interrupt Switching Register */ |
@@ -186,13 +184,10 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on) | |||
186 | } | 184 | } |
187 | } | 185 | } |
188 | 186 | ||
189 | if (!p->clk) | ||
190 | return 0; | ||
191 | |||
192 | if (on) | 187 | if (on) |
193 | clk_enable(p->clk); | 188 | atomic_inc(&p->wakeup_path); |
194 | else | 189 | else |
195 | clk_disable(p->clk); | 190 | atomic_dec(&p->wakeup_path); |
196 | 191 | ||
197 | return 0; | 192 | return 0; |
198 | } | 193 | } |
@@ -330,17 +325,14 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset, | |||
330 | 325 | ||
331 | struct gpio_rcar_info { | 326 | struct gpio_rcar_info { |
332 | bool has_both_edge_trigger; | 327 | bool has_both_edge_trigger; |
333 | bool needs_clk; | ||
334 | }; | 328 | }; |
335 | 329 | ||
336 | static const struct gpio_rcar_info gpio_rcar_info_gen1 = { | 330 | static const struct gpio_rcar_info gpio_rcar_info_gen1 = { |
337 | .has_both_edge_trigger = false, | 331 | .has_both_edge_trigger = false, |
338 | .needs_clk = false, | ||
339 | }; | 332 | }; |
340 | 333 | ||
341 | static const struct gpio_rcar_info gpio_rcar_info_gen2 = { | 334 | static const struct gpio_rcar_info gpio_rcar_info_gen2 = { |
342 | .has_both_edge_trigger = true, | 335 | .has_both_edge_trigger = true, |
343 | .needs_clk = true, | ||
344 | }; | 336 | }; |
345 | 337 | ||
346 | static const struct of_device_id gpio_rcar_of_table[] = { | 338 | static const struct of_device_id gpio_rcar_of_table[] = { |
@@ -403,7 +395,6 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins) | |||
403 | ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args); | 395 | ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args); |
404 | *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK; | 396 | *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK; |
405 | p->has_both_edge_trigger = info->has_both_edge_trigger; | 397 | p->has_both_edge_trigger = info->has_both_edge_trigger; |
406 | p->needs_clk = info->needs_clk; | ||
407 | 398 | ||
408 | if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) { | 399 | if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) { |
409 | dev_warn(&p->pdev->dev, | 400 | dev_warn(&p->pdev->dev, |
@@ -440,16 +431,6 @@ static int gpio_rcar_probe(struct platform_device *pdev) | |||
440 | 431 | ||
441 | platform_set_drvdata(pdev, p); | 432 | platform_set_drvdata(pdev, p); |
442 | 433 | ||
443 | p->clk = devm_clk_get(dev, NULL); | ||
444 | if (IS_ERR(p->clk)) { | ||
445 | if (p->needs_clk) { | ||
446 | dev_err(dev, "unable to get clock\n"); | ||
447 | ret = PTR_ERR(p->clk); | ||
448 | goto err0; | ||
449 | } | ||
450 | p->clk = NULL; | ||
451 | } | ||
452 | |||
453 | pm_runtime_enable(dev); | 434 | pm_runtime_enable(dev); |
454 | 435 | ||
455 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 436 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
@@ -531,11 +512,24 @@ static int gpio_rcar_remove(struct platform_device *pdev) | |||
531 | return 0; | 512 | return 0; |
532 | } | 513 | } |
533 | 514 | ||
515 | static int __maybe_unused gpio_rcar_suspend(struct device *dev) | ||
516 | { | ||
517 | struct gpio_rcar_priv *p = dev_get_drvdata(dev); | ||
518 | |||
519 | if (atomic_read(&p->wakeup_path)) | ||
520 | device_set_wakeup_path(dev); | ||
521 | |||
522 | return 0; | ||
523 | } | ||
524 | |||
525 | static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, NULL); | ||
526 | |||
534 | static struct platform_driver gpio_rcar_device_driver = { | 527 | static struct platform_driver gpio_rcar_device_driver = { |
535 | .probe = gpio_rcar_probe, | 528 | .probe = gpio_rcar_probe, |
536 | .remove = gpio_rcar_remove, | 529 | .remove = gpio_rcar_remove, |
537 | .driver = { | 530 | .driver = { |
538 | .name = "gpio_rcar", | 531 | .name = "gpio_rcar", |
532 | .pm = &gpio_rcar_pm_ops, | ||
539 | .of_match_table = of_match_ptr(gpio_rcar_of_table), | 533 | .of_match_table = of_match_ptr(gpio_rcar_of_table), |
540 | } | 534 | } |
541 | }; | 535 | }; |
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 564bb7a31da4..84e5a9df2344 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
@@ -241,6 +241,19 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, | |||
241 | 241 | ||
242 | desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, | 242 | desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, |
243 | &of_flags); | 243 | &of_flags); |
244 | /* | ||
245 | * -EPROBE_DEFER in our case means that we found a | ||
246 | * valid GPIO property, but no controller has been | ||
247 | * registered so far. | ||
248 | * | ||
249 | * This means we don't need to look any further for | ||
250 | * alternate name conventions, and we should really | ||
251 | * preserve the return code for our user to be able to | ||
252 | * retry probing later. | ||
253 | */ | ||
254 | if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER) | ||
255 | return desc; | ||
256 | |||
244 | if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT)) | 257 | if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT)) |
245 | break; | 258 | break; |
246 | } | 259 | } |
@@ -250,7 +263,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, | |||
250 | desc = of_find_spi_gpio(dev, con_id, &of_flags); | 263 | desc = of_find_spi_gpio(dev, con_id, &of_flags); |
251 | 264 | ||
252 | /* Special handling for regulator GPIOs if used */ | 265 | /* Special handling for regulator GPIOs if used */ |
253 | if (IS_ERR(desc)) | 266 | if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) |
254 | desc = of_find_regulator_gpio(dev, con_id, &of_flags); | 267 | desc = of_find_regulator_gpio(dev, con_id, &of_flags); |
255 | 268 | ||
256 | if (IS_ERR(desc)) | 269 | if (IS_ERR(desc)) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 9da8d5802980..96501ff0e55b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | |||
@@ -729,9 +729,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) | |||
729 | enum drm_connector_status ret = connector_status_disconnected; | 729 | enum drm_connector_status ret = connector_status_disconnected; |
730 | int r; | 730 | int r; |
731 | 731 | ||
732 | r = pm_runtime_get_sync(connector->dev->dev); | 732 | if (!drm_kms_helper_is_poll_worker()) { |
733 | if (r < 0) | 733 | r = pm_runtime_get_sync(connector->dev->dev); |
734 | return connector_status_disconnected; | 734 | if (r < 0) |
735 | return connector_status_disconnected; | ||
736 | } | ||
735 | 737 | ||
736 | if (encoder) { | 738 | if (encoder) { |
737 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | 739 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); |
@@ -750,8 +752,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) | |||
750 | /* check acpi lid status ??? */ | 752 | /* check acpi lid status ??? */ |
751 | 753 | ||
752 | amdgpu_connector_update_scratch_regs(connector, ret); | 754 | amdgpu_connector_update_scratch_regs(connector, ret); |
753 | pm_runtime_mark_last_busy(connector->dev->dev); | 755 | |
754 | pm_runtime_put_autosuspend(connector->dev->dev); | 756 | if (!drm_kms_helper_is_poll_worker()) { |
757 | pm_runtime_mark_last_busy(connector->dev->dev); | ||
758 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
759 | } | ||
760 | |||
755 | return ret; | 761 | return ret; |
756 | } | 762 | } |
757 | 763 | ||
@@ -861,9 +867,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) | |||
861 | enum drm_connector_status ret = connector_status_disconnected; | 867 | enum drm_connector_status ret = connector_status_disconnected; |
862 | int r; | 868 | int r; |
863 | 869 | ||
864 | r = pm_runtime_get_sync(connector->dev->dev); | 870 | if (!drm_kms_helper_is_poll_worker()) { |
865 | if (r < 0) | 871 | r = pm_runtime_get_sync(connector->dev->dev); |
866 | return connector_status_disconnected; | 872 | if (r < 0) |
873 | return connector_status_disconnected; | ||
874 | } | ||
867 | 875 | ||
868 | encoder = amdgpu_connector_best_single_encoder(connector); | 876 | encoder = amdgpu_connector_best_single_encoder(connector); |
869 | if (!encoder) | 877 | if (!encoder) |
@@ -917,8 +925,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) | |||
917 | amdgpu_connector_update_scratch_regs(connector, ret); | 925 | amdgpu_connector_update_scratch_regs(connector, ret); |
918 | 926 | ||
919 | out: | 927 | out: |
920 | pm_runtime_mark_last_busy(connector->dev->dev); | 928 | if (!drm_kms_helper_is_poll_worker()) { |
921 | pm_runtime_put_autosuspend(connector->dev->dev); | 929 | pm_runtime_mark_last_busy(connector->dev->dev); |
930 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
931 | } | ||
922 | 932 | ||
923 | return ret; | 933 | return ret; |
924 | } | 934 | } |
@@ -981,9 +991,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) | |||
981 | enum drm_connector_status ret = connector_status_disconnected; | 991 | enum drm_connector_status ret = connector_status_disconnected; |
982 | bool dret = false, broken_edid = false; | 992 | bool dret = false, broken_edid = false; |
983 | 993 | ||
984 | r = pm_runtime_get_sync(connector->dev->dev); | 994 | if (!drm_kms_helper_is_poll_worker()) { |
985 | if (r < 0) | 995 | r = pm_runtime_get_sync(connector->dev->dev); |
986 | return connector_status_disconnected; | 996 | if (r < 0) |
997 | return connector_status_disconnected; | ||
998 | } | ||
987 | 999 | ||
988 | if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { | 1000 | if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { |
989 | ret = connector->status; | 1001 | ret = connector->status; |
@@ -1108,8 +1120,10 @@ out: | |||
1108 | amdgpu_connector_update_scratch_regs(connector, ret); | 1120 | amdgpu_connector_update_scratch_regs(connector, ret); |
1109 | 1121 | ||
1110 | exit: | 1122 | exit: |
1111 | pm_runtime_mark_last_busy(connector->dev->dev); | 1123 | if (!drm_kms_helper_is_poll_worker()) { |
1112 | pm_runtime_put_autosuspend(connector->dev->dev); | 1124 | pm_runtime_mark_last_busy(connector->dev->dev); |
1125 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
1126 | } | ||
1113 | 1127 | ||
1114 | return ret; | 1128 | return ret; |
1115 | } | 1129 | } |
@@ -1352,9 +1366,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) | |||
1352 | struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); | 1366 | struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); |
1353 | int r; | 1367 | int r; |
1354 | 1368 | ||
1355 | r = pm_runtime_get_sync(connector->dev->dev); | 1369 | if (!drm_kms_helper_is_poll_worker()) { |
1356 | if (r < 0) | 1370 | r = pm_runtime_get_sync(connector->dev->dev); |
1357 | return connector_status_disconnected; | 1371 | if (r < 0) |
1372 | return connector_status_disconnected; | ||
1373 | } | ||
1358 | 1374 | ||
1359 | if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { | 1375 | if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { |
1360 | ret = connector->status; | 1376 | ret = connector->status; |
@@ -1424,8 +1440,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) | |||
1424 | 1440 | ||
1425 | amdgpu_connector_update_scratch_regs(connector, ret); | 1441 | amdgpu_connector_update_scratch_regs(connector, ret); |
1426 | out: | 1442 | out: |
1427 | pm_runtime_mark_last_busy(connector->dev->dev); | 1443 | if (!drm_kms_helper_is_poll_worker()) { |
1428 | pm_runtime_put_autosuspend(connector->dev->dev); | 1444 | pm_runtime_mark_last_busy(connector->dev->dev); |
1445 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
1446 | } | ||
1429 | 1447 | ||
1430 | return ret; | 1448 | return ret; |
1431 | } | 1449 | } |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 13a5362d074e..e42a28e3adc5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -3172,8 +3172,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, | |||
3172 | 3172 | ||
3173 | switch (aplane->base.type) { | 3173 | switch (aplane->base.type) { |
3174 | case DRM_PLANE_TYPE_PRIMARY: | 3174 | case DRM_PLANE_TYPE_PRIMARY: |
3175 | aplane->base.format_default = true; | ||
3176 | |||
3177 | res = drm_universal_plane_init( | 3175 | res = drm_universal_plane_init( |
3178 | dm->adev->ddev, | 3176 | dm->adev->ddev, |
3179 | &aplane->base, | 3177 | &aplane->base, |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 9ab69b22b989..ca0b08bfa2cf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | |||
@@ -109,7 +109,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( | |||
109 | struct cea_sad *sad = &sads[i]; | 109 | struct cea_sad *sad = &sads[i]; |
110 | 110 | ||
111 | edid_caps->audio_modes[i].format_code = sad->format; | 111 | edid_caps->audio_modes[i].format_code = sad->format; |
112 | edid_caps->audio_modes[i].channel_count = sad->channels; | 112 | edid_caps->audio_modes[i].channel_count = sad->channels + 1; |
113 | edid_caps->audio_modes[i].sample_rate = sad->freq; | 113 | edid_caps->audio_modes[i].sample_rate = sad->freq; |
114 | edid_caps->audio_modes[i].sample_size = sad->byte2; | 114 | edid_caps->audio_modes[i].sample_size = sad->byte2; |
115 | } | 115 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c index 3931412ab6d3..87093894ea9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c | |||
@@ -128,23 +128,22 @@ static void set_truncation( | |||
128 | return; | 128 | return; |
129 | } | 129 | } |
130 | /* on other format-to do */ | 130 | /* on other format-to do */ |
131 | if (params->flags.TRUNCATE_ENABLED == 0 || | 131 | if (params->flags.TRUNCATE_ENABLED == 0) |
132 | params->flags.TRUNCATE_DEPTH == 2) | ||
133 | return; | 132 | return; |
134 | /*Set truncation depth and Enable truncation*/ | 133 | /*Set truncation depth and Enable truncation*/ |
135 | REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, | 134 | REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, |
136 | FMT_TRUNCATE_EN, 1, | 135 | FMT_TRUNCATE_EN, 1, |
137 | FMT_TRUNCATE_DEPTH, | 136 | FMT_TRUNCATE_DEPTH, |
138 | params->flags.TRUNCATE_MODE, | 137 | params->flags.TRUNCATE_DEPTH, |
139 | FMT_TRUNCATE_MODE, | 138 | FMT_TRUNCATE_MODE, |
140 | params->flags.TRUNCATE_DEPTH); | 139 | params->flags.TRUNCATE_MODE); |
141 | } | 140 | } |
142 | 141 | ||
143 | 142 | ||
144 | /** | 143 | /** |
145 | * set_spatial_dither | 144 | * set_spatial_dither |
146 | * 1) set spatial dithering mode: pattern of seed | 145 | * 1) set spatial dithering mode: pattern of seed |
147 | * 2) set spatical dithering depth: 0 for 18bpp or 1 for 24bpp | 146 | * 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp |
148 | * 3) set random seed | 147 | * 3) set random seed |
149 | * 4) set random mode | 148 | * 4) set random mode |
150 | * lfsr is reset every frame or not reset | 149 | * lfsr is reset every frame or not reset |
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index 5f4c2e833a65..d665dd5af5dd 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h | |||
@@ -97,7 +97,7 @@ static const struct ast_vbios_dclk_info dclk_table[] = { | |||
97 | {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ | 97 | {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ |
98 | {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ | 98 | {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ |
99 | {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ | 99 | {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ |
100 | {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ | 100 | {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ |
101 | {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ | 101 | {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ |
102 | {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ | 102 | {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ |
103 | {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ | 103 | {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ |
@@ -127,7 +127,7 @@ static const struct ast_vbios_dclk_info dclk_table_ast2500[] = { | |||
127 | {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ | 127 | {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ |
128 | {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ | 128 | {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ |
129 | {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ | 129 | {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ |
130 | {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ | 130 | {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ |
131 | {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ | 131 | {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ |
132 | {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ | 132 | {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ |
133 | {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ | 133 | {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ |
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index cd23b1b28259..c91b9b054e3f 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c | |||
@@ -294,22 +294,7 @@ static void cirrus_crtc_prepare(struct drm_crtc *crtc) | |||
294 | { | 294 | { |
295 | } | 295 | } |
296 | 296 | ||
297 | /* | 297 | static void cirrus_crtc_load_lut(struct drm_crtc *crtc) |
298 | * This is called after a mode is programmed. It should reverse anything done | ||
299 | * by the prepare function | ||
300 | */ | ||
301 | static void cirrus_crtc_commit(struct drm_crtc *crtc) | ||
302 | { | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * The core can pass us a set of gamma values to program. We actually only | ||
307 | * use this for 8-bit mode so can't perform smooth fades on deeper modes, | ||
308 | * but it's a requirement that we provide the function | ||
309 | */ | ||
310 | static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
311 | u16 *blue, uint32_t size, | ||
312 | struct drm_modeset_acquire_ctx *ctx) | ||
313 | { | 298 | { |
314 | struct drm_device *dev = crtc->dev; | 299 | struct drm_device *dev = crtc->dev; |
315 | struct cirrus_device *cdev = dev->dev_private; | 300 | struct cirrus_device *cdev = dev->dev_private; |
@@ -317,7 +302,7 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | |||
317 | int i; | 302 | int i; |
318 | 303 | ||
319 | if (!crtc->enabled) | 304 | if (!crtc->enabled) |
320 | return 0; | 305 | return; |
321 | 306 | ||
322 | r = crtc->gamma_store; | 307 | r = crtc->gamma_store; |
323 | g = r + crtc->gamma_size; | 308 | g = r + crtc->gamma_size; |
@@ -330,6 +315,27 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | |||
330 | WREG8(PALETTE_DATA, *g++ >> 8); | 315 | WREG8(PALETTE_DATA, *g++ >> 8); |
331 | WREG8(PALETTE_DATA, *b++ >> 8); | 316 | WREG8(PALETTE_DATA, *b++ >> 8); |
332 | } | 317 | } |
318 | } | ||
319 | |||
320 | /* | ||
321 | * This is called after a mode is programmed. It should reverse anything done | ||
322 | * by the prepare function | ||
323 | */ | ||
324 | static void cirrus_crtc_commit(struct drm_crtc *crtc) | ||
325 | { | ||
326 | cirrus_crtc_load_lut(crtc); | ||
327 | } | ||
328 | |||
329 | /* | ||
330 | * The core can pass us a set of gamma values to program. We actually only | ||
331 | * use this for 8-bit mode so can't perform smooth fades on deeper modes, | ||
332 | * but it's a requirement that we provide the function | ||
333 | */ | ||
334 | static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
335 | u16 *blue, uint32_t size, | ||
336 | struct drm_modeset_acquire_ctx *ctx) | ||
337 | { | ||
338 | cirrus_crtc_load_lut(crtc); | ||
333 | 339 | ||
334 | return 0; | 340 | return 0; |
335 | } | 341 | } |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 49147b2aa288..134069f36482 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -113,6 +113,9 @@ static const struct edid_quirk { | |||
113 | /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ | 113 | /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ |
114 | { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, | 114 | { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, |
115 | 115 | ||
116 | /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */ | ||
117 | { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC }, | ||
118 | |||
116 | /* Belinea 10 15 55 */ | 119 | /* Belinea 10 15 55 */ |
117 | { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, | 120 | { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, |
118 | { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, | 121 | { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, |
@@ -162,6 +165,24 @@ static const struct edid_quirk { | |||
162 | 165 | ||
163 | /* HTC Vive VR Headset */ | 166 | /* HTC Vive VR Headset */ |
164 | { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, | 167 | { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, |
168 | |||
169 | /* Oculus Rift DK1, DK2, and CV1 VR Headsets */ | ||
170 | { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP }, | ||
171 | { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP }, | ||
172 | { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP }, | ||
173 | |||
174 | /* Windows Mixed Reality Headsets */ | ||
175 | { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP }, | ||
176 | { "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP }, | ||
177 | { "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP }, | ||
178 | { "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP }, | ||
179 | { "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP }, | ||
180 | { "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP }, | ||
181 | { "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP }, | ||
182 | { "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP }, | ||
183 | |||
184 | /* Sony PlayStation VR Headset */ | ||
185 | { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP }, | ||
165 | }; | 186 | }; |
166 | 187 | ||
167 | /* | 188 | /* |
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 0eebe8ba8a2c..ad67203de715 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c | |||
@@ -121,6 +121,10 @@ int drm_mode_addfb(struct drm_device *dev, | |||
121 | r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); | 121 | r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); |
122 | r.handles[0] = or->handle; | 122 | r.handles[0] = or->handle; |
123 | 123 | ||
124 | if (r.pixel_format == DRM_FORMAT_XRGB2101010 && | ||
125 | dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP) | ||
126 | r.pixel_format = DRM_FORMAT_XBGR2101010; | ||
127 | |||
124 | ret = drm_mode_addfb2(dev, &r, file_priv); | 128 | ret = drm_mode_addfb2(dev, &r, file_priv); |
125 | if (ret) | 129 | if (ret) |
126 | return ret; | 130 | return ret; |
@@ -458,6 +462,12 @@ int drm_mode_getfb(struct drm_device *dev, | |||
458 | if (!fb) | 462 | if (!fb) |
459 | return -ENOENT; | 463 | return -ENOENT; |
460 | 464 | ||
465 | /* Multi-planar framebuffers need getfb2. */ | ||
466 | if (fb->format->num_planes > 1) { | ||
467 | ret = -EINVAL; | ||
468 | goto out; | ||
469 | } | ||
470 | |||
461 | r->height = fb->height; | 471 | r->height = fb->height; |
462 | r->width = fb->width; | 472 | r->width = fb->width; |
463 | r->depth = fb->format->depth; | 473 | r->depth = fb->format->depth; |
@@ -481,6 +491,7 @@ int drm_mode_getfb(struct drm_device *dev, | |||
481 | ret = -ENODEV; | 491 | ret = -ENODEV; |
482 | } | 492 | } |
483 | 493 | ||
494 | out: | ||
484 | drm_framebuffer_put(fb); | 495 | drm_framebuffer_put(fb); |
485 | 496 | ||
486 | return ret; | 497 | return ret; |
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index a351bd888a61..3166026a1874 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
@@ -837,9 +837,24 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan) | |||
837 | if (!mm->color_adjust) | 837 | if (!mm->color_adjust) |
838 | return NULL; | 838 | return NULL; |
839 | 839 | ||
840 | hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack); | 840 | /* |
841 | hole_start = __drm_mm_hole_node_start(hole); | 841 | * The hole found during scanning should ideally be the first element |
842 | hole_end = hole_start + hole->hole_size; | 842 | * in the hole_stack list, but due to side-effects in the driver it |
843 | * may not be. | ||
844 | */ | ||
845 | list_for_each_entry(hole, &mm->hole_stack, hole_stack) { | ||
846 | hole_start = __drm_mm_hole_node_start(hole); | ||
847 | hole_end = hole_start + hole->hole_size; | ||
848 | |||
849 | if (hole_start <= scan->hit_start && | ||
850 | hole_end >= scan->hit_end) | ||
851 | break; | ||
852 | } | ||
853 | |||
854 | /* We should only be called after we found the hole previously */ | ||
855 | DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack); | ||
856 | if (unlikely(&hole->hole_stack == &mm->hole_stack)) | ||
857 | return NULL; | ||
843 | 858 | ||
844 | DRM_MM_BUG_ON(hole_start > scan->hit_start); | 859 | DRM_MM_BUG_ON(hole_start > scan->hit_start); |
845 | DRM_MM_BUG_ON(hole_end < scan->hit_end); | 860 | DRM_MM_BUG_ON(hole_end < scan->hit_end); |
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 2d1643bdae78..527743394150 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c | |||
@@ -654,6 +654,26 @@ out: | |||
654 | } | 654 | } |
655 | 655 | ||
656 | /** | 656 | /** |
657 | * drm_kms_helper_is_poll_worker - is %current task an output poll worker? | ||
658 | * | ||
659 | * Determine if %current task is an output poll worker. This can be used | ||
660 | * to select distinct code paths for output polling versus other contexts. | ||
661 | * | ||
662 | * One use case is to avoid a deadlock between the output poll worker and | ||
663 | * the autosuspend worker wherein the latter waits for polling to finish | ||
664 | * upon calling drm_kms_helper_poll_disable(), while the former waits for | ||
665 | * runtime suspend to finish upon calling pm_runtime_get_sync() in a | ||
666 | * connector ->detect hook. | ||
667 | */ | ||
668 | bool drm_kms_helper_is_poll_worker(void) | ||
669 | { | ||
670 | struct work_struct *work = current_work(); | ||
671 | |||
672 | return work && work->func == output_poll_execute; | ||
673 | } | ||
674 | EXPORT_SYMBOL(drm_kms_helper_is_poll_worker); | ||
675 | |||
676 | /** | ||
657 | * drm_kms_helper_poll_disable - disable output polling | 677 | * drm_kms_helper_poll_disable - disable output polling |
658 | * @dev: drm_device | 678 | * @dev: drm_device |
659 | * | 679 | * |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 2b8bf2dd6387..f68ef1b3a28c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
@@ -286,7 +286,6 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) | |||
286 | 286 | ||
287 | node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL); | 287 | node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL); |
288 | if (!node) { | 288 | if (!node) { |
289 | dev_err(dev, "failed to allocate memory\n"); | ||
290 | ret = -ENOMEM; | 289 | ret = -ENOMEM; |
291 | goto err; | 290 | goto err; |
292 | } | 291 | } |
@@ -926,7 +925,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) | |||
926 | struct drm_device *drm_dev = g2d->subdrv.drm_dev; | 925 | struct drm_device *drm_dev = g2d->subdrv.drm_dev; |
927 | struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node; | 926 | struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node; |
928 | struct drm_exynos_pending_g2d_event *e; | 927 | struct drm_exynos_pending_g2d_event *e; |
929 | struct timeval now; | 928 | struct timespec64 now; |
930 | 929 | ||
931 | if (list_empty(&runqueue_node->event_list)) | 930 | if (list_empty(&runqueue_node->event_list)) |
932 | return; | 931 | return; |
@@ -934,9 +933,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no) | |||
934 | e = list_first_entry(&runqueue_node->event_list, | 933 | e = list_first_entry(&runqueue_node->event_list, |
935 | struct drm_exynos_pending_g2d_event, base.link); | 934 | struct drm_exynos_pending_g2d_event, base.link); |
936 | 935 | ||
937 | do_gettimeofday(&now); | 936 | ktime_get_ts64(&now); |
938 | e->event.tv_sec = now.tv_sec; | 937 | e->event.tv_sec = now.tv_sec; |
939 | e->event.tv_usec = now.tv_usec; | 938 | e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC; |
940 | e->event.cmdlist_no = cmdlist_no; | 939 | e->event.cmdlist_no = cmdlist_no; |
941 | 940 | ||
942 | drm_send_event(drm_dev, &e->base); | 941 | drm_send_event(drm_dev, &e->base); |
@@ -1358,10 +1357,9 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, | |||
1358 | return -EFAULT; | 1357 | return -EFAULT; |
1359 | 1358 | ||
1360 | runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL); | 1359 | runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL); |
1361 | if (!runqueue_node) { | 1360 | if (!runqueue_node) |
1362 | dev_err(dev, "failed to allocate memory\n"); | ||
1363 | return -ENOMEM; | 1361 | return -ENOMEM; |
1364 | } | 1362 | |
1365 | run_cmdlist = &runqueue_node->run_cmdlist; | 1363 | run_cmdlist = &runqueue_node->run_cmdlist; |
1366 | event_list = &runqueue_node->event_list; | 1364 | event_list = &runqueue_node->event_list; |
1367 | INIT_LIST_HEAD(run_cmdlist); | 1365 | INIT_LIST_HEAD(run_cmdlist); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h deleted file mode 100644 index 71a0b4c0c1e8..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.h +++ /dev/null | |||
@@ -1,19 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | ||
3 | * | ||
4 | * Authors: | ||
5 | * YoungJun Cho <yj44.cho@samsung.com> | ||
6 | * Eunchul Kim <chulspro.kim@samsung.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #ifndef _EXYNOS_DRM_ROTATOR_H_ | ||
15 | #define _EXYNOS_DRM_ROTATOR_H_ | ||
16 | |||
17 | /* TODO */ | ||
18 | |||
19 | #endif | ||
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index a4b75a46f946..abd84cbcf1c2 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
@@ -1068,10 +1068,13 @@ static void hdmi_audio_config(struct hdmi_context *hdata) | |||
1068 | /* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */ | 1068 | /* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */ |
1069 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5) | 1069 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5) |
1070 | | HDMI_I2S_SEL_LRCK(6)); | 1070 | | HDMI_I2S_SEL_LRCK(6)); |
1071 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1) | 1071 | |
1072 | | HDMI_I2S_SEL_SDATA2(4)); | 1072 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(3) |
1073 | | HDMI_I2S_SEL_SDATA0(4)); | ||
1074 | |||
1073 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1) | 1075 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1) |
1074 | | HDMI_I2S_SEL_SDATA2(2)); | 1076 | | HDMI_I2S_SEL_SDATA2(2)); |
1077 | |||
1075 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0)); | 1078 | hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0)); |
1076 | 1079 | ||
1077 | /* I2S_CON_1 & 2 */ | 1080 | /* I2S_CON_1 & 2 */ |
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h index 30496134a3d0..d7cbe53c4c01 100644 --- a/drivers/gpu/drm/exynos/regs-fimc.h +++ b/drivers/gpu/drm/exynos/regs-fimc.h | |||
@@ -569,7 +569,7 @@ | |||
569 | #define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26) | 569 | #define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26) |
570 | #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26) | 570 | #define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26) |
571 | #define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26) | 571 | #define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26) |
572 | #define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0)) | 572 | #define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0)) |
573 | 573 | ||
574 | /* Real input DMA size register */ | 574 | /* Real input DMA size register */ |
575 | #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31) | 575 | #define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31) |
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h index 04be0f7e8193..4420c203ac85 100644 --- a/drivers/gpu/drm/exynos/regs-hdmi.h +++ b/drivers/gpu/drm/exynos/regs-hdmi.h | |||
@@ -464,7 +464,7 @@ | |||
464 | 464 | ||
465 | /* I2S_PIN_SEL_1 */ | 465 | /* I2S_PIN_SEL_1 */ |
466 | #define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4) | 466 | #define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4) |
467 | #define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7) | 467 | #define HDMI_I2S_SEL_SDATA0(x) ((x) & 0x7) |
468 | 468 | ||
469 | /* I2S_PIN_SEL_2 */ | 469 | /* I2S_PIN_SEL_2 */ |
470 | #define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4) | 470 | #define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4) |
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index c8454ac43fae..db6b94dda5df 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
@@ -471,6 +471,7 @@ struct parser_exec_state { | |||
471 | * used when ret from 2nd level batch buffer | 471 | * used when ret from 2nd level batch buffer |
472 | */ | 472 | */ |
473 | int saved_buf_addr_type; | 473 | int saved_buf_addr_type; |
474 | bool is_ctx_wa; | ||
474 | 475 | ||
475 | struct cmd_info *info; | 476 | struct cmd_info *info; |
476 | 477 | ||
@@ -1715,6 +1716,11 @@ static int perform_bb_shadow(struct parser_exec_state *s) | |||
1715 | bb->accessing = true; | 1716 | bb->accessing = true; |
1716 | bb->bb_start_cmd_va = s->ip_va; | 1717 | bb->bb_start_cmd_va = s->ip_va; |
1717 | 1718 | ||
1719 | if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa)) | ||
1720 | bb->bb_offset = s->ip_va - s->rb_va; | ||
1721 | else | ||
1722 | bb->bb_offset = 0; | ||
1723 | |||
1718 | /* | 1724 | /* |
1719 | * ip_va saves the virtual address of the shadow batch buffer, while | 1725 | * ip_va saves the virtual address of the shadow batch buffer, while |
1720 | * ip_gma saves the graphics address of the original batch buffer. | 1726 | * ip_gma saves the graphics address of the original batch buffer. |
@@ -2571,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload) | |||
2571 | s.ring_tail = gma_tail; | 2577 | s.ring_tail = gma_tail; |
2572 | s.rb_va = workload->shadow_ring_buffer_va; | 2578 | s.rb_va = workload->shadow_ring_buffer_va; |
2573 | s.workload = workload; | 2579 | s.workload = workload; |
2580 | s.is_ctx_wa = false; | ||
2574 | 2581 | ||
2575 | if ((bypass_scan_mask & (1 << workload->ring_id)) || | 2582 | if ((bypass_scan_mask & (1 << workload->ring_id)) || |
2576 | gma_head == gma_tail) | 2583 | gma_head == gma_tail) |
@@ -2624,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
2624 | s.ring_tail = gma_tail; | 2631 | s.ring_tail = gma_tail; |
2625 | s.rb_va = wa_ctx->indirect_ctx.shadow_va; | 2632 | s.rb_va = wa_ctx->indirect_ctx.shadow_va; |
2626 | s.workload = workload; | 2633 | s.workload = workload; |
2634 | s.is_ctx_wa = true; | ||
2627 | 2635 | ||
2628 | if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { | 2636 | if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { |
2629 | ret = -EINVAL; | 2637 | ret = -EINVAL; |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 520fe3d0a882..c16a492449d7 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
@@ -750,6 +750,25 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, | |||
750 | return ret == 0 ? count : ret; | 750 | return ret == 0 ? count : ret; |
751 | } | 751 | } |
752 | 752 | ||
753 | static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos) | ||
754 | { | ||
755 | struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); | ||
756 | unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); | ||
757 | struct intel_gvt *gvt = vgpu->gvt; | ||
758 | int offset; | ||
759 | |||
760 | /* Only allow MMIO GGTT entry access */ | ||
761 | if (index != PCI_BASE_ADDRESS_0) | ||
762 | return false; | ||
763 | |||
764 | offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) - | ||
765 | intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0); | ||
766 | |||
767 | return (offset >= gvt->device_info.gtt_start_offset && | ||
768 | offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ? | ||
769 | true : false; | ||
770 | } | ||
771 | |||
753 | static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, | 772 | static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, |
754 | size_t count, loff_t *ppos) | 773 | size_t count, loff_t *ppos) |
755 | { | 774 | { |
@@ -759,7 +778,21 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, | |||
759 | while (count) { | 778 | while (count) { |
760 | size_t filled; | 779 | size_t filled; |
761 | 780 | ||
762 | if (count >= 4 && !(*ppos % 4)) { | 781 | /* Only support GGTT entry 8 bytes read */ |
782 | if (count >= 8 && !(*ppos % 8) && | ||
783 | gtt_entry(mdev, ppos)) { | ||
784 | u64 val; | ||
785 | |||
786 | ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), | ||
787 | ppos, false); | ||
788 | if (ret <= 0) | ||
789 | goto read_err; | ||
790 | |||
791 | if (copy_to_user(buf, &val, sizeof(val))) | ||
792 | goto read_err; | ||
793 | |||
794 | filled = 8; | ||
795 | } else if (count >= 4 && !(*ppos % 4)) { | ||
763 | u32 val; | 796 | u32 val; |
764 | 797 | ||
765 | ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), | 798 | ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), |
@@ -819,7 +852,21 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, | |||
819 | while (count) { | 852 | while (count) { |
820 | size_t filled; | 853 | size_t filled; |
821 | 854 | ||
822 | if (count >= 4 && !(*ppos % 4)) { | 855 | /* Only support GGTT entry 8 bytes write */ |
856 | if (count >= 8 && !(*ppos % 8) && | ||
857 | gtt_entry(mdev, ppos)) { | ||
858 | u64 val; | ||
859 | |||
860 | if (copy_from_user(&val, buf, sizeof(val))) | ||
861 | goto write_err; | ||
862 | |||
863 | ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), | ||
864 | ppos, true); | ||
865 | if (ret <= 0) | ||
866 | goto write_err; | ||
867 | |||
868 | filled = 8; | ||
869 | } else if (count >= 4 && !(*ppos % 4)) { | ||
823 | u32 val; | 870 | u32 val; |
824 | 871 | ||
825 | if (copy_from_user(&val, buf, sizeof(val))) | 872 | if (copy_from_user(&val, buf, sizeof(val))) |
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 74a9c7b5516e..a5bac83d53a9 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c | |||
@@ -120,6 +120,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { | |||
120 | {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ | 120 | {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ |
121 | {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ | 121 | {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ |
122 | {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ | 122 | {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ |
123 | {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */ | ||
123 | {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */ | 124 | {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */ |
124 | {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */ | 125 | {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */ |
125 | {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */ | 126 | {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */ |
@@ -557,9 +558,11 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, | |||
557 | * performace for batch mmio read/write, so we need | 558 | * performace for batch mmio read/write, so we need |
558 | * handle forcewake mannually. | 559 | * handle forcewake mannually. |
559 | */ | 560 | */ |
561 | intel_runtime_pm_get(dev_priv); | ||
560 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 562 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
561 | switch_mmio(pre, next, ring_id); | 563 | switch_mmio(pre, next, ring_id); |
562 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 564 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
565 | intel_runtime_pm_put(dev_priv); | ||
563 | } | 566 | } |
564 | 567 | ||
565 | /** | 568 | /** |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index a55b4975c154..638abe84857c 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
@@ -75,6 +75,54 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload) | |||
75 | kunmap(page); | 75 | kunmap(page); |
76 | } | 76 | } |
77 | 77 | ||
78 | /* | ||
79 | * when populating shadow ctx from guest, we should not overrride oa related | ||
80 | * registers, so that they will not be overlapped by guest oa configs. Thus | ||
81 | * made it possible to capture oa data from host for both host and guests. | ||
82 | */ | ||
83 | static void sr_oa_regs(struct intel_vgpu_workload *workload, | ||
84 | u32 *reg_state, bool save) | ||
85 | { | ||
86 | struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; | ||
87 | u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; | ||
88 | u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; | ||
89 | int i = 0; | ||
90 | u32 flex_mmio[] = { | ||
91 | i915_mmio_reg_offset(EU_PERF_CNTL0), | ||
92 | i915_mmio_reg_offset(EU_PERF_CNTL1), | ||
93 | i915_mmio_reg_offset(EU_PERF_CNTL2), | ||
94 | i915_mmio_reg_offset(EU_PERF_CNTL3), | ||
95 | i915_mmio_reg_offset(EU_PERF_CNTL4), | ||
96 | i915_mmio_reg_offset(EU_PERF_CNTL5), | ||
97 | i915_mmio_reg_offset(EU_PERF_CNTL6), | ||
98 | }; | ||
99 | |||
100 | if (!workload || !reg_state || workload->ring_id != RCS) | ||
101 | return; | ||
102 | |||
103 | if (save) { | ||
104 | workload->oactxctrl = reg_state[ctx_oactxctrl + 1]; | ||
105 | |||
106 | for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { | ||
107 | u32 state_offset = ctx_flexeu0 + i * 2; | ||
108 | |||
109 | workload->flex_mmio[i] = reg_state[state_offset + 1]; | ||
110 | } | ||
111 | } else { | ||
112 | reg_state[ctx_oactxctrl] = | ||
113 | i915_mmio_reg_offset(GEN8_OACTXCONTROL); | ||
114 | reg_state[ctx_oactxctrl + 1] = workload->oactxctrl; | ||
115 | |||
116 | for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) { | ||
117 | u32 state_offset = ctx_flexeu0 + i * 2; | ||
118 | u32 mmio = flex_mmio[i]; | ||
119 | |||
120 | reg_state[state_offset] = mmio; | ||
121 | reg_state[state_offset + 1] = workload->flex_mmio[i]; | ||
122 | } | ||
123 | } | ||
124 | } | ||
125 | |||
78 | static int populate_shadow_context(struct intel_vgpu_workload *workload) | 126 | static int populate_shadow_context(struct intel_vgpu_workload *workload) |
79 | { | 127 | { |
80 | struct intel_vgpu *vgpu = workload->vgpu; | 128 | struct intel_vgpu *vgpu = workload->vgpu; |
@@ -121,6 +169,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) | |||
121 | page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); | 169 | page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); |
122 | shadow_ring_context = kmap(page); | 170 | shadow_ring_context = kmap(page); |
123 | 171 | ||
172 | sr_oa_regs(workload, (u32 *)shadow_ring_context, true); | ||
124 | #define COPY_REG(name) \ | 173 | #define COPY_REG(name) \ |
125 | intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ | 174 | intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ |
126 | + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) | 175 | + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) |
@@ -149,6 +198,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) | |||
149 | sizeof(*shadow_ring_context), | 198 | sizeof(*shadow_ring_context), |
150 | I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); | 199 | I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); |
151 | 200 | ||
201 | sr_oa_regs(workload, (u32 *)shadow_ring_context, false); | ||
152 | kunmap(page); | 202 | kunmap(page); |
153 | return 0; | 203 | return 0; |
154 | } | 204 | } |
@@ -408,6 +458,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) | |||
408 | goto err; | 458 | goto err; |
409 | } | 459 | } |
410 | 460 | ||
461 | /* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va | ||
462 | * is only updated into ring_scan_buffer, not real ring address | ||
463 | * allocated in later copy_workload_to_ring_buffer. pls be noted | ||
464 | * shadow_ring_buffer_va is now pointed to real ring buffer va | ||
465 | * in copy_workload_to_ring_buffer. | ||
466 | */ | ||
467 | |||
468 | if (bb->bb_offset) | ||
469 | bb->bb_start_cmd_va = workload->shadow_ring_buffer_va | ||
470 | + bb->bb_offset; | ||
471 | |||
411 | /* relocate shadow batch buffer */ | 472 | /* relocate shadow batch buffer */ |
412 | bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma); | 473 | bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma); |
413 | if (gmadr_bytes == 8) | 474 | if (gmadr_bytes == 8) |
@@ -1078,10 +1139,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) | |||
1078 | 1139 | ||
1079 | bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); | 1140 | bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); |
1080 | 1141 | ||
1081 | s->workloads = kmem_cache_create("gvt-g_vgpu_workload", | 1142 | s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload", |
1082 | sizeof(struct intel_vgpu_workload), 0, | 1143 | sizeof(struct intel_vgpu_workload), 0, |
1083 | SLAB_HWCACHE_ALIGN, | 1144 | SLAB_HWCACHE_ALIGN, |
1084 | NULL); | 1145 | offsetof(struct intel_vgpu_workload, rb_tail), |
1146 | sizeof_field(struct intel_vgpu_workload, rb_tail), | ||
1147 | NULL); | ||
1085 | 1148 | ||
1086 | if (!s->workloads) { | 1149 | if (!s->workloads) { |
1087 | ret = -ENOMEM; | 1150 | ret = -ENOMEM; |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index bab4097aa6d7..486ed57a4ad1 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h | |||
@@ -110,6 +110,10 @@ struct intel_vgpu_workload { | |||
110 | /* shadow batch buffer */ | 110 | /* shadow batch buffer */ |
111 | struct list_head shadow_bb; | 111 | struct list_head shadow_bb; |
112 | struct intel_shadow_wa_ctx wa_ctx; | 112 | struct intel_shadow_wa_ctx wa_ctx; |
113 | |||
114 | /* oa registers */ | ||
115 | u32 oactxctrl; | ||
116 | u32 flex_mmio[7]; | ||
113 | }; | 117 | }; |
114 | 118 | ||
115 | struct intel_vgpu_shadow_bb { | 119 | struct intel_vgpu_shadow_bb { |
@@ -120,6 +124,7 @@ struct intel_vgpu_shadow_bb { | |||
120 | u32 *bb_start_cmd_va; | 124 | u32 *bb_start_cmd_va; |
121 | unsigned int clflush; | 125 | unsigned int clflush; |
122 | bool accessing; | 126 | bool accessing; |
127 | unsigned long bb_offset; | ||
123 | }; | 128 | }; |
124 | 129 | ||
125 | #define workload_q_head(vgpu, ring_id) \ | 130 | #define workload_q_head(vgpu, ring_id) \ |
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h index fc7831a62121..82093f1e8612 100644 --- a/drivers/gpu/drm/i915/gvt/trace.h +++ b/drivers/gpu/drm/i915/gvt/trace.h | |||
@@ -333,7 +333,7 @@ TRACE_EVENT(render_mmio, | |||
333 | TP_PROTO(int old_id, int new_id, char *action, unsigned int reg, | 333 | TP_PROTO(int old_id, int new_id, char *action, unsigned int reg, |
334 | unsigned int old_val, unsigned int new_val), | 334 | unsigned int old_val, unsigned int new_val), |
335 | 335 | ||
336 | TP_ARGS(old_id, new_id, action, reg, new_val, old_val), | 336 | TP_ARGS(old_id, new_id, action, reg, old_val, new_val), |
337 | 337 | ||
338 | TP_STRUCT__entry( | 338 | TP_STRUCT__entry( |
339 | __field(int, old_id) | 339 | __field(int, old_id) |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0359d6f870b4..7b5a9d7c9593 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -433,20 +433,28 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, | |||
433 | dma_fence_put(shared[i]); | 433 | dma_fence_put(shared[i]); |
434 | kfree(shared); | 434 | kfree(shared); |
435 | 435 | ||
436 | /* | ||
437 | * If both shared fences and an exclusive fence exist, | ||
438 | * then by construction the shared fences must be later | ||
439 | * than the exclusive fence. If we successfully wait for | ||
440 | * all the shared fences, we know that the exclusive fence | ||
441 | * must all be signaled. If all the shared fences are | ||
442 | * signaled, we can prune the array and recover the | ||
443 | * floating references on the fences/requests. | ||
444 | */ | ||
436 | prune_fences = count && timeout >= 0; | 445 | prune_fences = count && timeout >= 0; |
437 | } else { | 446 | } else { |
438 | excl = reservation_object_get_excl_rcu(resv); | 447 | excl = reservation_object_get_excl_rcu(resv); |
439 | } | 448 | } |
440 | 449 | ||
441 | if (excl && timeout >= 0) { | 450 | if (excl && timeout >= 0) |
442 | timeout = i915_gem_object_wait_fence(excl, flags, timeout, | 451 | timeout = i915_gem_object_wait_fence(excl, flags, timeout, |
443 | rps_client); | 452 | rps_client); |
444 | prune_fences = timeout >= 0; | ||
445 | } | ||
446 | 453 | ||
447 | dma_fence_put(excl); | 454 | dma_fence_put(excl); |
448 | 455 | ||
449 | /* Oportunistically prune the fences iff we know they have *all* been | 456 | /* |
457 | * Opportunistically prune the fences iff we know they have *all* been | ||
450 | * signaled and that the reservation object has not been changed (i.e. | 458 | * signaled and that the reservation object has not been changed (i.e. |
451 | * no new fences have been added). | 459 | * no new fences have been added). |
452 | */ | 460 | */ |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index b33d2158c234..e5e6f6bb2b05 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
@@ -304,8 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, | |||
304 | { | 304 | { |
305 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); | 305 | struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); |
306 | struct intel_rps *rps = &dev_priv->gt_pm.rps; | 306 | struct intel_rps *rps = &dev_priv->gt_pm.rps; |
307 | u32 val; | 307 | bool boost = false; |
308 | ssize_t ret; | 308 | ssize_t ret; |
309 | u32 val; | ||
309 | 310 | ||
310 | ret = kstrtou32(buf, 0, &val); | 311 | ret = kstrtou32(buf, 0, &val); |
311 | if (ret) | 312 | if (ret) |
@@ -317,8 +318,13 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, | |||
317 | return -EINVAL; | 318 | return -EINVAL; |
318 | 319 | ||
319 | mutex_lock(&dev_priv->pcu_lock); | 320 | mutex_lock(&dev_priv->pcu_lock); |
320 | rps->boost_freq = val; | 321 | if (val != rps->boost_freq) { |
322 | rps->boost_freq = val; | ||
323 | boost = atomic_read(&rps->num_waiters); | ||
324 | } | ||
321 | mutex_unlock(&dev_priv->pcu_lock); | 325 | mutex_unlock(&dev_priv->pcu_lock); |
326 | if (boost) | ||
327 | schedule_work(&rps->work); | ||
322 | 328 | ||
323 | return count; | 329 | return count; |
324 | } | 330 | } |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index dbcf1a0586f9..8c2d778560f0 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -2205,8 +2205,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, | |||
2205 | intel_prepare_dp_ddi_buffers(encoder, crtc_state); | 2205 | intel_prepare_dp_ddi_buffers(encoder, crtc_state); |
2206 | 2206 | ||
2207 | intel_ddi_init_dp_buf_reg(encoder); | 2207 | intel_ddi_init_dp_buf_reg(encoder); |
2208 | if (!is_mst) | 2208 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); |
2209 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); | ||
2210 | intel_dp_start_link_train(intel_dp); | 2209 | intel_dp_start_link_train(intel_dp); |
2211 | if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) | 2210 | if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) |
2212 | intel_dp_stop_link_train(intel_dp); | 2211 | intel_dp_stop_link_train(intel_dp); |
@@ -2304,14 +2303,12 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, | |||
2304 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 2303 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
2305 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); | 2304 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); |
2306 | struct intel_dp *intel_dp = &dig_port->dp; | 2305 | struct intel_dp *intel_dp = &dig_port->dp; |
2307 | bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST); | ||
2308 | 2306 | ||
2309 | /* | 2307 | /* |
2310 | * Power down sink before disabling the port, otherwise we end | 2308 | * Power down sink before disabling the port, otherwise we end |
2311 | * up getting interrupts from the sink on detecting link loss. | 2309 | * up getting interrupts from the sink on detecting link loss. |
2312 | */ | 2310 | */ |
2313 | if (!is_mst) | 2311 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
2314 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | ||
2315 | 2312 | ||
2316 | intel_disable_ddi_buf(encoder); | 2313 | intel_disable_ddi_buf(encoder); |
2317 | 2314 | ||
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 42e45ae87393..c8ea510629fa 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c | |||
@@ -246,7 +246,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) | |||
246 | */ | 246 | */ |
247 | tmp = I915_READ_CTL(engine); | 247 | tmp = I915_READ_CTL(engine); |
248 | if (tmp & RING_WAIT) { | 248 | if (tmp & RING_WAIT) { |
249 | i915_handle_error(dev_priv, 0, | 249 | i915_handle_error(dev_priv, BIT(engine->id), |
250 | "Kicking stuck wait on %s", | 250 | "Kicking stuck wait on %s", |
251 | engine->name); | 251 | engine->name); |
252 | I915_WRITE_CTL(engine, tmp); | 252 | I915_WRITE_CTL(engine, tmp); |
@@ -258,7 +258,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) | |||
258 | default: | 258 | default: |
259 | return ENGINE_DEAD; | 259 | return ENGINE_DEAD; |
260 | case 1: | 260 | case 1: |
261 | i915_handle_error(dev_priv, 0, | 261 | i915_handle_error(dev_priv, ALL_ENGINES, |
262 | "Kicking stuck semaphore on %s", | 262 | "Kicking stuck semaphore on %s", |
263 | engine->name); | 263 | engine->name); |
264 | I915_WRITE_CTL(engine, tmp); | 264 | I915_WRITE_CTL(engine, tmp); |
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 9a9961802f5c..e83af0f2be86 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c | |||
@@ -225,7 +225,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc, | |||
225 | struct drm_crtc_state *old_crtc_state) | 225 | struct drm_crtc_state *old_crtc_state) |
226 | { | 226 | { |
227 | drm_crtc_vblank_on(crtc); | 227 | drm_crtc_vblank_on(crtc); |
228 | } | ||
228 | 229 | ||
230 | static void ipu_crtc_atomic_flush(struct drm_crtc *crtc, | ||
231 | struct drm_crtc_state *old_crtc_state) | ||
232 | { | ||
229 | spin_lock_irq(&crtc->dev->event_lock); | 233 | spin_lock_irq(&crtc->dev->event_lock); |
230 | if (crtc->state->event) { | 234 | if (crtc->state->event) { |
231 | WARN_ON(drm_crtc_vblank_get(crtc)); | 235 | WARN_ON(drm_crtc_vblank_get(crtc)); |
@@ -293,6 +297,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = { | |||
293 | .mode_set_nofb = ipu_crtc_mode_set_nofb, | 297 | .mode_set_nofb = ipu_crtc_mode_set_nofb, |
294 | .atomic_check = ipu_crtc_atomic_check, | 298 | .atomic_check = ipu_crtc_atomic_check, |
295 | .atomic_begin = ipu_crtc_atomic_begin, | 299 | .atomic_begin = ipu_crtc_atomic_begin, |
300 | .atomic_flush = ipu_crtc_atomic_flush, | ||
296 | .atomic_disable = ipu_crtc_atomic_disable, | 301 | .atomic_disable = ipu_crtc_atomic_disable, |
297 | .atomic_enable = ipu_crtc_atomic_enable, | 302 | .atomic_enable = ipu_crtc_atomic_enable, |
298 | }; | 303 | }; |
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index d7e3583e608e..203f247d4854 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <drm/drm_plane_helper.h> | 22 | #include <drm/drm_plane_helper.h> |
23 | 23 | ||
24 | #include "video/imx-ipu-v3.h" | 24 | #include "video/imx-ipu-v3.h" |
25 | #include "imx-drm.h" | ||
25 | #include "ipuv3-plane.h" | 26 | #include "ipuv3-plane.h" |
26 | 27 | ||
27 | struct ipu_plane_state { | 28 | struct ipu_plane_state { |
@@ -272,7 +273,7 @@ static void ipu_plane_destroy(struct drm_plane *plane) | |||
272 | kfree(ipu_plane); | 273 | kfree(ipu_plane); |
273 | } | 274 | } |
274 | 275 | ||
275 | void ipu_plane_state_reset(struct drm_plane *plane) | 276 | static void ipu_plane_state_reset(struct drm_plane *plane) |
276 | { | 277 | { |
277 | struct ipu_plane_state *ipu_state; | 278 | struct ipu_plane_state *ipu_state; |
278 | 279 | ||
@@ -292,7 +293,8 @@ void ipu_plane_state_reset(struct drm_plane *plane) | |||
292 | plane->state = &ipu_state->base; | 293 | plane->state = &ipu_state->base; |
293 | } | 294 | } |
294 | 295 | ||
295 | struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane) | 296 | static struct drm_plane_state * |
297 | ipu_plane_duplicate_state(struct drm_plane *plane) | ||
296 | { | 298 | { |
297 | struct ipu_plane_state *state; | 299 | struct ipu_plane_state *state; |
298 | 300 | ||
@@ -306,8 +308,8 @@ struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane) | |||
306 | return &state->base; | 308 | return &state->base; |
307 | } | 309 | } |
308 | 310 | ||
309 | void ipu_plane_destroy_state(struct drm_plane *plane, | 311 | static void ipu_plane_destroy_state(struct drm_plane *plane, |
310 | struct drm_plane_state *state) | 312 | struct drm_plane_state *state) |
311 | { | 313 | { |
312 | struct ipu_plane_state *ipu_state = to_ipu_plane_state(state); | 314 | struct ipu_plane_state *ipu_state = to_ipu_plane_state(state); |
313 | 315 | ||
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c index 5155f0179b61..05520202c967 100644 --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include "meson_venc.h" | 36 | #include "meson_venc.h" |
37 | #include "meson_vpp.h" | 37 | #include "meson_vpp.h" |
38 | #include "meson_viu.h" | 38 | #include "meson_viu.h" |
39 | #include "meson_canvas.h" | ||
39 | #include "meson_registers.h" | 40 | #include "meson_registers.h" |
40 | 41 | ||
41 | /* CRTC definition */ | 42 | /* CRTC definition */ |
@@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv) | |||
192 | } else | 193 | } else |
193 | meson_vpp_disable_interlace_vscaler_osd1(priv); | 194 | meson_vpp_disable_interlace_vscaler_osd1(priv); |
194 | 195 | ||
196 | meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, | ||
197 | priv->viu.osd1_addr, priv->viu.osd1_stride, | ||
198 | priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE, | ||
199 | MESON_CANVAS_BLKMODE_LINEAR); | ||
200 | |||
195 | /* Enable OSD1 */ | 201 | /* Enable OSD1 */ |
196 | writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND, | 202 | writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND, |
197 | priv->io_base + _REG(VPP_MISC)); | 203 | priv->io_base + _REG(VPP_MISC)); |
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h index 5e8b392b9d1f..8450d6ac8c9b 100644 --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h | |||
@@ -43,6 +43,9 @@ struct meson_drm { | |||
43 | bool osd1_commit; | 43 | bool osd1_commit; |
44 | uint32_t osd1_ctrl_stat; | 44 | uint32_t osd1_ctrl_stat; |
45 | uint32_t osd1_blk0_cfg[5]; | 45 | uint32_t osd1_blk0_cfg[5]; |
46 | uint32_t osd1_addr; | ||
47 | uint32_t osd1_stride; | ||
48 | uint32_t osd1_height; | ||
46 | } viu; | 49 | } viu; |
47 | 50 | ||
48 | struct { | 51 | struct { |
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index c78a3a59f58c..12c80dfcff59 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c | |||
@@ -160,10 +160,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane, | |||
160 | /* Update Canvas with buffer address */ | 160 | /* Update Canvas with buffer address */ |
161 | gem = drm_fb_cma_get_gem_obj(fb, 0); | 161 | gem = drm_fb_cma_get_gem_obj(fb, 0); |
162 | 162 | ||
163 | meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1, | 163 | priv->viu.osd1_addr = gem->paddr; |
164 | gem->paddr, fb->pitches[0], | 164 | priv->viu.osd1_stride = fb->pitches[0]; |
165 | fb->height, MESON_CANVAS_WRAP_NONE, | 165 | priv->viu.osd1_height = fb->height; |
166 | MESON_CANVAS_BLKMODE_LINEAR); | ||
167 | 166 | ||
168 | spin_unlock_irqrestore(&priv->drm->event_lock, flags); | 167 | spin_unlock_irqrestore(&priv->drm->event_lock, flags); |
169 | } | 168 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 380f340204e8..debbbf0fd4bd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c | |||
@@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd) | |||
134 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 134 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
135 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 135 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
136 | struct nvif_object *device = &drm->client.device.object; | 136 | struct nvif_object *device = &drm->client.device.object; |
137 | int or = nv_encoder->or; | 137 | int or = ffs(nv_encoder->dcb->or) - 1; |
138 | u32 div = 1025; | 138 | u32 div = 1025; |
139 | u32 val; | 139 | u32 val; |
140 | 140 | ||
@@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd) | |||
149 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 149 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
150 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 150 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
151 | struct nvif_object *device = &drm->client.device.object; | 151 | struct nvif_object *device = &drm->client.device.object; |
152 | int or = nv_encoder->or; | 152 | int or = ffs(nv_encoder->dcb->or) - 1; |
153 | u32 div = 1025; | 153 | u32 div = 1025; |
154 | u32 val = (bd->props.brightness * div) / 100; | 154 | u32 val = (bd->props.brightness * div) / 100; |
155 | 155 | ||
@@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd) | |||
170 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 170 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
171 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 171 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
172 | struct nvif_object *device = &drm->client.device.object; | 172 | struct nvif_object *device = &drm->client.device.object; |
173 | int or = nv_encoder->or; | 173 | int or = ffs(nv_encoder->dcb->or) - 1; |
174 | u32 div, val; | 174 | u32 div, val; |
175 | 175 | ||
176 | div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); | 176 | div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); |
@@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd) | |||
188 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); | 188 | struct nouveau_encoder *nv_encoder = bl_get_data(bd); |
189 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); | 189 | struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); |
190 | struct nvif_object *device = &drm->client.device.object; | 190 | struct nvif_object *device = &drm->client.device.object; |
191 | int or = nv_encoder->or; | 191 | int or = ffs(nv_encoder->dcb->or) - 1; |
192 | u32 div, val; | 192 | u32 div, val; |
193 | 193 | ||
194 | div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); | 194 | div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); |
@@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector) | |||
228 | return -ENODEV; | 228 | return -ENODEV; |
229 | } | 229 | } |
230 | 230 | ||
231 | if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) | 231 | if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1))) |
232 | return 0; | 232 | return 0; |
233 | 233 | ||
234 | if (drm->client.device.info.chipset <= 0xa0 || | 234 | if (drm->client.device.info.chipset <= 0xa0 || |
@@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev) | |||
268 | struct nvif_device *device = &drm->client.device; | 268 | struct nvif_device *device = &drm->client.device; |
269 | struct drm_connector *connector; | 269 | struct drm_connector *connector; |
270 | 270 | ||
271 | INIT_LIST_HEAD(&drm->bl_connectors); | ||
272 | |||
271 | if (apple_gmux_present()) { | 273 | if (apple_gmux_present()) { |
272 | NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n"); | 274 | NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n"); |
273 | return 0; | 275 | return 0; |
274 | } | 276 | } |
275 | 277 | ||
276 | INIT_LIST_HEAD(&drm->bl_connectors); | ||
277 | |||
278 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 278 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
279 | if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && | 279 | if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && |
280 | connector->connector_type != DRM_MODE_CONNECTOR_eDP) | 280 | connector->connector_type != DRM_MODE_CONNECTOR_eDP) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 69d6e61a01ec..6ed9cb053dfa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) | |||
570 | nv_connector->edid = NULL; | 570 | nv_connector->edid = NULL; |
571 | } | 571 | } |
572 | 572 | ||
573 | ret = pm_runtime_get_sync(connector->dev->dev); | 573 | /* Outputs are only polled while runtime active, so acquiring a |
574 | if (ret < 0 && ret != -EACCES) | 574 | * runtime PM ref here is unnecessary (and would deadlock upon |
575 | return conn_status; | 575 | * runtime suspend because it waits for polling to finish). |
576 | */ | ||
577 | if (!drm_kms_helper_is_poll_worker()) { | ||
578 | ret = pm_runtime_get_sync(connector->dev->dev); | ||
579 | if (ret < 0 && ret != -EACCES) | ||
580 | return conn_status; | ||
581 | } | ||
576 | 582 | ||
577 | nv_encoder = nouveau_connector_ddc_detect(connector); | 583 | nv_encoder = nouveau_connector_ddc_detect(connector); |
578 | if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { | 584 | if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { |
@@ -647,8 +653,10 @@ detect_analog: | |||
647 | 653 | ||
648 | out: | 654 | out: |
649 | 655 | ||
650 | pm_runtime_mark_last_busy(connector->dev->dev); | 656 | if (!drm_kms_helper_is_poll_worker()) { |
651 | pm_runtime_put_autosuspend(connector->dev->dev); | 657 | pm_runtime_mark_last_busy(connector->dev->dev); |
658 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
659 | } | ||
652 | 660 | ||
653 | return conn_status; | 661 | return conn_status; |
654 | } | 662 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 6af3bc483c84..8bd739cfd00d 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -4469,6 +4469,7 @@ nv50_display_create(struct drm_device *dev) | |||
4469 | nouveau_display(dev)->fini = nv50_display_fini; | 4469 | nouveau_display(dev)->fini = nv50_display_fini; |
4470 | disp->disp = &nouveau_display(dev)->disp; | 4470 | disp->disp = &nouveau_display(dev)->disp; |
4471 | dev->mode_config.funcs = &nv50_disp_func; | 4471 | dev->mode_config.funcs = &nv50_disp_func; |
4472 | dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; | ||
4472 | if (nouveau_atomic) | 4473 | if (nouveau_atomic) |
4473 | dev->driver->driver_features |= DRIVER_ATOMIC; | 4474 | dev->driver->driver_features |= DRIVER_ATOMIC; |
4474 | 4475 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c index 93946dcee319..1c12e58f44c2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | |||
@@ -1354,7 +1354,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse, | |||
1354 | 1354 | ||
1355 | tail = this->addr + this->size; | 1355 | tail = this->addr + this->size; |
1356 | if (vmm->func->page_block && next && next->page != p) | 1356 | if (vmm->func->page_block && next && next->page != p) |
1357 | tail = ALIGN_DOWN(addr, vmm->func->page_block); | 1357 | tail = ALIGN_DOWN(tail, vmm->func->page_block); |
1358 | 1358 | ||
1359 | if (addr <= tail && tail - addr >= size) { | 1359 | if (addr <= tail && tail - addr >= size) { |
1360 | rb_erase(&this->tree, &vmm->free); | 1360 | rb_erase(&this->tree, &vmm->free); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c index bf62303571b3..3695cde669f8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | |||
@@ -301,7 +301,7 @@ nvkm_therm_attr_set(struct nvkm_therm *therm, | |||
301 | void | 301 | void |
302 | nvkm_therm_clkgate_enable(struct nvkm_therm *therm) | 302 | nvkm_therm_clkgate_enable(struct nvkm_therm *therm) |
303 | { | 303 | { |
304 | if (!therm->func->clkgate_enable || !therm->clkgating_enabled) | 304 | if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled) |
305 | return; | 305 | return; |
306 | 306 | ||
307 | nvkm_debug(&therm->subdev, | 307 | nvkm_debug(&therm->subdev, |
@@ -312,7 +312,7 @@ nvkm_therm_clkgate_enable(struct nvkm_therm *therm) | |||
312 | void | 312 | void |
313 | nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend) | 313 | nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend) |
314 | { | 314 | { |
315 | if (!therm->func->clkgate_fini || !therm->clkgating_enabled) | 315 | if (!therm || !therm->func->clkgate_fini || !therm->clkgating_enabled) |
316 | return; | 316 | return; |
317 | 317 | ||
318 | nvkm_debug(&therm->subdev, | 318 | nvkm_debug(&therm->subdev, |
@@ -395,7 +395,7 @@ void | |||
395 | nvkm_therm_clkgate_init(struct nvkm_therm *therm, | 395 | nvkm_therm_clkgate_init(struct nvkm_therm *therm, |
396 | const struct nvkm_therm_clkgate_pack *p) | 396 | const struct nvkm_therm_clkgate_pack *p) |
397 | { | 397 | { |
398 | if (!therm->func->clkgate_init || !therm->clkgating_enabled) | 398 | if (!therm || !therm->func->clkgate_init || !therm->clkgating_enabled) |
399 | return; | 399 | return; |
400 | 400 | ||
401 | therm->func->clkgate_init(therm, p); | 401 | therm->func->clkgate_init(therm, p); |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index b108eaabb6df..df9469a8fdb1 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -892,9 +892,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) | |||
892 | enum drm_connector_status ret = connector_status_disconnected; | 892 | enum drm_connector_status ret = connector_status_disconnected; |
893 | int r; | 893 | int r; |
894 | 894 | ||
895 | r = pm_runtime_get_sync(connector->dev->dev); | 895 | if (!drm_kms_helper_is_poll_worker()) { |
896 | if (r < 0) | 896 | r = pm_runtime_get_sync(connector->dev->dev); |
897 | return connector_status_disconnected; | 897 | if (r < 0) |
898 | return connector_status_disconnected; | ||
899 | } | ||
898 | 900 | ||
899 | if (encoder) { | 901 | if (encoder) { |
900 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 902 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
@@ -917,8 +919,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) | |||
917 | /* check acpi lid status ??? */ | 919 | /* check acpi lid status ??? */ |
918 | 920 | ||
919 | radeon_connector_update_scratch_regs(connector, ret); | 921 | radeon_connector_update_scratch_regs(connector, ret); |
920 | pm_runtime_mark_last_busy(connector->dev->dev); | 922 | |
921 | pm_runtime_put_autosuspend(connector->dev->dev); | 923 | if (!drm_kms_helper_is_poll_worker()) { |
924 | pm_runtime_mark_last_busy(connector->dev->dev); | ||
925 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
926 | } | ||
927 | |||
922 | return ret; | 928 | return ret; |
923 | } | 929 | } |
924 | 930 | ||
@@ -1032,9 +1038,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
1032 | enum drm_connector_status ret = connector_status_disconnected; | 1038 | enum drm_connector_status ret = connector_status_disconnected; |
1033 | int r; | 1039 | int r; |
1034 | 1040 | ||
1035 | r = pm_runtime_get_sync(connector->dev->dev); | 1041 | if (!drm_kms_helper_is_poll_worker()) { |
1036 | if (r < 0) | 1042 | r = pm_runtime_get_sync(connector->dev->dev); |
1037 | return connector_status_disconnected; | 1043 | if (r < 0) |
1044 | return connector_status_disconnected; | ||
1045 | } | ||
1038 | 1046 | ||
1039 | encoder = radeon_best_single_encoder(connector); | 1047 | encoder = radeon_best_single_encoder(connector); |
1040 | if (!encoder) | 1048 | if (!encoder) |
@@ -1101,8 +1109,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
1101 | radeon_connector_update_scratch_regs(connector, ret); | 1109 | radeon_connector_update_scratch_regs(connector, ret); |
1102 | 1110 | ||
1103 | out: | 1111 | out: |
1104 | pm_runtime_mark_last_busy(connector->dev->dev); | 1112 | if (!drm_kms_helper_is_poll_worker()) { |
1105 | pm_runtime_put_autosuspend(connector->dev->dev); | 1113 | pm_runtime_mark_last_busy(connector->dev->dev); |
1114 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
1115 | } | ||
1106 | 1116 | ||
1107 | return ret; | 1117 | return ret; |
1108 | } | 1118 | } |
@@ -1166,9 +1176,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force) | |||
1166 | if (!radeon_connector->dac_load_detect) | 1176 | if (!radeon_connector->dac_load_detect) |
1167 | return ret; | 1177 | return ret; |
1168 | 1178 | ||
1169 | r = pm_runtime_get_sync(connector->dev->dev); | 1179 | if (!drm_kms_helper_is_poll_worker()) { |
1170 | if (r < 0) | 1180 | r = pm_runtime_get_sync(connector->dev->dev); |
1171 | return connector_status_disconnected; | 1181 | if (r < 0) |
1182 | return connector_status_disconnected; | ||
1183 | } | ||
1172 | 1184 | ||
1173 | encoder = radeon_best_single_encoder(connector); | 1185 | encoder = radeon_best_single_encoder(connector); |
1174 | if (!encoder) | 1186 | if (!encoder) |
@@ -1180,8 +1192,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force) | |||
1180 | if (ret == connector_status_connected) | 1192 | if (ret == connector_status_connected) |
1181 | ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); | 1193 | ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); |
1182 | radeon_connector_update_scratch_regs(connector, ret); | 1194 | radeon_connector_update_scratch_regs(connector, ret); |
1183 | pm_runtime_mark_last_busy(connector->dev->dev); | 1195 | |
1184 | pm_runtime_put_autosuspend(connector->dev->dev); | 1196 | if (!drm_kms_helper_is_poll_worker()) { |
1197 | pm_runtime_mark_last_busy(connector->dev->dev); | ||
1198 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
1199 | } | ||
1200 | |||
1185 | return ret; | 1201 | return ret; |
1186 | } | 1202 | } |
1187 | 1203 | ||
@@ -1244,9 +1260,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
1244 | enum drm_connector_status ret = connector_status_disconnected; | 1260 | enum drm_connector_status ret = connector_status_disconnected; |
1245 | bool dret = false, broken_edid = false; | 1261 | bool dret = false, broken_edid = false; |
1246 | 1262 | ||
1247 | r = pm_runtime_get_sync(connector->dev->dev); | 1263 | if (!drm_kms_helper_is_poll_worker()) { |
1248 | if (r < 0) | 1264 | r = pm_runtime_get_sync(connector->dev->dev); |
1249 | return connector_status_disconnected; | 1265 | if (r < 0) |
1266 | return connector_status_disconnected; | ||
1267 | } | ||
1250 | 1268 | ||
1251 | if (radeon_connector->detected_hpd_without_ddc) { | 1269 | if (radeon_connector->detected_hpd_without_ddc) { |
1252 | force = true; | 1270 | force = true; |
@@ -1429,8 +1447,10 @@ out: | |||
1429 | } | 1447 | } |
1430 | 1448 | ||
1431 | exit: | 1449 | exit: |
1432 | pm_runtime_mark_last_busy(connector->dev->dev); | 1450 | if (!drm_kms_helper_is_poll_worker()) { |
1433 | pm_runtime_put_autosuspend(connector->dev->dev); | 1451 | pm_runtime_mark_last_busy(connector->dev->dev); |
1452 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
1453 | } | ||
1434 | 1454 | ||
1435 | return ret; | 1455 | return ret; |
1436 | } | 1456 | } |
@@ -1681,9 +1701,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1681 | if (radeon_dig_connector->is_mst) | 1701 | if (radeon_dig_connector->is_mst) |
1682 | return connector_status_disconnected; | 1702 | return connector_status_disconnected; |
1683 | 1703 | ||
1684 | r = pm_runtime_get_sync(connector->dev->dev); | 1704 | if (!drm_kms_helper_is_poll_worker()) { |
1685 | if (r < 0) | 1705 | r = pm_runtime_get_sync(connector->dev->dev); |
1686 | return connector_status_disconnected; | 1706 | if (r < 0) |
1707 | return connector_status_disconnected; | ||
1708 | } | ||
1687 | 1709 | ||
1688 | if (!force && radeon_check_hpd_status_unchanged(connector)) { | 1710 | if (!force && radeon_check_hpd_status_unchanged(connector)) { |
1689 | ret = connector->status; | 1711 | ret = connector->status; |
@@ -1770,8 +1792,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1770 | } | 1792 | } |
1771 | 1793 | ||
1772 | out: | 1794 | out: |
1773 | pm_runtime_mark_last_busy(connector->dev->dev); | 1795 | if (!drm_kms_helper_is_poll_worker()) { |
1774 | pm_runtime_put_autosuspend(connector->dev->dev); | 1796 | pm_runtime_mark_last_busy(connector->dev->dev); |
1797 | pm_runtime_put_autosuspend(connector->dev->dev); | ||
1798 | } | ||
1775 | 1799 | ||
1776 | return ret; | 1800 | return ret; |
1777 | } | 1801 | } |
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c index 3b2d11b675e8..2d7c57406715 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c | |||
@@ -111,6 +111,8 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc, | |||
111 | 111 | ||
112 | DRM_DEBUG_DRIVER("Disabling the CRTC\n"); | 112 | DRM_DEBUG_DRIVER("Disabling the CRTC\n"); |
113 | 113 | ||
114 | drm_crtc_vblank_off(crtc); | ||
115 | |||
114 | sun4i_tcon_set_status(scrtc->tcon, encoder, false); | 116 | sun4i_tcon_set_status(scrtc->tcon, encoder, false); |
115 | 117 | ||
116 | if (crtc->state->event && !crtc->state->active) { | 118 | if (crtc->state->event && !crtc->state->active) { |
@@ -131,6 +133,8 @@ static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc, | |||
131 | DRM_DEBUG_DRIVER("Enabling the CRTC\n"); | 133 | DRM_DEBUG_DRIVER("Enabling the CRTC\n"); |
132 | 134 | ||
133 | sun4i_tcon_set_status(scrtc->tcon, encoder, true); | 135 | sun4i_tcon_set_status(scrtc->tcon, encoder, true); |
136 | |||
137 | drm_crtc_vblank_on(crtc); | ||
134 | } | 138 | } |
135 | 139 | ||
136 | static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc) | 140 | static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc) |
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c index 023f39bda633..e36004fbe453 100644 --- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c +++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c | |||
@@ -132,10 +132,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw) | |||
132 | static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees) | 132 | static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees) |
133 | { | 133 | { |
134 | struct sun4i_dclk *dclk = hw_to_dclk(hw); | 134 | struct sun4i_dclk *dclk = hw_to_dclk(hw); |
135 | u32 val = degrees / 120; | ||
136 | |||
137 | val <<= 28; | ||
135 | 138 | ||
136 | regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG, | 139 | regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG, |
137 | GENMASK(29, 28), | 140 | GENMASK(29, 28), |
138 | degrees / 120); | 141 | val); |
139 | 142 | ||
140 | return 0; | 143 | return 0; |
141 | } | 144 | } |
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 7f0705ef9f4e..50d19605c38f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c | |||
@@ -113,7 +113,7 @@ static int sun4i_drv_bind(struct device *dev) | |||
113 | /* drm_vblank_init calls kcalloc, which can fail */ | 113 | /* drm_vblank_init calls kcalloc, which can fail */ |
114 | ret = drm_vblank_init(drm, drm->mode_config.num_crtc); | 114 | ret = drm_vblank_init(drm, drm->mode_config.num_crtc); |
115 | if (ret) | 115 | if (ret) |
116 | goto free_mem_region; | 116 | goto cleanup_mode_config; |
117 | 117 | ||
118 | drm->irq_enabled = true; | 118 | drm->irq_enabled = true; |
119 | 119 | ||
@@ -141,7 +141,6 @@ finish_poll: | |||
141 | sun4i_framebuffer_free(drm); | 141 | sun4i_framebuffer_free(drm); |
142 | cleanup_mode_config: | 142 | cleanup_mode_config: |
143 | drm_mode_config_cleanup(drm); | 143 | drm_mode_config_cleanup(drm); |
144 | free_mem_region: | ||
145 | of_reserved_mem_device_release(dev); | 144 | of_reserved_mem_device_release(dev); |
146 | free_drm: | 145 | free_drm: |
147 | drm_dev_unref(drm); | 146 | drm_dev_unref(drm); |
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 500b6fb3e028..fa4bcd092eaf 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | |||
@@ -538,7 +538,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, | |||
538 | &sun4i_hdmi_regmap_config); | 538 | &sun4i_hdmi_regmap_config); |
539 | if (IS_ERR(hdmi->regmap)) { | 539 | if (IS_ERR(hdmi->regmap)) { |
540 | dev_err(dev, "Couldn't create HDMI encoder regmap\n"); | 540 | dev_err(dev, "Couldn't create HDMI encoder regmap\n"); |
541 | return PTR_ERR(hdmi->regmap); | 541 | ret = PTR_ERR(hdmi->regmap); |
542 | goto err_disable_mod_clk; | ||
542 | } | 543 | } |
543 | 544 | ||
544 | ret = sun4i_tmds_create(hdmi); | 545 | ret = sun4i_tmds_create(hdmi); |
@@ -551,7 +552,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, | |||
551 | hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc"); | 552 | hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc"); |
552 | if (IS_ERR(hdmi->ddc_parent_clk)) { | 553 | if (IS_ERR(hdmi->ddc_parent_clk)) { |
553 | dev_err(dev, "Couldn't get the HDMI DDC clock\n"); | 554 | dev_err(dev, "Couldn't get the HDMI DDC clock\n"); |
554 | return PTR_ERR(hdmi->ddc_parent_clk); | 555 | ret = PTR_ERR(hdmi->ddc_parent_clk); |
556 | goto err_disable_mod_clk; | ||
555 | } | 557 | } |
556 | } else { | 558 | } else { |
557 | hdmi->ddc_parent_clk = hdmi->tmds_clk; | 559 | hdmi->ddc_parent_clk = hdmi->tmds_clk; |
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index a2a697a099e6..f2fa1f210509 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c | |||
@@ -92,6 +92,8 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc, | |||
92 | 92 | ||
93 | DRM_DEBUG_DRIVER("Vertical parameters OK\n"); | 93 | DRM_DEBUG_DRIVER("Vertical parameters OK\n"); |
94 | 94 | ||
95 | tcon->dclk_min_div = 6; | ||
96 | tcon->dclk_max_div = 127; | ||
95 | rounded_rate = clk_round_rate(tcon->dclk, rate); | 97 | rounded_rate = clk_round_rate(tcon->dclk, rate); |
96 | if (rounded_rate < rate) | 98 | if (rounded_rate < rate) |
97 | return MODE_CLOCK_LOW; | 99 | return MODE_CLOCK_LOW; |
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 1a114e380f13..c3d92d537240 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c | |||
@@ -103,10 +103,13 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel, | |||
103 | return; | 103 | return; |
104 | } | 104 | } |
105 | 105 | ||
106 | if (enabled) | 106 | if (enabled) { |
107 | clk_prepare_enable(clk); | 107 | clk_prepare_enable(clk); |
108 | else | 108 | clk_rate_exclusive_get(clk); |
109 | } else { | ||
110 | clk_rate_exclusive_put(clk); | ||
109 | clk_disable_unprepare(clk); | 111 | clk_disable_unprepare(clk); |
112 | } | ||
110 | } | 113 | } |
111 | 114 | ||
112 | static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon, | 115 | static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon, |
@@ -339,6 +342,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon, | |||
339 | regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, | 342 | regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, |
340 | SUN4I_TCON_GCTL_IOMAP_MASK, | 343 | SUN4I_TCON_GCTL_IOMAP_MASK, |
341 | SUN4I_TCON_GCTL_IOMAP_TCON0); | 344 | SUN4I_TCON_GCTL_IOMAP_TCON0); |
345 | |||
346 | /* Enable the output on the pins */ | ||
347 | regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0xe0000000); | ||
342 | } | 348 | } |
343 | 349 | ||
344 | static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, | 350 | static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, |
@@ -921,52 +927,56 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, | |||
921 | return ret; | 927 | return ret; |
922 | } | 928 | } |
923 | 929 | ||
924 | /* | 930 | if (tcon->quirks->supports_lvds) { |
925 | * This can only be made optional since we've had DT nodes | 931 | /* |
926 | * without the LVDS reset properties. | 932 | * This can only be made optional since we've had DT |
927 | * | 933 | * nodes without the LVDS reset properties. |
928 | * If the property is missing, just disable LVDS, and print a | 934 | * |
929 | * warning. | 935 | * If the property is missing, just disable LVDS, and |
930 | */ | 936 | * print a warning. |
931 | tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds"); | 937 | */ |
932 | if (IS_ERR(tcon->lvds_rst)) { | 938 | tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds"); |
933 | dev_err(dev, "Couldn't get our reset line\n"); | 939 | if (IS_ERR(tcon->lvds_rst)) { |
934 | return PTR_ERR(tcon->lvds_rst); | 940 | dev_err(dev, "Couldn't get our reset line\n"); |
935 | } else if (tcon->lvds_rst) { | 941 | return PTR_ERR(tcon->lvds_rst); |
936 | has_lvds_rst = true; | 942 | } else if (tcon->lvds_rst) { |
937 | reset_control_reset(tcon->lvds_rst); | 943 | has_lvds_rst = true; |
938 | } else { | 944 | reset_control_reset(tcon->lvds_rst); |
939 | has_lvds_rst = false; | 945 | } else { |
940 | } | 946 | has_lvds_rst = false; |
947 | } | ||
941 | 948 | ||
942 | /* | 949 | /* |
943 | * This can only be made optional since we've had DT nodes | 950 | * This can only be made optional since we've had DT |
944 | * without the LVDS reset properties. | 951 | * nodes without the LVDS reset properties. |
945 | * | 952 | * |
946 | * If the property is missing, just disable LVDS, and print a | 953 | * If the property is missing, just disable LVDS, and |
947 | * warning. | 954 | * print a warning. |
948 | */ | 955 | */ |
949 | if (tcon->quirks->has_lvds_alt) { | 956 | if (tcon->quirks->has_lvds_alt) { |
950 | tcon->lvds_pll = devm_clk_get(dev, "lvds-alt"); | 957 | tcon->lvds_pll = devm_clk_get(dev, "lvds-alt"); |
951 | if (IS_ERR(tcon->lvds_pll)) { | 958 | if (IS_ERR(tcon->lvds_pll)) { |
952 | if (PTR_ERR(tcon->lvds_pll) == -ENOENT) { | 959 | if (PTR_ERR(tcon->lvds_pll) == -ENOENT) { |
953 | has_lvds_alt = false; | 960 | has_lvds_alt = false; |
961 | } else { | ||
962 | dev_err(dev, "Couldn't get the LVDS PLL\n"); | ||
963 | return PTR_ERR(tcon->lvds_pll); | ||
964 | } | ||
954 | } else { | 965 | } else { |
955 | dev_err(dev, "Couldn't get the LVDS PLL\n"); | 966 | has_lvds_alt = true; |
956 | return PTR_ERR(tcon->lvds_pll); | ||
957 | } | 967 | } |
958 | } else { | ||
959 | has_lvds_alt = true; | ||
960 | } | 968 | } |
961 | } | ||
962 | 969 | ||
963 | if (!has_lvds_rst || (tcon->quirks->has_lvds_alt && !has_lvds_alt)) { | 970 | if (!has_lvds_rst || |
964 | dev_warn(dev, | 971 | (tcon->quirks->has_lvds_alt && !has_lvds_alt)) { |
965 | "Missing LVDS properties, Please upgrade your DT\n"); | 972 | dev_warn(dev, "Missing LVDS properties, Please upgrade your DT\n"); |
966 | dev_warn(dev, "LVDS output disabled\n"); | 973 | dev_warn(dev, "LVDS output disabled\n"); |
967 | can_lvds = false; | 974 | can_lvds = false; |
975 | } else { | ||
976 | can_lvds = true; | ||
977 | } | ||
968 | } else { | 978 | } else { |
969 | can_lvds = true; | 979 | can_lvds = false; |
970 | } | 980 | } |
971 | 981 | ||
972 | ret = sun4i_tcon_init_clocks(dev, tcon); | 982 | ret = sun4i_tcon_init_clocks(dev, tcon); |
@@ -1195,6 +1205,7 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = { | |||
1195 | }; | 1205 | }; |
1196 | 1206 | ||
1197 | static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = { | 1207 | static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = { |
1208 | .supports_lvds = true, | ||
1198 | .has_channel_0 = true, | 1209 | .has_channel_0 = true, |
1199 | }; | 1210 | }; |
1200 | 1211 | ||
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index d3a945b7bb60..161e09427124 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h | |||
@@ -177,6 +177,7 @@ struct sun4i_tcon_quirks { | |||
177 | bool has_lvds_alt; /* Does the LVDS clock have a parent other than the TCON clock? */ | 177 | bool has_lvds_alt; /* Does the LVDS clock have a parent other than the TCON clock? */ |
178 | bool needs_de_be_mux; /* sun6i needs mux to select backend */ | 178 | bool needs_de_be_mux; /* sun6i needs mux to select backend */ |
179 | bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */ | 179 | bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */ |
180 | bool supports_lvds; /* Does the TCON support an LVDS output? */ | ||
180 | 181 | ||
181 | /* callback to handle tcon muxing options */ | 182 | /* callback to handle tcon muxing options */ |
182 | int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *); | 183 | int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *); |
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 71152776b04c..616c9634585e 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c | |||
@@ -1916,8 +1916,12 @@ cleanup: | |||
1916 | if (!IS_ERR(primary)) | 1916 | if (!IS_ERR(primary)) |
1917 | drm_plane_cleanup(primary); | 1917 | drm_plane_cleanup(primary); |
1918 | 1918 | ||
1919 | if (group && tegra->domain) { | 1919 | if (group && dc->domain) { |
1920 | iommu_detach_group(tegra->domain, group); | 1920 | if (group == tegra->group) { |
1921 | iommu_detach_group(dc->domain, group); | ||
1922 | tegra->group = NULL; | ||
1923 | } | ||
1924 | |||
1921 | dc->domain = NULL; | 1925 | dc->domain = NULL; |
1922 | } | 1926 | } |
1923 | 1927 | ||
@@ -1926,8 +1930,10 @@ cleanup: | |||
1926 | 1930 | ||
1927 | static int tegra_dc_exit(struct host1x_client *client) | 1931 | static int tegra_dc_exit(struct host1x_client *client) |
1928 | { | 1932 | { |
1933 | struct drm_device *drm = dev_get_drvdata(client->parent); | ||
1929 | struct iommu_group *group = iommu_group_get(client->dev); | 1934 | struct iommu_group *group = iommu_group_get(client->dev); |
1930 | struct tegra_dc *dc = host1x_client_to_dc(client); | 1935 | struct tegra_dc *dc = host1x_client_to_dc(client); |
1936 | struct tegra_drm *tegra = drm->dev_private; | ||
1931 | int err; | 1937 | int err; |
1932 | 1938 | ||
1933 | devm_free_irq(dc->dev, dc->irq, dc); | 1939 | devm_free_irq(dc->dev, dc->irq, dc); |
@@ -1939,7 +1945,11 @@ static int tegra_dc_exit(struct host1x_client *client) | |||
1939 | } | 1945 | } |
1940 | 1946 | ||
1941 | if (group && dc->domain) { | 1947 | if (group && dc->domain) { |
1942 | iommu_detach_group(dc->domain, group); | 1948 | if (group == tegra->group) { |
1949 | iommu_detach_group(dc->domain, group); | ||
1950 | tegra->group = NULL; | ||
1951 | } | ||
1952 | |||
1943 | dc->domain = NULL; | 1953 | dc->domain = NULL; |
1944 | } | 1954 | } |
1945 | 1955 | ||
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index e20e013151f0..7afe2f635f74 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c | |||
@@ -222,6 +222,7 @@ static void tegra_drm_unload(struct drm_device *drm) | |||
222 | 222 | ||
223 | drm_kms_helper_poll_fini(drm); | 223 | drm_kms_helper_poll_fini(drm); |
224 | tegra_drm_fb_exit(drm); | 224 | tegra_drm_fb_exit(drm); |
225 | drm_atomic_helper_shutdown(drm); | ||
225 | drm_mode_config_cleanup(drm); | 226 | drm_mode_config_cleanup(drm); |
226 | 227 | ||
227 | err = host1x_device_exit(device); | 228 | err = host1x_device_exit(device); |
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 4d2ed966f9e3..87c5d89bc9ba 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c | |||
@@ -1072,7 +1072,6 @@ static int tegra_dsi_exit(struct host1x_client *client) | |||
1072 | struct tegra_dsi *dsi = host1x_client_to_dsi(client); | 1072 | struct tegra_dsi *dsi = host1x_client_to_dsi(client); |
1073 | 1073 | ||
1074 | tegra_output_exit(&dsi->output); | 1074 | tegra_output_exit(&dsi->output); |
1075 | regulator_disable(dsi->vdd); | ||
1076 | 1075 | ||
1077 | return 0; | 1076 | return 0; |
1078 | } | 1077 | } |
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index 6d6e2d0091eb..176ef46c615c 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c | |||
@@ -307,6 +307,10 @@ int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha) | |||
307 | case WIN_COLOR_DEPTH_B8G8R8X8: | 307 | case WIN_COLOR_DEPTH_B8G8R8X8: |
308 | *alpha = WIN_COLOR_DEPTH_B8G8R8A8; | 308 | *alpha = WIN_COLOR_DEPTH_B8G8R8A8; |
309 | return 0; | 309 | return 0; |
310 | |||
311 | case WIN_COLOR_DEPTH_B5G6R5: | ||
312 | *alpha = opaque; | ||
313 | return 0; | ||
310 | } | 314 | } |
311 | 315 | ||
312 | return -EINVAL; | 316 | return -EINVAL; |
@@ -340,9 +344,6 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra, | |||
340 | unsigned int zpos[2]; | 344 | unsigned int zpos[2]; |
341 | unsigned int i; | 345 | unsigned int i; |
342 | 346 | ||
343 | for (i = 0; i < 3; i++) | ||
344 | state->dependent[i] = false; | ||
345 | |||
346 | for (i = 0; i < 2; i++) | 347 | for (i = 0; i < 2; i++) |
347 | zpos[i] = 0; | 348 | zpos[i] = 0; |
348 | 349 | ||
@@ -356,6 +357,8 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra, | |||
356 | 357 | ||
357 | index = tegra_plane_get_overlap_index(tegra, p); | 358 | index = tegra_plane_get_overlap_index(tegra, p); |
358 | 359 | ||
360 | state->dependent[index] = false; | ||
361 | |||
359 | /* | 362 | /* |
360 | * If any of the other planes is on top of this plane and uses | 363 | * If any of the other planes is on top of this plane and uses |
361 | * a format with an alpha component, mark this plane as being | 364 | * a format with an alpha component, mark this plane as being |
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index b5b335c9b2bb..2ebdc6d5a76e 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c | |||
@@ -159,10 +159,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) | |||
159 | { | 159 | { |
160 | unsigned long start = vma->vm_start; | 160 | unsigned long start = vma->vm_start; |
161 | unsigned long size = vma->vm_end - vma->vm_start; | 161 | unsigned long size = vma->vm_end - vma->vm_start; |
162 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | 162 | unsigned long offset; |
163 | unsigned long page, pos; | 163 | unsigned long page, pos; |
164 | 164 | ||
165 | if (offset + size > info->fix.smem_len) | 165 | if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) |
166 | return -EINVAL; | ||
167 | |||
168 | offset = vma->vm_pgoff << PAGE_SHIFT; | ||
169 | |||
170 | if (offset > info->fix.smem_len || size > info->fix.smem_len - offset) | ||
166 | return -EINVAL; | 171 | return -EINVAL; |
167 | 172 | ||
168 | pos = (unsigned long)info->fix.smem_start + offset; | 173 | pos = (unsigned long)info->fix.smem_start + offset; |
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index a14e8a2ec682..7bdf6f0e58a5 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c | |||
@@ -198,6 +198,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data, | |||
198 | case VIRTGPU_PARAM_3D_FEATURES: | 198 | case VIRTGPU_PARAM_3D_FEATURES: |
199 | value = vgdev->has_virgl_3d == true ? 1 : 0; | 199 | value = vgdev->has_virgl_3d == true ? 1 : 0; |
200 | break; | 200 | break; |
201 | case VIRTGPU_PARAM_CAPSET_QUERY_FIX: | ||
202 | value = 1; | ||
203 | break; | ||
201 | default: | 204 | default: |
202 | return -EINVAL; | 205 | return -EINVAL; |
203 | } | 206 | } |
@@ -473,7 +476,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, | |||
473 | { | 476 | { |
474 | struct virtio_gpu_device *vgdev = dev->dev_private; | 477 | struct virtio_gpu_device *vgdev = dev->dev_private; |
475 | struct drm_virtgpu_get_caps *args = data; | 478 | struct drm_virtgpu_get_caps *args = data; |
476 | int size; | 479 | unsigned size, host_caps_size; |
477 | int i; | 480 | int i; |
478 | int found_valid = -1; | 481 | int found_valid = -1; |
479 | int ret; | 482 | int ret; |
@@ -483,6 +486,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, | |||
483 | if (vgdev->num_capsets == 0) | 486 | if (vgdev->num_capsets == 0) |
484 | return -ENOSYS; | 487 | return -ENOSYS; |
485 | 488 | ||
489 | /* don't allow userspace to pass 0 */ | ||
490 | if (args->size == 0) | ||
491 | return -EINVAL; | ||
492 | |||
486 | spin_lock(&vgdev->display_info_lock); | 493 | spin_lock(&vgdev->display_info_lock); |
487 | for (i = 0; i < vgdev->num_capsets; i++) { | 494 | for (i = 0; i < vgdev->num_capsets; i++) { |
488 | if (vgdev->capsets[i].id == args->cap_set_id) { | 495 | if (vgdev->capsets[i].id == args->cap_set_id) { |
@@ -498,11 +505,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, | |||
498 | return -EINVAL; | 505 | return -EINVAL; |
499 | } | 506 | } |
500 | 507 | ||
501 | size = vgdev->capsets[found_valid].max_size; | 508 | host_caps_size = vgdev->capsets[found_valid].max_size; |
502 | if (args->size > size) { | 509 | /* only copy to user the minimum of the host caps size or the guest caps size */ |
503 | spin_unlock(&vgdev->display_info_lock); | 510 | size = min(args->size, host_caps_size); |
504 | return -EINVAL; | ||
505 | } | ||
506 | 511 | ||
507 | list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { | 512 | list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { |
508 | if (cache_ent->id == args->cap_set_id && | 513 | if (cache_ent->id == args->cap_set_id && |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 61a03ac90f8c..70e1a8820a7c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -1338,6 +1338,19 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv) | |||
1338 | */ | 1338 | */ |
1339 | void vmw_svga_disable(struct vmw_private *dev_priv) | 1339 | void vmw_svga_disable(struct vmw_private *dev_priv) |
1340 | { | 1340 | { |
1341 | /* | ||
1342 | * Disabling SVGA will turn off device modesetting capabilities, so | ||
1343 | * notify KMS about that so that it doesn't cache atomic state that | ||
1344 | * isn't valid anymore, for example crtcs turned on. | ||
1345 | * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex), | ||
1346 | * but vmw_kms_lost_device() takes the reservation sem and thus we'll | ||
1347 | * end up with lock order reversal. Thus, a master may actually perform | ||
1348 | * a new modeset just after we call vmw_kms_lost_device() and race with | ||
1349 | * vmw_svga_disable(), but that should at worst cause atomic KMS state | ||
1350 | * to be inconsistent with the device, causing modesetting problems. | ||
1351 | * | ||
1352 | */ | ||
1353 | vmw_kms_lost_device(dev_priv->dev); | ||
1341 | ttm_write_lock(&dev_priv->reservation_sem, false); | 1354 | ttm_write_lock(&dev_priv->reservation_sem, false); |
1342 | spin_lock(&dev_priv->svga_lock); | 1355 | spin_lock(&dev_priv->svga_lock); |
1343 | if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { | 1356 | if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 9e60de95b863..f34f368c1a2e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -949,6 +949,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
949 | void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv); | 949 | void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv); |
950 | int vmw_kms_suspend(struct drm_device *dev); | 950 | int vmw_kms_suspend(struct drm_device *dev); |
951 | int vmw_kms_resume(struct drm_device *dev); | 951 | int vmw_kms_resume(struct drm_device *dev); |
952 | void vmw_kms_lost_device(struct drm_device *dev); | ||
952 | 953 | ||
953 | int vmw_dumb_create(struct drm_file *file_priv, | 954 | int vmw_dumb_create(struct drm_file *file_priv, |
954 | struct drm_device *dev, | 955 | struct drm_device *dev, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 3628a9fe705f..f11601b6fd74 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <drm/drm_atomic_helper.h> | 31 | #include <drm/drm_atomic_helper.h> |
32 | #include <drm/drm_rect.h> | 32 | #include <drm/drm_rect.h> |
33 | 33 | ||
34 | |||
35 | /* Might need a hrtimer here? */ | 34 | /* Might need a hrtimer here? */ |
36 | #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) | 35 | #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) |
37 | 36 | ||
@@ -2513,9 +2512,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, | |||
2513 | * Helper to be used if an error forces the caller to undo the actions of | 2512 | * Helper to be used if an error forces the caller to undo the actions of |
2514 | * vmw_kms_helper_resource_prepare. | 2513 | * vmw_kms_helper_resource_prepare. |
2515 | */ | 2514 | */ |
2516 | void vmw_kms_helper_resource_revert(struct vmw_resource *res) | 2515 | void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx) |
2517 | { | 2516 | { |
2518 | vmw_kms_helper_buffer_revert(res->backup); | 2517 | struct vmw_resource *res = ctx->res; |
2518 | |||
2519 | vmw_kms_helper_buffer_revert(ctx->buf); | ||
2520 | vmw_dmabuf_unreference(&ctx->buf); | ||
2519 | vmw_resource_unreserve(res, false, NULL, 0); | 2521 | vmw_resource_unreserve(res, false, NULL, 0); |
2520 | mutex_unlock(&res->dev_priv->cmdbuf_mutex); | 2522 | mutex_unlock(&res->dev_priv->cmdbuf_mutex); |
2521 | } | 2523 | } |
@@ -2532,10 +2534,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res) | |||
2532 | * interrupted by a signal. | 2534 | * interrupted by a signal. |
2533 | */ | 2535 | */ |
2534 | int vmw_kms_helper_resource_prepare(struct vmw_resource *res, | 2536 | int vmw_kms_helper_resource_prepare(struct vmw_resource *res, |
2535 | bool interruptible) | 2537 | bool interruptible, |
2538 | struct vmw_validation_ctx *ctx) | ||
2536 | { | 2539 | { |
2537 | int ret = 0; | 2540 | int ret = 0; |
2538 | 2541 | ||
2542 | ctx->buf = NULL; | ||
2543 | ctx->res = res; | ||
2544 | |||
2539 | if (interruptible) | 2545 | if (interruptible) |
2540 | ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex); | 2546 | ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex); |
2541 | else | 2547 | else |
@@ -2555,6 +2561,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, | |||
2555 | false); | 2561 | false); |
2556 | if (ret) | 2562 | if (ret) |
2557 | goto out_unreserve; | 2563 | goto out_unreserve; |
2564 | |||
2565 | ctx->buf = vmw_dmabuf_reference(res->backup); | ||
2558 | } | 2566 | } |
2559 | ret = vmw_resource_validate(res); | 2567 | ret = vmw_resource_validate(res); |
2560 | if (ret) | 2568 | if (ret) |
@@ -2562,7 +2570,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, | |||
2562 | return 0; | 2570 | return 0; |
2563 | 2571 | ||
2564 | out_revert: | 2572 | out_revert: |
2565 | vmw_kms_helper_buffer_revert(res->backup); | 2573 | vmw_kms_helper_buffer_revert(ctx->buf); |
2566 | out_unreserve: | 2574 | out_unreserve: |
2567 | vmw_resource_unreserve(res, false, NULL, 0); | 2575 | vmw_resource_unreserve(res, false, NULL, 0); |
2568 | out_unlock: | 2576 | out_unlock: |
@@ -2578,11 +2586,13 @@ out_unlock: | |||
2578 | * @out_fence: Optional pointer to a fence pointer. If non-NULL, a | 2586 | * @out_fence: Optional pointer to a fence pointer. If non-NULL, a |
2579 | * ref-counted fence pointer is returned here. | 2587 | * ref-counted fence pointer is returned here. |
2580 | */ | 2588 | */ |
2581 | void vmw_kms_helper_resource_finish(struct vmw_resource *res, | 2589 | void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, |
2582 | struct vmw_fence_obj **out_fence) | 2590 | struct vmw_fence_obj **out_fence) |
2583 | { | 2591 | { |
2584 | if (res->backup || out_fence) | 2592 | struct vmw_resource *res = ctx->res; |
2585 | vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, | 2593 | |
2594 | if (ctx->buf || out_fence) | ||
2595 | vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, | ||
2586 | out_fence, NULL); | 2596 | out_fence, NULL); |
2587 | 2597 | ||
2588 | vmw_resource_unreserve(res, false, NULL, 0); | 2598 | vmw_resource_unreserve(res, false, NULL, 0); |
@@ -2896,3 +2906,13 @@ int vmw_kms_resume(struct drm_device *dev) | |||
2896 | 2906 | ||
2897 | return ret; | 2907 | return ret; |
2898 | } | 2908 | } |
2909 | |||
2910 | /** | ||
2911 | * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost | ||
2912 | * | ||
2913 | * @dev: Pointer to the drm device | ||
2914 | */ | ||
2915 | void vmw_kms_lost_device(struct drm_device *dev) | ||
2916 | { | ||
2917 | drm_atomic_helper_shutdown(dev); | ||
2918 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 4e8749a8717e..6b7c012719f1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | |||
@@ -241,6 +241,11 @@ struct vmw_display_unit { | |||
241 | int set_gui_y; | 241 | int set_gui_y; |
242 | }; | 242 | }; |
243 | 243 | ||
244 | struct vmw_validation_ctx { | ||
245 | struct vmw_resource *res; | ||
246 | struct vmw_dma_buffer *buf; | ||
247 | }; | ||
248 | |||
244 | #define vmw_crtc_to_du(x) \ | 249 | #define vmw_crtc_to_du(x) \ |
245 | container_of(x, struct vmw_display_unit, crtc) | 250 | container_of(x, struct vmw_display_unit, crtc) |
246 | #define vmw_connector_to_du(x) \ | 251 | #define vmw_connector_to_du(x) \ |
@@ -298,9 +303,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, | |||
298 | struct drm_vmw_fence_rep __user * | 303 | struct drm_vmw_fence_rep __user * |
299 | user_fence_rep); | 304 | user_fence_rep); |
300 | int vmw_kms_helper_resource_prepare(struct vmw_resource *res, | 305 | int vmw_kms_helper_resource_prepare(struct vmw_resource *res, |
301 | bool interruptible); | 306 | bool interruptible, |
302 | void vmw_kms_helper_resource_revert(struct vmw_resource *res); | 307 | struct vmw_validation_ctx *ctx); |
303 | void vmw_kms_helper_resource_finish(struct vmw_resource *res, | 308 | void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx); |
309 | void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, | ||
304 | struct vmw_fence_obj **out_fence); | 310 | struct vmw_fence_obj **out_fence); |
305 | int vmw_kms_readback(struct vmw_private *dev_priv, | 311 | int vmw_kms_readback(struct vmw_private *dev_priv, |
306 | struct drm_file *file_priv, | 312 | struct drm_file *file_priv, |
@@ -446,5 +452,4 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv, | |||
446 | 452 | ||
447 | int vmw_kms_set_config(struct drm_mode_set *set, | 453 | int vmw_kms_set_config(struct drm_mode_set *set, |
448 | struct drm_modeset_acquire_ctx *ctx); | 454 | struct drm_modeset_acquire_ctx *ctx); |
449 | |||
450 | #endif | 455 | #endif |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 419185f60278..648f8127f65a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | |||
@@ -938,12 +938,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, | |||
938 | struct vmw_framebuffer_surface *vfbs = | 938 | struct vmw_framebuffer_surface *vfbs = |
939 | container_of(framebuffer, typeof(*vfbs), base); | 939 | container_of(framebuffer, typeof(*vfbs), base); |
940 | struct vmw_kms_sou_surface_dirty sdirty; | 940 | struct vmw_kms_sou_surface_dirty sdirty; |
941 | struct vmw_validation_ctx ctx; | ||
941 | int ret; | 942 | int ret; |
942 | 943 | ||
943 | if (!srf) | 944 | if (!srf) |
944 | srf = &vfbs->surface->res; | 945 | srf = &vfbs->surface->res; |
945 | 946 | ||
946 | ret = vmw_kms_helper_resource_prepare(srf, true); | 947 | ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); |
947 | if (ret) | 948 | if (ret) |
948 | return ret; | 949 | return ret; |
949 | 950 | ||
@@ -963,7 +964,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, | |||
963 | ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, | 964 | ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, |
964 | dest_x, dest_y, num_clips, inc, | 965 | dest_x, dest_y, num_clips, inc, |
965 | &sdirty.base); | 966 | &sdirty.base); |
966 | vmw_kms_helper_resource_finish(srf, out_fence); | 967 | vmw_kms_helper_resource_finish(&ctx, out_fence); |
967 | 968 | ||
968 | return ret; | 969 | return ret; |
969 | } | 970 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 8eec88920851..67331f01ef32 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | |||
@@ -916,12 +916,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, | |||
916 | struct vmw_framebuffer_surface *vfbs = | 916 | struct vmw_framebuffer_surface *vfbs = |
917 | container_of(framebuffer, typeof(*vfbs), base); | 917 | container_of(framebuffer, typeof(*vfbs), base); |
918 | struct vmw_stdu_dirty sdirty; | 918 | struct vmw_stdu_dirty sdirty; |
919 | struct vmw_validation_ctx ctx; | ||
919 | int ret; | 920 | int ret; |
920 | 921 | ||
921 | if (!srf) | 922 | if (!srf) |
922 | srf = &vfbs->surface->res; | 923 | srf = &vfbs->surface->res; |
923 | 924 | ||
924 | ret = vmw_kms_helper_resource_prepare(srf, true); | 925 | ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); |
925 | if (ret) | 926 | if (ret) |
926 | return ret; | 927 | return ret; |
927 | 928 | ||
@@ -945,7 +946,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, | |||
945 | dest_x, dest_y, num_clips, inc, | 946 | dest_x, dest_y, num_clips, inc, |
946 | &sdirty.base); | 947 | &sdirty.base); |
947 | out_finish: | 948 | out_finish: |
948 | vmw_kms_helper_resource_finish(srf, out_fence); | 949 | vmw_kms_helper_resource_finish(&ctx, out_fence); |
949 | 950 | ||
950 | return ret; | 951 | return ret; |
951 | } | 952 | } |
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 658fa2d3e40c..48685cddbad1 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
@@ -1089,7 +1089,7 @@ static void ipu_irq_handler(struct irq_desc *desc) | |||
1089 | { | 1089 | { |
1090 | struct ipu_soc *ipu = irq_desc_get_handler_data(desc); | 1090 | struct ipu_soc *ipu = irq_desc_get_handler_data(desc); |
1091 | struct irq_chip *chip = irq_desc_get_chip(desc); | 1091 | struct irq_chip *chip = irq_desc_get_chip(desc); |
1092 | const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14}; | 1092 | static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14}; |
1093 | 1093 | ||
1094 | chained_irq_enter(chip, desc); | 1094 | chained_irq_enter(chip, desc); |
1095 | 1095 | ||
@@ -1102,7 +1102,7 @@ static void ipu_err_irq_handler(struct irq_desc *desc) | |||
1102 | { | 1102 | { |
1103 | struct ipu_soc *ipu = irq_desc_get_handler_data(desc); | 1103 | struct ipu_soc *ipu = irq_desc_get_handler_data(desc); |
1104 | struct irq_chip *chip = irq_desc_get_chip(desc); | 1104 | struct irq_chip *chip = irq_desc_get_chip(desc); |
1105 | const int int_reg[] = { 4, 5, 8, 9}; | 1105 | static const int int_reg[] = { 4, 5, 8, 9}; |
1106 | 1106 | ||
1107 | chained_irq_enter(chip, desc); | 1107 | chained_irq_enter(chip, desc); |
1108 | 1108 | ||
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c index bb9c087e6c0d..9f2d9ec42add 100644 --- a/drivers/gpu/ipu-v3/ipu-cpmem.c +++ b/drivers/gpu/ipu-v3/ipu-cpmem.c | |||
@@ -788,12 +788,14 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image) | |||
788 | case V4L2_PIX_FMT_SGBRG8: | 788 | case V4L2_PIX_FMT_SGBRG8: |
789 | case V4L2_PIX_FMT_SGRBG8: | 789 | case V4L2_PIX_FMT_SGRBG8: |
790 | case V4L2_PIX_FMT_SRGGB8: | 790 | case V4L2_PIX_FMT_SRGGB8: |
791 | case V4L2_PIX_FMT_GREY: | ||
791 | offset = image->rect.left + image->rect.top * pix->bytesperline; | 792 | offset = image->rect.left + image->rect.top * pix->bytesperline; |
792 | break; | 793 | break; |
793 | case V4L2_PIX_FMT_SBGGR16: | 794 | case V4L2_PIX_FMT_SBGGR16: |
794 | case V4L2_PIX_FMT_SGBRG16: | 795 | case V4L2_PIX_FMT_SGBRG16: |
795 | case V4L2_PIX_FMT_SGRBG16: | 796 | case V4L2_PIX_FMT_SGRBG16: |
796 | case V4L2_PIX_FMT_SRGGB16: | 797 | case V4L2_PIX_FMT_SRGGB16: |
798 | case V4L2_PIX_FMT_Y16: | ||
797 | offset = image->rect.left * 2 + | 799 | offset = image->rect.left * 2 + |
798 | image->rect.top * pix->bytesperline; | 800 | image->rect.top * pix->bytesperline; |
799 | break; | 801 | break; |
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c index 24e12b87a0cb..caa05b0702e1 100644 --- a/drivers/gpu/ipu-v3/ipu-csi.c +++ b/drivers/gpu/ipu-v3/ipu-csi.c | |||
@@ -288,6 +288,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code) | |||
288 | case MEDIA_BUS_FMT_SGBRG10_1X10: | 288 | case MEDIA_BUS_FMT_SGBRG10_1X10: |
289 | case MEDIA_BUS_FMT_SGRBG10_1X10: | 289 | case MEDIA_BUS_FMT_SGRBG10_1X10: |
290 | case MEDIA_BUS_FMT_SRGGB10_1X10: | 290 | case MEDIA_BUS_FMT_SRGGB10_1X10: |
291 | case MEDIA_BUS_FMT_Y10_1X10: | ||
291 | cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; | 292 | cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; |
292 | cfg->mipi_dt = MIPI_DT_RAW10; | 293 | cfg->mipi_dt = MIPI_DT_RAW10; |
293 | cfg->data_width = IPU_CSI_DATA_WIDTH_10; | 294 | cfg->data_width = IPU_CSI_DATA_WIDTH_10; |
@@ -296,6 +297,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code) | |||
296 | case MEDIA_BUS_FMT_SGBRG12_1X12: | 297 | case MEDIA_BUS_FMT_SGBRG12_1X12: |
297 | case MEDIA_BUS_FMT_SGRBG12_1X12: | 298 | case MEDIA_BUS_FMT_SGRBG12_1X12: |
298 | case MEDIA_BUS_FMT_SRGGB12_1X12: | 299 | case MEDIA_BUS_FMT_SRGGB12_1X12: |
300 | case MEDIA_BUS_FMT_Y12_1X12: | ||
299 | cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; | 301 | cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER; |
300 | cfg->mipi_dt = MIPI_DT_RAW12; | 302 | cfg->mipi_dt = MIPI_DT_RAW12; |
301 | cfg->data_width = IPU_CSI_DATA_WIDTH_12; | 303 | cfg->data_width = IPU_CSI_DATA_WIDTH_12; |
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index f1cec3d70498..0f70e8847540 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c | |||
@@ -129,11 +129,14 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index) | |||
129 | if (pre_node == pre->dev->of_node) { | 129 | if (pre_node == pre->dev->of_node) { |
130 | mutex_unlock(&ipu_pre_list_mutex); | 130 | mutex_unlock(&ipu_pre_list_mutex); |
131 | device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE); | 131 | device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE); |
132 | of_node_put(pre_node); | ||
132 | return pre; | 133 | return pre; |
133 | } | 134 | } |
134 | } | 135 | } |
135 | mutex_unlock(&ipu_pre_list_mutex); | 136 | mutex_unlock(&ipu_pre_list_mutex); |
136 | 137 | ||
138 | of_node_put(pre_node); | ||
139 | |||
137 | return NULL; | 140 | return NULL; |
138 | } | 141 | } |
139 | 142 | ||
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c index 067365c733c6..83f9dd934a5d 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c | |||
@@ -102,11 +102,14 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id) | |||
102 | mutex_unlock(&ipu_prg_list_mutex); | 102 | mutex_unlock(&ipu_prg_list_mutex); |
103 | device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE); | 103 | device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE); |
104 | prg->id = ipu_id; | 104 | prg->id = ipu_id; |
105 | of_node_put(prg_node); | ||
105 | return prg; | 106 | return prg; |
106 | } | 107 | } |
107 | } | 108 | } |
108 | mutex_unlock(&ipu_prg_list_mutex); | 109 | mutex_unlock(&ipu_prg_list_mutex); |
109 | 110 | ||
111 | of_node_put(prg_node); | ||
112 | |||
110 | return NULL; | 113 | return NULL; |
111 | } | 114 | } |
112 | 115 | ||
@@ -247,10 +250,14 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan) | |||
247 | { | 250 | { |
248 | int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num); | 251 | int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num); |
249 | struct ipu_prg *prg = ipu_chan->ipu->prg_priv; | 252 | struct ipu_prg *prg = ipu_chan->ipu->prg_priv; |
250 | struct ipu_prg_channel *chan = &prg->chan[prg_chan]; | 253 | struct ipu_prg_channel *chan; |
251 | u32 val; | 254 | u32 val; |
252 | 255 | ||
253 | if (!chan->enabled || prg_chan < 0) | 256 | if (prg_chan < 0) |
257 | return; | ||
258 | |||
259 | chan = &prg->chan[prg_chan]; | ||
260 | if (!chan->enabled) | ||
254 | return; | 261 | return; |
255 | 262 | ||
256 | pm_runtime_get_sync(prg->dev); | 263 | pm_runtime_get_sync(prg->dev); |
@@ -277,13 +284,15 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan, | |||
277 | { | 284 | { |
278 | int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num); | 285 | int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num); |
279 | struct ipu_prg *prg = ipu_chan->ipu->prg_priv; | 286 | struct ipu_prg *prg = ipu_chan->ipu->prg_priv; |
280 | struct ipu_prg_channel *chan = &prg->chan[prg_chan]; | 287 | struct ipu_prg_channel *chan; |
281 | u32 val; | 288 | u32 val; |
282 | int ret; | 289 | int ret; |
283 | 290 | ||
284 | if (prg_chan < 0) | 291 | if (prg_chan < 0) |
285 | return prg_chan; | 292 | return prg_chan; |
286 | 293 | ||
294 | chan = &prg->chan[prg_chan]; | ||
295 | |||
287 | if (chan->enabled) { | 296 | if (chan->enabled) { |
288 | ipu_pre_update(prg->pres[chan->used_pre], *eba); | 297 | ipu_pre_update(prg->pres[chan->used_pre], *eba); |
289 | return 0; | 298 | return 0; |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 43ddcdfbd0da..9454ac134ce2 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -645,6 +645,9 @@ | |||
645 | #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 | 645 | #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 |
646 | #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 | 646 | #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 |
647 | #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 | 647 | #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 |
648 | #define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 | ||
649 | #define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 | ||
650 | #define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 | ||
648 | #define USB_DEVICE_ID_LD_JWM 0x1080 | 651 | #define USB_DEVICE_ID_LD_JWM 0x1080 |
649 | #define USB_DEVICE_ID_LD_DMMP 0x1081 | 652 | #define USB_DEVICE_ID_LD_DMMP 0x1081 |
650 | #define USB_DEVICE_ID_LD_UMIP 0x1090 | 653 | #define USB_DEVICE_ID_LD_UMIP 0x1090 |
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 5f6035a5ce36..e92b77fa574a 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c | |||
@@ -809,6 +809,9 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
809 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, | 809 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, |
810 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, | 810 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, |
811 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, | 811 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, |
812 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) }, | ||
813 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) }, | ||
814 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) }, | ||
812 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, | 815 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, |
813 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, | 816 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, |
814 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, | 817 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, |
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 50e071444a5c..8699bb969e7e 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c | |||
@@ -417,13 +417,24 @@ __hv_pkt_iter_next(struct vmbus_channel *channel, | |||
417 | } | 417 | } |
418 | EXPORT_SYMBOL_GPL(__hv_pkt_iter_next); | 418 | EXPORT_SYMBOL_GPL(__hv_pkt_iter_next); |
419 | 419 | ||
420 | /* How many bytes were read in this iterator cycle */ | ||
421 | static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi, | ||
422 | u32 start_read_index) | ||
423 | { | ||
424 | if (rbi->priv_read_index >= start_read_index) | ||
425 | return rbi->priv_read_index - start_read_index; | ||
426 | else | ||
427 | return rbi->ring_datasize - start_read_index + | ||
428 | rbi->priv_read_index; | ||
429 | } | ||
430 | |||
420 | /* | 431 | /* |
421 | * Update host ring buffer after iterating over packets. | 432 | * Update host ring buffer after iterating over packets. |
422 | */ | 433 | */ |
423 | void hv_pkt_iter_close(struct vmbus_channel *channel) | 434 | void hv_pkt_iter_close(struct vmbus_channel *channel) |
424 | { | 435 | { |
425 | struct hv_ring_buffer_info *rbi = &channel->inbound; | 436 | struct hv_ring_buffer_info *rbi = &channel->inbound; |
426 | u32 orig_write_sz = hv_get_bytes_to_write(rbi); | 437 | u32 curr_write_sz, pending_sz, bytes_read, start_read_index; |
427 | 438 | ||
428 | /* | 439 | /* |
429 | * Make sure all reads are done before we update the read index since | 440 | * Make sure all reads are done before we update the read index since |
@@ -431,8 +442,12 @@ void hv_pkt_iter_close(struct vmbus_channel *channel) | |||
431 | * is updated. | 442 | * is updated. |
432 | */ | 443 | */ |
433 | virt_rmb(); | 444 | virt_rmb(); |
445 | start_read_index = rbi->ring_buffer->read_index; | ||
434 | rbi->ring_buffer->read_index = rbi->priv_read_index; | 446 | rbi->ring_buffer->read_index = rbi->priv_read_index; |
435 | 447 | ||
448 | if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz) | ||
449 | return; | ||
450 | |||
436 | /* | 451 | /* |
437 | * Issue a full memory barrier before making the signaling decision. | 452 | * Issue a full memory barrier before making the signaling decision. |
438 | * Here is the reason for having this barrier: | 453 | * Here is the reason for having this barrier: |
@@ -446,26 +461,29 @@ void hv_pkt_iter_close(struct vmbus_channel *channel) | |||
446 | */ | 461 | */ |
447 | virt_mb(); | 462 | virt_mb(); |
448 | 463 | ||
449 | /* If host has disabled notifications then skip */ | 464 | pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); |
450 | if (rbi->ring_buffer->interrupt_mask) | 465 | if (!pending_sz) |
451 | return; | 466 | return; |
452 | 467 | ||
453 | if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) { | 468 | /* |
454 | u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); | 469 | * Ensure the read of write_index in hv_get_bytes_to_write() |
470 | * happens after the read of pending_send_sz. | ||
471 | */ | ||
472 | virt_rmb(); | ||
473 | curr_write_sz = hv_get_bytes_to_write(rbi); | ||
474 | bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index); | ||
455 | 475 | ||
456 | /* | 476 | /* |
457 | * If there was space before we began iteration, | 477 | * If there was space before we began iteration, |
458 | * then host was not blocked. Also handles case where | 478 | * then host was not blocked. |
459 | * pending_sz is zero then host has nothing pending | 479 | */ |
460 | * and does not need to be signaled. | ||
461 | */ | ||
462 | if (orig_write_sz > pending_sz) | ||
463 | return; | ||
464 | 480 | ||
465 | /* If pending write will not fit, don't give false hope. */ | 481 | if (curr_write_sz - bytes_read > pending_sz) |
466 | if (hv_get_bytes_to_write(rbi) < pending_sz) | 482 | return; |
467 | return; | 483 | |
468 | } | 484 | /* If pending write will not fit, don't give false hope. */ |
485 | if (curr_write_sz <= pending_sz) | ||
486 | return; | ||
469 | 487 | ||
470 | vmbus_setevent(channel); | 488 | vmbus_setevent(channel); |
471 | } | 489 | } |
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 4bdbf77f7197..72c338eb5fae 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
@@ -269,13 +269,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) | |||
269 | for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) { | 269 | for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) { |
270 | const struct tjmax_model *tm = &tjmax_model_table[i]; | 270 | const struct tjmax_model *tm = &tjmax_model_table[i]; |
271 | if (c->x86_model == tm->model && | 271 | if (c->x86_model == tm->model && |
272 | (tm->mask == ANY || c->x86_mask == tm->mask)) | 272 | (tm->mask == ANY || c->x86_stepping == tm->mask)) |
273 | return tm->tjmax; | 273 | return tm->tjmax; |
274 | } | 274 | } |
275 | 275 | ||
276 | /* Early chips have no MSR for TjMax */ | 276 | /* Early chips have no MSR for TjMax */ |
277 | 277 | ||
278 | if (c->x86_model == 0xf && c->x86_mask < 4) | 278 | if (c->x86_model == 0xf && c->x86_stepping < 4) |
279 | usemsr_ee = 0; | 279 | usemsr_ee = 0; |
280 | 280 | ||
281 | if (c->x86_model > 0xe && usemsr_ee) { | 281 | if (c->x86_model > 0xe && usemsr_ee) { |
@@ -426,7 +426,7 @@ static int chk_ucode_version(unsigned int cpu) | |||
426 | * Readings might stop update when processor visited too deep sleep, | 426 | * Readings might stop update when processor visited too deep sleep, |
427 | * fixed for stepping D0 (6EC). | 427 | * fixed for stepping D0 (6EC). |
428 | */ | 428 | */ |
429 | if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) { | 429 | if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) { |
430 | pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n"); | 430 | pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n"); |
431 | return -ENODEV; | 431 | return -ENODEV; |
432 | } | 432 | } |
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c index ef91b8a67549..84e91286fc4f 100644 --- a/drivers/hwmon/hwmon-vid.c +++ b/drivers/hwmon/hwmon-vid.c | |||
@@ -293,7 +293,7 @@ u8 vid_which_vrm(void) | |||
293 | if (c->x86 < 6) /* Any CPU with family lower than 6 */ | 293 | if (c->x86 < 6) /* Any CPU with family lower than 6 */ |
294 | return 0; /* doesn't have VID */ | 294 | return 0; /* doesn't have VID */ |
295 | 295 | ||
296 | vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor); | 296 | vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor); |
297 | if (vrm_ret == 134) | 297 | if (vrm_ret == 134) |
298 | vrm_ret = get_via_model_d_vrm(); | 298 | vrm_ret = get_via_model_d_vrm(); |
299 | if (vrm_ret == 0) | 299 | if (vrm_ret == 0) |
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 06b4e1c78bd8..051a72eecb24 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c | |||
@@ -129,7 +129,10 @@ static ssize_t temp1_input_show(struct device *dev, | |||
129 | 129 | ||
130 | data->read_tempreg(data->pdev, ®val); | 130 | data->read_tempreg(data->pdev, ®val); |
131 | temp = (regval >> 21) * 125; | 131 | temp = (regval >> 21) * 125; |
132 | temp -= data->temp_offset; | 132 | if (temp > data->temp_offset) |
133 | temp -= data->temp_offset; | ||
134 | else | ||
135 | temp = 0; | ||
133 | 136 | ||
134 | return sprintf(buf, "%u\n", temp); | 137 | return sprintf(buf, "%u\n", temp); |
135 | } | 138 | } |
@@ -227,7 +230,7 @@ static bool has_erratum_319(struct pci_dev *pdev) | |||
227 | * and AM3 formats, but that's the best we can do. | 230 | * and AM3 formats, but that's the best we can do. |
228 | */ | 231 | */ |
229 | return boot_cpu_data.x86_model < 4 || | 232 | return boot_cpu_data.x86_model < 4 || |
230 | (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2); | 233 | (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2); |
231 | } | 234 | } |
232 | 235 | ||
233 | static int k10temp_probe(struct pci_dev *pdev, | 236 | static int k10temp_probe(struct pci_dev *pdev, |
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c index 5a632bcf869b..e59f9113fb93 100644 --- a/drivers/hwmon/k8temp.c +++ b/drivers/hwmon/k8temp.c | |||
@@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev, | |||
187 | return -ENOMEM; | 187 | return -ENOMEM; |
188 | 188 | ||
189 | model = boot_cpu_data.x86_model; | 189 | model = boot_cpu_data.x86_model; |
190 | stepping = boot_cpu_data.x86_mask; | 190 | stepping = boot_cpu_data.x86_stepping; |
191 | 191 | ||
192 | /* feature available since SH-C0, exclude older revisions */ | 192 | /* feature available since SH-C0, exclude older revisions */ |
193 | if ((model == 4 && stepping == 0) || | 193 | if ((model == 4 && stepping == 0) || |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a9805c7cb305..e2954fb86d65 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -123,8 +123,10 @@ config I2C_I801 | |||
123 | Wildcat Point (PCH) | 123 | Wildcat Point (PCH) |
124 | Wildcat Point-LP (PCH) | 124 | Wildcat Point-LP (PCH) |
125 | BayTrail (SOC) | 125 | BayTrail (SOC) |
126 | Braswell (SOC) | ||
126 | Sunrise Point-H (PCH) | 127 | Sunrise Point-H (PCH) |
127 | Sunrise Point-LP (PCH) | 128 | Sunrise Point-LP (PCH) |
129 | Kaby Lake-H (PCH) | ||
128 | DNV (SOC) | 130 | DNV (SOC) |
129 | Broxton (SOC) | 131 | Broxton (SOC) |
130 | Lewisburg (PCH) | 132 | Lewisburg (PCH) |
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index cd07a69e2e93..44deae78913e 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c | |||
@@ -50,6 +50,9 @@ | |||
50 | #define BCM2835_I2C_S_CLKT BIT(9) | 50 | #define BCM2835_I2C_S_CLKT BIT(9) |
51 | #define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */ | 51 | #define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */ |
52 | 52 | ||
53 | #define BCM2835_I2C_FEDL_SHIFT 16 | ||
54 | #define BCM2835_I2C_REDL_SHIFT 0 | ||
55 | |||
53 | #define BCM2835_I2C_CDIV_MIN 0x0002 | 56 | #define BCM2835_I2C_CDIV_MIN 0x0002 |
54 | #define BCM2835_I2C_CDIV_MAX 0xFFFE | 57 | #define BCM2835_I2C_CDIV_MAX 0xFFFE |
55 | 58 | ||
@@ -81,7 +84,7 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg) | |||
81 | 84 | ||
82 | static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev) | 85 | static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev) |
83 | { | 86 | { |
84 | u32 divider; | 87 | u32 divider, redl, fedl; |
85 | 88 | ||
86 | divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), | 89 | divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk), |
87 | i2c_dev->bus_clk_rate); | 90 | i2c_dev->bus_clk_rate); |
@@ -100,6 +103,22 @@ static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev) | |||
100 | 103 | ||
101 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider); | 104 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider); |
102 | 105 | ||
106 | /* | ||
107 | * Number of core clocks to wait after falling edge before | ||
108 | * outputting the next data bit. Note that both FEDL and REDL | ||
109 | * can't be greater than CDIV/2. | ||
110 | */ | ||
111 | fedl = max(divider / 16, 1u); | ||
112 | |||
113 | /* | ||
114 | * Number of core clocks to wait after rising edge before | ||
115 | * sampling the next incoming data bit. | ||
116 | */ | ||
117 | redl = max(divider / 4, 1u); | ||
118 | |||
119 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DEL, | ||
120 | (fedl << BCM2835_I2C_FEDL_SHIFT) | | ||
121 | (redl << BCM2835_I2C_REDL_SHIFT)); | ||
103 | return 0; | 122 | return 0; |
104 | } | 123 | } |
105 | 124 | ||
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index ae691884d071..05732531829f 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c | |||
@@ -209,7 +209,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) | |||
209 | i2c_dw_disable_int(dev); | 209 | i2c_dw_disable_int(dev); |
210 | 210 | ||
211 | /* Enable the adapter */ | 211 | /* Enable the adapter */ |
212 | __i2c_dw_enable(dev, true); | 212 | __i2c_dw_enable_and_wait(dev, true); |
213 | 213 | ||
214 | /* Clear and enable interrupts */ | 214 | /* Clear and enable interrupts */ |
215 | dw_readl(dev, DW_IC_CLR_INTR); | 215 | dw_readl(dev, DW_IC_CLR_INTR); |
@@ -644,7 +644,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev) | |||
644 | gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH); | 644 | gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH); |
645 | if (IS_ERR(gpio)) { | 645 | if (IS_ERR(gpio)) { |
646 | r = PTR_ERR(gpio); | 646 | r = PTR_ERR(gpio); |
647 | if (r == -ENOENT) | 647 | if (r == -ENOENT || r == -ENOSYS) |
648 | return 0; | 648 | return 0; |
649 | return r; | 649 | return r; |
650 | } | 650 | } |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 8eac00efadc1..692b34125866 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
@@ -58,6 +58,7 @@ | |||
58 | * Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes | 58 | * Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes |
59 | * Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes | 59 | * Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes |
60 | * BayTrail (SOC) 0x0f12 32 hard yes yes yes | 60 | * BayTrail (SOC) 0x0f12 32 hard yes yes yes |
61 | * Braswell (SOC) 0x2292 32 hard yes yes yes | ||
61 | * Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes | 62 | * Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes |
62 | * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes | 63 | * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes |
63 | * DNV (SOC) 0x19df 32 hard yes yes yes | 64 | * DNV (SOC) 0x19df 32 hard yes yes yes |
diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c index 1d8775799056..d9607905dc2f 100644 --- a/drivers/i2c/busses/i2c-octeon-core.c +++ b/drivers/i2c/busses/i2c-octeon-core.c | |||
@@ -233,6 +233,7 @@ static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read) | |||
233 | return -EOPNOTSUPP; | 233 | return -EOPNOTSUPP; |
234 | 234 | ||
235 | case STAT_TXDATA_NAK: | 235 | case STAT_TXDATA_NAK: |
236 | case STAT_BUS_ERROR: | ||
236 | return -EIO; | 237 | return -EIO; |
237 | case STAT_TXADDR_NAK: | 238 | case STAT_TXADDR_NAK: |
238 | case STAT_RXADDR_NAK: | 239 | case STAT_RXADDR_NAK: |
diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h index a7ef19855bb8..9bb9f64fdda0 100644 --- a/drivers/i2c/busses/i2c-octeon-core.h +++ b/drivers/i2c/busses/i2c-octeon-core.h | |||
@@ -43,7 +43,7 @@ | |||
43 | #define TWSI_CTL_AAK 0x04 /* Assert ACK */ | 43 | #define TWSI_CTL_AAK 0x04 /* Assert ACK */ |
44 | 44 | ||
45 | /* Status values */ | 45 | /* Status values */ |
46 | #define STAT_ERROR 0x00 | 46 | #define STAT_BUS_ERROR 0x00 |
47 | #define STAT_START 0x08 | 47 | #define STAT_START 0x08 |
48 | #define STAT_REP_START 0x10 | 48 | #define STAT_REP_START 0x10 |
49 | #define STAT_TXADDR_ACK 0x18 | 49 | #define STAT_TXADDR_ACK 0x18 |
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c index 2fd8b6d00391..87197ece0f90 100644 --- a/drivers/i2c/busses/i2c-sirf.c +++ b/drivers/i2c/busses/i2c-sirf.c | |||
@@ -341,7 +341,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev) | |||
341 | platform_set_drvdata(pdev, adap); | 341 | platform_set_drvdata(pdev, adap); |
342 | init_completion(&siic->done); | 342 | init_completion(&siic->done); |
343 | 343 | ||
344 | /* Controller Initalisation */ | 344 | /* Controller initialisation */ |
345 | 345 | ||
346 | writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL); | 346 | writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL); |
347 | while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET) | 347 | while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET) |
@@ -369,7 +369,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev) | |||
369 | * but they start to affect the speed when clock is set to faster | 369 | * but they start to affect the speed when clock is set to faster |
370 | * frequencies. | 370 | * frequencies. |
371 | * Through the actual tests, use the different user_div value(which | 371 | * Through the actual tests, use the different user_div value(which |
372 | * in the divider formular 'Fio / (Fi2c * user_div)') to adapt | 372 | * in the divider formula 'Fio / (Fi2c * user_div)') to adapt |
373 | * the different ranges of i2c bus clock frequency, to make the SCL | 373 | * the different ranges of i2c bus clock frequency, to make the SCL |
374 | * more accurate. | 374 | * more accurate. |
375 | */ | 375 | */ |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 17fd55af4d92..caa20eb5f26b 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -928,7 +928,7 @@ static int exact_lock(dev_t dev, void *data) | |||
928 | { | 928 | { |
929 | struct gendisk *p = data; | 929 | struct gendisk *p = data; |
930 | 930 | ||
931 | if (!get_disk(p)) | 931 | if (!get_disk_and_module(p)) |
932 | return -1; | 932 | return -1; |
933 | return 0; | 933 | return 0; |
934 | } | 934 | } |
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 6fe995cf16a6..3e6fd5a8ac5b 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c | |||
@@ -920,6 +920,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = { | |||
920 | int st_accel_common_probe(struct iio_dev *indio_dev) | 920 | int st_accel_common_probe(struct iio_dev *indio_dev) |
921 | { | 921 | { |
922 | struct st_sensor_data *adata = iio_priv(indio_dev); | 922 | struct st_sensor_data *adata = iio_priv(indio_dev); |
923 | struct st_sensors_platform_data *pdata = | ||
924 | (struct st_sensors_platform_data *)adata->dev->platform_data; | ||
923 | int irq = adata->get_irq_data_ready(indio_dev); | 925 | int irq = adata->get_irq_data_ready(indio_dev); |
924 | int err; | 926 | int err; |
925 | 927 | ||
@@ -946,7 +948,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev) | |||
946 | &adata->sensor_settings->fs.fs_avl[0]; | 948 | &adata->sensor_settings->fs.fs_avl[0]; |
947 | adata->odr = adata->sensor_settings->odr.odr_avl[0].hz; | 949 | adata->odr = adata->sensor_settings->odr.odr_avl[0].hz; |
948 | 950 | ||
949 | err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data); | 951 | if (!pdata) |
952 | pdata = (struct st_sensors_platform_data *)&default_accel_pdata; | ||
953 | |||
954 | err = st_sensors_init_sensor(indio_dev, pdata); | ||
950 | if (err < 0) | 955 | if (err < 0) |
951 | goto st_accel_power_off; | 956 | goto st_accel_power_off; |
952 | 957 | ||
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c index 327a49ba1991..9515ca165dfd 100644 --- a/drivers/iio/adc/aspeed_adc.c +++ b/drivers/iio/adc/aspeed_adc.c | |||
@@ -243,7 +243,7 @@ static int aspeed_adc_probe(struct platform_device *pdev) | |||
243 | ASPEED_ADC_INIT_POLLING_TIME, | 243 | ASPEED_ADC_INIT_POLLING_TIME, |
244 | ASPEED_ADC_INIT_TIMEOUT); | 244 | ASPEED_ADC_INIT_TIMEOUT); |
245 | if (ret) | 245 | if (ret) |
246 | goto scaler_error; | 246 | goto poll_timeout_error; |
247 | } | 247 | } |
248 | 248 | ||
249 | /* Start all channels in normal mode. */ | 249 | /* Start all channels in normal mode. */ |
@@ -274,9 +274,10 @@ iio_register_error: | |||
274 | writel(ASPEED_OPERATION_MODE_POWER_DOWN, | 274 | writel(ASPEED_OPERATION_MODE_POWER_DOWN, |
275 | data->base + ASPEED_REG_ENGINE_CONTROL); | 275 | data->base + ASPEED_REG_ENGINE_CONTROL); |
276 | clk_disable_unprepare(data->clk_scaler->clk); | 276 | clk_disable_unprepare(data->clk_scaler->clk); |
277 | reset_error: | ||
278 | reset_control_assert(data->rst); | ||
279 | clk_enable_error: | 277 | clk_enable_error: |
278 | poll_timeout_error: | ||
279 | reset_control_assert(data->rst); | ||
280 | reset_error: | ||
280 | clk_hw_unregister_divider(data->clk_scaler); | 281 | clk_hw_unregister_divider(data->clk_scaler); |
281 | scaler_error: | 282 | scaler_error: |
282 | clk_hw_unregister_divider(data->clk_prescaler); | 283 | clk_hw_unregister_divider(data->clk_prescaler); |
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index 29fa7736d80c..ede955d9b2a4 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c | |||
@@ -462,8 +462,10 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev) | |||
462 | regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val); | 462 | regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val); |
463 | } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--); | 463 | } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--); |
464 | 464 | ||
465 | if (timeout < 0) | 465 | if (timeout < 0) { |
466 | mutex_unlock(&indio_dev->mlock); | ||
466 | return -ETIMEDOUT; | 467 | return -ETIMEDOUT; |
468 | } | ||
467 | } | 469 | } |
468 | 470 | ||
469 | return 0; | 471 | return 0; |
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index 7f5def465340..9a2583caedaa 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c | |||
@@ -722,8 +722,6 @@ static int stm32h7_adc_enable(struct stm32_adc *adc) | |||
722 | int ret; | 722 | int ret; |
723 | u32 val; | 723 | u32 val; |
724 | 724 | ||
725 | /* Clear ADRDY by writing one, then enable ADC */ | ||
726 | stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY); | ||
727 | stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); | 725 | stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); |
728 | 726 | ||
729 | /* Poll for ADRDY to be set (after adc startup time) */ | 727 | /* Poll for ADRDY to be set (after adc startup time) */ |
@@ -731,8 +729,11 @@ static int stm32h7_adc_enable(struct stm32_adc *adc) | |||
731 | val & STM32H7_ADRDY, | 729 | val & STM32H7_ADRDY, |
732 | 100, STM32_ADC_TIMEOUT_US); | 730 | 100, STM32_ADC_TIMEOUT_US); |
733 | if (ret) { | 731 | if (ret) { |
734 | stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN); | 732 | stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS); |
735 | dev_err(&indio_dev->dev, "Failed to enable ADC\n"); | 733 | dev_err(&indio_dev->dev, "Failed to enable ADC\n"); |
734 | } else { | ||
735 | /* Clear ADRDY by writing one */ | ||
736 | stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY); | ||
736 | } | 737 | } |
737 | 738 | ||
738 | return ret; | 739 | return ret; |
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c index daa026d6a94f..01422d11753c 100644 --- a/drivers/iio/adc/stm32-dfsdm-adc.c +++ b/drivers/iio/adc/stm32-dfsdm-adc.c | |||
@@ -54,7 +54,6 @@ struct stm32_dfsdm_adc { | |||
54 | struct stm32_dfsdm *dfsdm; | 54 | struct stm32_dfsdm *dfsdm; |
55 | const struct stm32_dfsdm_dev_data *dev_data; | 55 | const struct stm32_dfsdm_dev_data *dev_data; |
56 | unsigned int fl_id; | 56 | unsigned int fl_id; |
57 | unsigned int ch_id; | ||
58 | 57 | ||
59 | /* ADC specific */ | 58 | /* ADC specific */ |
60 | unsigned int oversamp; | 59 | unsigned int oversamp; |
@@ -384,7 +383,7 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev, | |||
384 | { | 383 | { |
385 | struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); | 384 | struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); |
386 | struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; | 385 | struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; |
387 | struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id]; | 386 | struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel]; |
388 | unsigned int sample_freq = adc->sample_freq; | 387 | unsigned int sample_freq = adc->sample_freq; |
389 | unsigned int spi_freq; | 388 | unsigned int spi_freq; |
390 | int ret; | 389 | int ret; |
@@ -419,18 +418,20 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev, | |||
419 | return len; | 418 | return len; |
420 | } | 419 | } |
421 | 420 | ||
422 | static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, bool dma) | 421 | static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, |
422 | const struct iio_chan_spec *chan, | ||
423 | bool dma) | ||
423 | { | 424 | { |
424 | struct regmap *regmap = adc->dfsdm->regmap; | 425 | struct regmap *regmap = adc->dfsdm->regmap; |
425 | int ret; | 426 | int ret; |
426 | unsigned int dma_en = 0, cont_en = 0; | 427 | unsigned int dma_en = 0, cont_en = 0; |
427 | 428 | ||
428 | ret = stm32_dfsdm_start_channel(adc->dfsdm, adc->ch_id); | 429 | ret = stm32_dfsdm_start_channel(adc->dfsdm, chan->channel); |
429 | if (ret < 0) | 430 | if (ret < 0) |
430 | return ret; | 431 | return ret; |
431 | 432 | ||
432 | ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id, | 433 | ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id, |
433 | adc->ch_id); | 434 | chan->channel); |
434 | if (ret < 0) | 435 | if (ret < 0) |
435 | goto stop_channels; | 436 | goto stop_channels; |
436 | 437 | ||
@@ -464,12 +465,13 @@ stop_channels: | |||
464 | 465 | ||
465 | regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id), | 466 | regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id), |
466 | DFSDM_CR1_RCONT_MASK, 0); | 467 | DFSDM_CR1_RCONT_MASK, 0); |
467 | stm32_dfsdm_stop_channel(adc->dfsdm, adc->fl_id); | 468 | stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel); |
468 | 469 | ||
469 | return ret; | 470 | return ret; |
470 | } | 471 | } |
471 | 472 | ||
472 | static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc) | 473 | static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc, |
474 | const struct iio_chan_spec *chan) | ||
473 | { | 475 | { |
474 | struct regmap *regmap = adc->dfsdm->regmap; | 476 | struct regmap *regmap = adc->dfsdm->regmap; |
475 | 477 | ||
@@ -482,7 +484,7 @@ static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc) | |||
482 | regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id), | 484 | regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id), |
483 | DFSDM_CR1_RCONT_MASK, 0); | 485 | DFSDM_CR1_RCONT_MASK, 0); |
484 | 486 | ||
485 | stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id); | 487 | stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel); |
486 | } | 488 | } |
487 | 489 | ||
488 | static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev, | 490 | static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev, |
@@ -609,6 +611,7 @@ static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev) | |||
609 | static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) | 611 | static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) |
610 | { | 612 | { |
611 | struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); | 613 | struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); |
614 | const struct iio_chan_spec *chan = &indio_dev->channels[0]; | ||
612 | int ret; | 615 | int ret; |
613 | 616 | ||
614 | /* Reset adc buffer index */ | 617 | /* Reset adc buffer index */ |
@@ -618,7 +621,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) | |||
618 | if (ret < 0) | 621 | if (ret < 0) |
619 | return ret; | 622 | return ret; |
620 | 623 | ||
621 | ret = stm32_dfsdm_start_conv(adc, true); | 624 | ret = stm32_dfsdm_start_conv(adc, chan, true); |
622 | if (ret) { | 625 | if (ret) { |
623 | dev_err(&indio_dev->dev, "Can't start conversion\n"); | 626 | dev_err(&indio_dev->dev, "Can't start conversion\n"); |
624 | goto stop_dfsdm; | 627 | goto stop_dfsdm; |
@@ -635,7 +638,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) | |||
635 | return 0; | 638 | return 0; |
636 | 639 | ||
637 | err_stop_conv: | 640 | err_stop_conv: |
638 | stm32_dfsdm_stop_conv(adc); | 641 | stm32_dfsdm_stop_conv(adc, chan); |
639 | stop_dfsdm: | 642 | stop_dfsdm: |
640 | stm32_dfsdm_stop_dfsdm(adc->dfsdm); | 643 | stm32_dfsdm_stop_dfsdm(adc->dfsdm); |
641 | 644 | ||
@@ -645,11 +648,12 @@ stop_dfsdm: | |||
645 | static int stm32_dfsdm_predisable(struct iio_dev *indio_dev) | 648 | static int stm32_dfsdm_predisable(struct iio_dev *indio_dev) |
646 | { | 649 | { |
647 | struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); | 650 | struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); |
651 | const struct iio_chan_spec *chan = &indio_dev->channels[0]; | ||
648 | 652 | ||
649 | if (adc->dma_chan) | 653 | if (adc->dma_chan) |
650 | dmaengine_terminate_all(adc->dma_chan); | 654 | dmaengine_terminate_all(adc->dma_chan); |
651 | 655 | ||
652 | stm32_dfsdm_stop_conv(adc); | 656 | stm32_dfsdm_stop_conv(adc, chan); |
653 | 657 | ||
654 | stm32_dfsdm_stop_dfsdm(adc->dfsdm); | 658 | stm32_dfsdm_stop_dfsdm(adc->dfsdm); |
655 | 659 | ||
@@ -730,7 +734,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev, | |||
730 | if (ret < 0) | 734 | if (ret < 0) |
731 | goto stop_dfsdm; | 735 | goto stop_dfsdm; |
732 | 736 | ||
733 | ret = stm32_dfsdm_start_conv(adc, false); | 737 | ret = stm32_dfsdm_start_conv(adc, chan, false); |
734 | if (ret < 0) { | 738 | if (ret < 0) { |
735 | regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id), | 739 | regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id), |
736 | DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0)); | 740 | DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0)); |
@@ -751,7 +755,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev, | |||
751 | else | 755 | else |
752 | ret = IIO_VAL_INT; | 756 | ret = IIO_VAL_INT; |
753 | 757 | ||
754 | stm32_dfsdm_stop_conv(adc); | 758 | stm32_dfsdm_stop_conv(adc, chan); |
755 | 759 | ||
756 | stop_dfsdm: | 760 | stop_dfsdm: |
757 | stm32_dfsdm_stop_dfsdm(adc->dfsdm); | 761 | stm32_dfsdm_stop_dfsdm(adc->dfsdm); |
@@ -765,7 +769,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev, | |||
765 | { | 769 | { |
766 | struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); | 770 | struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); |
767 | struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; | 771 | struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; |
768 | struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id]; | 772 | struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel]; |
769 | unsigned int spi_freq = adc->spi_freq; | 773 | unsigned int spi_freq = adc->spi_freq; |
770 | int ret = -EINVAL; | 774 | int ret = -EINVAL; |
771 | 775 | ||
@@ -972,7 +976,6 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev, | |||
972 | } | 976 | } |
973 | ch->scan_type.realbits = 24; | 977 | ch->scan_type.realbits = 24; |
974 | ch->scan_type.storagebits = 32; | 978 | ch->scan_type.storagebits = 32; |
975 | adc->ch_id = ch->channel; | ||
976 | 979 | ||
977 | return stm32_dfsdm_chan_configure(adc->dfsdm, | 980 | return stm32_dfsdm_chan_configure(adc->dfsdm, |
978 | &adc->dfsdm->ch_list[ch->channel]); | 981 | &adc->dfsdm->ch_list[ch->channel]); |
@@ -1001,7 +1004,7 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev) | |||
1001 | } | 1004 | } |
1002 | ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ); | 1005 | ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ); |
1003 | 1006 | ||
1004 | d_ch = &adc->dfsdm->ch_list[adc->ch_id]; | 1007 | d_ch = &adc->dfsdm->ch_list[ch->channel]; |
1005 | if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL) | 1008 | if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL) |
1006 | adc->spi_freq = adc->dfsdm->spi_master_freq; | 1009 | adc->spi_freq = adc->dfsdm->spi_master_freq; |
1007 | 1010 | ||
@@ -1042,8 +1045,8 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev) | |||
1042 | return -ENOMEM; | 1045 | return -ENOMEM; |
1043 | 1046 | ||
1044 | for (chan_idx = 0; chan_idx < num_ch; chan_idx++) { | 1047 | for (chan_idx = 0; chan_idx < num_ch; chan_idx++) { |
1045 | ch->scan_index = chan_idx; | 1048 | ch[chan_idx].scan_index = chan_idx; |
1046 | ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch); | 1049 | ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &ch[chan_idx]); |
1047 | if (ret < 0) { | 1050 | if (ret < 0) { |
1048 | dev_err(&indio_dev->dev, "Channels init failed\n"); | 1051 | dev_err(&indio_dev->dev, "Channels init failed\n"); |
1049 | return ret; | 1052 | return ret; |
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c index 6290332cfd3f..e50efdcc41ff 100644 --- a/drivers/iio/adc/stm32-dfsdm-core.c +++ b/drivers/iio/adc/stm32-dfsdm-core.c | |||
@@ -83,7 +83,7 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm) | |||
83 | { | 83 | { |
84 | struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm); | 84 | struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm); |
85 | struct device *dev = &priv->pdev->dev; | 85 | struct device *dev = &priv->pdev->dev; |
86 | unsigned int clk_div = priv->spi_clk_out_div; | 86 | unsigned int clk_div = priv->spi_clk_out_div, clk_src; |
87 | int ret; | 87 | int ret; |
88 | 88 | ||
89 | if (atomic_inc_return(&priv->n_active_ch) == 1) { | 89 | if (atomic_inc_return(&priv->n_active_ch) == 1) { |
@@ -100,6 +100,14 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm) | |||
100 | } | 100 | } |
101 | } | 101 | } |
102 | 102 | ||
103 | /* select clock source, e.g. 0 for "dfsdm" or 1 for "audio" */ | ||
104 | clk_src = priv->aclk ? 1 : 0; | ||
105 | ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0), | ||
106 | DFSDM_CHCFGR1_CKOUTSRC_MASK, | ||
107 | DFSDM_CHCFGR1_CKOUTSRC(clk_src)); | ||
108 | if (ret < 0) | ||
109 | goto disable_aclk; | ||
110 | |||
103 | /* Output the SPI CLKOUT (if clk_div == 0 clock if OFF) */ | 111 | /* Output the SPI CLKOUT (if clk_div == 0 clock if OFF) */ |
104 | ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0), | 112 | ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0), |
105 | DFSDM_CHCFGR1_CKOUTDIV_MASK, | 113 | DFSDM_CHCFGR1_CKOUTDIV_MASK, |
@@ -274,7 +282,7 @@ static int stm32_dfsdm_probe(struct platform_device *pdev) | |||
274 | 282 | ||
275 | dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm", | 283 | dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm", |
276 | dfsdm->base, | 284 | dfsdm->base, |
277 | &stm32h7_dfsdm_regmap_cfg); | 285 | dev_data->regmap_cfg); |
278 | if (IS_ERR(dfsdm->regmap)) { | 286 | if (IS_ERR(dfsdm->regmap)) { |
279 | ret = PTR_ERR(dfsdm->regmap); | 287 | ret = PTR_ERR(dfsdm->regmap); |
280 | dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n", | 288 | dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n", |
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index fbe2431f5b81..1ea9f5513b02 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c | |||
@@ -133,6 +133,9 @@ static int ccs811_start_sensor_application(struct i2c_client *client) | |||
133 | if (ret < 0) | 133 | if (ret < 0) |
134 | return ret; | 134 | return ret; |
135 | 135 | ||
136 | if ((ret & CCS811_STATUS_FW_MODE_APPLICATION)) | ||
137 | return 0; | ||
138 | |||
136 | if ((ret & CCS811_STATUS_APP_VALID_MASK) != | 139 | if ((ret & CCS811_STATUS_APP_VALID_MASK) != |
137 | CCS811_STATUS_APP_VALID_LOADED) | 140 | CCS811_STATUS_APP_VALID_LOADED) |
138 | return -EIO; | 141 | return -EIO; |
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c index 0dd5a381be64..457372f36791 100644 --- a/drivers/iio/imu/adis_trigger.c +++ b/drivers/iio/imu/adis_trigger.c | |||
@@ -46,6 +46,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) | |||
46 | if (adis->trig == NULL) | 46 | if (adis->trig == NULL) |
47 | return -ENOMEM; | 47 | return -ENOMEM; |
48 | 48 | ||
49 | adis->trig->dev.parent = &adis->spi->dev; | ||
50 | adis->trig->ops = &adis_trigger_ops; | ||
51 | iio_trigger_set_drvdata(adis->trig, adis); | ||
52 | |||
49 | ret = request_irq(adis->spi->irq, | 53 | ret = request_irq(adis->spi->irq, |
50 | &iio_trigger_generic_data_rdy_poll, | 54 | &iio_trigger_generic_data_rdy_poll, |
51 | IRQF_TRIGGER_RISING, | 55 | IRQF_TRIGGER_RISING, |
@@ -54,9 +58,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) | |||
54 | if (ret) | 58 | if (ret) |
55 | goto error_free_trig; | 59 | goto error_free_trig; |
56 | 60 | ||
57 | adis->trig->dev.parent = &adis->spi->dev; | ||
58 | adis->trig->ops = &adis_trigger_ops; | ||
59 | iio_trigger_set_drvdata(adis->trig, adis); | ||
60 | ret = iio_trigger_register(adis->trig); | 61 | ret = iio_trigger_register(adis->trig); |
61 | 62 | ||
62 | indio_dev->trig = iio_trigger_get(adis->trig); | 63 | indio_dev->trig = iio_trigger_get(adis->trig); |
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 79abf70a126d..cd5bfe39591b 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c | |||
@@ -175,7 +175,7 @@ __poll_t iio_buffer_poll(struct file *filp, | |||
175 | struct iio_dev *indio_dev = filp->private_data; | 175 | struct iio_dev *indio_dev = filp->private_data; |
176 | struct iio_buffer *rb = indio_dev->buffer; | 176 | struct iio_buffer *rb = indio_dev->buffer; |
177 | 177 | ||
178 | if (!indio_dev->info) | 178 | if (!indio_dev->info || rb == NULL) |
179 | return 0; | 179 | return 0; |
180 | 180 | ||
181 | poll_wait(filp, &rb->pollq, wait); | 181 | poll_wait(filp, &rb->pollq, wait); |
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 349e5c713c03..4ddb6cf7d401 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c | |||
@@ -640,7 +640,7 @@ int st_press_common_probe(struct iio_dev *indio_dev) | |||
640 | press_data->sensor_settings->drdy_irq.int2.addr)) | 640 | press_data->sensor_settings->drdy_irq.int2.addr)) |
641 | pdata = (struct st_sensors_platform_data *)&default_press_pdata; | 641 | pdata = (struct st_sensors_platform_data *)&default_press_pdata; |
642 | 642 | ||
643 | err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data); | 643 | err = st_sensors_init_sensor(indio_dev, pdata); |
644 | if (err < 0) | 644 | if (err < 0) |
645 | goto st_press_power_off; | 645 | goto st_press_power_off; |
646 | 646 | ||
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig index fcb1c4ba5e41..f726f9427602 100644 --- a/drivers/iio/proximity/Kconfig +++ b/drivers/iio/proximity/Kconfig | |||
@@ -68,6 +68,8 @@ config SX9500 | |||
68 | 68 | ||
69 | config SRF08 | 69 | config SRF08 |
70 | tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor" | 70 | tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor" |
71 | select IIO_BUFFER | ||
72 | select IIO_TRIGGERED_BUFFER | ||
71 | depends on I2C | 73 | depends on I2C |
72 | help | 74 | help |
73 | Say Y here to build a driver for Devantech SRF02/SRF08/SRF10 | 75 | Say Y here to build a driver for Devantech SRF02/SRF08/SRF10 |
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index a5b4cf030c11..9183d148d644 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -550,18 +550,13 @@ static int addr_resolve(struct sockaddr *src_in, | |||
550 | dst_release(dst); | 550 | dst_release(dst); |
551 | } | 551 | } |
552 | 552 | ||
553 | if (ndev->flags & IFF_LOOPBACK) { | 553 | if (ndev) { |
554 | ret = rdma_translate_ip(dst_in, addr); | 554 | if (ndev->flags & IFF_LOOPBACK) |
555 | /* | 555 | ret = rdma_translate_ip(dst_in, addr); |
556 | * Put the loopback device and get the translated | 556 | else |
557 | * device instead. | 557 | addr->bound_dev_if = ndev->ifindex; |
558 | */ | ||
559 | dev_put(ndev); | 558 | dev_put(ndev); |
560 | ndev = dev_get_by_index(addr->net, addr->bound_dev_if); | ||
561 | } else { | ||
562 | addr->bound_dev_if = ndev->ifindex; | ||
563 | } | 559 | } |
564 | dev_put(ndev); | ||
565 | 560 | ||
566 | return ret; | 561 | return ret; |
567 | } | 562 | } |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index e66963ca58bd..a5367c5efbe7 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -3069,7 +3069,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list, | |||
3069 | continue; | 3069 | continue; |
3070 | 3070 | ||
3071 | /* different dest port -> unique */ | 3071 | /* different dest port -> unique */ |
3072 | if (!cma_any_port(cur_daddr) && | 3072 | if (!cma_any_port(daddr) && |
3073 | !cma_any_port(cur_daddr) && | ||
3073 | (dport != cur_dport)) | 3074 | (dport != cur_dport)) |
3074 | continue; | 3075 | continue; |
3075 | 3076 | ||
@@ -3080,7 +3081,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list, | |||
3080 | continue; | 3081 | continue; |
3081 | 3082 | ||
3082 | /* different dst address -> unique */ | 3083 | /* different dst address -> unique */ |
3083 | if (!cma_any_addr(cur_daddr) && | 3084 | if (!cma_any_addr(daddr) && |
3085 | !cma_any_addr(cur_daddr) && | ||
3084 | cma_addr_cmp(daddr, cur_daddr)) | 3086 | cma_addr_cmp(daddr, cur_daddr)) |
3085 | continue; | 3087 | continue; |
3086 | 3088 | ||
@@ -3378,13 +3380,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) | |||
3378 | } | 3380 | } |
3379 | #endif | 3381 | #endif |
3380 | } | 3382 | } |
3383 | daddr = cma_dst_addr(id_priv); | ||
3384 | daddr->sa_family = addr->sa_family; | ||
3385 | |||
3381 | ret = cma_get_port(id_priv); | 3386 | ret = cma_get_port(id_priv); |
3382 | if (ret) | 3387 | if (ret) |
3383 | goto err2; | 3388 | goto err2; |
3384 | 3389 | ||
3385 | daddr = cma_dst_addr(id_priv); | ||
3386 | daddr->sa_family = addr->sa_family; | ||
3387 | |||
3388 | return 0; | 3390 | return 0; |
3389 | err2: | 3391 | err2: |
3390 | if (id_priv->cma_dev) | 3392 | if (id_priv->cma_dev) |
@@ -4173,6 +4175,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, | |||
4173 | struct cma_multicast *mc; | 4175 | struct cma_multicast *mc; |
4174 | int ret; | 4176 | int ret; |
4175 | 4177 | ||
4178 | if (!id->device) | ||
4179 | return -EINVAL; | ||
4180 | |||
4176 | id_priv = container_of(id, struct rdma_id_private, id); | 4181 | id_priv = container_of(id, struct rdma_id_private, id); |
4177 | if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && | 4182 | if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && |
4178 | !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) | 4183 | !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) |
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index c4560d84dfae..25bb178f6074 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h | |||
@@ -305,16 +305,21 @@ void nldev_exit(void); | |||
305 | static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, | 305 | static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, |
306 | struct ib_pd *pd, | 306 | struct ib_pd *pd, |
307 | struct ib_qp_init_attr *attr, | 307 | struct ib_qp_init_attr *attr, |
308 | struct ib_udata *udata) | 308 | struct ib_udata *udata, |
309 | struct ib_uobject *uobj) | ||
309 | { | 310 | { |
310 | struct ib_qp *qp; | 311 | struct ib_qp *qp; |
311 | 312 | ||
313 | if (!dev->create_qp) | ||
314 | return ERR_PTR(-EOPNOTSUPP); | ||
315 | |||
312 | qp = dev->create_qp(pd, attr, udata); | 316 | qp = dev->create_qp(pd, attr, udata); |
313 | if (IS_ERR(qp)) | 317 | if (IS_ERR(qp)) |
314 | return qp; | 318 | return qp; |
315 | 319 | ||
316 | qp->device = dev; | 320 | qp->device = dev; |
317 | qp->pd = pd; | 321 | qp->pd = pd; |
322 | qp->uobject = uobj; | ||
318 | /* | 323 | /* |
319 | * We don't track XRC QPs for now, because they don't have PD | 324 | * We don't track XRC QPs for now, because they don't have PD |
320 | * and more importantly they are created internaly by driver, | 325 | * and more importantly they are created internaly by driver, |
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index bc79ca8215d7..af5ad6a56ae4 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | /* # of WCs to poll for with a single call to ib_poll_cq */ | 18 | /* # of WCs to poll for with a single call to ib_poll_cq */ |
19 | #define IB_POLL_BATCH 16 | 19 | #define IB_POLL_BATCH 16 |
20 | #define IB_POLL_BATCH_DIRECT 8 | ||
20 | 21 | ||
21 | /* # of WCs to iterate over before yielding */ | 22 | /* # of WCs to iterate over before yielding */ |
22 | #define IB_POLL_BUDGET_IRQ 256 | 23 | #define IB_POLL_BUDGET_IRQ 256 |
@@ -25,18 +26,18 @@ | |||
25 | #define IB_POLL_FLAGS \ | 26 | #define IB_POLL_FLAGS \ |
26 | (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) | 27 | (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) |
27 | 28 | ||
28 | static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) | 29 | static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, |
30 | int batch) | ||
29 | { | 31 | { |
30 | int i, n, completed = 0; | 32 | int i, n, completed = 0; |
31 | struct ib_wc *wcs = poll_wc ? : cq->wc; | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * budget might be (-1) if the caller does not | 35 | * budget might be (-1) if the caller does not |
35 | * want to bound this call, thus we need unsigned | 36 | * want to bound this call, thus we need unsigned |
36 | * minimum here. | 37 | * minimum here. |
37 | */ | 38 | */ |
38 | while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH, | 39 | while ((n = ib_poll_cq(cq, min_t(u32, batch, |
39 | budget - completed), wcs)) > 0) { | 40 | budget - completed), wcs)) > 0) { |
40 | for (i = 0; i < n; i++) { | 41 | for (i = 0; i < n; i++) { |
41 | struct ib_wc *wc = &wcs[i]; | 42 | struct ib_wc *wc = &wcs[i]; |
42 | 43 | ||
@@ -48,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) | |||
48 | 49 | ||
49 | completed += n; | 50 | completed += n; |
50 | 51 | ||
51 | if (n != IB_POLL_BATCH || | 52 | if (n != batch || (budget != -1 && completed >= budget)) |
52 | (budget != -1 && completed >= budget)) | ||
53 | break; | 53 | break; |
54 | } | 54 | } |
55 | 55 | ||
@@ -72,9 +72,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc) | |||
72 | */ | 72 | */ |
73 | int ib_process_cq_direct(struct ib_cq *cq, int budget) | 73 | int ib_process_cq_direct(struct ib_cq *cq, int budget) |
74 | { | 74 | { |
75 | struct ib_wc wcs[IB_POLL_BATCH]; | 75 | struct ib_wc wcs[IB_POLL_BATCH_DIRECT]; |
76 | 76 | ||
77 | return __ib_process_cq(cq, budget, wcs); | 77 | return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT); |
78 | } | 78 | } |
79 | EXPORT_SYMBOL(ib_process_cq_direct); | 79 | EXPORT_SYMBOL(ib_process_cq_direct); |
80 | 80 | ||
@@ -88,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget) | |||
88 | struct ib_cq *cq = container_of(iop, struct ib_cq, iop); | 88 | struct ib_cq *cq = container_of(iop, struct ib_cq, iop); |
89 | int completed; | 89 | int completed; |
90 | 90 | ||
91 | completed = __ib_process_cq(cq, budget, NULL); | 91 | completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); |
92 | if (completed < budget) { | 92 | if (completed < budget) { |
93 | irq_poll_complete(&cq->iop); | 93 | irq_poll_complete(&cq->iop); |
94 | if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) | 94 | if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) |
@@ -108,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work) | |||
108 | struct ib_cq *cq = container_of(work, struct ib_cq, work); | 108 | struct ib_cq *cq = container_of(work, struct ib_cq, work); |
109 | int completed; | 109 | int completed; |
110 | 110 | ||
111 | completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL); | 111 | completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, |
112 | IB_POLL_BATCH); | ||
112 | if (completed >= IB_POLL_BUDGET_WORKQUEUE || | 113 | if (completed >= IB_POLL_BUDGET_WORKQUEUE || |
113 | ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) | 114 | ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) |
114 | queue_work(ib_comp_wq, &cq->work); | 115 | queue_work(ib_comp_wq, &cq->work); |
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index e8010e73a1cf..bb065c9449be 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
@@ -536,14 +536,14 @@ int ib_register_device(struct ib_device *device, | |||
536 | ret = device->query_device(device, &device->attrs, &uhw); | 536 | ret = device->query_device(device, &device->attrs, &uhw); |
537 | if (ret) { | 537 | if (ret) { |
538 | pr_warn("Couldn't query the device attributes\n"); | 538 | pr_warn("Couldn't query the device attributes\n"); |
539 | goto cache_cleanup; | 539 | goto cg_cleanup; |
540 | } | 540 | } |
541 | 541 | ||
542 | ret = ib_device_register_sysfs(device, port_callback); | 542 | ret = ib_device_register_sysfs(device, port_callback); |
543 | if (ret) { | 543 | if (ret) { |
544 | pr_warn("Couldn't register device %s with driver model\n", | 544 | pr_warn("Couldn't register device %s with driver model\n", |
545 | device->name); | 545 | device->name); |
546 | goto cache_cleanup; | 546 | goto cg_cleanup; |
547 | } | 547 | } |
548 | 548 | ||
549 | device->reg_state = IB_DEV_REGISTERED; | 549 | device->reg_state = IB_DEV_REGISTERED; |
@@ -559,6 +559,8 @@ int ib_register_device(struct ib_device *device, | |||
559 | mutex_unlock(&device_mutex); | 559 | mutex_unlock(&device_mutex); |
560 | return 0; | 560 | return 0; |
561 | 561 | ||
562 | cg_cleanup: | ||
563 | ib_device_unregister_rdmacg(device); | ||
562 | cache_cleanup: | 564 | cache_cleanup: |
563 | ib_cache_cleanup_one(device); | 565 | ib_cache_cleanup_one(device); |
564 | ib_cache_release_one(device); | 566 | ib_cache_release_one(device); |
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 85b5ee4defa4..d8eead5d106d 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c | |||
@@ -141,7 +141,12 @@ static struct ib_uobject *alloc_uobj(struct ib_ucontext *context, | |||
141 | */ | 141 | */ |
142 | uobj->context = context; | 142 | uobj->context = context; |
143 | uobj->type = type; | 143 | uobj->type = type; |
144 | atomic_set(&uobj->usecnt, 0); | 144 | /* |
145 | * Allocated objects start out as write locked to deny any other | ||
146 | * syscalls from accessing them until they are committed. See | ||
147 | * rdma_alloc_commit_uobject | ||
148 | */ | ||
149 | atomic_set(&uobj->usecnt, -1); | ||
145 | kref_init(&uobj->ref); | 150 | kref_init(&uobj->ref); |
146 | 151 | ||
147 | return uobj; | 152 | return uobj; |
@@ -196,7 +201,15 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t | |||
196 | goto free; | 201 | goto free; |
197 | } | 202 | } |
198 | 203 | ||
199 | uverbs_uobject_get(uobj); | 204 | /* |
205 | * The idr_find is guaranteed to return a pointer to something that | ||
206 | * isn't freed yet, or NULL, as the free after idr_remove goes through | ||
207 | * kfree_rcu(). However the object may still have been released and | ||
208 | * kfree() could be called at any time. | ||
209 | */ | ||
210 | if (!kref_get_unless_zero(&uobj->ref)) | ||
211 | uobj = ERR_PTR(-ENOENT); | ||
212 | |||
200 | free: | 213 | free: |
201 | rcu_read_unlock(); | 214 | rcu_read_unlock(); |
202 | return uobj; | 215 | return uobj; |
@@ -399,13 +412,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj, | |||
399 | return ret; | 412 | return ret; |
400 | } | 413 | } |
401 | 414 | ||
402 | static void lockdep_check(struct ib_uobject *uobj, bool exclusive) | 415 | static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive) |
403 | { | 416 | { |
404 | #ifdef CONFIG_LOCKDEP | 417 | #ifdef CONFIG_LOCKDEP |
405 | if (exclusive) | 418 | if (exclusive) |
406 | WARN_ON(atomic_read(&uobj->usecnt) > 0); | 419 | WARN_ON(atomic_read(&uobj->usecnt) != -1); |
407 | else | 420 | else |
408 | WARN_ON(atomic_read(&uobj->usecnt) == -1); | 421 | WARN_ON(atomic_read(&uobj->usecnt) <= 0); |
409 | #endif | 422 | #endif |
410 | } | 423 | } |
411 | 424 | ||
@@ -444,7 +457,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj) | |||
444 | WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); | 457 | WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); |
445 | return 0; | 458 | return 0; |
446 | } | 459 | } |
447 | lockdep_check(uobj, true); | 460 | assert_uverbs_usecnt(uobj, true); |
448 | ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY); | 461 | ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY); |
449 | 462 | ||
450 | up_read(&ucontext->cleanup_rwsem); | 463 | up_read(&ucontext->cleanup_rwsem); |
@@ -474,16 +487,17 @@ int rdma_explicit_destroy(struct ib_uobject *uobject) | |||
474 | WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); | 487 | WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n"); |
475 | return 0; | 488 | return 0; |
476 | } | 489 | } |
477 | lockdep_check(uobject, true); | 490 | assert_uverbs_usecnt(uobject, true); |
478 | ret = uobject->type->type_class->remove_commit(uobject, | 491 | ret = uobject->type->type_class->remove_commit(uobject, |
479 | RDMA_REMOVE_DESTROY); | 492 | RDMA_REMOVE_DESTROY); |
480 | if (ret) | 493 | if (ret) |
481 | return ret; | 494 | goto out; |
482 | 495 | ||
483 | uobject->type = &null_obj_type; | 496 | uobject->type = &null_obj_type; |
484 | 497 | ||
498 | out: | ||
485 | up_read(&ucontext->cleanup_rwsem); | 499 | up_read(&ucontext->cleanup_rwsem); |
486 | return 0; | 500 | return ret; |
487 | } | 501 | } |
488 | 502 | ||
489 | static void alloc_commit_idr_uobject(struct ib_uobject *uobj) | 503 | static void alloc_commit_idr_uobject(struct ib_uobject *uobj) |
@@ -527,6 +541,10 @@ int rdma_alloc_commit_uobject(struct ib_uobject *uobj) | |||
527 | return ret; | 541 | return ret; |
528 | } | 542 | } |
529 | 543 | ||
544 | /* matches atomic_set(-1) in alloc_uobj */ | ||
545 | assert_uverbs_usecnt(uobj, true); | ||
546 | atomic_set(&uobj->usecnt, 0); | ||
547 | |||
530 | uobj->type->type_class->alloc_commit(uobj); | 548 | uobj->type->type_class->alloc_commit(uobj); |
531 | up_read(&uobj->context->cleanup_rwsem); | 549 | up_read(&uobj->context->cleanup_rwsem); |
532 | 550 | ||
@@ -561,7 +579,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive) | |||
561 | 579 | ||
562 | void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive) | 580 | void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive) |
563 | { | 581 | { |
564 | lockdep_check(uobj, exclusive); | 582 | assert_uverbs_usecnt(uobj, exclusive); |
565 | uobj->type->type_class->lookup_put(uobj, exclusive); | 583 | uobj->type->type_class->lookup_put(uobj, exclusive); |
566 | /* | 584 | /* |
567 | * In order to unlock an object, either decrease its usecnt for | 585 | * In order to unlock an object, either decrease its usecnt for |
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index 857637bf46da..3dbc4e4cca41 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <rdma/restrack.h> | 7 | #include <rdma/restrack.h> |
8 | #include <linux/mutex.h> | 8 | #include <linux/mutex.h> |
9 | #include <linux/sched/task.h> | 9 | #include <linux/sched/task.h> |
10 | #include <linux/uaccess.h> | ||
11 | #include <linux/pid_namespace.h> | 10 | #include <linux/pid_namespace.h> |
12 | 11 | ||
13 | void rdma_restrack_init(struct rdma_restrack_root *res) | 12 | void rdma_restrack_init(struct rdma_restrack_root *res) |
@@ -63,7 +62,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res) | |||
63 | { | 62 | { |
64 | enum rdma_restrack_type type = res->type; | 63 | enum rdma_restrack_type type = res->type; |
65 | struct ib_device *dev; | 64 | struct ib_device *dev; |
66 | struct ib_xrcd *xrcd; | ||
67 | struct ib_pd *pd; | 65 | struct ib_pd *pd; |
68 | struct ib_cq *cq; | 66 | struct ib_cq *cq; |
69 | struct ib_qp *qp; | 67 | struct ib_qp *qp; |
@@ -81,10 +79,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res) | |||
81 | qp = container_of(res, struct ib_qp, res); | 79 | qp = container_of(res, struct ib_qp, res); |
82 | dev = qp->device; | 80 | dev = qp->device; |
83 | break; | 81 | break; |
84 | case RDMA_RESTRACK_XRCD: | ||
85 | xrcd = container_of(res, struct ib_xrcd, res); | ||
86 | dev = xrcd->device; | ||
87 | break; | ||
88 | default: | 82 | default: |
89 | WARN_ONCE(true, "Wrong resource tracking type %u\n", type); | 83 | WARN_ONCE(true, "Wrong resource tracking type %u\n", type); |
90 | return NULL; | 84 | return NULL; |
@@ -93,6 +87,21 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res) | |||
93 | return dev; | 87 | return dev; |
94 | } | 88 | } |
95 | 89 | ||
90 | static bool res_is_user(struct rdma_restrack_entry *res) | ||
91 | { | ||
92 | switch (res->type) { | ||
93 | case RDMA_RESTRACK_PD: | ||
94 | return container_of(res, struct ib_pd, res)->uobject; | ||
95 | case RDMA_RESTRACK_CQ: | ||
96 | return container_of(res, struct ib_cq, res)->uobject; | ||
97 | case RDMA_RESTRACK_QP: | ||
98 | return container_of(res, struct ib_qp, res)->uobject; | ||
99 | default: | ||
100 | WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type); | ||
101 | return false; | ||
102 | } | ||
103 | } | ||
104 | |||
96 | void rdma_restrack_add(struct rdma_restrack_entry *res) | 105 | void rdma_restrack_add(struct rdma_restrack_entry *res) |
97 | { | 106 | { |
98 | struct ib_device *dev = res_to_dev(res); | 107 | struct ib_device *dev = res_to_dev(res); |
@@ -100,7 +109,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res) | |||
100 | if (!dev) | 109 | if (!dev) |
101 | return; | 110 | return; |
102 | 111 | ||
103 | if (!uaccess_kernel()) { | 112 | if (res_is_user(res)) { |
104 | get_task_struct(current); | 113 | get_task_struct(current); |
105 | res->task = current; | 114 | res->task = current; |
106 | res->kern_name = NULL; | 115 | res->kern_name = NULL; |
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 8cf15d4a8ac4..9f029a1ca5ea 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -1291,10 +1291,9 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num, | |||
1291 | 1291 | ||
1292 | resolved_dev = dev_get_by_index(dev_addr.net, | 1292 | resolved_dev = dev_get_by_index(dev_addr.net, |
1293 | dev_addr.bound_dev_if); | 1293 | dev_addr.bound_dev_if); |
1294 | if (resolved_dev->flags & IFF_LOOPBACK) { | 1294 | if (!resolved_dev) { |
1295 | dev_put(resolved_dev); | 1295 | dev_put(idev); |
1296 | resolved_dev = idev; | 1296 | return -ENODEV; |
1297 | dev_hold(resolved_dev); | ||
1298 | } | 1297 | } |
1299 | ndev = ib_get_ndev_from_path(rec); | 1298 | ndev = ib_get_ndev_from_path(rec); |
1300 | rcu_read_lock(); | 1299 | rcu_read_lock(); |
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index f015f1bf88c9..e5a1e7d81326 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -132,7 +132,7 @@ static inline struct ucma_context *_ucma_find_context(int id, | |||
132 | ctx = idr_find(&ctx_idr, id); | 132 | ctx = idr_find(&ctx_idr, id); |
133 | if (!ctx) | 133 | if (!ctx) |
134 | ctx = ERR_PTR(-ENOENT); | 134 | ctx = ERR_PTR(-ENOENT); |
135 | else if (ctx->file != file) | 135 | else if (ctx->file != file || !ctx->cm_id) |
136 | ctx = ERR_PTR(-EINVAL); | 136 | ctx = ERR_PTR(-EINVAL); |
137 | return ctx; | 137 | return ctx; |
138 | } | 138 | } |
@@ -456,6 +456,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, | |||
456 | struct rdma_ucm_create_id cmd; | 456 | struct rdma_ucm_create_id cmd; |
457 | struct rdma_ucm_create_id_resp resp; | 457 | struct rdma_ucm_create_id_resp resp; |
458 | struct ucma_context *ctx; | 458 | struct ucma_context *ctx; |
459 | struct rdma_cm_id *cm_id; | ||
459 | enum ib_qp_type qp_type; | 460 | enum ib_qp_type qp_type; |
460 | int ret; | 461 | int ret; |
461 | 462 | ||
@@ -476,10 +477,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, | |||
476 | return -ENOMEM; | 477 | return -ENOMEM; |
477 | 478 | ||
478 | ctx->uid = cmd.uid; | 479 | ctx->uid = cmd.uid; |
479 | ctx->cm_id = rdma_create_id(current->nsproxy->net_ns, | 480 | cm_id = rdma_create_id(current->nsproxy->net_ns, |
480 | ucma_event_handler, ctx, cmd.ps, qp_type); | 481 | ucma_event_handler, ctx, cmd.ps, qp_type); |
481 | if (IS_ERR(ctx->cm_id)) { | 482 | if (IS_ERR(cm_id)) { |
482 | ret = PTR_ERR(ctx->cm_id); | 483 | ret = PTR_ERR(cm_id); |
483 | goto err1; | 484 | goto err1; |
484 | } | 485 | } |
485 | 486 | ||
@@ -489,14 +490,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, | |||
489 | ret = -EFAULT; | 490 | ret = -EFAULT; |
490 | goto err2; | 491 | goto err2; |
491 | } | 492 | } |
493 | |||
494 | ctx->cm_id = cm_id; | ||
492 | return 0; | 495 | return 0; |
493 | 496 | ||
494 | err2: | 497 | err2: |
495 | rdma_destroy_id(ctx->cm_id); | 498 | rdma_destroy_id(cm_id); |
496 | err1: | 499 | err1: |
497 | mutex_lock(&mut); | 500 | mutex_lock(&mut); |
498 | idr_remove(&ctx_idr, ctx->id); | 501 | idr_remove(&ctx_idr, ctx->id); |
499 | mutex_unlock(&mut); | 502 | mutex_unlock(&mut); |
503 | mutex_lock(&file->mut); | ||
504 | list_del(&ctx->list); | ||
505 | mutex_unlock(&file->mut); | ||
500 | kfree(ctx); | 506 | kfree(ctx); |
501 | return ret; | 507 | return ret; |
502 | } | 508 | } |
@@ -664,19 +670,23 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file, | |||
664 | int in_len, int out_len) | 670 | int in_len, int out_len) |
665 | { | 671 | { |
666 | struct rdma_ucm_resolve_ip cmd; | 672 | struct rdma_ucm_resolve_ip cmd; |
673 | struct sockaddr *src, *dst; | ||
667 | struct ucma_context *ctx; | 674 | struct ucma_context *ctx; |
668 | int ret; | 675 | int ret; |
669 | 676 | ||
670 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | 677 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) |
671 | return -EFAULT; | 678 | return -EFAULT; |
672 | 679 | ||
680 | src = (struct sockaddr *) &cmd.src_addr; | ||
681 | dst = (struct sockaddr *) &cmd.dst_addr; | ||
682 | if (!rdma_addr_size(src) || !rdma_addr_size(dst)) | ||
683 | return -EINVAL; | ||
684 | |||
673 | ctx = ucma_get_ctx(file, cmd.id); | 685 | ctx = ucma_get_ctx(file, cmd.id); |
674 | if (IS_ERR(ctx)) | 686 | if (IS_ERR(ctx)) |
675 | return PTR_ERR(ctx); | 687 | return PTR_ERR(ctx); |
676 | 688 | ||
677 | ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, | 689 | ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); |
678 | (struct sockaddr *) &cmd.dst_addr, | ||
679 | cmd.timeout_ms); | ||
680 | ucma_put_ctx(ctx); | 690 | ucma_put_ctx(ctx); |
681 | return ret; | 691 | return ret; |
682 | } | 692 | } |
@@ -1149,6 +1159,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, | |||
1149 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | 1159 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) |
1150 | return -EFAULT; | 1160 | return -EFAULT; |
1151 | 1161 | ||
1162 | if (cmd.qp_state > IB_QPS_ERR) | ||
1163 | return -EINVAL; | ||
1164 | |||
1152 | ctx = ucma_get_ctx(file, cmd.id); | 1165 | ctx = ucma_get_ctx(file, cmd.id); |
1153 | if (IS_ERR(ctx)) | 1166 | if (IS_ERR(ctx)) |
1154 | return PTR_ERR(ctx); | 1167 | return PTR_ERR(ctx); |
@@ -1294,6 +1307,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, | |||
1294 | if (IS_ERR(ctx)) | 1307 | if (IS_ERR(ctx)) |
1295 | return PTR_ERR(ctx); | 1308 | return PTR_ERR(ctx); |
1296 | 1309 | ||
1310 | if (unlikely(cmd.optval > KMALLOC_MAX_SIZE)) | ||
1311 | return -EINVAL; | ||
1312 | |||
1297 | optval = memdup_user((void __user *) (unsigned long) cmd.optval, | 1313 | optval = memdup_user((void __user *) (unsigned long) cmd.optval, |
1298 | cmd.optlen); | 1314 | cmd.optlen); |
1299 | if (IS_ERR(optval)) { | 1315 | if (IS_ERR(optval)) { |
@@ -1343,7 +1359,7 @@ static ssize_t ucma_process_join(struct ucma_file *file, | |||
1343 | return -ENOSPC; | 1359 | return -ENOSPC; |
1344 | 1360 | ||
1345 | addr = (struct sockaddr *) &cmd->addr; | 1361 | addr = (struct sockaddr *) &cmd->addr; |
1346 | if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) | 1362 | if (cmd->addr_size != rdma_addr_size(addr)) |
1347 | return -EINVAL; | 1363 | return -EINVAL; |
1348 | 1364 | ||
1349 | if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) | 1365 | if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) |
@@ -1411,6 +1427,9 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file, | |||
1411 | join_cmd.uid = cmd.uid; | 1427 | join_cmd.uid = cmd.uid; |
1412 | join_cmd.id = cmd.id; | 1428 | join_cmd.id = cmd.id; |
1413 | join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); | 1429 | join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); |
1430 | if (!join_cmd.addr_size) | ||
1431 | return -EINVAL; | ||
1432 | |||
1414 | join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER; | 1433 | join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER; |
1415 | memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); | 1434 | memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); |
1416 | 1435 | ||
@@ -1426,6 +1445,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file, | |||
1426 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | 1445 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) |
1427 | return -EFAULT; | 1446 | return -EFAULT; |
1428 | 1447 | ||
1448 | if (!rdma_addr_size((struct sockaddr *)&cmd.addr)) | ||
1449 | return -EINVAL; | ||
1450 | |||
1429 | return ucma_process_join(file, &cmd, out_len); | 1451 | return ucma_process_join(file, &cmd, out_len); |
1430 | } | 1452 | } |
1431 | 1453 | ||
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 256934d1f64f..a148de35df8d 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -562,9 +562,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, | |||
562 | if (f.file) | 562 | if (f.file) |
563 | fdput(f); | 563 | fdput(f); |
564 | 564 | ||
565 | mutex_unlock(&file->device->xrcd_tree_mutex); | ||
566 | |||
565 | uobj_alloc_commit(&obj->uobject); | 567 | uobj_alloc_commit(&obj->uobject); |
566 | 568 | ||
567 | mutex_unlock(&file->device->xrcd_tree_mutex); | ||
568 | return in_len; | 569 | return in_len; |
569 | 570 | ||
570 | err_copy: | 571 | err_copy: |
@@ -603,10 +604,8 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file, | |||
603 | 604 | ||
604 | uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle, | 605 | uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle, |
605 | file->ucontext); | 606 | file->ucontext); |
606 | if (IS_ERR(uobj)) { | 607 | if (IS_ERR(uobj)) |
607 | mutex_unlock(&file->device->xrcd_tree_mutex); | ||
608 | return PTR_ERR(uobj); | 608 | return PTR_ERR(uobj); |
609 | } | ||
610 | 609 | ||
611 | ret = uobj_remove_commit(uobj); | 610 | ret = uobj_remove_commit(uobj); |
612 | return ret ?: in_len; | 611 | return ret ?: in_len; |
@@ -979,6 +978,9 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, | |||
979 | struct ib_uverbs_ex_create_cq_resp resp; | 978 | struct ib_uverbs_ex_create_cq_resp resp; |
980 | struct ib_cq_init_attr attr = {}; | 979 | struct ib_cq_init_attr attr = {}; |
981 | 980 | ||
981 | if (!ib_dev->create_cq) | ||
982 | return ERR_PTR(-EOPNOTSUPP); | ||
983 | |||
982 | if (cmd->comp_vector >= file->device->num_comp_vectors) | 984 | if (cmd->comp_vector >= file->device->num_comp_vectors) |
983 | return ERR_PTR(-EINVAL); | 985 | return ERR_PTR(-EINVAL); |
984 | 986 | ||
@@ -1030,14 +1032,14 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, | |||
1030 | resp.response_length = offsetof(typeof(resp), response_length) + | 1032 | resp.response_length = offsetof(typeof(resp), response_length) + |
1031 | sizeof(resp.response_length); | 1033 | sizeof(resp.response_length); |
1032 | 1034 | ||
1035 | cq->res.type = RDMA_RESTRACK_CQ; | ||
1036 | rdma_restrack_add(&cq->res); | ||
1037 | |||
1033 | ret = cb(file, obj, &resp, ucore, context); | 1038 | ret = cb(file, obj, &resp, ucore, context); |
1034 | if (ret) | 1039 | if (ret) |
1035 | goto err_cb; | 1040 | goto err_cb; |
1036 | 1041 | ||
1037 | uobj_alloc_commit(&obj->uobject); | 1042 | uobj_alloc_commit(&obj->uobject); |
1038 | cq->res.type = RDMA_RESTRACK_CQ; | ||
1039 | rdma_restrack_add(&cq->res); | ||
1040 | |||
1041 | return obj; | 1043 | return obj; |
1042 | 1044 | ||
1043 | err_cb: | 1045 | err_cb: |
@@ -1518,7 +1520,8 @@ static int create_qp(struct ib_uverbs_file *file, | |||
1518 | if (cmd->qp_type == IB_QPT_XRC_TGT) | 1520 | if (cmd->qp_type == IB_QPT_XRC_TGT) |
1519 | qp = ib_create_qp(pd, &attr); | 1521 | qp = ib_create_qp(pd, &attr); |
1520 | else | 1522 | else |
1521 | qp = _ib_create_qp(device, pd, &attr, uhw); | 1523 | qp = _ib_create_qp(device, pd, &attr, uhw, |
1524 | &obj->uevent.uobject); | ||
1522 | 1525 | ||
1523 | if (IS_ERR(qp)) { | 1526 | if (IS_ERR(qp)) { |
1524 | ret = PTR_ERR(qp); | 1527 | ret = PTR_ERR(qp); |
@@ -1550,8 +1553,10 @@ static int create_qp(struct ib_uverbs_file *file, | |||
1550 | atomic_inc(&attr.srq->usecnt); | 1553 | atomic_inc(&attr.srq->usecnt); |
1551 | if (ind_tbl) | 1554 | if (ind_tbl) |
1552 | atomic_inc(&ind_tbl->usecnt); | 1555 | atomic_inc(&ind_tbl->usecnt); |
1556 | } else { | ||
1557 | /* It is done in _ib_create_qp for other QP types */ | ||
1558 | qp->uobject = &obj->uevent.uobject; | ||
1553 | } | 1559 | } |
1554 | qp->uobject = &obj->uevent.uobject; | ||
1555 | 1560 | ||
1556 | obj->uevent.uobject.object = qp; | 1561 | obj->uevent.uobject.object = qp; |
1557 | 1562 | ||
@@ -1971,8 +1976,15 @@ static int modify_qp(struct ib_uverbs_file *file, | |||
1971 | goto release_qp; | 1976 | goto release_qp; |
1972 | } | 1977 | } |
1973 | 1978 | ||
1979 | if ((cmd->base.attr_mask & IB_QP_AV) && | ||
1980 | !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) { | ||
1981 | ret = -EINVAL; | ||
1982 | goto release_qp; | ||
1983 | } | ||
1984 | |||
1974 | if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && | 1985 | if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && |
1975 | !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) { | 1986 | (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) || |
1987 | !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) { | ||
1976 | ret = -EINVAL; | 1988 | ret = -EINVAL; |
1977 | goto release_qp; | 1989 | goto release_qp; |
1978 | } | 1990 | } |
@@ -2941,6 +2953,11 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file, | |||
2941 | wq_init_attr.create_flags = cmd.create_flags; | 2953 | wq_init_attr.create_flags = cmd.create_flags; |
2942 | obj->uevent.events_reported = 0; | 2954 | obj->uevent.events_reported = 0; |
2943 | INIT_LIST_HEAD(&obj->uevent.event_list); | 2955 | INIT_LIST_HEAD(&obj->uevent.event_list); |
2956 | |||
2957 | if (!pd->device->create_wq) { | ||
2958 | err = -EOPNOTSUPP; | ||
2959 | goto err_put_cq; | ||
2960 | } | ||
2944 | wq = pd->device->create_wq(pd, &wq_init_attr, uhw); | 2961 | wq = pd->device->create_wq(pd, &wq_init_attr, uhw); |
2945 | if (IS_ERR(wq)) { | 2962 | if (IS_ERR(wq)) { |
2946 | err = PTR_ERR(wq); | 2963 | err = PTR_ERR(wq); |
@@ -3084,7 +3101,12 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file, | |||
3084 | wq_attr.flags = cmd.flags; | 3101 | wq_attr.flags = cmd.flags; |
3085 | wq_attr.flags_mask = cmd.flags_mask; | 3102 | wq_attr.flags_mask = cmd.flags_mask; |
3086 | } | 3103 | } |
3104 | if (!wq->device->modify_wq) { | ||
3105 | ret = -EOPNOTSUPP; | ||
3106 | goto out; | ||
3107 | } | ||
3087 | ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); | 3108 | ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); |
3109 | out: | ||
3088 | uobj_put_obj_read(wq); | 3110 | uobj_put_obj_read(wq); |
3089 | return ret; | 3111 | return ret; |
3090 | } | 3112 | } |
@@ -3181,6 +3203,11 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, | |||
3181 | 3203 | ||
3182 | init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; | 3204 | init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; |
3183 | init_attr.ind_tbl = wqs; | 3205 | init_attr.ind_tbl = wqs; |
3206 | |||
3207 | if (!ib_dev->create_rwq_ind_table) { | ||
3208 | err = -EOPNOTSUPP; | ||
3209 | goto err_uobj; | ||
3210 | } | ||
3184 | rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); | 3211 | rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); |
3185 | 3212 | ||
3186 | if (IS_ERR(rwq_ind_tbl)) { | 3213 | if (IS_ERR(rwq_ind_tbl)) { |
@@ -3770,6 +3797,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, | |||
3770 | struct ib_device_attr attr = {0}; | 3797 | struct ib_device_attr attr = {0}; |
3771 | int err; | 3798 | int err; |
3772 | 3799 | ||
3800 | if (!ib_dev->query_device) | ||
3801 | return -EOPNOTSUPP; | ||
3802 | |||
3773 | if (ucore->inlen < sizeof(cmd)) | 3803 | if (ucore->inlen < sizeof(cmd)) |
3774 | return -EINVAL; | 3804 | return -EINVAL; |
3775 | 3805 | ||
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index d96dc1d17be1..339b85145044 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c | |||
@@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev, | |||
59 | return 0; | 59 | return 0; |
60 | } | 60 | } |
61 | 61 | ||
62 | if (test_bit(attr_id, attr_bundle_h->valid_bitmap)) | ||
63 | return -EINVAL; | ||
64 | |||
62 | spec = &attr_spec_bucket->attrs[attr_id]; | 65 | spec = &attr_spec_bucket->attrs[attr_id]; |
63 | e = &elements[attr_id]; | 66 | e = &elements[attr_id]; |
64 | e->uattr = uattr_ptr; | 67 | e->uattr = uattr_ptr; |
diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c index 062485f9300d..62e1eb1d2a28 100644 --- a/drivers/infiniband/core/uverbs_ioctl_merge.c +++ b/drivers/infiniband/core/uverbs_ioctl_merge.c | |||
@@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters, | |||
114 | short min = SHRT_MAX; | 114 | short min = SHRT_MAX; |
115 | const void *elem; | 115 | const void *elem; |
116 | int i, j, last_stored = -1; | 116 | int i, j, last_stored = -1; |
117 | unsigned int equal_min = 0; | ||
117 | 118 | ||
118 | for_each_element(elem, i, j, elements, num_elements, num_offset, | 119 | for_each_element(elem, i, j, elements, num_elements, num_offset, |
119 | data_offset) { | 120 | data_offset) { |
@@ -136,6 +137,10 @@ static size_t get_elements_above_id(const void **iters, | |||
136 | */ | 137 | */ |
137 | iters[last_stored == i ? num_iters - 1 : num_iters++] = elem; | 138 | iters[last_stored == i ? num_iters - 1 : num_iters++] = elem; |
138 | last_stored = i; | 139 | last_stored = i; |
140 | if (min == GET_ID(id)) | ||
141 | equal_min++; | ||
142 | else | ||
143 | equal_min = 1; | ||
139 | min = GET_ID(id); | 144 | min = GET_ID(id); |
140 | } | 145 | } |
141 | 146 | ||
@@ -146,15 +151,10 @@ static size_t get_elements_above_id(const void **iters, | |||
146 | * Therefore, we need to clean the beginning of the array to make sure | 151 | * Therefore, we need to clean the beginning of the array to make sure |
147 | * all ids of final elements are equal to min. | 152 | * all ids of final elements are equal to min. |
148 | */ | 153 | */ |
149 | for (i = num_iters - 1; i >= 0 && | 154 | memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min); |
150 | GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--) | ||
151 | ; | ||
152 | |||
153 | num_iters -= i + 1; | ||
154 | memmove(iters, iters + i + 1, sizeof(*iters) * num_iters); | ||
155 | 155 | ||
156 | *min_id = min; | 156 | *min_id = min; |
157 | return num_iters; | 157 | return equal_min; |
158 | } | 158 | } |
159 | 159 | ||
160 | #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \ | 160 | #define find_max_element_entry_id(num_elements, elements, num_objects_fld, \ |
@@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me | |||
322 | hash = kzalloc(sizeof(*hash) + | 322 | hash = kzalloc(sizeof(*hash) + |
323 | ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1), | 323 | ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1), |
324 | sizeof(long)) + | 324 | sizeof(long)) + |
325 | BITS_TO_LONGS(attr_max_bucket) * sizeof(long), | 325 | BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long), |
326 | GFP_KERNEL); | 326 | GFP_KERNEL); |
327 | if (!hash) { | 327 | if (!hash) { |
328 | res = -ENOMEM; | 328 | res = -ENOMEM; |
@@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_ | |||
509 | * first handler which != NULL. This also defines the | 509 | * first handler which != NULL. This also defines the |
510 | * set of flags used for this handler. | 510 | * set of flags used for this handler. |
511 | */ | 511 | */ |
512 | for (i = num_object_defs - 1; | 512 | for (i = num_method_defs - 1; |
513 | i >= 0 && !method_defs[i]->handler; i--) | 513 | i >= 0 && !method_defs[i]->handler; i--) |
514 | ; | 514 | ; |
515 | hash->methods[min_id++] = method; | 515 | hash->methods[min_id++] = method; |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 395a3b091229..b1ca223aa380 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -650,12 +650,21 @@ static int verify_command_mask(struct ib_device *ib_dev, __u32 command) | |||
650 | return -1; | 650 | return -1; |
651 | } | 651 | } |
652 | 652 | ||
653 | static bool verify_command_idx(u32 command, bool extended) | ||
654 | { | ||
655 | if (extended) | ||
656 | return command < ARRAY_SIZE(uverbs_ex_cmd_table); | ||
657 | |||
658 | return command < ARRAY_SIZE(uverbs_cmd_table); | ||
659 | } | ||
660 | |||
653 | static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | 661 | static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, |
654 | size_t count, loff_t *pos) | 662 | size_t count, loff_t *pos) |
655 | { | 663 | { |
656 | struct ib_uverbs_file *file = filp->private_data; | 664 | struct ib_uverbs_file *file = filp->private_data; |
657 | struct ib_device *ib_dev; | 665 | struct ib_device *ib_dev; |
658 | struct ib_uverbs_cmd_hdr hdr; | 666 | struct ib_uverbs_cmd_hdr hdr; |
667 | bool extended_command; | ||
659 | __u32 command; | 668 | __u32 command; |
660 | __u32 flags; | 669 | __u32 flags; |
661 | int srcu_key; | 670 | int srcu_key; |
@@ -688,6 +697,15 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
688 | } | 697 | } |
689 | 698 | ||
690 | command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; | 699 | command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK; |
700 | flags = (hdr.command & | ||
701 | IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT; | ||
702 | |||
703 | extended_command = flags & IB_USER_VERBS_CMD_FLAG_EXTENDED; | ||
704 | if (!verify_command_idx(command, extended_command)) { | ||
705 | ret = -EINVAL; | ||
706 | goto out; | ||
707 | } | ||
708 | |||
691 | if (verify_command_mask(ib_dev, command)) { | 709 | if (verify_command_mask(ib_dev, command)) { |
692 | ret = -EOPNOTSUPP; | 710 | ret = -EOPNOTSUPP; |
693 | goto out; | 711 | goto out; |
@@ -699,12 +717,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
699 | goto out; | 717 | goto out; |
700 | } | 718 | } |
701 | 719 | ||
702 | flags = (hdr.command & | ||
703 | IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT; | ||
704 | |||
705 | if (!flags) { | 720 | if (!flags) { |
706 | if (command >= ARRAY_SIZE(uverbs_cmd_table) || | 721 | if (!uverbs_cmd_table[command]) { |
707 | !uverbs_cmd_table[command]) { | ||
708 | ret = -EINVAL; | 722 | ret = -EINVAL; |
709 | goto out; | 723 | goto out; |
710 | } | 724 | } |
@@ -725,8 +739,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
725 | struct ib_udata uhw; | 739 | struct ib_udata uhw; |
726 | size_t written_count = count; | 740 | size_t written_count = count; |
727 | 741 | ||
728 | if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) || | 742 | if (!uverbs_ex_cmd_table[command]) { |
729 | !uverbs_ex_cmd_table[command]) { | ||
730 | ret = -ENOSYS; | 743 | ret = -ENOSYS; |
731 | goto out; | 744 | goto out; |
732 | } | 745 | } |
@@ -942,6 +955,7 @@ static const struct file_operations uverbs_fops = { | |||
942 | .llseek = no_llseek, | 955 | .llseek = no_llseek, |
943 | #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS) | 956 | #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS) |
944 | .unlocked_ioctl = ib_uverbs_ioctl, | 957 | .unlocked_ioctl = ib_uverbs_ioctl, |
958 | .compat_ioctl = ib_uverbs_ioctl, | ||
945 | #endif | 959 | #endif |
946 | }; | 960 | }; |
947 | 961 | ||
@@ -954,6 +968,7 @@ static const struct file_operations uverbs_mmap_fops = { | |||
954 | .llseek = no_llseek, | 968 | .llseek = no_llseek, |
955 | #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS) | 969 | #if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS) |
956 | .unlocked_ioctl = ib_uverbs_ioctl, | 970 | .unlocked_ioctl = ib_uverbs_ioctl, |
971 | .compat_ioctl = ib_uverbs_ioctl, | ||
957 | #endif | 972 | #endif |
958 | }; | 973 | }; |
959 | 974 | ||
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index cab0ac3556eb..df1360e6774f 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c | |||
@@ -234,15 +234,18 @@ static void create_udata(struct uverbs_attr_bundle *ctx, | |||
234 | uverbs_attr_get(ctx, UVERBS_UHW_OUT); | 234 | uverbs_attr_get(ctx, UVERBS_UHW_OUT); |
235 | 235 | ||
236 | if (!IS_ERR(uhw_in)) { | 236 | if (!IS_ERR(uhw_in)) { |
237 | udata->inbuf = uhw_in->ptr_attr.ptr; | ||
238 | udata->inlen = uhw_in->ptr_attr.len; | 237 | udata->inlen = uhw_in->ptr_attr.len; |
238 | if (uverbs_attr_ptr_is_inline(uhw_in)) | ||
239 | udata->inbuf = &uhw_in->uattr->data; | ||
240 | else | ||
241 | udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data); | ||
239 | } else { | 242 | } else { |
240 | udata->inbuf = NULL; | 243 | udata->inbuf = NULL; |
241 | udata->inlen = 0; | 244 | udata->inlen = 0; |
242 | } | 245 | } |
243 | 246 | ||
244 | if (!IS_ERR(uhw_out)) { | 247 | if (!IS_ERR(uhw_out)) { |
245 | udata->outbuf = uhw_out->ptr_attr.ptr; | 248 | udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data); |
246 | udata->outlen = uhw_out->ptr_attr.len; | 249 | udata->outlen = uhw_out->ptr_attr.len; |
247 | } else { | 250 | } else { |
248 | udata->outbuf = NULL; | 251 | udata->outbuf = NULL; |
@@ -323,7 +326,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev, | |||
323 | cq->res.type = RDMA_RESTRACK_CQ; | 326 | cq->res.type = RDMA_RESTRACK_CQ; |
324 | rdma_restrack_add(&cq->res); | 327 | rdma_restrack_add(&cq->res); |
325 | 328 | ||
326 | ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe); | 329 | ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe, |
330 | sizeof(cq->cqe)); | ||
327 | if (ret) | 331 | if (ret) |
328 | goto err_cq; | 332 | goto err_cq; |
329 | 333 | ||
@@ -375,7 +379,7 @@ static int uverbs_destroy_cq_handler(struct ib_device *ib_dev, | |||
375 | resp.comp_events_reported = obj->comp_events_reported; | 379 | resp.comp_events_reported = obj->comp_events_reported; |
376 | resp.async_events_reported = obj->async_events_reported; | 380 | resp.async_events_reported = obj->async_events_reported; |
377 | 381 | ||
378 | return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp); | 382 | return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp, sizeof(resp)); |
379 | } | 383 | } |
380 | 384 | ||
381 | static DECLARE_UVERBS_METHOD( | 385 | static DECLARE_UVERBS_METHOD( |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 16ebc6372c31..93025d2009b8 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
@@ -887,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, | |||
887 | if (qp_init_attr->cap.max_rdma_ctxs) | 887 | if (qp_init_attr->cap.max_rdma_ctxs) |
888 | rdma_rw_init_qp(device, qp_init_attr); | 888 | rdma_rw_init_qp(device, qp_init_attr); |
889 | 889 | ||
890 | qp = _ib_create_qp(device, pd, qp_init_attr, NULL); | 890 | qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL); |
891 | if (IS_ERR(qp)) | 891 | if (IS_ERR(qp)) |
892 | return qp; | 892 | return qp; |
893 | 893 | ||
@@ -898,7 +898,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, | |||
898 | } | 898 | } |
899 | 899 | ||
900 | qp->real_qp = qp; | 900 | qp->real_qp = qp; |
901 | qp->uobject = NULL; | ||
902 | qp->qp_type = qp_init_attr->qp_type; | 901 | qp->qp_type = qp_init_attr->qp_type; |
903 | qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; | 902 | qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl; |
904 | 903 | ||
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index ca32057e886f..96f76896488d 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h | |||
@@ -57,8 +57,8 @@ | |||
57 | #define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M) | 57 | #define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M) |
58 | #define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G) | 58 | #define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G) |
59 | 59 | ||
60 | #define BNXT_RE_MAX_MR_SIZE_LOW BIT(BNXT_RE_PAGE_SHIFT_1G) | 60 | #define BNXT_RE_MAX_MR_SIZE_LOW BIT_ULL(BNXT_RE_PAGE_SHIFT_1G) |
61 | #define BNXT_RE_MAX_MR_SIZE_HIGH BIT(39) | 61 | #define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39) |
62 | #define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH | 62 | #define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH |
63 | 63 | ||
64 | #define BNXT_RE_MAX_QPC_COUNT (64 * 1024) | 64 | #define BNXT_RE_MAX_QPC_COUNT (64 * 1024) |
@@ -120,7 +120,6 @@ struct bnxt_re_dev { | |||
120 | #define BNXT_RE_FLAG_HAVE_L2_REF 3 | 120 | #define BNXT_RE_FLAG_HAVE_L2_REF 3 |
121 | #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 | 121 | #define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4 |
122 | #define BNXT_RE_FLAG_QOS_WORK_REG 5 | 122 | #define BNXT_RE_FLAG_QOS_WORK_REG 5 |
123 | #define BNXT_RE_FLAG_TASK_IN_PROG 6 | ||
124 | #define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29 | 123 | #define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29 |
125 | struct net_device *netdev; | 124 | struct net_device *netdev; |
126 | unsigned int version, major, minor; | 125 | unsigned int version, major, minor; |
@@ -158,6 +157,7 @@ struct bnxt_re_dev { | |||
158 | atomic_t srq_count; | 157 | atomic_t srq_count; |
159 | atomic_t mr_count; | 158 | atomic_t mr_count; |
160 | atomic_t mw_count; | 159 | atomic_t mw_count; |
160 | atomic_t sched_count; | ||
161 | /* Max of 2 lossless traffic class supported per port */ | 161 | /* Max of 2 lossless traffic class supported per port */ |
162 | u16 cosq[2]; | 162 | u16 cosq[2]; |
163 | 163 | ||
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index ae9e9ff54826..8301d7e5fa8c 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c | |||
@@ -174,10 +174,8 @@ int bnxt_re_query_device(struct ib_device *ibdev, | |||
174 | ib_attr->max_pd = dev_attr->max_pd; | 174 | ib_attr->max_pd = dev_attr->max_pd; |
175 | ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom; | 175 | ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom; |
176 | ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom; | 176 | ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom; |
177 | if (dev_attr->is_atomic) { | 177 | ib_attr->atomic_cap = IB_ATOMIC_NONE; |
178 | ib_attr->atomic_cap = IB_ATOMIC_HCA; | 178 | ib_attr->masked_atomic_cap = IB_ATOMIC_NONE; |
179 | ib_attr->masked_atomic_cap = IB_ATOMIC_HCA; | ||
180 | } | ||
181 | 179 | ||
182 | ib_attr->max_ee_rd_atom = 0; | 180 | ib_attr->max_ee_rd_atom = 0; |
183 | ib_attr->max_res_rd_atom = 0; | 181 | ib_attr->max_res_rd_atom = 0; |
@@ -787,20 +785,51 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr) | |||
787 | return 0; | 785 | return 0; |
788 | } | 786 | } |
789 | 787 | ||
788 | unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp) | ||
789 | __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock) | ||
790 | { | ||
791 | unsigned long flags; | ||
792 | |||
793 | spin_lock_irqsave(&qp->scq->cq_lock, flags); | ||
794 | if (qp->rcq != qp->scq) | ||
795 | spin_lock(&qp->rcq->cq_lock); | ||
796 | else | ||
797 | __acquire(&qp->rcq->cq_lock); | ||
798 | |||
799 | return flags; | ||
800 | } | ||
801 | |||
802 | void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, | ||
803 | unsigned long flags) | ||
804 | __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock) | ||
805 | { | ||
806 | if (qp->rcq != qp->scq) | ||
807 | spin_unlock(&qp->rcq->cq_lock); | ||
808 | else | ||
809 | __release(&qp->rcq->cq_lock); | ||
810 | spin_unlock_irqrestore(&qp->scq->cq_lock, flags); | ||
811 | } | ||
812 | |||
790 | /* Queue Pairs */ | 813 | /* Queue Pairs */ |
791 | int bnxt_re_destroy_qp(struct ib_qp *ib_qp) | 814 | int bnxt_re_destroy_qp(struct ib_qp *ib_qp) |
792 | { | 815 | { |
793 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); | 816 | struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); |
794 | struct bnxt_re_dev *rdev = qp->rdev; | 817 | struct bnxt_re_dev *rdev = qp->rdev; |
795 | int rc; | 818 | int rc; |
819 | unsigned int flags; | ||
796 | 820 | ||
797 | bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); | 821 | bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); |
798 | bnxt_qplib_del_flush_qp(&qp->qplib_qp); | ||
799 | rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); | 822 | rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); |
800 | if (rc) { | 823 | if (rc) { |
801 | dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP"); | 824 | dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP"); |
802 | return rc; | 825 | return rc; |
803 | } | 826 | } |
827 | |||
828 | flags = bnxt_re_lock_cqs(qp); | ||
829 | bnxt_qplib_clean_qp(&qp->qplib_qp); | ||
830 | bnxt_re_unlock_cqs(qp, flags); | ||
831 | bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp); | ||
832 | |||
804 | if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) { | 833 | if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) { |
805 | rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, | 834 | rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, |
806 | &rdev->sqp_ah->qplib_ah); | 835 | &rdev->sqp_ah->qplib_ah); |
@@ -810,7 +839,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp) | |||
810 | return rc; | 839 | return rc; |
811 | } | 840 | } |
812 | 841 | ||
813 | bnxt_qplib_del_flush_qp(&qp->qplib_qp); | 842 | bnxt_qplib_clean_qp(&qp->qplib_qp); |
814 | rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, | 843 | rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, |
815 | &rdev->qp1_sqp->qplib_qp); | 844 | &rdev->qp1_sqp->qplib_qp); |
816 | if (rc) { | 845 | if (rc) { |
@@ -1069,6 +1098,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
1069 | goto fail; | 1098 | goto fail; |
1070 | } | 1099 | } |
1071 | qp->qplib_qp.scq = &cq->qplib_cq; | 1100 | qp->qplib_qp.scq = &cq->qplib_cq; |
1101 | qp->scq = cq; | ||
1072 | } | 1102 | } |
1073 | 1103 | ||
1074 | if (qp_init_attr->recv_cq) { | 1104 | if (qp_init_attr->recv_cq) { |
@@ -1080,6 +1110,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
1080 | goto fail; | 1110 | goto fail; |
1081 | } | 1111 | } |
1082 | qp->qplib_qp.rcq = &cq->qplib_cq; | 1112 | qp->qplib_qp.rcq = &cq->qplib_cq; |
1113 | qp->rcq = cq; | ||
1083 | } | 1114 | } |
1084 | 1115 | ||
1085 | if (qp_init_attr->srq) { | 1116 | if (qp_init_attr->srq) { |
@@ -1185,7 +1216,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
1185 | rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); | 1216 | rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); |
1186 | if (rc) { | 1217 | if (rc) { |
1187 | dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); | 1218 | dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); |
1188 | goto fail; | 1219 | goto free_umem; |
1189 | } | 1220 | } |
1190 | } | 1221 | } |
1191 | 1222 | ||
@@ -1213,6 +1244,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, | |||
1213 | return &qp->ib_qp; | 1244 | return &qp->ib_qp; |
1214 | qp_destroy: | 1245 | qp_destroy: |
1215 | bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); | 1246 | bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); |
1247 | free_umem: | ||
1248 | if (udata) { | ||
1249 | if (qp->rumem) | ||
1250 | ib_umem_release(qp->rumem); | ||
1251 | if (qp->sumem) | ||
1252 | ib_umem_release(qp->sumem); | ||
1253 | } | ||
1216 | fail: | 1254 | fail: |
1217 | kfree(qp); | 1255 | kfree(qp); |
1218 | return ERR_PTR(rc); | 1256 | return ERR_PTR(rc); |
@@ -1568,6 +1606,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, | |||
1568 | int status; | 1606 | int status; |
1569 | union ib_gid sgid; | 1607 | union ib_gid sgid; |
1570 | struct ib_gid_attr sgid_attr; | 1608 | struct ib_gid_attr sgid_attr; |
1609 | unsigned int flags; | ||
1571 | u8 nw_type; | 1610 | u8 nw_type; |
1572 | 1611 | ||
1573 | qp->qplib_qp.modify_flags = 0; | 1612 | qp->qplib_qp.modify_flags = 0; |
@@ -1596,14 +1635,18 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, | |||
1596 | dev_dbg(rdev_to_dev(rdev), | 1635 | dev_dbg(rdev_to_dev(rdev), |
1597 | "Move QP = %p to flush list\n", | 1636 | "Move QP = %p to flush list\n", |
1598 | qp); | 1637 | qp); |
1638 | flags = bnxt_re_lock_cqs(qp); | ||
1599 | bnxt_qplib_add_flush_qp(&qp->qplib_qp); | 1639 | bnxt_qplib_add_flush_qp(&qp->qplib_qp); |
1640 | bnxt_re_unlock_cqs(qp, flags); | ||
1600 | } | 1641 | } |
1601 | if (!qp->sumem && | 1642 | if (!qp->sumem && |
1602 | qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { | 1643 | qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) { |
1603 | dev_dbg(rdev_to_dev(rdev), | 1644 | dev_dbg(rdev_to_dev(rdev), |
1604 | "Move QP = %p out of flush list\n", | 1645 | "Move QP = %p out of flush list\n", |
1605 | qp); | 1646 | qp); |
1606 | bnxt_qplib_del_flush_qp(&qp->qplib_qp); | 1647 | flags = bnxt_re_lock_cqs(qp); |
1648 | bnxt_qplib_clean_qp(&qp->qplib_qp); | ||
1649 | bnxt_re_unlock_cqs(qp, flags); | ||
1607 | } | 1650 | } |
1608 | } | 1651 | } |
1609 | if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { | 1652 | if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) { |
@@ -2189,10 +2232,13 @@ static int bnxt_re_build_inv_wqe(struct ib_send_wr *wr, | |||
2189 | wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; | 2232 | wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV; |
2190 | wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; | 2233 | wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey; |
2191 | 2234 | ||
2235 | /* Need unconditional fence for local invalidate | ||
2236 | * opcode to work as expected. | ||
2237 | */ | ||
2238 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; | ||
2239 | |||
2192 | if (wr->send_flags & IB_SEND_SIGNALED) | 2240 | if (wr->send_flags & IB_SEND_SIGNALED) |
2193 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; | 2241 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2194 | if (wr->send_flags & IB_SEND_FENCE) | ||
2195 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; | ||
2196 | if (wr->send_flags & IB_SEND_SOLICITED) | 2242 | if (wr->send_flags & IB_SEND_SOLICITED) |
2197 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; | 2243 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT; |
2198 | 2244 | ||
@@ -2213,8 +2259,12 @@ static int bnxt_re_build_reg_wqe(struct ib_reg_wr *wr, | |||
2213 | wqe->frmr.levels = qplib_frpl->hwq.level + 1; | 2259 | wqe->frmr.levels = qplib_frpl->hwq.level + 1; |
2214 | wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; | 2260 | wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR; |
2215 | 2261 | ||
2216 | if (wr->wr.send_flags & IB_SEND_FENCE) | 2262 | /* Need unconditional fence for reg_mr |
2217 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; | 2263 | * opcode to function as expected. |
2264 | */ | ||
2265 | |||
2266 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; | ||
2267 | |||
2218 | if (wr->wr.send_flags & IB_SEND_SIGNALED) | 2268 | if (wr->wr.send_flags & IB_SEND_SIGNALED) |
2219 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; | 2269 | wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; |
2220 | 2270 | ||
@@ -3548,7 +3598,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, | |||
3548 | int umem_pgs, page_shift, rc; | 3598 | int umem_pgs, page_shift, rc; |
3549 | 3599 | ||
3550 | if (length > BNXT_RE_MAX_MR_SIZE) { | 3600 | if (length > BNXT_RE_MAX_MR_SIZE) { |
3551 | dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n", | 3601 | dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n", |
3552 | length, BNXT_RE_MAX_MR_SIZE); | 3602 | length, BNXT_RE_MAX_MR_SIZE); |
3553 | return ERR_PTR(-ENOMEM); | 3603 | return ERR_PTR(-ENOMEM); |
3554 | } | 3604 | } |
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 423ebe012f95..e62b7c2c7da6 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h | |||
@@ -89,6 +89,8 @@ struct bnxt_re_qp { | |||
89 | /* QP1 */ | 89 | /* QP1 */ |
90 | u32 send_psn; | 90 | u32 send_psn; |
91 | struct ib_ud_header qp1_hdr; | 91 | struct ib_ud_header qp1_hdr; |
92 | struct bnxt_re_cq *scq; | ||
93 | struct bnxt_re_cq *rcq; | ||
92 | }; | 94 | }; |
93 | 95 | ||
94 | struct bnxt_re_cq { | 96 | struct bnxt_re_cq { |
@@ -220,4 +222,7 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev, | |||
220 | struct ib_udata *udata); | 222 | struct ib_udata *udata); |
221 | int bnxt_re_dealloc_ucontext(struct ib_ucontext *context); | 223 | int bnxt_re_dealloc_ucontext(struct ib_ucontext *context); |
222 | int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); | 224 | int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); |
225 | |||
226 | unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp); | ||
227 | void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags); | ||
223 | #endif /* __BNXT_RE_IB_VERBS_H__ */ | 228 | #endif /* __BNXT_RE_IB_VERBS_H__ */ |
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 508d00a5a106..f6e361750466 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c | |||
@@ -656,7 +656,6 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev) | |||
656 | mutex_unlock(&bnxt_re_dev_lock); | 656 | mutex_unlock(&bnxt_re_dev_lock); |
657 | 657 | ||
658 | synchronize_rcu(); | 658 | synchronize_rcu(); |
659 | flush_workqueue(bnxt_re_wq); | ||
660 | 659 | ||
661 | ib_dealloc_device(&rdev->ibdev); | 660 | ib_dealloc_device(&rdev->ibdev); |
662 | /* rdev is gone */ | 661 | /* rdev is gone */ |
@@ -731,6 +730,13 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, | |||
731 | struct bnxt_re_qp *qp) | 730 | struct bnxt_re_qp *qp) |
732 | { | 731 | { |
733 | struct ib_event event; | 732 | struct ib_event event; |
733 | unsigned int flags; | ||
734 | |||
735 | if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { | ||
736 | flags = bnxt_re_lock_cqs(qp); | ||
737 | bnxt_qplib_add_flush_qp(&qp->qplib_qp); | ||
738 | bnxt_re_unlock_cqs(qp, flags); | ||
739 | } | ||
734 | 740 | ||
735 | memset(&event, 0, sizeof(event)); | 741 | memset(&event, 0, sizeof(event)); |
736 | if (qp->qplib_qp.srq) { | 742 | if (qp->qplib_qp.srq) { |
@@ -1417,9 +1423,12 @@ static void bnxt_re_task(struct work_struct *work) | |||
1417 | switch (re_work->event) { | 1423 | switch (re_work->event) { |
1418 | case NETDEV_REGISTER: | 1424 | case NETDEV_REGISTER: |
1419 | rc = bnxt_re_ib_reg(rdev); | 1425 | rc = bnxt_re_ib_reg(rdev); |
1420 | if (rc) | 1426 | if (rc) { |
1421 | dev_err(rdev_to_dev(rdev), | 1427 | dev_err(rdev_to_dev(rdev), |
1422 | "Failed to register with IB: %#x", rc); | 1428 | "Failed to register with IB: %#x", rc); |
1429 | bnxt_re_remove_one(rdev); | ||
1430 | bnxt_re_dev_unreg(rdev); | ||
1431 | } | ||
1423 | break; | 1432 | break; |
1424 | case NETDEV_UP: | 1433 | case NETDEV_UP: |
1425 | bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, | 1434 | bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, |
@@ -1441,7 +1450,7 @@ static void bnxt_re_task(struct work_struct *work) | |||
1441 | break; | 1450 | break; |
1442 | } | 1451 | } |
1443 | smp_mb__before_atomic(); | 1452 | smp_mb__before_atomic(); |
1444 | clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); | 1453 | atomic_dec(&rdev->sched_count); |
1445 | kfree(re_work); | 1454 | kfree(re_work); |
1446 | } | 1455 | } |
1447 | 1456 | ||
@@ -1503,7 +1512,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, | |||
1503 | /* netdev notifier will call NETDEV_UNREGISTER again later since | 1512 | /* netdev notifier will call NETDEV_UNREGISTER again later since |
1504 | * we are still holding the reference to the netdev | 1513 | * we are still holding the reference to the netdev |
1505 | */ | 1514 | */ |
1506 | if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags)) | 1515 | if (atomic_read(&rdev->sched_count) > 0) |
1507 | goto exit; | 1516 | goto exit; |
1508 | bnxt_re_ib_unreg(rdev, false); | 1517 | bnxt_re_ib_unreg(rdev, false); |
1509 | bnxt_re_remove_one(rdev); | 1518 | bnxt_re_remove_one(rdev); |
@@ -1523,7 +1532,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, | |||
1523 | re_work->vlan_dev = (real_dev == netdev ? | 1532 | re_work->vlan_dev = (real_dev == netdev ? |
1524 | NULL : netdev); | 1533 | NULL : netdev); |
1525 | INIT_WORK(&re_work->work, bnxt_re_task); | 1534 | INIT_WORK(&re_work->work, bnxt_re_task); |
1526 | set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags); | 1535 | atomic_inc(&rdev->sched_count); |
1527 | queue_work(bnxt_re_wq, &re_work->work); | 1536 | queue_work(bnxt_re_wq, &re_work->work); |
1528 | } | 1537 | } |
1529 | } | 1538 | } |
@@ -1578,6 +1587,11 @@ static void __exit bnxt_re_mod_exit(void) | |||
1578 | */ | 1587 | */ |
1579 | list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) { | 1588 | list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) { |
1580 | dev_info(rdev_to_dev(rdev), "Unregistering Device"); | 1589 | dev_info(rdev_to_dev(rdev), "Unregistering Device"); |
1590 | /* | ||
1591 | * Flush out any scheduled tasks before destroying the | ||
1592 | * resources | ||
1593 | */ | ||
1594 | flush_workqueue(bnxt_re_wq); | ||
1581 | bnxt_re_dev_stop(rdev); | 1595 | bnxt_re_dev_stop(rdev); |
1582 | bnxt_re_ib_unreg(rdev, true); | 1596 | bnxt_re_ib_unreg(rdev, true); |
1583 | bnxt_re_remove_one(rdev); | 1597 | bnxt_re_remove_one(rdev); |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 1b0e94697fe3..3a78faba8d91 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c | |||
@@ -88,75 +88,35 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) | |||
88 | } | 88 | } |
89 | } | 89 | } |
90 | 90 | ||
91 | void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, | 91 | static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp, |
92 | unsigned long *flags) | 92 | unsigned long *flags) |
93 | __acquires(&qp->scq->hwq.lock) __acquires(&qp->rcq->hwq.lock) | 93 | __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock) |
94 | { | 94 | { |
95 | spin_lock_irqsave(&qp->scq->hwq.lock, *flags); | 95 | spin_lock_irqsave(&qp->scq->flush_lock, *flags); |
96 | if (qp->scq == qp->rcq) | 96 | if (qp->scq == qp->rcq) |
97 | __acquire(&qp->rcq->hwq.lock); | 97 | __acquire(&qp->rcq->flush_lock); |
98 | else | 98 | else |
99 | spin_lock(&qp->rcq->hwq.lock); | 99 | spin_lock(&qp->rcq->flush_lock); |
100 | } | 100 | } |
101 | 101 | ||
102 | void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, | 102 | static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp, |
103 | unsigned long *flags) | 103 | unsigned long *flags) |
104 | __releases(&qp->scq->hwq.lock) __releases(&qp->rcq->hwq.lock) | 104 | __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock) |
105 | { | 105 | { |
106 | if (qp->scq == qp->rcq) | 106 | if (qp->scq == qp->rcq) |
107 | __release(&qp->rcq->hwq.lock); | 107 | __release(&qp->rcq->flush_lock); |
108 | else | 108 | else |
109 | spin_unlock(&qp->rcq->hwq.lock); | 109 | spin_unlock(&qp->rcq->flush_lock); |
110 | spin_unlock_irqrestore(&qp->scq->hwq.lock, *flags); | 110 | spin_unlock_irqrestore(&qp->scq->flush_lock, *flags); |
111 | } | ||
112 | |||
113 | static struct bnxt_qplib_cq *bnxt_qplib_find_buddy_cq(struct bnxt_qplib_qp *qp, | ||
114 | struct bnxt_qplib_cq *cq) | ||
115 | { | ||
116 | struct bnxt_qplib_cq *buddy_cq = NULL; | ||
117 | |||
118 | if (qp->scq == qp->rcq) | ||
119 | buddy_cq = NULL; | ||
120 | else if (qp->scq == cq) | ||
121 | buddy_cq = qp->rcq; | ||
122 | else | ||
123 | buddy_cq = qp->scq; | ||
124 | return buddy_cq; | ||
125 | } | ||
126 | |||
127 | static void bnxt_qplib_lock_buddy_cq(struct bnxt_qplib_qp *qp, | ||
128 | struct bnxt_qplib_cq *cq) | ||
129 | __acquires(&buddy_cq->hwq.lock) | ||
130 | { | ||
131 | struct bnxt_qplib_cq *buddy_cq = NULL; | ||
132 | |||
133 | buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq); | ||
134 | if (!buddy_cq) | ||
135 | __acquire(&cq->hwq.lock); | ||
136 | else | ||
137 | spin_lock(&buddy_cq->hwq.lock); | ||
138 | } | ||
139 | |||
140 | static void bnxt_qplib_unlock_buddy_cq(struct bnxt_qplib_qp *qp, | ||
141 | struct bnxt_qplib_cq *cq) | ||
142 | __releases(&buddy_cq->hwq.lock) | ||
143 | { | ||
144 | struct bnxt_qplib_cq *buddy_cq = NULL; | ||
145 | |||
146 | buddy_cq = bnxt_qplib_find_buddy_cq(qp, cq); | ||
147 | if (!buddy_cq) | ||
148 | __release(&cq->hwq.lock); | ||
149 | else | ||
150 | spin_unlock(&buddy_cq->hwq.lock); | ||
151 | } | 111 | } |
152 | 112 | ||
153 | void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) | 113 | void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp) |
154 | { | 114 | { |
155 | unsigned long flags; | 115 | unsigned long flags; |
156 | 116 | ||
157 | bnxt_qplib_acquire_cq_locks(qp, &flags); | 117 | bnxt_qplib_acquire_cq_flush_locks(qp, &flags); |
158 | __bnxt_qplib_add_flush_qp(qp); | 118 | __bnxt_qplib_add_flush_qp(qp); |
159 | bnxt_qplib_release_cq_locks(qp, &flags); | 119 | bnxt_qplib_release_cq_flush_locks(qp, &flags); |
160 | } | 120 | } |
161 | 121 | ||
162 | static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) | 122 | static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) |
@@ -173,11 +133,11 @@ static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) | |||
173 | } | 133 | } |
174 | } | 134 | } |
175 | 135 | ||
176 | void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) | 136 | void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp) |
177 | { | 137 | { |
178 | unsigned long flags; | 138 | unsigned long flags; |
179 | 139 | ||
180 | bnxt_qplib_acquire_cq_locks(qp, &flags); | 140 | bnxt_qplib_acquire_cq_flush_locks(qp, &flags); |
181 | __clean_cq(qp->scq, (u64)(unsigned long)qp); | 141 | __clean_cq(qp->scq, (u64)(unsigned long)qp); |
182 | qp->sq.hwq.prod = 0; | 142 | qp->sq.hwq.prod = 0; |
183 | qp->sq.hwq.cons = 0; | 143 | qp->sq.hwq.cons = 0; |
@@ -186,7 +146,7 @@ void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp) | |||
186 | qp->rq.hwq.cons = 0; | 146 | qp->rq.hwq.cons = 0; |
187 | 147 | ||
188 | __bnxt_qplib_del_flush_qp(qp); | 148 | __bnxt_qplib_del_flush_qp(qp); |
189 | bnxt_qplib_release_cq_locks(qp, &flags); | 149 | bnxt_qplib_release_cq_flush_locks(qp, &flags); |
190 | } | 150 | } |
191 | 151 | ||
192 | static void bnxt_qpn_cqn_sched_task(struct work_struct *work) | 152 | static void bnxt_qpn_cqn_sched_task(struct work_struct *work) |
@@ -283,7 +243,7 @@ static void bnxt_qplib_service_nq(unsigned long data) | |||
283 | u32 sw_cons, raw_cons; | 243 | u32 sw_cons, raw_cons; |
284 | u16 type; | 244 | u16 type; |
285 | int budget = nq->budget; | 245 | int budget = nq->budget; |
286 | u64 q_handle; | 246 | uintptr_t q_handle; |
287 | 247 | ||
288 | /* Service the NQ until empty */ | 248 | /* Service the NQ until empty */ |
289 | raw_cons = hwq->cons; | 249 | raw_cons = hwq->cons; |
@@ -566,7 +526,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res, | |||
566 | 526 | ||
567 | /* Configure the request */ | 527 | /* Configure the request */ |
568 | req.dpi = cpu_to_le32(srq->dpi->dpi); | 528 | req.dpi = cpu_to_le32(srq->dpi->dpi); |
569 | req.srq_handle = cpu_to_le64(srq); | 529 | req.srq_handle = cpu_to_le64((uintptr_t)srq); |
570 | 530 | ||
571 | req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements); | 531 | req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements); |
572 | pbl = &srq->hwq.pbl[PBL_LVL_0]; | 532 | pbl = &srq->hwq.pbl[PBL_LVL_0]; |
@@ -1419,7 +1379,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, | |||
1419 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; | 1379 | struct bnxt_qplib_rcfw *rcfw = res->rcfw; |
1420 | struct cmdq_destroy_qp req; | 1380 | struct cmdq_destroy_qp req; |
1421 | struct creq_destroy_qp_resp resp; | 1381 | struct creq_destroy_qp_resp resp; |
1422 | unsigned long flags; | ||
1423 | u16 cmd_flags = 0; | 1382 | u16 cmd_flags = 0; |
1424 | int rc; | 1383 | int rc; |
1425 | 1384 | ||
@@ -1437,19 +1396,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, | |||
1437 | return rc; | 1396 | return rc; |
1438 | } | 1397 | } |
1439 | 1398 | ||
1440 | /* Must walk the associated CQs to nullified the QP ptr */ | 1399 | return 0; |
1441 | spin_lock_irqsave(&qp->scq->hwq.lock, flags); | 1400 | } |
1442 | |||
1443 | __clean_cq(qp->scq, (u64)(unsigned long)qp); | ||
1444 | |||
1445 | if (qp->rcq && qp->rcq != qp->scq) { | ||
1446 | spin_lock(&qp->rcq->hwq.lock); | ||
1447 | __clean_cq(qp->rcq, (u64)(unsigned long)qp); | ||
1448 | spin_unlock(&qp->rcq->hwq.lock); | ||
1449 | } | ||
1450 | |||
1451 | spin_unlock_irqrestore(&qp->scq->hwq.lock, flags); | ||
1452 | 1401 | ||
1402 | void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, | ||
1403 | struct bnxt_qplib_qp *qp) | ||
1404 | { | ||
1453 | bnxt_qplib_free_qp_hdr_buf(res, qp); | 1405 | bnxt_qplib_free_qp_hdr_buf(res, qp); |
1454 | bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq); | 1406 | bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq); |
1455 | kfree(qp->sq.swq); | 1407 | kfree(qp->sq.swq); |
@@ -1462,7 +1414,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, | |||
1462 | if (qp->orrq.max_elements) | 1414 | if (qp->orrq.max_elements) |
1463 | bnxt_qplib_free_hwq(res->pdev, &qp->orrq); | 1415 | bnxt_qplib_free_hwq(res->pdev, &qp->orrq); |
1464 | 1416 | ||
1465 | return 0; | ||
1466 | } | 1417 | } |
1467 | 1418 | ||
1468 | void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, | 1419 | void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, |
@@ -2116,9 +2067,6 @@ void bnxt_qplib_mark_qp_error(void *qp_handle) | |||
2116 | /* Must block new posting of SQ and RQ */ | 2067 | /* Must block new posting of SQ and RQ */ |
2117 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; | 2068 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; |
2118 | bnxt_qplib_cancel_phantom_processing(qp); | 2069 | bnxt_qplib_cancel_phantom_processing(qp); |
2119 | |||
2120 | /* Add qp to flush list of the CQ */ | ||
2121 | __bnxt_qplib_add_flush_qp(qp); | ||
2122 | } | 2070 | } |
2123 | 2071 | ||
2124 | /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) | 2072 | /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) |
@@ -2294,9 +2242,9 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, | |||
2294 | sw_sq_cons, cqe->wr_id, cqe->status); | 2242 | sw_sq_cons, cqe->wr_id, cqe->status); |
2295 | cqe++; | 2243 | cqe++; |
2296 | (*budget)--; | 2244 | (*budget)--; |
2297 | bnxt_qplib_lock_buddy_cq(qp, cq); | ||
2298 | bnxt_qplib_mark_qp_error(qp); | 2245 | bnxt_qplib_mark_qp_error(qp); |
2299 | bnxt_qplib_unlock_buddy_cq(qp, cq); | 2246 | /* Add qp to flush list of the CQ */ |
2247 | bnxt_qplib_add_flush_qp(qp); | ||
2300 | } else { | 2248 | } else { |
2301 | if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { | 2249 | if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { |
2302 | /* Before we complete, do WA 9060 */ | 2250 | /* Before we complete, do WA 9060 */ |
@@ -2412,9 +2360,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq, | |||
2412 | if (hwcqe->status != CQ_RES_RC_STATUS_OK) { | 2360 | if (hwcqe->status != CQ_RES_RC_STATUS_OK) { |
2413 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; | 2361 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; |
2414 | /* Add qp to flush list of the CQ */ | 2362 | /* Add qp to flush list of the CQ */ |
2415 | bnxt_qplib_lock_buddy_cq(qp, cq); | 2363 | bnxt_qplib_add_flush_qp(qp); |
2416 | __bnxt_qplib_add_flush_qp(qp); | ||
2417 | bnxt_qplib_unlock_buddy_cq(qp, cq); | ||
2418 | } | 2364 | } |
2419 | } | 2365 | } |
2420 | 2366 | ||
@@ -2498,9 +2444,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq, | |||
2498 | if (hwcqe->status != CQ_RES_RC_STATUS_OK) { | 2444 | if (hwcqe->status != CQ_RES_RC_STATUS_OK) { |
2499 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; | 2445 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; |
2500 | /* Add qp to flush list of the CQ */ | 2446 | /* Add qp to flush list of the CQ */ |
2501 | bnxt_qplib_lock_buddy_cq(qp, cq); | 2447 | bnxt_qplib_add_flush_qp(qp); |
2502 | __bnxt_qplib_add_flush_qp(qp); | ||
2503 | bnxt_qplib_unlock_buddy_cq(qp, cq); | ||
2504 | } | 2448 | } |
2505 | } | 2449 | } |
2506 | done: | 2450 | done: |
@@ -2510,11 +2454,9 @@ done: | |||
2510 | bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) | 2454 | bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) |
2511 | { | 2455 | { |
2512 | struct cq_base *hw_cqe, **hw_cqe_ptr; | 2456 | struct cq_base *hw_cqe, **hw_cqe_ptr; |
2513 | unsigned long flags; | ||
2514 | u32 sw_cons, raw_cons; | 2457 | u32 sw_cons, raw_cons; |
2515 | bool rc = true; | 2458 | bool rc = true; |
2516 | 2459 | ||
2517 | spin_lock_irqsave(&cq->hwq.lock, flags); | ||
2518 | raw_cons = cq->hwq.cons; | 2460 | raw_cons = cq->hwq.cons; |
2519 | sw_cons = HWQ_CMP(raw_cons, &cq->hwq); | 2461 | sw_cons = HWQ_CMP(raw_cons, &cq->hwq); |
2520 | hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; | 2462 | hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; |
@@ -2522,7 +2464,6 @@ bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) | |||
2522 | 2464 | ||
2523 | /* Check for Valid bit. If the CQE is valid, return false */ | 2465 | /* Check for Valid bit. If the CQE is valid, return false */ |
2524 | rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements); | 2466 | rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements); |
2525 | spin_unlock_irqrestore(&cq->hwq.lock, flags); | ||
2526 | return rc; | 2467 | return rc; |
2527 | } | 2468 | } |
2528 | 2469 | ||
@@ -2611,9 +2552,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq, | |||
2611 | if (hwcqe->status != CQ_RES_RC_STATUS_OK) { | 2552 | if (hwcqe->status != CQ_RES_RC_STATUS_OK) { |
2612 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; | 2553 | qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; |
2613 | /* Add qp to flush list of the CQ */ | 2554 | /* Add qp to flush list of the CQ */ |
2614 | bnxt_qplib_lock_buddy_cq(qp, cq); | 2555 | bnxt_qplib_add_flush_qp(qp); |
2615 | __bnxt_qplib_add_flush_qp(qp); | ||
2616 | bnxt_qplib_unlock_buddy_cq(qp, cq); | ||
2617 | } | 2556 | } |
2618 | } | 2557 | } |
2619 | 2558 | ||
@@ -2728,9 +2667,7 @@ do_rq: | |||
2728 | */ | 2667 | */ |
2729 | 2668 | ||
2730 | /* Add qp to flush list of the CQ */ | 2669 | /* Add qp to flush list of the CQ */ |
2731 | bnxt_qplib_lock_buddy_cq(qp, cq); | 2670 | bnxt_qplib_add_flush_qp(qp); |
2732 | __bnxt_qplib_add_flush_qp(qp); | ||
2733 | bnxt_qplib_unlock_buddy_cq(qp, cq); | ||
2734 | done: | 2671 | done: |
2735 | return rc; | 2672 | return rc; |
2736 | } | 2673 | } |
@@ -2759,7 +2696,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, | |||
2759 | u32 budget = num_cqes; | 2696 | u32 budget = num_cqes; |
2760 | unsigned long flags; | 2697 | unsigned long flags; |
2761 | 2698 | ||
2762 | spin_lock_irqsave(&cq->hwq.lock, flags); | 2699 | spin_lock_irqsave(&cq->flush_lock, flags); |
2763 | list_for_each_entry(qp, &cq->sqf_head, sq_flush) { | 2700 | list_for_each_entry(qp, &cq->sqf_head, sq_flush) { |
2764 | dev_dbg(&cq->hwq.pdev->dev, | 2701 | dev_dbg(&cq->hwq.pdev->dev, |
2765 | "QPLIB: FP: Flushing SQ QP= %p", | 2702 | "QPLIB: FP: Flushing SQ QP= %p", |
@@ -2773,7 +2710,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, | |||
2773 | qp); | 2710 | qp); |
2774 | __flush_rq(&qp->rq, qp, &cqe, &budget); | 2711 | __flush_rq(&qp->rq, qp, &cqe, &budget); |
2775 | } | 2712 | } |
2776 | spin_unlock_irqrestore(&cq->hwq.lock, flags); | 2713 | spin_unlock_irqrestore(&cq->flush_lock, flags); |
2777 | 2714 | ||
2778 | return num_cqes - budget; | 2715 | return num_cqes - budget; |
2779 | } | 2716 | } |
@@ -2782,11 +2719,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, | |||
2782 | int num_cqes, struct bnxt_qplib_qp **lib_qp) | 2719 | int num_cqes, struct bnxt_qplib_qp **lib_qp) |
2783 | { | 2720 | { |
2784 | struct cq_base *hw_cqe, **hw_cqe_ptr; | 2721 | struct cq_base *hw_cqe, **hw_cqe_ptr; |
2785 | unsigned long flags; | ||
2786 | u32 sw_cons, raw_cons; | 2722 | u32 sw_cons, raw_cons; |
2787 | int budget, rc = 0; | 2723 | int budget, rc = 0; |
2788 | 2724 | ||
2789 | spin_lock_irqsave(&cq->hwq.lock, flags); | ||
2790 | raw_cons = cq->hwq.cons; | 2725 | raw_cons = cq->hwq.cons; |
2791 | budget = num_cqes; | 2726 | budget = num_cqes; |
2792 | 2727 | ||
@@ -2862,20 +2797,15 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, | |||
2862 | bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ); | 2797 | bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ); |
2863 | } | 2798 | } |
2864 | exit: | 2799 | exit: |
2865 | spin_unlock_irqrestore(&cq->hwq.lock, flags); | ||
2866 | return num_cqes - budget; | 2800 | return num_cqes - budget; |
2867 | } | 2801 | } |
2868 | 2802 | ||
2869 | void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) | 2803 | void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type) |
2870 | { | 2804 | { |
2871 | unsigned long flags; | ||
2872 | |||
2873 | spin_lock_irqsave(&cq->hwq.lock, flags); | ||
2874 | if (arm_type) | 2805 | if (arm_type) |
2875 | bnxt_qplib_arm_cq(cq, arm_type); | 2806 | bnxt_qplib_arm_cq(cq, arm_type); |
2876 | /* Using cq->arm_state variable to track whether to issue cq handler */ | 2807 | /* Using cq->arm_state variable to track whether to issue cq handler */ |
2877 | atomic_set(&cq->arm_state, 1); | 2808 | atomic_set(&cq->arm_state, 1); |
2878 | spin_unlock_irqrestore(&cq->hwq.lock, flags); | ||
2879 | } | 2809 | } |
2880 | 2810 | ||
2881 | void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) | 2811 | void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp) |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 211b27a8f9e2..ade9f13c0fd1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h | |||
@@ -389,6 +389,18 @@ struct bnxt_qplib_cq { | |||
389 | struct list_head sqf_head, rqf_head; | 389 | struct list_head sqf_head, rqf_head; |
390 | atomic_t arm_state; | 390 | atomic_t arm_state; |
391 | spinlock_t compl_lock; /* synch CQ handlers */ | 391 | spinlock_t compl_lock; /* synch CQ handlers */ |
392 | /* Locking Notes: | ||
393 | * QP can move to error state from modify_qp, async error event or error | ||
394 | * CQE as part of poll_cq. When QP is moved to error state, it gets added | ||
395 | * to two flush lists, one each for SQ and RQ. | ||
396 | * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq | ||
397 | * flush_locks should be acquired when QP is moved to error. The control path | ||
398 | * operations(modify_qp and async error events) are synchronized with poll_cq | ||
399 | * using upper level CQ locks (bnxt_re_cq->cq_lock) of both SCQ and RCQ. | ||
400 | * The qplib_cq->flush_lock is required to synchronize two instances of poll_cq | ||
401 | * of the same QP while manipulating the flush list. | ||
402 | */ | ||
403 | spinlock_t flush_lock; /* QP flush management */ | ||
392 | }; | 404 | }; |
393 | 405 | ||
394 | #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq) | 406 | #define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq) |
@@ -478,6 +490,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); | |||
478 | int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); | 490 | int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); |
479 | int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); | 491 | int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); |
480 | int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); | 492 | int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp); |
493 | void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp); | ||
494 | void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, | ||
495 | struct bnxt_qplib_qp *qp); | ||
481 | void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, | 496 | void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp, |
482 | struct bnxt_qplib_sge *sge); | 497 | struct bnxt_qplib_sge *sge); |
483 | void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp, | 498 | void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp, |
@@ -500,7 +515,6 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); | |||
500 | void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); | 515 | void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); |
501 | int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); | 516 | int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); |
502 | void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp); | 517 | void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp); |
503 | void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp); | ||
504 | void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, | 518 | void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp, |
505 | unsigned long *flags); | 519 | unsigned long *flags); |
506 | void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, | 520 | void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp, |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 8329ec6a7946..80027a494730 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | |||
@@ -305,9 +305,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, | |||
305 | err_event->res_err_state_reason); | 305 | err_event->res_err_state_reason); |
306 | if (!qp) | 306 | if (!qp) |
307 | break; | 307 | break; |
308 | bnxt_qplib_acquire_cq_locks(qp, &flags); | ||
309 | bnxt_qplib_mark_qp_error(qp); | 308 | bnxt_qplib_mark_qp_error(qp); |
310 | bnxt_qplib_release_cq_locks(qp, &flags); | 309 | rcfw->aeq_handler(rcfw, qp_event, qp); |
311 | break; | 310 | break; |
312 | default: | 311 | default: |
313 | /* Command Response */ | 312 | /* Command Response */ |
@@ -460,7 +459,11 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, | |||
460 | int rc; | 459 | int rc; |
461 | 460 | ||
462 | RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); | 461 | RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); |
463 | 462 | /* Supply (log-base-2-of-host-page-size - base-page-shift) | |
463 | * to bono to adjust the doorbell page sizes. | ||
464 | */ | ||
465 | req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT - | ||
466 | RCFW_DBR_BASE_PAGE_SHIFT); | ||
464 | /* | 467 | /* |
465 | * VFs need not setup the HW context area, PF | 468 | * VFs need not setup the HW context area, PF |
466 | * shall setup this area for VF. Skipping the | 469 | * shall setup this area for VF. Skipping the |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 6bee6e3636ea..c7cce2e4185e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | |||
@@ -49,6 +49,7 @@ | |||
49 | #define RCFW_COMM_SIZE 0x104 | 49 | #define RCFW_COMM_SIZE 0x104 |
50 | 50 | ||
51 | #define RCFW_DBR_PCI_BAR_REGION 2 | 51 | #define RCFW_DBR_PCI_BAR_REGION 2 |
52 | #define RCFW_DBR_BASE_PAGE_SHIFT 12 | ||
52 | 53 | ||
53 | #define RCFW_CMD_PREP(req, CMD, cmd_flags) \ | 54 | #define RCFW_CMD_PREP(req, CMD, cmd_flags) \ |
54 | do { \ | 55 | do { \ |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index c015c1861351..ee98e5efef84 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c | |||
@@ -52,18 +52,6 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0, | |||
52 | 52 | ||
53 | /* Device */ | 53 | /* Device */ |
54 | 54 | ||
55 | static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw) | ||
56 | { | ||
57 | int rc; | ||
58 | u16 pcie_ctl2; | ||
59 | |||
60 | rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, | ||
61 | &pcie_ctl2); | ||
62 | if (rc) | ||
63 | return false; | ||
64 | return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ); | ||
65 | } | ||
66 | |||
67 | static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw, | 55 | static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw, |
68 | char *fw_ver) | 56 | char *fw_ver) |
69 | { | 57 | { |
@@ -151,7 +139,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, | |||
151 | attr->max_pkey = le32_to_cpu(sb->max_pkeys); | 139 | attr->max_pkey = le32_to_cpu(sb->max_pkeys); |
152 | 140 | ||
153 | attr->max_inline_data = le32_to_cpu(sb->max_inline_data); | 141 | attr->max_inline_data = le32_to_cpu(sb->max_inline_data); |
154 | attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE; | 142 | attr->l2_db_size = (sb->l2_db_space_size + 1) * |
143 | (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); | ||
155 | attr->max_sgid = le32_to_cpu(sb->max_gid); | 144 | attr->max_sgid = le32_to_cpu(sb->max_gid); |
156 | 145 | ||
157 | bnxt_qplib_query_version(rcfw, attr->fw_ver); | 146 | bnxt_qplib_query_version(rcfw, attr->fw_ver); |
@@ -165,7 +154,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, | |||
165 | attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); | 154 | attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); |
166 | } | 155 | } |
167 | 156 | ||
168 | attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw); | 157 | attr->is_atomic = 0; |
169 | bail: | 158 | bail: |
170 | bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); | 159 | bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); |
171 | return rc; | 160 | return rc; |
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h index 2d7ea096a247..3e5a4f760d0e 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h | |||
@@ -1761,7 +1761,30 @@ struct cmdq_initialize_fw { | |||
1761 | #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) | 1761 | #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_2M (0x3UL << 4) |
1762 | #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) | 1762 | #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_8M (0x4UL << 4) |
1763 | #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) | 1763 | #define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (0x5UL << 4) |
1764 | __le16 reserved16; | 1764 | /* This value is (log-base-2-of-DBR-page-size - 12). |
1765 | * 0 for 4KB. HW supported values are enumerated below. | ||
1766 | */ | ||
1767 | __le16 log2_dbr_pg_size; | ||
1768 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK 0xfUL | ||
1769 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0 | ||
1770 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4K 0x0UL | ||
1771 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8K 0x1UL | ||
1772 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16K 0x2UL | ||
1773 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32K 0x3UL | ||
1774 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64K 0x4UL | ||
1775 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128K 0x5UL | ||
1776 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_256K 0x6UL | ||
1777 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_512K 0x7UL | ||
1778 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_1M 0x8UL | ||
1779 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_2M 0x9UL | ||
1780 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_4M 0xaUL | ||
1781 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_8M 0xbUL | ||
1782 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_16M 0xcUL | ||
1783 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_32M 0xdUL | ||
1784 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_64M 0xeUL | ||
1785 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M 0xfUL | ||
1786 | #define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_LAST \ | ||
1787 | CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_PG_128M | ||
1765 | __le64 qpc_page_dir; | 1788 | __le64 qpc_page_dir; |
1766 | __le64 mrw_page_dir; | 1789 | __le64 mrw_page_dir; |
1767 | __le64 srq_page_dir; | 1790 | __le64 srq_page_dir; |
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 9a566ee3ceff..82adc0d1d30e 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -601,6 +601,7 @@ static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct | |||
601 | wc->dlid_path_bits = 0; | 601 | wc->dlid_path_bits = 0; |
602 | 602 | ||
603 | if (is_eth) { | 603 | if (is_eth) { |
604 | wc->slid = 0; | ||
604 | wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); | 605 | wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid); |
605 | memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); | 606 | memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4); |
606 | memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); | 607 | memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2); |
@@ -851,7 +852,6 @@ repoll: | |||
851 | } | 852 | } |
852 | } | 853 | } |
853 | 854 | ||
854 | wc->slid = be16_to_cpu(cqe->rlid); | ||
855 | g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); | 855 | g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); |
856 | wc->src_qp = g_mlpath_rqpn & 0xffffff; | 856 | wc->src_qp = g_mlpath_rqpn & 0xffffff; |
857 | wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; | 857 | wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f; |
@@ -860,6 +860,7 @@ repoll: | |||
860 | wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, | 860 | wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, |
861 | cqe->checksum) ? IB_WC_IP_CSUM_OK : 0; | 861 | cqe->checksum) ? IB_WC_IP_CSUM_OK : 0; |
862 | if (is_eth) { | 862 | if (is_eth) { |
863 | wc->slid = 0; | ||
863 | wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; | 864 | wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; |
864 | if (be32_to_cpu(cqe->vlan_my_qpn) & | 865 | if (be32_to_cpu(cqe->vlan_my_qpn) & |
865 | MLX4_CQE_CVLAN_PRESENT_MASK) { | 866 | MLX4_CQE_CVLAN_PRESENT_MASK) { |
@@ -871,6 +872,7 @@ repoll: | |||
871 | memcpy(wc->smac, cqe->smac, ETH_ALEN); | 872 | memcpy(wc->smac, cqe->smac, ETH_ALEN); |
872 | wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); | 873 | wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC); |
873 | } else { | 874 | } else { |
875 | wc->slid = be16_to_cpu(cqe->rlid); | ||
874 | wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; | 876 | wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; |
875 | wc->vlan_id = 0xffff; | 877 | wc->vlan_id = 0xffff; |
876 | } | 878 | } |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 8d2ee9322f2e..5a0e4fc4785a 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -219,8 +219,6 @@ static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids, | |||
219 | gid_tbl[i].version = 2; | 219 | gid_tbl[i].version = 2; |
220 | if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) | 220 | if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid)) |
221 | gid_tbl[i].type = 1; | 221 | gid_tbl[i].type = 1; |
222 | else | ||
223 | memset(&gid_tbl[i].gid, 0, 12); | ||
224 | } | 222 | } |
225 | } | 223 | } |
226 | 224 | ||
@@ -366,8 +364,13 @@ static int mlx4_ib_del_gid(struct ib_device *device, | |||
366 | if (!gids) { | 364 | if (!gids) { |
367 | ret = -ENOMEM; | 365 | ret = -ENOMEM; |
368 | } else { | 366 | } else { |
369 | for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) | 367 | for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) { |
370 | memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid)); | 368 | memcpy(&gids[i].gid, |
369 | &port_gid_table->gids[i].gid, | ||
370 | sizeof(union ib_gid)); | ||
371 | gids[i].gid_type = | ||
372 | port_gid_table->gids[i].gid_type; | ||
373 | } | ||
371 | } | 374 | } |
372 | } | 375 | } |
373 | spin_unlock_bh(&iboe->lock); | 376 | spin_unlock_bh(&iboe->lock); |
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 5b974fb97611..15457c9569a7 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c | |||
@@ -226,7 +226,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, | |||
226 | wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); | 226 | wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); |
227 | break; | 227 | break; |
228 | } | 228 | } |
229 | wc->slid = be16_to_cpu(cqe->slid); | ||
230 | wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; | 229 | wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; |
231 | wc->dlid_path_bits = cqe->ml_path; | 230 | wc->dlid_path_bits = cqe->ml_path; |
232 | g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; | 231 | g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; |
@@ -241,10 +240,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, | |||
241 | } | 240 | } |
242 | 241 | ||
243 | if (ll != IB_LINK_LAYER_ETHERNET) { | 242 | if (ll != IB_LINK_LAYER_ETHERNET) { |
243 | wc->slid = be16_to_cpu(cqe->slid); | ||
244 | wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; | 244 | wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; |
245 | return; | 245 | return; |
246 | } | 246 | } |
247 | 247 | ||
248 | wc->slid = 0; | ||
248 | vlan_present = cqe->l4_l3_hdr_type & 0x1; | 249 | vlan_present = cqe->l4_l3_hdr_type & 0x1; |
249 | roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; | 250 | roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3; |
250 | if (vlan_present) { | 251 | if (vlan_present) { |
@@ -1177,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, | |||
1177 | if (ucmd.reserved0 || ucmd.reserved1) | 1178 | if (ucmd.reserved0 || ucmd.reserved1) |
1178 | return -EINVAL; | 1179 | return -EINVAL; |
1179 | 1180 | ||
1180 | umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, | 1181 | /* check multiplication overflow */ |
1182 | if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) | ||
1183 | return -EINVAL; | ||
1184 | |||
1185 | umem = ib_umem_get(context, ucmd.buf_addr, | ||
1186 | (size_t)ucmd.cqe_size * entries, | ||
1181 | IB_ACCESS_LOCAL_WRITE, 1); | 1187 | IB_ACCESS_LOCAL_WRITE, 1); |
1182 | if (IS_ERR(umem)) { | 1188 | if (IS_ERR(umem)) { |
1183 | err = PTR_ERR(umem); | 1189 | err = PTR_ERR(umem); |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 4236c8086820..da091de4e69d 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -245,12 +245,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev, | |||
245 | struct mlx5_ib_multiport_info *mpi; | 245 | struct mlx5_ib_multiport_info *mpi; |
246 | struct mlx5_ib_port *port; | 246 | struct mlx5_ib_port *port; |
247 | 247 | ||
248 | if (!mlx5_core_mp_enabled(ibdev->mdev) || | ||
249 | ll != IB_LINK_LAYER_ETHERNET) { | ||
250 | if (native_port_num) | ||
251 | *native_port_num = ib_port_num; | ||
252 | return ibdev->mdev; | ||
253 | } | ||
254 | |||
248 | if (native_port_num) | 255 | if (native_port_num) |
249 | *native_port_num = 1; | 256 | *native_port_num = 1; |
250 | 257 | ||
251 | if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET) | ||
252 | return ibdev->mdev; | ||
253 | |||
254 | port = &ibdev->port[ib_port_num - 1]; | 258 | port = &ibdev->port[ib_port_num - 1]; |
255 | if (!port) | 259 | if (!port) |
256 | return NULL; | 260 | return NULL; |
@@ -3263,7 +3267,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work) | |||
3263 | struct mlx5_ib_dev *ibdev; | 3267 | struct mlx5_ib_dev *ibdev; |
3264 | struct ib_event ibev; | 3268 | struct ib_event ibev; |
3265 | bool fatal = false; | 3269 | bool fatal = false; |
3266 | u8 port = 0; | 3270 | u8 port = (u8)work->param; |
3267 | 3271 | ||
3268 | if (mlx5_core_is_mp_slave(work->dev)) { | 3272 | if (mlx5_core_is_mp_slave(work->dev)) { |
3269 | ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); | 3273 | ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); |
@@ -3283,8 +3287,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work) | |||
3283 | case MLX5_DEV_EVENT_PORT_UP: | 3287 | case MLX5_DEV_EVENT_PORT_UP: |
3284 | case MLX5_DEV_EVENT_PORT_DOWN: | 3288 | case MLX5_DEV_EVENT_PORT_DOWN: |
3285 | case MLX5_DEV_EVENT_PORT_INITIALIZED: | 3289 | case MLX5_DEV_EVENT_PORT_INITIALIZED: |
3286 | port = (u8)work->param; | ||
3287 | |||
3288 | /* In RoCE, port up/down events are handled in | 3290 | /* In RoCE, port up/down events are handled in |
3289 | * mlx5_netdev_event(). | 3291 | * mlx5_netdev_event(). |
3290 | */ | 3292 | */ |
@@ -3298,24 +3300,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work) | |||
3298 | 3300 | ||
3299 | case MLX5_DEV_EVENT_LID_CHANGE: | 3301 | case MLX5_DEV_EVENT_LID_CHANGE: |
3300 | ibev.event = IB_EVENT_LID_CHANGE; | 3302 | ibev.event = IB_EVENT_LID_CHANGE; |
3301 | port = (u8)work->param; | ||
3302 | break; | 3303 | break; |
3303 | 3304 | ||
3304 | case MLX5_DEV_EVENT_PKEY_CHANGE: | 3305 | case MLX5_DEV_EVENT_PKEY_CHANGE: |
3305 | ibev.event = IB_EVENT_PKEY_CHANGE; | 3306 | ibev.event = IB_EVENT_PKEY_CHANGE; |
3306 | port = (u8)work->param; | ||
3307 | |||
3308 | schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); | 3307 | schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); |
3309 | break; | 3308 | break; |
3310 | 3309 | ||
3311 | case MLX5_DEV_EVENT_GUID_CHANGE: | 3310 | case MLX5_DEV_EVENT_GUID_CHANGE: |
3312 | ibev.event = IB_EVENT_GID_CHANGE; | 3311 | ibev.event = IB_EVENT_GID_CHANGE; |
3313 | port = (u8)work->param; | ||
3314 | break; | 3312 | break; |
3315 | 3313 | ||
3316 | case MLX5_DEV_EVENT_CLIENT_REREG: | 3314 | case MLX5_DEV_EVENT_CLIENT_REREG: |
3317 | ibev.event = IB_EVENT_CLIENT_REREGISTER; | 3315 | ibev.event = IB_EVENT_CLIENT_REREGISTER; |
3318 | port = (u8)work->param; | ||
3319 | break; | 3316 | break; |
3320 | case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: | 3317 | case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: |
3321 | schedule_work(&ibdev->delay_drop.delay_drop_work); | 3318 | schedule_work(&ibdev->delay_drop.delay_drop_work); |
@@ -3327,7 +3324,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work) | |||
3327 | ibev.device = &ibdev->ib_dev; | 3324 | ibev.device = &ibdev->ib_dev; |
3328 | ibev.element.port_num = port; | 3325 | ibev.element.port_num = port; |
3329 | 3326 | ||
3330 | if (port < 1 || port > ibdev->num_ports) { | 3327 | if (!rdma_is_port_valid(&ibdev->ib_dev, port)) { |
3331 | mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); | 3328 | mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); |
3332 | goto out; | 3329 | goto out; |
3333 | } | 3330 | } |
@@ -4863,19 +4860,19 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) | |||
4863 | return ib_register_device(&dev->ib_dev, NULL); | 4860 | return ib_register_device(&dev->ib_dev, NULL); |
4864 | } | 4861 | } |
4865 | 4862 | ||
4866 | static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) | 4863 | static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev) |
4867 | { | 4864 | { |
4868 | ib_unregister_device(&dev->ib_dev); | 4865 | destroy_umrc_res(dev); |
4869 | } | 4866 | } |
4870 | 4867 | ||
4871 | static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev) | 4868 | static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) |
4872 | { | 4869 | { |
4873 | return create_umr_res(dev); | 4870 | ib_unregister_device(&dev->ib_dev); |
4874 | } | 4871 | } |
4875 | 4872 | ||
4876 | static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev) | 4873 | static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev) |
4877 | { | 4874 | { |
4878 | destroy_umrc_res(dev); | 4875 | return create_umr_res(dev); |
4879 | } | 4876 | } |
4880 | 4877 | ||
4881 | static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) | 4878 | static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) |
@@ -4985,12 +4982,15 @@ static const struct mlx5_ib_profile pf_profile = { | |||
4985 | STAGE_CREATE(MLX5_IB_STAGE_BFREG, | 4982 | STAGE_CREATE(MLX5_IB_STAGE_BFREG, |
4986 | mlx5_ib_stage_bfrag_init, | 4983 | mlx5_ib_stage_bfrag_init, |
4987 | mlx5_ib_stage_bfrag_cleanup), | 4984 | mlx5_ib_stage_bfrag_cleanup), |
4985 | STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, | ||
4986 | NULL, | ||
4987 | mlx5_ib_stage_pre_ib_reg_umr_cleanup), | ||
4988 | STAGE_CREATE(MLX5_IB_STAGE_IB_REG, | 4988 | STAGE_CREATE(MLX5_IB_STAGE_IB_REG, |
4989 | mlx5_ib_stage_ib_reg_init, | 4989 | mlx5_ib_stage_ib_reg_init, |
4990 | mlx5_ib_stage_ib_reg_cleanup), | 4990 | mlx5_ib_stage_ib_reg_cleanup), |
4991 | STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES, | 4991 | STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, |
4992 | mlx5_ib_stage_umr_res_init, | 4992 | mlx5_ib_stage_post_ib_reg_umr_init, |
4993 | mlx5_ib_stage_umr_res_cleanup), | 4993 | NULL), |
4994 | STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, | 4994 | STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, |
4995 | mlx5_ib_stage_delay_drop_init, | 4995 | mlx5_ib_stage_delay_drop_init, |
4996 | mlx5_ib_stage_delay_drop_cleanup), | 4996 | mlx5_ib_stage_delay_drop_cleanup), |
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 139385129973..a5272499b600 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -739,8 +739,9 @@ enum mlx5_ib_stages { | |||
739 | MLX5_IB_STAGE_CONG_DEBUGFS, | 739 | MLX5_IB_STAGE_CONG_DEBUGFS, |
740 | MLX5_IB_STAGE_UAR, | 740 | MLX5_IB_STAGE_UAR, |
741 | MLX5_IB_STAGE_BFREG, | 741 | MLX5_IB_STAGE_BFREG, |
742 | MLX5_IB_STAGE_PRE_IB_REG_UMR, | ||
742 | MLX5_IB_STAGE_IB_REG, | 743 | MLX5_IB_STAGE_IB_REG, |
743 | MLX5_IB_STAGE_UMR_RESOURCES, | 744 | MLX5_IB_STAGE_POST_IB_REG_UMR, |
744 | MLX5_IB_STAGE_DELAY_DROP, | 745 | MLX5_IB_STAGE_DELAY_DROP, |
745 | MLX5_IB_STAGE_CLASS_ATTR, | 746 | MLX5_IB_STAGE_CLASS_ATTR, |
746 | MLX5_IB_STAGE_MAX, | 747 | MLX5_IB_STAGE_MAX, |
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 556e015678de..c51c602f06d6 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -838,7 +838,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, | |||
838 | *umem = ib_umem_get(pd->uobject->context, start, length, | 838 | *umem = ib_umem_get(pd->uobject->context, start, length, |
839 | access_flags, 0); | 839 | access_flags, 0); |
840 | err = PTR_ERR_OR_ZERO(*umem); | 840 | err = PTR_ERR_OR_ZERO(*umem); |
841 | if (err < 0) { | 841 | if (err) { |
842 | *umem = NULL; | ||
842 | mlx5_ib_err(dev, "umem get failed (%d)\n", err); | 843 | mlx5_ib_err(dev, "umem get failed (%d)\n", err); |
843 | return err; | 844 | return err; |
844 | } | 845 | } |
@@ -1415,6 +1416,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, | |||
1415 | if (err) { | 1416 | if (err) { |
1416 | mlx5_ib_warn(dev, "Failed to rereg UMR\n"); | 1417 | mlx5_ib_warn(dev, "Failed to rereg UMR\n"); |
1417 | ib_umem_release(mr->umem); | 1418 | ib_umem_release(mr->umem); |
1419 | mr->umem = NULL; | ||
1418 | clean_mr(dev, mr); | 1420 | clean_mr(dev, mr); |
1419 | return err; | 1421 | return err; |
1420 | } | 1422 | } |
@@ -1498,14 +1500,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | |||
1498 | u32 key = mr->mmkey.key; | 1500 | u32 key = mr->mmkey.key; |
1499 | 1501 | ||
1500 | err = destroy_mkey(dev, mr); | 1502 | err = destroy_mkey(dev, mr); |
1501 | kfree(mr); | ||
1502 | if (err) { | 1503 | if (err) { |
1503 | mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", | 1504 | mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", |
1504 | key, err); | 1505 | key, err); |
1505 | return err; | 1506 | return err; |
1506 | } | 1507 | } |
1507 | } else { | ||
1508 | mlx5_mr_cache_free(dev, mr); | ||
1509 | } | 1508 | } |
1510 | 1509 | ||
1511 | return 0; | 1510 | return 0; |
@@ -1548,6 +1547,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | |||
1548 | atomic_sub(npages, &dev->mdev->priv.reg_pages); | 1547 | atomic_sub(npages, &dev->mdev->priv.reg_pages); |
1549 | } | 1548 | } |
1550 | 1549 | ||
1550 | if (!mr->allocated_from_cache) | ||
1551 | kfree(mr); | ||
1552 | else | ||
1553 | mlx5_mr_cache_free(dev, mr); | ||
1554 | |||
1551 | return 0; | 1555 | return 0; |
1552 | } | 1556 | } |
1553 | 1557 | ||
@@ -1816,7 +1820,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, | |||
1816 | 1820 | ||
1817 | mr->ibmr.iova = sg_dma_address(sg) + sg_offset; | 1821 | mr->ibmr.iova = sg_dma_address(sg) + sg_offset; |
1818 | mr->ibmr.length = 0; | 1822 | mr->ibmr.length = 0; |
1819 | mr->ndescs = sg_nents; | ||
1820 | 1823 | ||
1821 | for_each_sg(sgl, sg, sg_nents, i) { | 1824 | for_each_sg(sgl, sg, sg_nents, i) { |
1822 | if (unlikely(i >= mr->max_descs)) | 1825 | if (unlikely(i >= mr->max_descs)) |
@@ -1828,6 +1831,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, | |||
1828 | 1831 | ||
1829 | sg_offset = 0; | 1832 | sg_offset = 0; |
1830 | } | 1833 | } |
1834 | mr->ndescs = i; | ||
1831 | 1835 | ||
1832 | if (sg_offset_p) | 1836 | if (sg_offset_p) |
1833 | *sg_offset_p = sg_offset; | 1837 | *sg_offset_p = sg_offset; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 39d24bf694a8..a2e1aa86e133 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -1161,7 +1161,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev, | |||
1161 | ib_umem_release(sq->ubuffer.umem); | 1161 | ib_umem_release(sq->ubuffer.umem); |
1162 | } | 1162 | } |
1163 | 1163 | ||
1164 | static int get_rq_pas_size(void *qpc) | 1164 | static size_t get_rq_pas_size(void *qpc) |
1165 | { | 1165 | { |
1166 | u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12; | 1166 | u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12; |
1167 | u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride); | 1167 | u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride); |
@@ -1177,7 +1177,8 @@ static int get_rq_pas_size(void *qpc) | |||
1177 | } | 1177 | } |
1178 | 1178 | ||
1179 | static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, | 1179 | static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, |
1180 | struct mlx5_ib_rq *rq, void *qpin) | 1180 | struct mlx5_ib_rq *rq, void *qpin, |
1181 | size_t qpinlen) | ||
1181 | { | 1182 | { |
1182 | struct mlx5_ib_qp *mqp = rq->base.container_mibqp; | 1183 | struct mlx5_ib_qp *mqp = rq->base.container_mibqp; |
1183 | __be64 *pas; | 1184 | __be64 *pas; |
@@ -1186,9 +1187,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, | |||
1186 | void *rqc; | 1187 | void *rqc; |
1187 | void *wq; | 1188 | void *wq; |
1188 | void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); | 1189 | void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); |
1189 | int inlen; | 1190 | size_t rq_pas_size = get_rq_pas_size(qpc); |
1191 | size_t inlen; | ||
1190 | int err; | 1192 | int err; |
1191 | u32 rq_pas_size = get_rq_pas_size(qpc); | 1193 | |
1194 | if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas)) | ||
1195 | return -EINVAL; | ||
1192 | 1196 | ||
1193 | inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size; | 1197 | inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size; |
1194 | in = kvzalloc(inlen, GFP_KERNEL); | 1198 | in = kvzalloc(inlen, GFP_KERNEL); |
@@ -1277,7 +1281,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev, | |||
1277 | } | 1281 | } |
1278 | 1282 | ||
1279 | static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | 1283 | static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, |
1280 | u32 *in, | 1284 | u32 *in, size_t inlen, |
1281 | struct ib_pd *pd) | 1285 | struct ib_pd *pd) |
1282 | { | 1286 | { |
1283 | struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; | 1287 | struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; |
@@ -1309,7 +1313,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
1309 | rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING; | 1313 | rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING; |
1310 | if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING) | 1314 | if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING) |
1311 | rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING; | 1315 | rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING; |
1312 | err = create_raw_packet_qp_rq(dev, rq, in); | 1316 | err = create_raw_packet_qp_rq(dev, rq, in, inlen); |
1313 | if (err) | 1317 | if (err) |
1314 | goto err_destroy_sq; | 1318 | goto err_destroy_sq; |
1315 | 1319 | ||
@@ -1584,6 +1588,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
1584 | u32 uidx = MLX5_IB_DEFAULT_UIDX; | 1588 | u32 uidx = MLX5_IB_DEFAULT_UIDX; |
1585 | struct mlx5_ib_create_qp ucmd; | 1589 | struct mlx5_ib_create_qp ucmd; |
1586 | struct mlx5_ib_qp_base *base; | 1590 | struct mlx5_ib_qp_base *base; |
1591 | int mlx5_st; | ||
1587 | void *qpc; | 1592 | void *qpc; |
1588 | u32 *in; | 1593 | u32 *in; |
1589 | int err; | 1594 | int err; |
@@ -1592,6 +1597,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
1592 | spin_lock_init(&qp->sq.lock); | 1597 | spin_lock_init(&qp->sq.lock); |
1593 | spin_lock_init(&qp->rq.lock); | 1598 | spin_lock_init(&qp->rq.lock); |
1594 | 1599 | ||
1600 | mlx5_st = to_mlx5_st(init_attr->qp_type); | ||
1601 | if (mlx5_st < 0) | ||
1602 | return -EINVAL; | ||
1603 | |||
1595 | if (init_attr->rwq_ind_tbl) { | 1604 | if (init_attr->rwq_ind_tbl) { |
1596 | if (!udata) | 1605 | if (!udata) |
1597 | return -ENOSYS; | 1606 | return -ENOSYS; |
@@ -1753,7 +1762,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
1753 | 1762 | ||
1754 | qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); | 1763 | qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); |
1755 | 1764 | ||
1756 | MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type)); | 1765 | MLX5_SET(qpc, qpc, st, mlx5_st); |
1757 | MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); | 1766 | MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); |
1758 | 1767 | ||
1759 | if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) | 1768 | if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) |
@@ -1867,11 +1876,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
1867 | } | 1876 | } |
1868 | } | 1877 | } |
1869 | 1878 | ||
1879 | if (inlen < 0) { | ||
1880 | err = -EINVAL; | ||
1881 | goto err; | ||
1882 | } | ||
1883 | |||
1870 | if (init_attr->qp_type == IB_QPT_RAW_PACKET || | 1884 | if (init_attr->qp_type == IB_QPT_RAW_PACKET || |
1871 | qp->flags & MLX5_IB_QP_UNDERLAY) { | 1885 | qp->flags & MLX5_IB_QP_UNDERLAY) { |
1872 | qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr; | 1886 | qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr; |
1873 | raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); | 1887 | raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); |
1874 | err = create_raw_packet_qp(dev, qp, in, pd); | 1888 | err = create_raw_packet_qp(dev, qp, in, inlen, pd); |
1875 | } else { | 1889 | } else { |
1876 | err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen); | 1890 | err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen); |
1877 | } | 1891 | } |
@@ -3095,8 +3109,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
3095 | goto out; | 3109 | goto out; |
3096 | 3110 | ||
3097 | if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || | 3111 | if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || |
3098 | !optab[mlx5_cur][mlx5_new]) | 3112 | !optab[mlx5_cur][mlx5_new]) { |
3113 | err = -EINVAL; | ||
3099 | goto out; | 3114 | goto out; |
3115 | } | ||
3100 | 3116 | ||
3101 | op = optab[mlx5_cur][mlx5_new]; | 3117 | op = optab[mlx5_cur][mlx5_new]; |
3102 | optpar = ib_mask_to_mlx5_opt(attr_mask); | 3118 | optpar = ib_mask_to_mlx5_opt(attr_mask); |
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 6d5fadad9090..3c7522d025f2 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c | |||
@@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | |||
241 | { | 241 | { |
242 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | 242 | struct mlx5_ib_dev *dev = to_mdev(pd->device); |
243 | struct mlx5_ib_srq *srq; | 243 | struct mlx5_ib_srq *srq; |
244 | int desc_size; | 244 | size_t desc_size; |
245 | int buf_size; | 245 | size_t buf_size; |
246 | int err; | 246 | int err; |
247 | struct mlx5_srq_attr in = {0}; | 247 | struct mlx5_srq_attr in = {0}; |
248 | __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); | 248 | __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); |
@@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | |||
266 | 266 | ||
267 | desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + | 267 | desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + |
268 | srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); | 268 | srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); |
269 | if (desc_size == 0 || srq->msrq.max_gs > desc_size) | ||
270 | return ERR_PTR(-EINVAL); | ||
269 | desc_size = roundup_pow_of_two(desc_size); | 271 | desc_size = roundup_pow_of_two(desc_size); |
270 | desc_size = max_t(int, 32, desc_size); | 272 | desc_size = max_t(size_t, 32, desc_size); |
273 | if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) | ||
274 | return ERR_PTR(-EINVAL); | ||
271 | srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / | 275 | srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / |
272 | sizeof(struct mlx5_wqe_data_seg); | 276 | sizeof(struct mlx5_wqe_data_seg); |
273 | srq->msrq.wqe_shift = ilog2(desc_size); | 277 | srq->msrq.wqe_shift = ilog2(desc_size); |
274 | buf_size = srq->msrq.max * desc_size; | 278 | buf_size = srq->msrq.max * desc_size; |
275 | mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", | 279 | if (buf_size < desc_size) |
276 | desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, | 280 | return ERR_PTR(-EINVAL); |
277 | srq->msrq.max_avail_gather); | ||
278 | in.type = init_attr->srq_type; | 281 | in.type = init_attr->srq_type; |
279 | 282 | ||
280 | if (pd->uobject) | 283 | if (pd->uobject) |
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 478b7317b80a..26dc374787f7 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c | |||
@@ -458,8 +458,7 @@ qedr_addr6_resolve(struct qedr_dev *dev, | |||
458 | } | 458 | } |
459 | return -EINVAL; | 459 | return -EINVAL; |
460 | } | 460 | } |
461 | neigh = dst_neigh_lookup(dst, &dst_in); | 461 | neigh = dst_neigh_lookup(dst, &fl6.daddr); |
462 | |||
463 | if (neigh) { | 462 | if (neigh) { |
464 | rcu_read_lock(); | 463 | rcu_read_lock(); |
465 | if (neigh->nud_state & NUD_VALID) { | 464 | if (neigh->nud_state & NUD_VALID) { |
@@ -494,10 +493,14 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
494 | 493 | ||
495 | qp = idr_find(&dev->qpidr, conn_param->qpn); | 494 | qp = idr_find(&dev->qpidr, conn_param->qpn); |
496 | 495 | ||
497 | laddr = (struct sockaddr_in *)&cm_id->local_addr; | 496 | laddr = (struct sockaddr_in *)&cm_id->m_local_addr; |
498 | raddr = (struct sockaddr_in *)&cm_id->remote_addr; | 497 | raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; |
499 | laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; | 498 | laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; |
500 | raddr6 = (struct sockaddr_in6 *)&cm_id->remote_addr; | 499 | raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; |
500 | |||
501 | DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n", | ||
502 | ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port), | ||
503 | ntohs(raddr->sin_port)); | ||
501 | 504 | ||
502 | DP_DEBUG(dev, QEDR_MSG_IWARP, | 505 | DP_DEBUG(dev, QEDR_MSG_IWARP, |
503 | "Connect source address: %pISpc, remote address: %pISpc\n", | 506 | "Connect source address: %pISpc, remote address: %pISpc\n", |
@@ -599,8 +602,8 @@ int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
599 | int rc; | 602 | int rc; |
600 | int i; | 603 | int i; |
601 | 604 | ||
602 | laddr = (struct sockaddr_in *)&cm_id->local_addr; | 605 | laddr = (struct sockaddr_in *)&cm_id->m_local_addr; |
603 | laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr; | 606 | laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; |
604 | 607 | ||
605 | DP_DEBUG(dev, QEDR_MSG_IWARP, | 608 | DP_DEBUG(dev, QEDR_MSG_IWARP, |
606 | "Create Listener address: %pISpc\n", &cm_id->local_addr); | 609 | "Create Listener address: %pISpc\n", &cm_id->local_addr); |
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 53f00dbf313f..875b17272d65 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c | |||
@@ -3034,6 +3034,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3034 | 3034 | ||
3035 | switch (wr->opcode) { | 3035 | switch (wr->opcode) { |
3036 | case IB_WR_SEND_WITH_IMM: | 3036 | case IB_WR_SEND_WITH_IMM: |
3037 | if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { | ||
3038 | rc = -EINVAL; | ||
3039 | *bad_wr = wr; | ||
3040 | break; | ||
3041 | } | ||
3037 | wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; | 3042 | wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM; |
3038 | swqe = (struct rdma_sq_send_wqe_1st *)wqe; | 3043 | swqe = (struct rdma_sq_send_wqe_1st *)wqe; |
3039 | swqe->wqe_size = 2; | 3044 | swqe->wqe_size = 2; |
@@ -3075,6 +3080,11 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3075 | break; | 3080 | break; |
3076 | 3081 | ||
3077 | case IB_WR_RDMA_WRITE_WITH_IMM: | 3082 | case IB_WR_RDMA_WRITE_WITH_IMM: |
3083 | if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) { | ||
3084 | rc = -EINVAL; | ||
3085 | *bad_wr = wr; | ||
3086 | break; | ||
3087 | } | ||
3078 | wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; | 3088 | wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM; |
3079 | rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; | 3089 | rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe; |
3080 | 3090 | ||
@@ -3724,7 +3734,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
3724 | { | 3734 | { |
3725 | struct qedr_dev *dev = get_qedr_dev(ibcq->device); | 3735 | struct qedr_dev *dev = get_qedr_dev(ibcq->device); |
3726 | struct qedr_cq *cq = get_qedr_cq(ibcq); | 3736 | struct qedr_cq *cq = get_qedr_cq(ibcq); |
3727 | union rdma_cqe *cqe = cq->latest_cqe; | 3737 | union rdma_cqe *cqe; |
3728 | u32 old_cons, new_cons; | 3738 | u32 old_cons, new_cons; |
3729 | unsigned long flags; | 3739 | unsigned long flags; |
3730 | int update = 0; | 3740 | int update = 0; |
@@ -3741,6 +3751,7 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
3741 | return qedr_gsi_poll_cq(ibcq, num_entries, wc); | 3751 | return qedr_gsi_poll_cq(ibcq, num_entries, wc); |
3742 | 3752 | ||
3743 | spin_lock_irqsave(&cq->cq_lock, flags); | 3753 | spin_lock_irqsave(&cq->cq_lock, flags); |
3754 | cqe = cq->latest_cqe; | ||
3744 | old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); | 3755 | old_cons = qed_chain_get_cons_idx_u32(&cq->pbl); |
3745 | while (num_entries && is_valid_cqe(cq, cqe)) { | 3756 | while (num_entries && is_valid_cqe(cq, cqe)) { |
3746 | struct qedr_qp *qp; | 3757 | struct qedr_qp *qp; |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index faa9478c14a6..f95b97646c25 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | |||
@@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, | |||
114 | union pvrdma_cmd_resp rsp; | 114 | union pvrdma_cmd_resp rsp; |
115 | struct pvrdma_cmd_create_cq *cmd = &req.create_cq; | 115 | struct pvrdma_cmd_create_cq *cmd = &req.create_cq; |
116 | struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp; | 116 | struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp; |
117 | struct pvrdma_create_cq_resp cq_resp = {0}; | ||
117 | struct pvrdma_create_cq ucmd; | 118 | struct pvrdma_create_cq ucmd; |
118 | 119 | ||
119 | BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64); | 120 | BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64); |
@@ -197,6 +198,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, | |||
197 | 198 | ||
198 | cq->ibcq.cqe = resp->cqe; | 199 | cq->ibcq.cqe = resp->cqe; |
199 | cq->cq_handle = resp->cq_handle; | 200 | cq->cq_handle = resp->cq_handle; |
201 | cq_resp.cqn = resp->cq_handle; | ||
200 | spin_lock_irqsave(&dev->cq_tbl_lock, flags); | 202 | spin_lock_irqsave(&dev->cq_tbl_lock, flags); |
201 | dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq; | 203 | dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq; |
202 | spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); | 204 | spin_unlock_irqrestore(&dev->cq_tbl_lock, flags); |
@@ -205,7 +207,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, | |||
205 | cq->uar = &(to_vucontext(context)->uar); | 207 | cq->uar = &(to_vucontext(context)->uar); |
206 | 208 | ||
207 | /* Copy udata back. */ | 209 | /* Copy udata back. */ |
208 | if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) { | 210 | if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) { |
209 | dev_warn(&dev->pdev->dev, | 211 | dev_warn(&dev->pdev->dev, |
210 | "failed to copy back udata\n"); | 212 | "failed to copy back udata\n"); |
211 | pvrdma_destroy_cq(&cq->ibcq); | 213 | pvrdma_destroy_cq(&cq->ibcq); |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c index 5acebb1ef631..af235967a9c2 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | |||
@@ -113,6 +113,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd, | |||
113 | union pvrdma_cmd_resp rsp; | 113 | union pvrdma_cmd_resp rsp; |
114 | struct pvrdma_cmd_create_srq *cmd = &req.create_srq; | 114 | struct pvrdma_cmd_create_srq *cmd = &req.create_srq; |
115 | struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp; | 115 | struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp; |
116 | struct pvrdma_create_srq_resp srq_resp = {0}; | ||
116 | struct pvrdma_create_srq ucmd; | 117 | struct pvrdma_create_srq ucmd; |
117 | unsigned long flags; | 118 | unsigned long flags; |
118 | int ret; | 119 | int ret; |
@@ -204,12 +205,13 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd, | |||
204 | } | 205 | } |
205 | 206 | ||
206 | srq->srq_handle = resp->srqn; | 207 | srq->srq_handle = resp->srqn; |
208 | srq_resp.srqn = resp->srqn; | ||
207 | spin_lock_irqsave(&dev->srq_tbl_lock, flags); | 209 | spin_lock_irqsave(&dev->srq_tbl_lock, flags); |
208 | dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq; | 210 | dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq; |
209 | spin_unlock_irqrestore(&dev->srq_tbl_lock, flags); | 211 | spin_unlock_irqrestore(&dev->srq_tbl_lock, flags); |
210 | 212 | ||
211 | /* Copy udata back. */ | 213 | /* Copy udata back. */ |
212 | if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) { | 214 | if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) { |
213 | dev_warn(&dev->pdev->dev, "failed to copy back udata\n"); | 215 | dev_warn(&dev->pdev->dev, "failed to copy back udata\n"); |
214 | pvrdma_destroy_srq(&srq->ibsrq); | 216 | pvrdma_destroy_srq(&srq->ibsrq); |
215 | return ERR_PTR(-EINVAL); | 217 | return ERR_PTR(-EINVAL); |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c index 16b96616ef7e..a51463cd2f37 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | |||
@@ -447,6 +447,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, | |||
447 | union pvrdma_cmd_resp rsp; | 447 | union pvrdma_cmd_resp rsp; |
448 | struct pvrdma_cmd_create_pd *cmd = &req.create_pd; | 448 | struct pvrdma_cmd_create_pd *cmd = &req.create_pd; |
449 | struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; | 449 | struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp; |
450 | struct pvrdma_alloc_pd_resp pd_resp = {0}; | ||
450 | int ret; | 451 | int ret; |
451 | void *ptr; | 452 | void *ptr; |
452 | 453 | ||
@@ -475,9 +476,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev, | |||
475 | pd->privileged = !context; | 476 | pd->privileged = !context; |
476 | pd->pd_handle = resp->pd_handle; | 477 | pd->pd_handle = resp->pd_handle; |
477 | pd->pdn = resp->pd_handle; | 478 | pd->pdn = resp->pd_handle; |
479 | pd_resp.pdn = resp->pd_handle; | ||
478 | 480 | ||
479 | if (context) { | 481 | if (context) { |
480 | if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) { | 482 | if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) { |
481 | dev_warn(&dev->pdev->dev, | 483 | dev_warn(&dev->pdev->dev, |
482 | "failed to copy back protection domain\n"); | 484 | "failed to copy back protection domain\n"); |
483 | pvrdma_dealloc_pd(&pd->ibpd); | 485 | pvrdma_dealloc_pd(&pd->ibpd); |
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c index 1b2e5362a3ff..cc429b567d0a 100644 --- a/drivers/infiniband/sw/rdmavt/mr.c +++ b/drivers/infiniband/sw/rdmavt/mr.c | |||
@@ -489,11 +489,13 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t) | |||
489 | unsigned long timeout; | 489 | unsigned long timeout; |
490 | struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); | 490 | struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); |
491 | 491 | ||
492 | if (percpu_ref_is_zero(&mr->refcount)) | 492 | if (mr->lkey) { |
493 | return 0; | 493 | /* avoid dma mr */ |
494 | /* avoid dma mr */ | ||
495 | if (mr->lkey) | ||
496 | rvt_dereg_clean_qps(mr); | 494 | rvt_dereg_clean_qps(mr); |
495 | /* @mr was indexed on rcu protected @lkey_table */ | ||
496 | synchronize_rcu(); | ||
497 | } | ||
498 | |||
497 | timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ); | 499 | timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ); |
498 | if (!timeout) { | 500 | if (!timeout) { |
499 | rvt_pr_err(rdi, | 501 | rvt_pr_err(rdi, |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c index 11f74cbe6660..ea302b054601 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c | |||
@@ -281,8 +281,6 @@ void ipoib_delete_debug_files(struct net_device *dev) | |||
281 | { | 281 | { |
282 | struct ipoib_dev_priv *priv = ipoib_priv(dev); | 282 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
283 | 283 | ||
284 | WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n"); | ||
285 | WARN_ONCE(!priv->path_dentry, "null path debug file\n"); | ||
286 | debugfs_remove(priv->mcg_dentry); | 284 | debugfs_remove(priv->mcg_dentry); |
287 | debugfs_remove(priv->path_dentry); | 285 | debugfs_remove(priv->path_dentry); |
288 | priv->mcg_dentry = priv->path_dentry = NULL; | 286 | priv->mcg_dentry = priv->path_dentry = NULL; |
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 1f316d66e6f7..41614c185918 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c | |||
@@ -218,8 +218,10 @@ static void matrix_keypad_stop(struct input_dev *dev) | |||
218 | { | 218 | { |
219 | struct matrix_keypad *keypad = input_get_drvdata(dev); | 219 | struct matrix_keypad *keypad = input_get_drvdata(dev); |
220 | 220 | ||
221 | spin_lock_irq(&keypad->lock); | ||
221 | keypad->stopped = true; | 222 | keypad->stopped = true; |
222 | mb(); | 223 | spin_unlock_irq(&keypad->lock); |
224 | |||
223 | flush_work(&keypad->work.work); | 225 | flush_work(&keypad->work.work); |
224 | /* | 226 | /* |
225 | * matrix_keypad_scan() will leave IRQs enabled; | 227 | * matrix_keypad_scan() will leave IRQs enabled; |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 3d2e23a0ae39..a246fc686bb7 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -173,7 +173,6 @@ static const char * const smbus_pnp_ids[] = { | |||
173 | "LEN0046", /* X250 */ | 173 | "LEN0046", /* X250 */ |
174 | "LEN004a", /* W541 */ | 174 | "LEN004a", /* W541 */ |
175 | "LEN200f", /* T450s */ | 175 | "LEN200f", /* T450s */ |
176 | "LEN2018", /* T460p */ | ||
177 | NULL | 176 | NULL |
178 | }; | 177 | }; |
179 | 178 | ||
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index db4f6bb502e3..a5ab774da4cc 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c | |||
@@ -1,11 +1,8 @@ | |||
1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | * Copyright (C) 2012 Samsung Electronics Co.Ltd | 2 | // Melfas MMS114/MMS152 touchscreen device driver |
3 | * Author: Joonyoung Shim <jy0922.shim@samsung.com> | 3 | // |
4 | * | 4 | // Copyright (c) 2012 Samsung Electronics Co., Ltd. |
5 | * This program is free software; you can redistribute it and/or modify | 5 | // Author: Joonyoung Shim <jy0922.shim@samsung.com> |
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | 6 | ||
10 | #include <linux/module.h> | 7 | #include <linux/module.h> |
11 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
@@ -624,4 +621,4 @@ module_i2c_driver(mms114_driver); | |||
624 | /* Module information */ | 621 | /* Module information */ |
625 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); | 622 | MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>"); |
626 | MODULE_DESCRIPTION("MELFAS mms114 Touchscreen driver"); | 623 | MODULE_DESCRIPTION("MELFAS mms114 Touchscreen driver"); |
627 | MODULE_LICENSE("GPL"); | 624 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 35a408d0ae4f..99bc9bd64b9e 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c | |||
@@ -205,7 +205,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d | |||
205 | * for example, an "address" value of 0x12345f000 will | 205 | * for example, an "address" value of 0x12345f000 will |
206 | * flush from 0x123440000 to 0x12347ffff (256KiB). */ | 206 | * flush from 0x123440000 to 0x12347ffff (256KiB). */ |
207 | unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT); | 207 | unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT); |
208 | unsigned long mask = __rounddown_pow_of_two(address ^ last);; | 208 | unsigned long mask = __rounddown_pow_of_two(address ^ last); |
209 | 209 | ||
210 | desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE; | 210 | desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE; |
211 | } else { | 211 | } else { |
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c index 55cfb986225b..faf734ff4cf3 100644 --- a/drivers/irqchip/irq-bcm7038-l1.c +++ b/drivers/irqchip/irq-bcm7038-l1.c | |||
@@ -339,9 +339,6 @@ int __init bcm7038_l1_of_init(struct device_node *dn, | |||
339 | goto out_unmap; | 339 | goto out_unmap; |
340 | } | 340 | } |
341 | 341 | ||
342 | pr_info("registered BCM7038 L1 intc (mem: 0x%p, IRQs: %d)\n", | ||
343 | intc->cpus[0]->map_base, IRQS_PER_WORD * intc->n_words); | ||
344 | |||
345 | return 0; | 342 | return 0; |
346 | 343 | ||
347 | out_unmap: | 344 | out_unmap: |
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index 983640eba418..8968e5e93fcb 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c | |||
@@ -318,9 +318,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn, | |||
318 | } | 318 | } |
319 | } | 319 | } |
320 | 320 | ||
321 | pr_info("registered %s intc (mem: 0x%p, parent IRQ(s): %d)\n", | ||
322 | intc_name, data->map_base[0], data->num_parent_irqs); | ||
323 | |||
324 | return 0; | 321 | return 0; |
325 | 322 | ||
326 | out_free_domain: | 323 | out_free_domain: |
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 691d20eb0bec..0e65f609352e 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c | |||
@@ -262,9 +262,6 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, | |||
262 | ct->chip.irq_set_wake = irq_gc_set_wake; | 262 | ct->chip.irq_set_wake = irq_gc_set_wake; |
263 | } | 263 | } |
264 | 264 | ||
265 | pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n", | ||
266 | base, parent_irq); | ||
267 | |||
268 | return 0; | 265 | return 0; |
269 | 266 | ||
270 | out_free_domain: | 267 | out_free_domain: |
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 993a8426a453..1ff38aff9f29 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c | |||
@@ -94,7 +94,7 @@ static struct irq_chip gicv2m_msi_irq_chip = { | |||
94 | 94 | ||
95 | static struct msi_domain_info gicv2m_msi_domain_info = { | 95 | static struct msi_domain_info gicv2m_msi_domain_info = { |
96 | .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | | 96 | .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | |
97 | MSI_FLAG_PCI_MSIX), | 97 | MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), |
98 | .chip = &gicv2m_msi_irq_chip, | 98 | .chip = &gicv2m_msi_irq_chip, |
99 | }; | 99 | }; |
100 | 100 | ||
@@ -155,18 +155,12 @@ static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, | |||
155 | return 0; | 155 | return 0; |
156 | } | 156 | } |
157 | 157 | ||
158 | static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq) | 158 | static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq, |
159 | int nr_irqs) | ||
159 | { | 160 | { |
160 | int pos; | ||
161 | |||
162 | pos = hwirq - v2m->spi_start; | ||
163 | if (pos < 0 || pos >= v2m->nr_spis) { | ||
164 | pr_err("Failed to teardown msi. Invalid hwirq %d\n", hwirq); | ||
165 | return; | ||
166 | } | ||
167 | |||
168 | spin_lock(&v2m_lock); | 161 | spin_lock(&v2m_lock); |
169 | __clear_bit(pos, v2m->bm); | 162 | bitmap_release_region(v2m->bm, hwirq - v2m->spi_start, |
163 | get_count_order(nr_irqs)); | ||
170 | spin_unlock(&v2m_lock); | 164 | spin_unlock(&v2m_lock); |
171 | } | 165 | } |
172 | 166 | ||
@@ -174,13 +168,13 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
174 | unsigned int nr_irqs, void *args) | 168 | unsigned int nr_irqs, void *args) |
175 | { | 169 | { |
176 | struct v2m_data *v2m = NULL, *tmp; | 170 | struct v2m_data *v2m = NULL, *tmp; |
177 | int hwirq, offset, err = 0; | 171 | int hwirq, offset, i, err = 0; |
178 | 172 | ||
179 | spin_lock(&v2m_lock); | 173 | spin_lock(&v2m_lock); |
180 | list_for_each_entry(tmp, &v2m_nodes, entry) { | 174 | list_for_each_entry(tmp, &v2m_nodes, entry) { |
181 | offset = find_first_zero_bit(tmp->bm, tmp->nr_spis); | 175 | offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis, |
182 | if (offset < tmp->nr_spis) { | 176 | get_count_order(nr_irqs)); |
183 | __set_bit(offset, tmp->bm); | 177 | if (offset >= 0) { |
184 | v2m = tmp; | 178 | v2m = tmp; |
185 | break; | 179 | break; |
186 | } | 180 | } |
@@ -192,16 +186,21 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | |||
192 | 186 | ||
193 | hwirq = v2m->spi_start + offset; | 187 | hwirq = v2m->spi_start + offset; |
194 | 188 | ||
195 | err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq); | 189 | for (i = 0; i < nr_irqs; i++) { |
196 | if (err) { | 190 | err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i); |
197 | gicv2m_unalloc_msi(v2m, hwirq); | 191 | if (err) |
198 | return err; | 192 | goto fail; |
199 | } | ||
200 | 193 | ||
201 | irq_domain_set_hwirq_and_chip(domain, virq, hwirq, | 194 | irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, |
202 | &gicv2m_irq_chip, v2m); | 195 | &gicv2m_irq_chip, v2m); |
196 | } | ||
203 | 197 | ||
204 | return 0; | 198 | return 0; |
199 | |||
200 | fail: | ||
201 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | ||
202 | gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs)); | ||
203 | return err; | ||
205 | } | 204 | } |
206 | 205 | ||
207 | static void gicv2m_irq_domain_free(struct irq_domain *domain, | 206 | static void gicv2m_irq_domain_free(struct irq_domain *domain, |
@@ -210,8 +209,7 @@ static void gicv2m_irq_domain_free(struct irq_domain *domain, | |||
210 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | 209 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); |
211 | struct v2m_data *v2m = irq_data_get_irq_chip_data(d); | 210 | struct v2m_data *v2m = irq_data_get_irq_chip_data(d); |
212 | 211 | ||
213 | BUG_ON(nr_irqs != 1); | 212 | gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs); |
214 | gicv2m_unalloc_msi(v2m, d->hwirq); | ||
215 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | 213 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); |
216 | } | 214 | } |
217 | 215 | ||
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index 14a8c0a7e095..25a98de5cfb2 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c | |||
@@ -132,6 +132,8 @@ static int __init its_pci_of_msi_init(void) | |||
132 | 132 | ||
133 | for (np = of_find_matching_node(NULL, its_device_id); np; | 133 | for (np = of_find_matching_node(NULL, its_device_id); np; |
134 | np = of_find_matching_node(np, its_device_id)) { | 134 | np = of_find_matching_node(np, its_device_id)) { |
135 | if (!of_device_is_available(np)) | ||
136 | continue; | ||
135 | if (!of_property_read_bool(np, "msi-controller")) | 137 | if (!of_property_read_bool(np, "msi-controller")) |
136 | continue; | 138 | continue; |
137 | 139 | ||
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c index 833a90fe33ae..8881a053c173 100644 --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c | |||
@@ -154,6 +154,8 @@ static void __init its_pmsi_of_init(void) | |||
154 | 154 | ||
155 | for (np = of_find_matching_node(NULL, its_device_id); np; | 155 | for (np = of_find_matching_node(NULL, its_device_id); np; |
156 | np = of_find_matching_node(np, its_device_id)) { | 156 | np = of_find_matching_node(np, its_device_id)) { |
157 | if (!of_device_is_available(np)) | ||
158 | continue; | ||
157 | if (!of_property_read_bool(np, "msi-controller")) | 159 | if (!of_property_read_bool(np, "msi-controller")) |
158 | continue; | 160 | continue; |
159 | 161 | ||
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 06f025fd5726..2cbb19cddbf8 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -1412,7 +1412,7 @@ static struct irq_chip its_irq_chip = { | |||
1412 | * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. | 1412 | * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. |
1413 | */ | 1413 | */ |
1414 | #define IRQS_PER_CHUNK_SHIFT 5 | 1414 | #define IRQS_PER_CHUNK_SHIFT 5 |
1415 | #define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) | 1415 | #define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT) |
1416 | #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ | 1416 | #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ |
1417 | 1417 | ||
1418 | static unsigned long *lpi_bitmap; | 1418 | static unsigned long *lpi_bitmap; |
@@ -2119,11 +2119,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, | |||
2119 | 2119 | ||
2120 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 2120 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
2121 | /* | 2121 | /* |
2122 | * At least one bit of EventID is being used, hence a minimum | 2122 | * We allocate at least one chunk worth of LPIs bet device, |
2123 | * of two entries. No, the architecture doesn't let you | 2123 | * and thus that many ITEs. The device may require less though. |
2124 | * express an ITT with a single entry. | ||
2125 | */ | 2124 | */ |
2126 | nr_ites = max(2UL, roundup_pow_of_two(nvecs)); | 2125 | nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs)); |
2127 | sz = nr_ites * its->ite_size; | 2126 | sz = nr_ites * its->ite_size; |
2128 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; | 2127 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
2129 | itt = kzalloc(sz, GFP_KERNEL); | 2128 | itt = kzalloc(sz, GFP_KERNEL); |
@@ -2495,7 +2494,7 @@ static int its_vpe_set_affinity(struct irq_data *d, | |||
2495 | 2494 | ||
2496 | static void its_vpe_schedule(struct its_vpe *vpe) | 2495 | static void its_vpe_schedule(struct its_vpe *vpe) |
2497 | { | 2496 | { |
2498 | void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); | 2497 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
2499 | u64 val; | 2498 | u64 val; |
2500 | 2499 | ||
2501 | /* Schedule the VPE */ | 2500 | /* Schedule the VPE */ |
@@ -2527,7 +2526,7 @@ static void its_vpe_schedule(struct its_vpe *vpe) | |||
2527 | 2526 | ||
2528 | static void its_vpe_deschedule(struct its_vpe *vpe) | 2527 | static void its_vpe_deschedule(struct its_vpe *vpe) |
2529 | { | 2528 | { |
2530 | void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); | 2529 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
2531 | u32 count = 1000000; /* 1s! */ | 2530 | u32 count = 1000000; /* 1s! */ |
2532 | bool clean; | 2531 | bool clean; |
2533 | u64 val; | 2532 | u64 val; |
@@ -3314,6 +3313,8 @@ static int __init its_of_probe(struct device_node *node) | |||
3314 | 3313 | ||
3315 | for (np = of_find_matching_node(node, its_device_id); np; | 3314 | for (np = of_find_matching_node(node, its_device_id); np; |
3316 | np = of_find_matching_node(np, its_device_id)) { | 3315 | np = of_find_matching_node(np, its_device_id)) { |
3316 | if (!of_device_is_available(np)) | ||
3317 | continue; | ||
3317 | if (!of_property_read_bool(np, "msi-controller")) { | 3318 | if (!of_property_read_bool(np, "msi-controller")) { |
3318 | pr_warn("%pOF: no msi-controller property, ITS ignored\n", | 3319 | pr_warn("%pOF: no msi-controller property, ITS ignored\n", |
3319 | np); | 3320 | np); |
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index a57c0fbbd34a..d99cc07903ec 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -673,7 +673,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) | |||
673 | MPIDR_TO_SGI_RS(cluster_id) | | 673 | MPIDR_TO_SGI_RS(cluster_id) | |
674 | tlist << ICC_SGI1R_TARGET_LIST_SHIFT); | 674 | tlist << ICC_SGI1R_TARGET_LIST_SHIFT); |
675 | 675 | ||
676 | pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); | 676 | pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); |
677 | gic_write_sgi1r(val); | 677 | gic_write_sgi1r(val); |
678 | } | 678 | } |
679 | 679 | ||
@@ -688,7 +688,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) | |||
688 | * Ensure that stores to Normal memory are visible to the | 688 | * Ensure that stores to Normal memory are visible to the |
689 | * other CPUs before issuing the IPI. | 689 | * other CPUs before issuing the IPI. |
690 | */ | 690 | */ |
691 | smp_wmb(); | 691 | wmb(); |
692 | 692 | ||
693 | for_each_cpu(cpu, mask) { | 693 | for_each_cpu(cpu, mask) { |
694 | u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); | 694 | u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); |
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index 675eda5ff2b8..4760307ab43f 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c | |||
@@ -28,20 +28,6 @@ struct gpcv2_irqchip_data { | |||
28 | 28 | ||
29 | static struct gpcv2_irqchip_data *imx_gpcv2_instance; | 29 | static struct gpcv2_irqchip_data *imx_gpcv2_instance; |
30 | 30 | ||
31 | /* | ||
32 | * Interface for the low level wakeup code. | ||
33 | */ | ||
34 | u32 imx_gpcv2_get_wakeup_source(u32 **sources) | ||
35 | { | ||
36 | if (!imx_gpcv2_instance) | ||
37 | return 0; | ||
38 | |||
39 | if (sources) | ||
40 | *sources = imx_gpcv2_instance->wakeup_sources; | ||
41 | |||
42 | return IMR_NUM; | ||
43 | } | ||
44 | |||
45 | static int gpcv2_wakeup_source_save(void) | 31 | static int gpcv2_wakeup_source_save(void) |
46 | { | 32 | { |
47 | struct gpcv2_irqchip_data *cd; | 33 | struct gpcv2_irqchip_data *cd; |
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index ef92a4d2038e..d32268cc1174 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
@@ -424,8 +424,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, | |||
424 | spin_lock_irqsave(&gic_lock, flags); | 424 | spin_lock_irqsave(&gic_lock, flags); |
425 | write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); | 425 | write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); |
426 | write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); | 426 | write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); |
427 | gic_clear_pcpu_masks(intr); | ||
428 | set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); | ||
429 | irq_data_update_effective_affinity(data, cpumask_of(cpu)); | 427 | irq_data_update_effective_affinity(data, cpumask_of(cpu)); |
430 | spin_unlock_irqrestore(&gic_lock, flags); | 428 | spin_unlock_irqrestore(&gic_lock, flags); |
431 | 429 | ||
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 62f541f968f6..07074820a167 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c | |||
@@ -375,6 +375,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, | |||
375 | dev->ofdev.dev.of_node = np; | 375 | dev->ofdev.dev.of_node = np; |
376 | dev->ofdev.archdata.dma_mask = 0xffffffffUL; | 376 | dev->ofdev.archdata.dma_mask = 0xffffffffUL; |
377 | dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask; | 377 | dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask; |
378 | dev->ofdev.dev.coherent_dma_mask = dev->ofdev.archdata.dma_mask; | ||
378 | dev->ofdev.dev.parent = parent; | 379 | dev->ofdev.dev.parent = parent; |
379 | dev->ofdev.dev.bus = &macio_bus_type; | 380 | dev->ofdev.dev.bus = &macio_bus_type; |
380 | dev->ofdev.dev.release = macio_release_dev; | 381 | dev->ofdev.dev.release = macio_release_dev; |
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 1a46b41dac70..6422846b546e 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c | |||
@@ -659,11 +659,11 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio) | |||
659 | static void search_free(struct closure *cl) | 659 | static void search_free(struct closure *cl) |
660 | { | 660 | { |
661 | struct search *s = container_of(cl, struct search, cl); | 661 | struct search *s = container_of(cl, struct search, cl); |
662 | bio_complete(s); | ||
663 | 662 | ||
664 | if (s->iop.bio) | 663 | if (s->iop.bio) |
665 | bio_put(s->iop.bio); | 664 | bio_put(s->iop.bio); |
666 | 665 | ||
666 | bio_complete(s); | ||
667 | closure_debug_destroy(cl); | 667 | closure_debug_destroy(cl); |
668 | mempool_free(s, s->d->c->search); | 668 | mempool_free(s, s->d->c->search); |
669 | } | 669 | } |
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 312895788036..f2273143b3cb 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
@@ -963,6 +963,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, | |||
963 | uint32_t rtime = cpu_to_le32(get_seconds()); | 963 | uint32_t rtime = cpu_to_le32(get_seconds()); |
964 | struct uuid_entry *u; | 964 | struct uuid_entry *u; |
965 | char buf[BDEVNAME_SIZE]; | 965 | char buf[BDEVNAME_SIZE]; |
966 | struct cached_dev *exist_dc, *t; | ||
966 | 967 | ||
967 | bdevname(dc->bdev, buf); | 968 | bdevname(dc->bdev, buf); |
968 | 969 | ||
@@ -987,6 +988,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, | |||
987 | return -EINVAL; | 988 | return -EINVAL; |
988 | } | 989 | } |
989 | 990 | ||
991 | /* Check whether already attached */ | ||
992 | list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { | ||
993 | if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { | ||
994 | pr_err("Tried to attach %s but duplicate UUID already attached", | ||
995 | buf); | ||
996 | |||
997 | return -EINVAL; | ||
998 | } | ||
999 | } | ||
1000 | |||
990 | u = uuid_find(c, dc->sb.uuid); | 1001 | u = uuid_find(c, dc->sb.uuid); |
991 | 1002 | ||
992 | if (u && | 1003 | if (u && |
@@ -1204,7 +1215,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, | |||
1204 | 1215 | ||
1205 | return; | 1216 | return; |
1206 | err: | 1217 | err: |
1207 | pr_notice("error opening %s: %s", bdevname(bdev, name), err); | 1218 | pr_notice("error %s: %s", bdevname(bdev, name), err); |
1208 | bcache_device_stop(&dc->disk); | 1219 | bcache_device_stop(&dc->disk); |
1209 | } | 1220 | } |
1210 | 1221 | ||
@@ -1274,7 +1285,7 @@ static int flash_devs_run(struct cache_set *c) | |||
1274 | struct uuid_entry *u; | 1285 | struct uuid_entry *u; |
1275 | 1286 | ||
1276 | for (u = c->uuids; | 1287 | for (u = c->uuids; |
1277 | u < c->uuids + c->devices_max_used && !ret; | 1288 | u < c->uuids + c->nr_uuids && !ret; |
1278 | u++) | 1289 | u++) |
1279 | if (UUID_FLASH_ONLY(u)) | 1290 | if (UUID_FLASH_ONLY(u)) |
1280 | ret = flash_dev_run(c, u); | 1291 | ret = flash_dev_run(c, u); |
@@ -1883,6 +1894,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1883 | const char *err = NULL; /* must be set for any error case */ | 1894 | const char *err = NULL; /* must be set for any error case */ |
1884 | int ret = 0; | 1895 | int ret = 0; |
1885 | 1896 | ||
1897 | bdevname(bdev, name); | ||
1898 | |||
1886 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); | 1899 | memcpy(&ca->sb, sb, sizeof(struct cache_sb)); |
1887 | ca->bdev = bdev; | 1900 | ca->bdev = bdev; |
1888 | ca->bdev->bd_holder = ca; | 1901 | ca->bdev->bd_holder = ca; |
@@ -1891,11 +1904,12 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1891 | bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; | 1904 | bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page; |
1892 | get_page(sb_page); | 1905 | get_page(sb_page); |
1893 | 1906 | ||
1894 | if (blk_queue_discard(bdev_get_queue(ca->bdev))) | 1907 | if (blk_queue_discard(bdev_get_queue(bdev))) |
1895 | ca->discard = CACHE_DISCARD(&ca->sb); | 1908 | ca->discard = CACHE_DISCARD(&ca->sb); |
1896 | 1909 | ||
1897 | ret = cache_alloc(ca); | 1910 | ret = cache_alloc(ca); |
1898 | if (ret != 0) { | 1911 | if (ret != 0) { |
1912 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); | ||
1899 | if (ret == -ENOMEM) | 1913 | if (ret == -ENOMEM) |
1900 | err = "cache_alloc(): -ENOMEM"; | 1914 | err = "cache_alloc(): -ENOMEM"; |
1901 | else | 1915 | else |
@@ -1918,14 +1932,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page, | |||
1918 | goto out; | 1932 | goto out; |
1919 | } | 1933 | } |
1920 | 1934 | ||
1921 | pr_info("registered cache device %s", bdevname(bdev, name)); | 1935 | pr_info("registered cache device %s", name); |
1922 | 1936 | ||
1923 | out: | 1937 | out: |
1924 | kobject_put(&ca->kobj); | 1938 | kobject_put(&ca->kobj); |
1925 | 1939 | ||
1926 | err: | 1940 | err: |
1927 | if (err) | 1941 | if (err) |
1928 | pr_notice("error opening %s: %s", bdevname(bdev, name), err); | 1942 | pr_notice("error %s: %s", name, err); |
1929 | 1943 | ||
1930 | return ret; | 1944 | return ret; |
1931 | } | 1945 | } |
@@ -2014,6 +2028,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, | |||
2014 | if (err) | 2028 | if (err) |
2015 | goto err_close; | 2029 | goto err_close; |
2016 | 2030 | ||
2031 | err = "failed to register device"; | ||
2017 | if (SB_IS_BDEV(sb)) { | 2032 | if (SB_IS_BDEV(sb)) { |
2018 | struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); | 2033 | struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); |
2019 | if (!dc) | 2034 | if (!dc) |
@@ -2028,7 +2043,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, | |||
2028 | goto err_close; | 2043 | goto err_close; |
2029 | 2044 | ||
2030 | if (register_cache(sb, sb_page, bdev, ca) != 0) | 2045 | if (register_cache(sb, sb_page, bdev, ca) != 0) |
2031 | goto err_close; | 2046 | goto err; |
2032 | } | 2047 | } |
2033 | out: | 2048 | out: |
2034 | if (sb_page) | 2049 | if (sb_page) |
@@ -2041,7 +2056,7 @@ out: | |||
2041 | err_close: | 2056 | err_close: |
2042 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); | 2057 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
2043 | err: | 2058 | err: |
2044 | pr_info("error opening %s: %s", path, err); | 2059 | pr_info("error %s: %s", path, err); |
2045 | ret = -EINVAL; | 2060 | ret = -EINVAL; |
2046 | goto out; | 2061 | goto out; |
2047 | } | 2062 | } |
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 414c9af54ded..aa2032fa80d4 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -386,9 +386,6 @@ static void __cache_size_refresh(void) | |||
386 | static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, | 386 | static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, |
387 | enum data_mode *data_mode) | 387 | enum data_mode *data_mode) |
388 | { | 388 | { |
389 | unsigned noio_flag; | ||
390 | void *ptr; | ||
391 | |||
392 | if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { | 389 | if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { |
393 | *data_mode = DATA_MODE_SLAB; | 390 | *data_mode = DATA_MODE_SLAB; |
394 | return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); | 391 | return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); |
@@ -412,16 +409,15 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, | |||
412 | * all allocations done by this process (including pagetables) are done | 409 | * all allocations done by this process (including pagetables) are done |
413 | * as if GFP_NOIO was specified. | 410 | * as if GFP_NOIO was specified. |
414 | */ | 411 | */ |
412 | if (gfp_mask & __GFP_NORETRY) { | ||
413 | unsigned noio_flag = memalloc_noio_save(); | ||
414 | void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); | ||
415 | 415 | ||
416 | if (gfp_mask & __GFP_NORETRY) | ||
417 | noio_flag = memalloc_noio_save(); | ||
418 | |||
419 | ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); | ||
420 | |||
421 | if (gfp_mask & __GFP_NORETRY) | ||
422 | memalloc_noio_restore(noio_flag); | 416 | memalloc_noio_restore(noio_flag); |
417 | return ptr; | ||
418 | } | ||
423 | 419 | ||
424 | return ptr; | 420 | return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); |
425 | } | 421 | } |
426 | 422 | ||
427 | /* | 423 | /* |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 7d3e572072f5..a05a560d3cba 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -211,29 +211,27 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) | |||
211 | else | 211 | else |
212 | m->queue_mode = DM_TYPE_REQUEST_BASED; | 212 | m->queue_mode = DM_TYPE_REQUEST_BASED; |
213 | 213 | ||
214 | } else if (m->queue_mode == DM_TYPE_BIO_BASED || | 214 | } else if (m->queue_mode == DM_TYPE_BIO_BASED) { |
215 | m->queue_mode == DM_TYPE_NVME_BIO_BASED) { | ||
216 | INIT_WORK(&m->process_queued_bios, process_queued_bios); | 215 | INIT_WORK(&m->process_queued_bios, process_queued_bios); |
217 | 216 | /* | |
218 | if (m->queue_mode == DM_TYPE_BIO_BASED) { | 217 | * bio-based doesn't support any direct scsi_dh management; |
219 | /* | 218 | * it just discovers if a scsi_dh is attached. |
220 | * bio-based doesn't support any direct scsi_dh management; | 219 | */ |
221 | * it just discovers if a scsi_dh is attached. | 220 | set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); |
222 | */ | ||
223 | set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); | ||
224 | } | ||
225 | } | ||
226 | |||
227 | if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) { | ||
228 | set_bit(MPATHF_QUEUE_IO, &m->flags); | ||
229 | atomic_set(&m->pg_init_in_progress, 0); | ||
230 | atomic_set(&m->pg_init_count, 0); | ||
231 | m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; | ||
232 | init_waitqueue_head(&m->pg_init_wait); | ||
233 | } | 221 | } |
234 | 222 | ||
235 | dm_table_set_type(ti->table, m->queue_mode); | 223 | dm_table_set_type(ti->table, m->queue_mode); |
236 | 224 | ||
225 | /* | ||
226 | * Init fields that are only used when a scsi_dh is attached | ||
227 | * - must do this unconditionally (really doesn't hurt non-SCSI uses) | ||
228 | */ | ||
229 | set_bit(MPATHF_QUEUE_IO, &m->flags); | ||
230 | atomic_set(&m->pg_init_in_progress, 0); | ||
231 | atomic_set(&m->pg_init_count, 0); | ||
232 | m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; | ||
233 | init_waitqueue_head(&m->pg_init_wait); | ||
234 | |||
237 | return 0; | 235 | return 0; |
238 | } | 236 | } |
239 | 237 | ||
@@ -337,9 +335,6 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg) | |||
337 | { | 335 | { |
338 | m->current_pg = pg; | 336 | m->current_pg = pg; |
339 | 337 | ||
340 | if (m->queue_mode == DM_TYPE_NVME_BIO_BASED) | ||
341 | return; | ||
342 | |||
343 | /* Must we initialise the PG first, and queue I/O till it's ready? */ | 338 | /* Must we initialise the PG first, and queue I/O till it's ready? */ |
344 | if (m->hw_handler_name) { | 339 | if (m->hw_handler_name) { |
345 | set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); | 340 | set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); |
@@ -385,8 +380,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) | |||
385 | unsigned bypassed = 1; | 380 | unsigned bypassed = 1; |
386 | 381 | ||
387 | if (!atomic_read(&m->nr_valid_paths)) { | 382 | if (!atomic_read(&m->nr_valid_paths)) { |
388 | if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) | 383 | clear_bit(MPATHF_QUEUE_IO, &m->flags); |
389 | clear_bit(MPATHF_QUEUE_IO, &m->flags); | ||
390 | goto failed; | 384 | goto failed; |
391 | } | 385 | } |
392 | 386 | ||
@@ -599,7 +593,7 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) | |||
599 | return pgpath; | 593 | return pgpath; |
600 | } | 594 | } |
601 | 595 | ||
602 | static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio) | 596 | static struct pgpath *__map_bio_fast(struct multipath *m, struct bio *bio) |
603 | { | 597 | { |
604 | struct pgpath *pgpath; | 598 | struct pgpath *pgpath; |
605 | unsigned long flags; | 599 | unsigned long flags; |
@@ -634,8 +628,8 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, | |||
634 | { | 628 | { |
635 | struct pgpath *pgpath; | 629 | struct pgpath *pgpath; |
636 | 630 | ||
637 | if (m->queue_mode == DM_TYPE_NVME_BIO_BASED) | 631 | if (!m->hw_handler_name) |
638 | pgpath = __map_bio_nvme(m, bio); | 632 | pgpath = __map_bio_fast(m, bio); |
639 | else | 633 | else |
640 | pgpath = __map_bio(m, bio); | 634 | pgpath = __map_bio(m, bio); |
641 | 635 | ||
@@ -675,8 +669,7 @@ static void process_queued_io_list(struct multipath *m) | |||
675 | { | 669 | { |
676 | if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED) | 670 | if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED) |
677 | dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table)); | 671 | dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table)); |
678 | else if (m->queue_mode == DM_TYPE_BIO_BASED || | 672 | else if (m->queue_mode == DM_TYPE_BIO_BASED) |
679 | m->queue_mode == DM_TYPE_NVME_BIO_BASED) | ||
680 | queue_work(kmultipathd, &m->process_queued_bios); | 673 | queue_work(kmultipathd, &m->process_queued_bios); |
681 | } | 674 | } |
682 | 675 | ||
@@ -811,15 +804,14 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg, | |||
811 | return 0; | 804 | return 0; |
812 | } | 805 | } |
813 | 806 | ||
814 | static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error) | 807 | static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, |
808 | const char *attached_handler_name, char **error) | ||
815 | { | 809 | { |
816 | struct request_queue *q = bdev_get_queue(bdev); | 810 | struct request_queue *q = bdev_get_queue(bdev); |
817 | const char *attached_handler_name; | ||
818 | int r; | 811 | int r; |
819 | 812 | ||
820 | if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) { | 813 | if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) { |
821 | retain: | 814 | retain: |
822 | attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL); | ||
823 | if (attached_handler_name) { | 815 | if (attached_handler_name) { |
824 | /* | 816 | /* |
825 | * Clear any hw_handler_params associated with a | 817 | * Clear any hw_handler_params associated with a |
@@ -873,6 +865,8 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps | |||
873 | int r; | 865 | int r; |
874 | struct pgpath *p; | 866 | struct pgpath *p; |
875 | struct multipath *m = ti->private; | 867 | struct multipath *m = ti->private; |
868 | struct request_queue *q; | ||
869 | const char *attached_handler_name; | ||
876 | 870 | ||
877 | /* we need at least a path arg */ | 871 | /* we need at least a path arg */ |
878 | if (as->argc < 1) { | 872 | if (as->argc < 1) { |
@@ -891,9 +885,11 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps | |||
891 | goto bad; | 885 | goto bad; |
892 | } | 886 | } |
893 | 887 | ||
894 | if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) { | 888 | q = bdev_get_queue(p->path.dev->bdev); |
889 | attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL); | ||
890 | if (attached_handler_name) { | ||
895 | INIT_DELAYED_WORK(&p->activate_path, activate_path_work); | 891 | INIT_DELAYED_WORK(&p->activate_path, activate_path_work); |
896 | r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error); | 892 | r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error); |
897 | if (r) { | 893 | if (r) { |
898 | dm_put_device(ti, p->path.dev); | 894 | dm_put_device(ti, p->path.dev); |
899 | goto bad; | 895 | goto bad; |
@@ -1001,8 +997,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) | |||
1001 | if (!hw_argc) | 997 | if (!hw_argc) |
1002 | return 0; | 998 | return 0; |
1003 | 999 | ||
1004 | if (m->queue_mode == DM_TYPE_BIO_BASED || | 1000 | if (m->queue_mode == DM_TYPE_BIO_BASED) { |
1005 | m->queue_mode == DM_TYPE_NVME_BIO_BASED) { | ||
1006 | dm_consume_args(as, hw_argc); | 1001 | dm_consume_args(as, hw_argc); |
1007 | DMERR("bio-based multipath doesn't allow hardware handler args"); | 1002 | DMERR("bio-based multipath doesn't allow hardware handler args"); |
1008 | return 0; | 1003 | return 0; |
@@ -1091,8 +1086,6 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) | |||
1091 | 1086 | ||
1092 | if (!strcasecmp(queue_mode_name, "bio")) | 1087 | if (!strcasecmp(queue_mode_name, "bio")) |
1093 | m->queue_mode = DM_TYPE_BIO_BASED; | 1088 | m->queue_mode = DM_TYPE_BIO_BASED; |
1094 | else if (!strcasecmp(queue_mode_name, "nvme")) | ||
1095 | m->queue_mode = DM_TYPE_NVME_BIO_BASED; | ||
1096 | else if (!strcasecmp(queue_mode_name, "rq")) | 1089 | else if (!strcasecmp(queue_mode_name, "rq")) |
1097 | m->queue_mode = DM_TYPE_REQUEST_BASED; | 1090 | m->queue_mode = DM_TYPE_REQUEST_BASED; |
1098 | else if (!strcasecmp(queue_mode_name, "mq")) | 1091 | else if (!strcasecmp(queue_mode_name, "mq")) |
@@ -1193,7 +1186,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
1193 | ti->num_discard_bios = 1; | 1186 | ti->num_discard_bios = 1; |
1194 | ti->num_write_same_bios = 1; | 1187 | ti->num_write_same_bios = 1; |
1195 | ti->num_write_zeroes_bios = 1; | 1188 | ti->num_write_zeroes_bios = 1; |
1196 | if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED) | 1189 | if (m->queue_mode == DM_TYPE_BIO_BASED) |
1197 | ti->per_io_data_size = multipath_per_bio_data_size(); | 1190 | ti->per_io_data_size = multipath_per_bio_data_size(); |
1198 | else | 1191 | else |
1199 | ti->per_io_data_size = sizeof(struct dm_mpath_io); | 1192 | ti->per_io_data_size = sizeof(struct dm_mpath_io); |
@@ -1730,9 +1723,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type, | |||
1730 | case DM_TYPE_BIO_BASED: | 1723 | case DM_TYPE_BIO_BASED: |
1731 | DMEMIT("queue_mode bio "); | 1724 | DMEMIT("queue_mode bio "); |
1732 | break; | 1725 | break; |
1733 | case DM_TYPE_NVME_BIO_BASED: | ||
1734 | DMEMIT("queue_mode nvme "); | ||
1735 | break; | ||
1736 | case DM_TYPE_MQ_REQUEST_BASED: | 1726 | case DM_TYPE_MQ_REQUEST_BASED: |
1737 | DMEMIT("queue_mode mq "); | 1727 | DMEMIT("queue_mode mq "); |
1738 | break; | 1728 | break; |
@@ -2030,8 +2020,9 @@ static int multipath_busy(struct dm_target *ti) | |||
2030 | *---------------------------------------------------------------*/ | 2020 | *---------------------------------------------------------------*/ |
2031 | static struct target_type multipath_target = { | 2021 | static struct target_type multipath_target = { |
2032 | .name = "multipath", | 2022 | .name = "multipath", |
2033 | .version = {1, 12, 0}, | 2023 | .version = {1, 13, 0}, |
2034 | .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE, | 2024 | .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE | |
2025 | DM_TARGET_PASSES_INTEGRITY, | ||
2035 | .module = THIS_MODULE, | 2026 | .module = THIS_MODULE, |
2036 | .ctr = multipath_ctr, | 2027 | .ctr = multipath_ctr, |
2037 | .dtr = multipath_dtr, | 2028 | .dtr = multipath_dtr, |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 7ef469e902c6..c1d1034ff7b7 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -3408,9 +3408,10 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, | |||
3408 | set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); | 3408 | set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); |
3409 | 3409 | ||
3410 | } else { | 3410 | } else { |
3411 | if (test_bit(MD_RECOVERY_NEEDED, &recovery) || | 3411 | if (!test_bit(MD_RECOVERY_INTR, &recovery) && |
3412 | test_bit(MD_RECOVERY_RESHAPE, &recovery) || | 3412 | (test_bit(MD_RECOVERY_NEEDED, &recovery) || |
3413 | test_bit(MD_RECOVERY_RUNNING, &recovery)) | 3413 | test_bit(MD_RECOVERY_RESHAPE, &recovery) || |
3414 | test_bit(MD_RECOVERY_RUNNING, &recovery))) | ||
3414 | r = mddev->curr_resync_completed; | 3415 | r = mddev->curr_resync_completed; |
3415 | else | 3416 | else |
3416 | r = mddev->recovery_cp; | 3417 | r = mddev->recovery_cp; |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 5fe7ec356c33..7eb3e2a3c07d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -942,17 +942,12 @@ static int dm_table_determine_type(struct dm_table *t) | |||
942 | 942 | ||
943 | if (t->type != DM_TYPE_NONE) { | 943 | if (t->type != DM_TYPE_NONE) { |
944 | /* target already set the table's type */ | 944 | /* target already set the table's type */ |
945 | if (t->type == DM_TYPE_BIO_BASED) | 945 | if (t->type == DM_TYPE_BIO_BASED) { |
946 | return 0; | 946 | /* possibly upgrade to a variant of bio-based */ |
947 | else if (t->type == DM_TYPE_NVME_BIO_BASED) { | 947 | goto verify_bio_based; |
948 | if (!dm_table_does_not_support_partial_completion(t)) { | ||
949 | DMERR("nvme bio-based is only possible with devices" | ||
950 | " that don't support partial completion"); | ||
951 | return -EINVAL; | ||
952 | } | ||
953 | /* Fallthru, also verify all devices are blk-mq */ | ||
954 | } | 948 | } |
955 | BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); | 949 | BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED); |
950 | BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED); | ||
956 | goto verify_rq_based; | 951 | goto verify_rq_based; |
957 | } | 952 | } |
958 | 953 | ||
@@ -985,6 +980,7 @@ static int dm_table_determine_type(struct dm_table *t) | |||
985 | } | 980 | } |
986 | 981 | ||
987 | if (bio_based) { | 982 | if (bio_based) { |
983 | verify_bio_based: | ||
988 | /* We must use this table as bio-based */ | 984 | /* We must use this table as bio-based */ |
989 | t->type = DM_TYPE_BIO_BASED; | 985 | t->type = DM_TYPE_BIO_BASED; |
990 | if (dm_table_supports_dax(t) || | 986 | if (dm_table_supports_dax(t) || |
@@ -1755,7 +1751,7 @@ static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev | |||
1755 | char b[BDEVNAME_SIZE]; | 1751 | char b[BDEVNAME_SIZE]; |
1756 | 1752 | ||
1757 | /* For now, NVMe devices are the only devices of this class */ | 1753 | /* For now, NVMe devices are the only devices of this class */ |
1758 | return (strncmp(bdevname(dev->bdev, b), "nvme", 3) == 0); | 1754 | return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0); |
1759 | } | 1755 | } |
1760 | 1756 | ||
1761 | static bool dm_table_does_not_support_partial_completion(struct dm_table *t) | 1757 | static bool dm_table_does_not_support_partial_completion(struct dm_table *t) |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d6de00f367ef..45328d8b2859 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -458,9 +458,11 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |||
458 | return dm_get_geometry(md, geo); | 458 | return dm_get_geometry(md, geo); |
459 | } | 459 | } |
460 | 460 | ||
461 | static int dm_grab_bdev_for_ioctl(struct mapped_device *md, | 461 | static char *_dm_claim_ptr = "I belong to device-mapper"; |
462 | struct block_device **bdev, | 462 | |
463 | fmode_t *mode) | 463 | static int dm_get_bdev_for_ioctl(struct mapped_device *md, |
464 | struct block_device **bdev, | ||
465 | fmode_t *mode) | ||
464 | { | 466 | { |
465 | struct dm_target *tgt; | 467 | struct dm_target *tgt; |
466 | struct dm_table *map; | 468 | struct dm_table *map; |
@@ -490,6 +492,10 @@ retry: | |||
490 | goto out; | 492 | goto out; |
491 | 493 | ||
492 | bdgrab(*bdev); | 494 | bdgrab(*bdev); |
495 | r = blkdev_get(*bdev, *mode, _dm_claim_ptr); | ||
496 | if (r < 0) | ||
497 | goto out; | ||
498 | |||
493 | dm_put_live_table(md, srcu_idx); | 499 | dm_put_live_table(md, srcu_idx); |
494 | return r; | 500 | return r; |
495 | 501 | ||
@@ -508,7 +514,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, | |||
508 | struct mapped_device *md = bdev->bd_disk->private_data; | 514 | struct mapped_device *md = bdev->bd_disk->private_data; |
509 | int r; | 515 | int r; |
510 | 516 | ||
511 | r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); | 517 | r = dm_get_bdev_for_ioctl(md, &bdev, &mode); |
512 | if (r < 0) | 518 | if (r < 0) |
513 | return r; | 519 | return r; |
514 | 520 | ||
@@ -528,7 +534,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, | |||
528 | 534 | ||
529 | r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); | 535 | r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); |
530 | out: | 536 | out: |
531 | bdput(bdev); | 537 | blkdev_put(bdev, mode); |
532 | return r; | 538 | return r; |
533 | } | 539 | } |
534 | 540 | ||
@@ -708,14 +714,13 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) | |||
708 | static int open_table_device(struct table_device *td, dev_t dev, | 714 | static int open_table_device(struct table_device *td, dev_t dev, |
709 | struct mapped_device *md) | 715 | struct mapped_device *md) |
710 | { | 716 | { |
711 | static char *_claim_ptr = "I belong to device-mapper"; | ||
712 | struct block_device *bdev; | 717 | struct block_device *bdev; |
713 | 718 | ||
714 | int r; | 719 | int r; |
715 | 720 | ||
716 | BUG_ON(td->dm_dev.bdev); | 721 | BUG_ON(td->dm_dev.bdev); |
717 | 722 | ||
718 | bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr); | 723 | bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr); |
719 | if (IS_ERR(bdev)) | 724 | if (IS_ERR(bdev)) |
720 | return PTR_ERR(bdev); | 725 | return PTR_ERR(bdev); |
721 | 726 | ||
@@ -903,7 +908,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error) | |||
903 | queue_io(md, bio); | 908 | queue_io(md, bio); |
904 | } else { | 909 | } else { |
905 | /* done with normal IO or empty flush */ | 910 | /* done with normal IO or empty flush */ |
906 | bio->bi_status = io_error; | 911 | if (io_error) |
912 | bio->bi_status = io_error; | ||
907 | bio_endio(bio); | 913 | bio_endio(bio); |
908 | } | 914 | } |
909 | } | 915 | } |
@@ -3010,7 +3016,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, | |||
3010 | fmode_t mode; | 3016 | fmode_t mode; |
3011 | int r; | 3017 | int r; |
3012 | 3018 | ||
3013 | r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); | 3019 | r = dm_get_bdev_for_ioctl(md, &bdev, &mode); |
3014 | if (r < 0) | 3020 | if (r < 0) |
3015 | return r; | 3021 | return r; |
3016 | 3022 | ||
@@ -3020,7 +3026,7 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, | |||
3020 | else | 3026 | else |
3021 | r = -EOPNOTSUPP; | 3027 | r = -EOPNOTSUPP; |
3022 | 3028 | ||
3023 | bdput(bdev); | 3029 | blkdev_put(bdev, mode); |
3024 | return r; | 3030 | return r; |
3025 | } | 3031 | } |
3026 | 3032 | ||
@@ -3031,7 +3037,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) | |||
3031 | fmode_t mode; | 3037 | fmode_t mode; |
3032 | int r; | 3038 | int r; |
3033 | 3039 | ||
3034 | r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); | 3040 | r = dm_get_bdev_for_ioctl(md, &bdev, &mode); |
3035 | if (r < 0) | 3041 | if (r < 0) |
3036 | return r; | 3042 | return r; |
3037 | 3043 | ||
@@ -3041,7 +3047,7 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type) | |||
3041 | else | 3047 | else |
3042 | r = -EOPNOTSUPP; | 3048 | r = -EOPNOTSUPP; |
3043 | 3049 | ||
3044 | bdput(bdev); | 3050 | blkdev_put(bdev, mode); |
3045 | return r; | 3051 | return r; |
3046 | } | 3052 | } |
3047 | 3053 | ||
@@ -3053,7 +3059,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, | |||
3053 | fmode_t mode; | 3059 | fmode_t mode; |
3054 | int r; | 3060 | int r; |
3055 | 3061 | ||
3056 | r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); | 3062 | r = dm_get_bdev_for_ioctl(md, &bdev, &mode); |
3057 | if (r < 0) | 3063 | if (r < 0) |
3058 | return r; | 3064 | return r; |
3059 | 3065 | ||
@@ -3063,7 +3069,7 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, | |||
3063 | else | 3069 | else |
3064 | r = -EOPNOTSUPP; | 3070 | r = -EOPNOTSUPP; |
3065 | 3071 | ||
3066 | bdput(bdev); | 3072 | blkdev_put(bdev, mode); |
3067 | return r; | 3073 | return r; |
3068 | } | 3074 | } |
3069 | 3075 | ||
@@ -3074,7 +3080,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key) | |||
3074 | fmode_t mode; | 3080 | fmode_t mode; |
3075 | int r; | 3081 | int r; |
3076 | 3082 | ||
3077 | r = dm_grab_bdev_for_ioctl(md, &bdev, &mode); | 3083 | r = dm_get_bdev_for_ioctl(md, &bdev, &mode); |
3078 | if (r < 0) | 3084 | if (r < 0) |
3079 | return r; | 3085 | return r; |
3080 | 3086 | ||
@@ -3084,7 +3090,7 @@ static int dm_pr_clear(struct block_device *bdev, u64 key) | |||
3084 | else | 3090 | else |
3085 | r = -EOPNOTSUPP; | 3091 | r = -EOPNOTSUPP; |
3086 | 3092 | ||
3087 | bdput(bdev); | 3093 | blkdev_put(bdev, mode); |
3088 | return r; | 3094 | return r; |
3089 | } | 3095 | } |
3090 | 3096 | ||
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c index e40065bdbfc8..0a7e99d62c69 100644 --- a/drivers/md/md-multipath.c +++ b/drivers/md/md-multipath.c | |||
@@ -157,7 +157,7 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev) | |||
157 | seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); | 157 | seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); |
158 | } | 158 | } |
159 | rcu_read_unlock(); | 159 | rcu_read_unlock(); |
160 | seq_printf (seq, "]"); | 160 | seq_putc(seq, ']'); |
161 | } | 161 | } |
162 | 162 | ||
163 | static int multipath_congested(struct mddev *mddev, int bits) | 163 | static int multipath_congested(struct mddev *mddev, int bits) |
diff --git a/drivers/md/md.c b/drivers/md/md.c index bc67ab6844f0..254e44e44668 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -801,6 +801,9 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, | |||
801 | struct bio *bio; | 801 | struct bio *bio; |
802 | int ff = 0; | 802 | int ff = 0; |
803 | 803 | ||
804 | if (!page) | ||
805 | return; | ||
806 | |||
804 | if (test_bit(Faulty, &rdev->flags)) | 807 | if (test_bit(Faulty, &rdev->flags)) |
805 | return; | 808 | return; |
806 | 809 | ||
@@ -5452,6 +5455,7 @@ int md_run(struct mddev *mddev) | |||
5452 | * the only valid external interface is through the md | 5455 | * the only valid external interface is through the md |
5453 | * device. | 5456 | * device. |
5454 | */ | 5457 | */ |
5458 | mddev->has_superblocks = false; | ||
5455 | rdev_for_each(rdev, mddev) { | 5459 | rdev_for_each(rdev, mddev) { |
5456 | if (test_bit(Faulty, &rdev->flags)) | 5460 | if (test_bit(Faulty, &rdev->flags)) |
5457 | continue; | 5461 | continue; |
@@ -5465,6 +5469,9 @@ int md_run(struct mddev *mddev) | |||
5465 | set_disk_ro(mddev->gendisk, 1); | 5469 | set_disk_ro(mddev->gendisk, 1); |
5466 | } | 5470 | } |
5467 | 5471 | ||
5472 | if (rdev->sb_page) | ||
5473 | mddev->has_superblocks = true; | ||
5474 | |||
5468 | /* perform some consistency tests on the device. | 5475 | /* perform some consistency tests on the device. |
5469 | * We don't want the data to overlap the metadata, | 5476 | * We don't want the data to overlap the metadata, |
5470 | * Internal Bitmap issues have been handled elsewhere. | 5477 | * Internal Bitmap issues have been handled elsewhere. |
@@ -5497,8 +5504,10 @@ int md_run(struct mddev *mddev) | |||
5497 | } | 5504 | } |
5498 | if (mddev->sync_set == NULL) { | 5505 | if (mddev->sync_set == NULL) { |
5499 | mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); | 5506 | mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); |
5500 | if (!mddev->sync_set) | 5507 | if (!mddev->sync_set) { |
5501 | return -ENOMEM; | 5508 | err = -ENOMEM; |
5509 | goto abort; | ||
5510 | } | ||
5502 | } | 5511 | } |
5503 | 5512 | ||
5504 | spin_lock(&pers_lock); | 5513 | spin_lock(&pers_lock); |
@@ -5511,7 +5520,8 @@ int md_run(struct mddev *mddev) | |||
5511 | else | 5520 | else |
5512 | pr_warn("md: personality for level %s is not loaded!\n", | 5521 | pr_warn("md: personality for level %s is not loaded!\n", |
5513 | mddev->clevel); | 5522 | mddev->clevel); |
5514 | return -EINVAL; | 5523 | err = -EINVAL; |
5524 | goto abort; | ||
5515 | } | 5525 | } |
5516 | spin_unlock(&pers_lock); | 5526 | spin_unlock(&pers_lock); |
5517 | if (mddev->level != pers->level) { | 5527 | if (mddev->level != pers->level) { |
@@ -5524,7 +5534,8 @@ int md_run(struct mddev *mddev) | |||
5524 | pers->start_reshape == NULL) { | 5534 | pers->start_reshape == NULL) { |
5525 | /* This personality cannot handle reshaping... */ | 5535 | /* This personality cannot handle reshaping... */ |
5526 | module_put(pers->owner); | 5536 | module_put(pers->owner); |
5527 | return -EINVAL; | 5537 | err = -EINVAL; |
5538 | goto abort; | ||
5528 | } | 5539 | } |
5529 | 5540 | ||
5530 | if (pers->sync_request) { | 5541 | if (pers->sync_request) { |
@@ -5593,7 +5604,7 @@ int md_run(struct mddev *mddev) | |||
5593 | mddev->private = NULL; | 5604 | mddev->private = NULL; |
5594 | module_put(pers->owner); | 5605 | module_put(pers->owner); |
5595 | bitmap_destroy(mddev); | 5606 | bitmap_destroy(mddev); |
5596 | return err; | 5607 | goto abort; |
5597 | } | 5608 | } |
5598 | if (mddev->queue) { | 5609 | if (mddev->queue) { |
5599 | bool nonrot = true; | 5610 | bool nonrot = true; |
@@ -5655,6 +5666,18 @@ int md_run(struct mddev *mddev) | |||
5655 | sysfs_notify_dirent_safe(mddev->sysfs_action); | 5666 | sysfs_notify_dirent_safe(mddev->sysfs_action); |
5656 | sysfs_notify(&mddev->kobj, NULL, "degraded"); | 5667 | sysfs_notify(&mddev->kobj, NULL, "degraded"); |
5657 | return 0; | 5668 | return 0; |
5669 | |||
5670 | abort: | ||
5671 | if (mddev->bio_set) { | ||
5672 | bioset_free(mddev->bio_set); | ||
5673 | mddev->bio_set = NULL; | ||
5674 | } | ||
5675 | if (mddev->sync_set) { | ||
5676 | bioset_free(mddev->sync_set); | ||
5677 | mddev->sync_set = NULL; | ||
5678 | } | ||
5679 | |||
5680 | return err; | ||
5658 | } | 5681 | } |
5659 | EXPORT_SYMBOL_GPL(md_run); | 5682 | EXPORT_SYMBOL_GPL(md_run); |
5660 | 5683 | ||
@@ -8049,6 +8072,7 @@ EXPORT_SYMBOL(md_done_sync); | |||
8049 | bool md_write_start(struct mddev *mddev, struct bio *bi) | 8072 | bool md_write_start(struct mddev *mddev, struct bio *bi) |
8050 | { | 8073 | { |
8051 | int did_change = 0; | 8074 | int did_change = 0; |
8075 | |||
8052 | if (bio_data_dir(bi) != WRITE) | 8076 | if (bio_data_dir(bi) != WRITE) |
8053 | return true; | 8077 | return true; |
8054 | 8078 | ||
@@ -8081,6 +8105,8 @@ bool md_write_start(struct mddev *mddev, struct bio *bi) | |||
8081 | rcu_read_unlock(); | 8105 | rcu_read_unlock(); |
8082 | if (did_change) | 8106 | if (did_change) |
8083 | sysfs_notify_dirent_safe(mddev->sysfs_state); | 8107 | sysfs_notify_dirent_safe(mddev->sysfs_state); |
8108 | if (!mddev->has_superblocks) | ||
8109 | return true; | ||
8084 | wait_event(mddev->sb_wait, | 8110 | wait_event(mddev->sb_wait, |
8085 | !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || | 8111 | !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) || |
8086 | mddev->suspended); | 8112 | mddev->suspended); |
@@ -8543,6 +8569,19 @@ void md_do_sync(struct md_thread *thread) | |||
8543 | set_mask_bits(&mddev->sb_flags, 0, | 8569 | set_mask_bits(&mddev->sb_flags, 0, |
8544 | BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); | 8570 | BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); |
8545 | 8571 | ||
8572 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && | ||
8573 | !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && | ||
8574 | mddev->delta_disks > 0 && | ||
8575 | mddev->pers->finish_reshape && | ||
8576 | mddev->pers->size && | ||
8577 | mddev->queue) { | ||
8578 | mddev_lock_nointr(mddev); | ||
8579 | md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); | ||
8580 | mddev_unlock(mddev); | ||
8581 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
8582 | revalidate_disk(mddev->gendisk); | ||
8583 | } | ||
8584 | |||
8546 | spin_lock(&mddev->lock); | 8585 | spin_lock(&mddev->lock); |
8547 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | 8586 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
8548 | /* We completed so min/max setting can be forgotten if used. */ | 8587 | /* We completed so min/max setting can be forgotten if used. */ |
@@ -8569,6 +8608,10 @@ static int remove_and_add_spares(struct mddev *mddev, | |||
8569 | int removed = 0; | 8608 | int removed = 0; |
8570 | bool remove_some = false; | 8609 | bool remove_some = false; |
8571 | 8610 | ||
8611 | if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) | ||
8612 | /* Mustn't remove devices when resync thread is running */ | ||
8613 | return 0; | ||
8614 | |||
8572 | rdev_for_each(rdev, mddev) { | 8615 | rdev_for_each(rdev, mddev) { |
8573 | if ((this == NULL || rdev == this) && | 8616 | if ((this == NULL || rdev == this) && |
8574 | rdev->raid_disk >= 0 && | 8617 | rdev->raid_disk >= 0 && |
diff --git a/drivers/md/md.h b/drivers/md/md.h index 58cd20a5e85e..fbc925cce810 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -468,6 +468,8 @@ struct mddev { | |||
468 | void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); | 468 | void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); |
469 | struct md_cluster_info *cluster_info; | 469 | struct md_cluster_info *cluster_info; |
470 | unsigned int good_device_nr; /* good device num within cluster raid */ | 470 | unsigned int good_device_nr; /* good device num within cluster raid */ |
471 | |||
472 | bool has_superblocks:1; | ||
471 | }; | 473 | }; |
472 | 474 | ||
473 | enum recovery_flags { | 475 | enum recovery_flags { |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index b2eae332e1a2..fe872dc6712e 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1108,7 +1108,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, | |||
1108 | 1108 | ||
1109 | bio_copy_data(behind_bio, bio); | 1109 | bio_copy_data(behind_bio, bio); |
1110 | skip_copy: | 1110 | skip_copy: |
1111 | r1_bio->behind_master_bio = behind_bio;; | 1111 | r1_bio->behind_master_bio = behind_bio; |
1112 | set_bit(R1BIO_BehindIO, &r1_bio->state); | 1112 | set_bit(R1BIO_BehindIO, &r1_bio->state); |
1113 | 1113 | ||
1114 | return; | 1114 | return; |
@@ -1809,6 +1809,17 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
1809 | struct md_rdev *repl = | 1809 | struct md_rdev *repl = |
1810 | conf->mirrors[conf->raid_disks + number].rdev; | 1810 | conf->mirrors[conf->raid_disks + number].rdev; |
1811 | freeze_array(conf, 0); | 1811 | freeze_array(conf, 0); |
1812 | if (atomic_read(&repl->nr_pending)) { | ||
1813 | /* It means that some queued IO of retry_list | ||
1814 | * hold repl. Thus, we cannot set replacement | ||
1815 | * as NULL, avoiding rdev NULL pointer | ||
1816 | * dereference in sync_request_write and | ||
1817 | * handle_write_finished. | ||
1818 | */ | ||
1819 | err = -EBUSY; | ||
1820 | unfreeze_array(conf); | ||
1821 | goto abort; | ||
1822 | } | ||
1812 | clear_bit(Replacement, &repl->flags); | 1823 | clear_bit(Replacement, &repl->flags); |
1813 | p->rdev = repl; | 1824 | p->rdev = repl; |
1814 | conf->mirrors[conf->raid_disks + number].rdev = NULL; | 1825 | conf->mirrors[conf->raid_disks + number].rdev = NULL; |
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index c7294e7557e0..eb84bc68e2fd 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h | |||
@@ -26,6 +26,18 @@ | |||
26 | #define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - ilog2(sizeof(atomic_t))) | 26 | #define BARRIER_BUCKETS_NR_BITS (PAGE_SHIFT - ilog2(sizeof(atomic_t))) |
27 | #define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS) | 27 | #define BARRIER_BUCKETS_NR (1<<BARRIER_BUCKETS_NR_BITS) |
28 | 28 | ||
29 | /* Note: raid1_info.rdev can be set to NULL asynchronously by raid1_remove_disk. | ||
30 | * There are three safe ways to access raid1_info.rdev. | ||
31 | * 1/ when holding mddev->reconfig_mutex | ||
32 | * 2/ when resync/recovery is known to be happening - i.e. in code that is | ||
33 | * called as part of performing resync/recovery. | ||
34 | * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer | ||
35 | * and if it is non-NULL, increment rdev->nr_pending before dropping the | ||
36 | * RCU lock. | ||
37 | * When .rdev is set to NULL, the nr_pending count checked again and if it has | ||
38 | * been incremented, the pointer is put back in .rdev. | ||
39 | */ | ||
40 | |||
29 | struct raid1_info { | 41 | struct raid1_info { |
30 | struct md_rdev *rdev; | 42 | struct md_rdev *rdev; |
31 | sector_t head_position; | 43 | sector_t head_position; |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 99c9207899a7..c5e6c60fc0d4 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -141,7 +141,7 @@ static void r10bio_pool_free(void *r10_bio, void *data) | |||
141 | #define RESYNC_WINDOW (1024*1024) | 141 | #define RESYNC_WINDOW (1024*1024) |
142 | /* maximum number of concurrent requests, memory permitting */ | 142 | /* maximum number of concurrent requests, memory permitting */ |
143 | #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) | 143 | #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE) |
144 | #define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW) | 144 | #define CLUSTER_RESYNC_WINDOW (32 * RESYNC_WINDOW) |
145 | #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9) | 145 | #define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9) |
146 | 146 | ||
147 | /* | 147 | /* |
@@ -2655,7 +2655,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
2655 | for (m = 0; m < conf->copies; m++) { | 2655 | for (m = 0; m < conf->copies; m++) { |
2656 | int dev = r10_bio->devs[m].devnum; | 2656 | int dev = r10_bio->devs[m].devnum; |
2657 | rdev = conf->mirrors[dev].rdev; | 2657 | rdev = conf->mirrors[dev].rdev; |
2658 | if (r10_bio->devs[m].bio == NULL) | 2658 | if (r10_bio->devs[m].bio == NULL || |
2659 | r10_bio->devs[m].bio->bi_end_io == NULL) | ||
2659 | continue; | 2660 | continue; |
2660 | if (!r10_bio->devs[m].bio->bi_status) { | 2661 | if (!r10_bio->devs[m].bio->bi_status) { |
2661 | rdev_clear_badblocks( | 2662 | rdev_clear_badblocks( |
@@ -2670,7 +2671,8 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) | |||
2670 | md_error(conf->mddev, rdev); | 2671 | md_error(conf->mddev, rdev); |
2671 | } | 2672 | } |
2672 | rdev = conf->mirrors[dev].replacement; | 2673 | rdev = conf->mirrors[dev].replacement; |
2673 | if (r10_bio->devs[m].repl_bio == NULL) | 2674 | if (r10_bio->devs[m].repl_bio == NULL || |
2675 | r10_bio->devs[m].repl_bio->bi_end_io == NULL) | ||
2674 | continue; | 2676 | continue; |
2675 | 2677 | ||
2676 | if (!r10_bio->devs[m].repl_bio->bi_status) { | 2678 | if (!r10_bio->devs[m].repl_bio->bi_status) { |
@@ -3782,7 +3784,7 @@ static int raid10_run(struct mddev *mddev) | |||
3782 | if (fc > 1 || fo > 0) { | 3784 | if (fc > 1 || fo > 0) { |
3783 | pr_err("only near layout is supported by clustered" | 3785 | pr_err("only near layout is supported by clustered" |
3784 | " raid10\n"); | 3786 | " raid10\n"); |
3785 | goto out; | 3787 | goto out_free_conf; |
3786 | } | 3788 | } |
3787 | } | 3789 | } |
3788 | 3790 | ||
@@ -4830,17 +4832,11 @@ static void raid10_finish_reshape(struct mddev *mddev) | |||
4830 | return; | 4832 | return; |
4831 | 4833 | ||
4832 | if (mddev->delta_disks > 0) { | 4834 | if (mddev->delta_disks > 0) { |
4833 | sector_t size = raid10_size(mddev, 0, 0); | ||
4834 | md_set_array_sectors(mddev, size); | ||
4835 | if (mddev->recovery_cp > mddev->resync_max_sectors) { | 4835 | if (mddev->recovery_cp > mddev->resync_max_sectors) { |
4836 | mddev->recovery_cp = mddev->resync_max_sectors; | 4836 | mddev->recovery_cp = mddev->resync_max_sectors; |
4837 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 4837 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
4838 | } | 4838 | } |
4839 | mddev->resync_max_sectors = size; | 4839 | mddev->resync_max_sectors = mddev->array_sectors; |
4840 | if (mddev->queue) { | ||
4841 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
4842 | revalidate_disk(mddev->gendisk); | ||
4843 | } | ||
4844 | } else { | 4840 | } else { |
4845 | int d; | 4841 | int d; |
4846 | rcu_read_lock(); | 4842 | rcu_read_lock(); |
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index db2ac22ac1b4..e2e8840de9bf 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h | |||
@@ -2,6 +2,19 @@ | |||
2 | #ifndef _RAID10_H | 2 | #ifndef _RAID10_H |
3 | #define _RAID10_H | 3 | #define _RAID10_H |
4 | 4 | ||
5 | /* Note: raid10_info.rdev can be set to NULL asynchronously by | ||
6 | * raid10_remove_disk. | ||
7 | * There are three safe ways to access raid10_info.rdev. | ||
8 | * 1/ when holding mddev->reconfig_mutex | ||
9 | * 2/ when resync/recovery/reshape is known to be happening - i.e. in code | ||
10 | * that is called as part of performing resync/recovery/reshape. | ||
11 | * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer | ||
12 | * and if it is non-NULL, increment rdev->nr_pending before dropping the | ||
13 | * RCU lock. | ||
14 | * When .rdev is set to NULL, the nr_pending count checked again and if it has | ||
15 | * been incremented, the pointer is put back in .rdev. | ||
16 | */ | ||
17 | |||
5 | struct raid10_info { | 18 | struct raid10_info { |
6 | struct md_rdev *rdev, *replacement; | 19 | struct md_rdev *rdev, *replacement; |
7 | sector_t head_position; | 20 | sector_t head_position; |
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h index 0c76bcedfc1c..a001808a2b77 100644 --- a/drivers/md/raid5-log.h +++ b/drivers/md/raid5-log.h | |||
@@ -44,6 +44,7 @@ extern void ppl_write_stripe_run(struct r5conf *conf); | |||
44 | extern void ppl_stripe_write_finished(struct stripe_head *sh); | 44 | extern void ppl_stripe_write_finished(struct stripe_head *sh); |
45 | extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); | 45 | extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); |
46 | extern void ppl_quiesce(struct r5conf *conf, int quiesce); | 46 | extern void ppl_quiesce(struct r5conf *conf, int quiesce); |
47 | extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); | ||
47 | 48 | ||
48 | static inline bool raid5_has_ppl(struct r5conf *conf) | 49 | static inline bool raid5_has_ppl(struct r5conf *conf) |
49 | { | 50 | { |
@@ -104,7 +105,7 @@ static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio) | |||
104 | if (conf->log) | 105 | if (conf->log) |
105 | ret = r5l_handle_flush_request(conf->log, bio); | 106 | ret = r5l_handle_flush_request(conf->log, bio); |
106 | else if (raid5_has_ppl(conf)) | 107 | else if (raid5_has_ppl(conf)) |
107 | ret = 0; | 108 | ret = ppl_handle_flush_request(conf->log, bio); |
108 | 109 | ||
109 | return ret; | 110 | return ret; |
110 | } | 111 | } |
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 2764c2290062..42890a08375b 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c | |||
@@ -693,6 +693,16 @@ void ppl_quiesce(struct r5conf *conf, int quiesce) | |||
693 | } | 693 | } |
694 | } | 694 | } |
695 | 695 | ||
696 | int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio) | ||
697 | { | ||
698 | if (bio->bi_iter.bi_size == 0) { | ||
699 | bio_endio(bio); | ||
700 | return 0; | ||
701 | } | ||
702 | bio->bi_opf &= ~REQ_PREFLUSH; | ||
703 | return -EAGAIN; | ||
704 | } | ||
705 | |||
696 | void ppl_stripe_write_finished(struct stripe_head *sh) | 706 | void ppl_stripe_write_finished(struct stripe_head *sh) |
697 | { | 707 | { |
698 | struct ppl_io_unit *io; | 708 | struct ppl_io_unit *io; |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 50d01144b805..b5d2601483e3 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -2196,15 +2196,16 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) | |||
2196 | static int grow_stripes(struct r5conf *conf, int num) | 2196 | static int grow_stripes(struct r5conf *conf, int num) |
2197 | { | 2197 | { |
2198 | struct kmem_cache *sc; | 2198 | struct kmem_cache *sc; |
2199 | size_t namelen = sizeof(conf->cache_name[0]); | ||
2199 | int devs = max(conf->raid_disks, conf->previous_raid_disks); | 2200 | int devs = max(conf->raid_disks, conf->previous_raid_disks); |
2200 | 2201 | ||
2201 | if (conf->mddev->gendisk) | 2202 | if (conf->mddev->gendisk) |
2202 | sprintf(conf->cache_name[0], | 2203 | snprintf(conf->cache_name[0], namelen, |
2203 | "raid%d-%s", conf->level, mdname(conf->mddev)); | 2204 | "raid%d-%s", conf->level, mdname(conf->mddev)); |
2204 | else | 2205 | else |
2205 | sprintf(conf->cache_name[0], | 2206 | snprintf(conf->cache_name[0], namelen, |
2206 | "raid%d-%p", conf->level, conf->mddev); | 2207 | "raid%d-%p", conf->level, conf->mddev); |
2207 | sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); | 2208 | snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]); |
2208 | 2209 | ||
2209 | conf->active_name = 0; | 2210 | conf->active_name = 0; |
2210 | sc = kmem_cache_create(conf->cache_name[conf->active_name], | 2211 | sc = kmem_cache_create(conf->cache_name[conf->active_name], |
@@ -6764,9 +6765,7 @@ static void free_conf(struct r5conf *conf) | |||
6764 | 6765 | ||
6765 | log_exit(conf); | 6766 | log_exit(conf); |
6766 | 6767 | ||
6767 | if (conf->shrinker.nr_deferred) | 6768 | unregister_shrinker(&conf->shrinker); |
6768 | unregister_shrinker(&conf->shrinker); | ||
6769 | |||
6770 | free_thread_groups(conf); | 6769 | free_thread_groups(conf); |
6771 | shrink_stripes(conf); | 6770 | shrink_stripes(conf); |
6772 | raid5_free_percpu(conf); | 6771 | raid5_free_percpu(conf); |
@@ -8001,13 +8000,7 @@ static void raid5_finish_reshape(struct mddev *mddev) | |||
8001 | 8000 | ||
8002 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { | 8001 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
8003 | 8002 | ||
8004 | if (mddev->delta_disks > 0) { | 8003 | if (mddev->delta_disks <= 0) { |
8005 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); | ||
8006 | if (mddev->queue) { | ||
8007 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
8008 | revalidate_disk(mddev->gendisk); | ||
8009 | } | ||
8010 | } else { | ||
8011 | int d; | 8004 | int d; |
8012 | spin_lock_irq(&conf->device_lock); | 8005 | spin_lock_irq(&conf->device_lock); |
8013 | mddev->degraded = raid5_calc_degraded(conf); | 8006 | mddev->degraded = raid5_calc_degraded(conf); |
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2e6123825095..3f8da26032ac 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -450,6 +450,18 @@ enum { | |||
450 | * HANDLE gets cleared if stripe_handle leaves nothing locked. | 450 | * HANDLE gets cleared if stripe_handle leaves nothing locked. |
451 | */ | 451 | */ |
452 | 452 | ||
453 | /* Note: disk_info.rdev can be set to NULL asynchronously by raid5_remove_disk. | ||
454 | * There are three safe ways to access disk_info.rdev. | ||
455 | * 1/ when holding mddev->reconfig_mutex | ||
456 | * 2/ when resync/recovery/reshape is known to be happening - i.e. in code that | ||
457 | * is called as part of performing resync/recovery/reshape. | ||
458 | * 3/ while holding rcu_read_lock(), use rcu_dereference to get the pointer | ||
459 | * and if it is non-NULL, increment rdev->nr_pending before dropping the RCU | ||
460 | * lock. | ||
461 | * When .rdev is set to NULL, the nr_pending count checked again and if | ||
462 | * it has been incremented, the pointer is put back in .rdev. | ||
463 | */ | ||
464 | |||
453 | struct disk_info { | 465 | struct disk_info { |
454 | struct md_rdev *rdev, *replacement; | 466 | struct md_rdev *rdev, *replacement; |
455 | struct page *extra_page; /* extra page to use in prexor */ | 467 | struct page *extra_page; /* extra page to use in prexor */ |
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 145e12bfb819..86c1a190d946 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig | |||
@@ -147,9 +147,11 @@ config DVB_CORE | |||
147 | config DVB_MMAP | 147 | config DVB_MMAP |
148 | bool "Enable DVB memory-mapped API (EXPERIMENTAL)" | 148 | bool "Enable DVB memory-mapped API (EXPERIMENTAL)" |
149 | depends on DVB_CORE | 149 | depends on DVB_CORE |
150 | depends on VIDEO_V4L2=y || VIDEO_V4L2=DVB_CORE | ||
151 | select VIDEOBUF2_VMALLOC | ||
150 | default n | 152 | default n |
151 | help | 153 | help |
152 | This option enables DVB experimental memory-mapped API, with | 154 | This option enables DVB experimental memory-mapped API, which |
153 | reduces the number of context switches to read DVB buffers, as | 155 | reduces the number of context switches to read DVB buffers, as |
154 | the buffers can use mmap() syscalls. | 156 | the buffers can use mmap() syscalls. |
155 | 157 | ||
diff --git a/drivers/media/common/videobuf2/Kconfig b/drivers/media/common/videobuf2/Kconfig index 5df05250de94..17c32ea58395 100644 --- a/drivers/media/common/videobuf2/Kconfig +++ b/drivers/media/common/videobuf2/Kconfig | |||
@@ -3,6 +3,9 @@ config VIDEOBUF2_CORE | |||
3 | select DMA_SHARED_BUFFER | 3 | select DMA_SHARED_BUFFER |
4 | tristate | 4 | tristate |
5 | 5 | ||
6 | config VIDEOBUF2_V4L2 | ||
7 | tristate | ||
8 | |||
6 | config VIDEOBUF2_MEMOPS | 9 | config VIDEOBUF2_MEMOPS |
7 | tristate | 10 | tristate |
8 | select FRAME_VECTOR | 11 | select FRAME_VECTOR |
diff --git a/drivers/media/common/videobuf2/Makefile b/drivers/media/common/videobuf2/Makefile index 19de5ccda20b..77bebe8b202f 100644 --- a/drivers/media/common/videobuf2/Makefile +++ b/drivers/media/common/videobuf2/Makefile | |||
@@ -1,5 +1,12 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | videobuf2-common-objs := videobuf2-core.o | ||
1 | 3 | ||
2 | obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o videobuf2-v4l2.o | 4 | ifeq ($(CONFIG_TRACEPOINTS),y) |
5 | videobuf2-common-objs += vb2-trace.o | ||
6 | endif | ||
7 | |||
8 | obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-common.o | ||
9 | obj-$(CONFIG_VIDEOBUF2_V4L2) += videobuf2-v4l2.o | ||
3 | obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o | 10 | obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o |
4 | obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o | 11 | obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o |
5 | obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o | 12 | obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o |
diff --git a/drivers/media/v4l2-core/vb2-trace.c b/drivers/media/common/videobuf2/vb2-trace.c index 4c0f39d271f0..4c0f39d271f0 100644 --- a/drivers/media/v4l2-core/vb2-trace.c +++ b/drivers/media/common/videobuf2/vb2-trace.c | |||
diff --git a/drivers/media/dvb-core/Makefile b/drivers/media/dvb-core/Makefile index 3a105d82019a..62b028ded9f7 100644 --- a/drivers/media/dvb-core/Makefile +++ b/drivers/media/dvb-core/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | # | 4 | # |
5 | 5 | ||
6 | dvb-net-$(CONFIG_DVB_NET) := dvb_net.o | 6 | dvb-net-$(CONFIG_DVB_NET) := dvb_net.o |
7 | dvb-vb2-$(CONFIG_DVB_MMSP) := dvb_vb2.o | 7 | dvb-vb2-$(CONFIG_DVB_MMAP) := dvb_vb2.o |
8 | 8 | ||
9 | dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o \ | 9 | dvb-core-objs := dvbdev.o dmxdev.o dvb_demux.o \ |
10 | dvb_ca_en50221.o dvb_frontend.o \ | 10 | dvb_ca_en50221.o dvb_frontend.o \ |
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index 6d53af00190e..61a750fae465 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c | |||
@@ -128,11 +128,7 @@ static int dvb_dvr_open(struct inode *inode, struct file *file) | |||
128 | struct dvb_device *dvbdev = file->private_data; | 128 | struct dvb_device *dvbdev = file->private_data; |
129 | struct dmxdev *dmxdev = dvbdev->priv; | 129 | struct dmxdev *dmxdev = dvbdev->priv; |
130 | struct dmx_frontend *front; | 130 | struct dmx_frontend *front; |
131 | #ifndef DVB_MMAP | ||
132 | bool need_ringbuffer = false; | 131 | bool need_ringbuffer = false; |
133 | #else | ||
134 | const bool need_ringbuffer = true; | ||
135 | #endif | ||
136 | 132 | ||
137 | dprintk("%s\n", __func__); | 133 | dprintk("%s\n", __func__); |
138 | 134 | ||
@@ -144,17 +140,31 @@ static int dvb_dvr_open(struct inode *inode, struct file *file) | |||
144 | return -ENODEV; | 140 | return -ENODEV; |
145 | } | 141 | } |
146 | 142 | ||
147 | #ifndef DVB_MMAP | 143 | dmxdev->may_do_mmap = 0; |
144 | |||
145 | /* | ||
146 | * The logic here is a little tricky due to the ifdef. | ||
147 | * | ||
148 | * The ringbuffer is used for both read and mmap. | ||
149 | * | ||
150 | * It is not needed, however, on two situations: | ||
151 | * - Write devices (access with O_WRONLY); | ||
152 | * - For duplex device nodes, opened with O_RDWR. | ||
153 | */ | ||
154 | |||
148 | if ((file->f_flags & O_ACCMODE) == O_RDONLY) | 155 | if ((file->f_flags & O_ACCMODE) == O_RDONLY) |
149 | need_ringbuffer = true; | 156 | need_ringbuffer = true; |
150 | #else | 157 | else if ((file->f_flags & O_ACCMODE) == O_RDWR) { |
151 | if ((file->f_flags & O_ACCMODE) == O_RDWR) { | ||
152 | if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) { | 158 | if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) { |
159 | #ifdef CONFIG_DVB_MMAP | ||
160 | dmxdev->may_do_mmap = 1; | ||
161 | need_ringbuffer = true; | ||
162 | #else | ||
153 | mutex_unlock(&dmxdev->mutex); | 163 | mutex_unlock(&dmxdev->mutex); |
154 | return -EOPNOTSUPP; | 164 | return -EOPNOTSUPP; |
165 | #endif | ||
155 | } | 166 | } |
156 | } | 167 | } |
157 | #endif | ||
158 | 168 | ||
159 | if (need_ringbuffer) { | 169 | if (need_ringbuffer) { |
160 | void *mem; | 170 | void *mem; |
@@ -169,8 +179,9 @@ static int dvb_dvr_open(struct inode *inode, struct file *file) | |||
169 | return -ENOMEM; | 179 | return -ENOMEM; |
170 | } | 180 | } |
171 | dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE); | 181 | dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE); |
172 | dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr", | 182 | if (dmxdev->may_do_mmap) |
173 | file->f_flags & O_NONBLOCK); | 183 | dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr", |
184 | file->f_flags & O_NONBLOCK); | ||
174 | dvbdev->readers--; | 185 | dvbdev->readers--; |
175 | } | 186 | } |
176 | 187 | ||
@@ -200,11 +211,6 @@ static int dvb_dvr_release(struct inode *inode, struct file *file) | |||
200 | { | 211 | { |
201 | struct dvb_device *dvbdev = file->private_data; | 212 | struct dvb_device *dvbdev = file->private_data; |
202 | struct dmxdev *dmxdev = dvbdev->priv; | 213 | struct dmxdev *dmxdev = dvbdev->priv; |
203 | #ifndef DVB_MMAP | ||
204 | bool need_ringbuffer = false; | ||
205 | #else | ||
206 | const bool need_ringbuffer = true; | ||
207 | #endif | ||
208 | 214 | ||
209 | mutex_lock(&dmxdev->mutex); | 215 | mutex_lock(&dmxdev->mutex); |
210 | 216 | ||
@@ -213,15 +219,14 @@ static int dvb_dvr_release(struct inode *inode, struct file *file) | |||
213 | dmxdev->demux->connect_frontend(dmxdev->demux, | 219 | dmxdev->demux->connect_frontend(dmxdev->demux, |
214 | dmxdev->dvr_orig_fe); | 220 | dmxdev->dvr_orig_fe); |
215 | } | 221 | } |
216 | #ifndef DVB_MMAP | ||
217 | if ((file->f_flags & O_ACCMODE) == O_RDONLY) | ||
218 | need_ringbuffer = true; | ||
219 | #endif | ||
220 | 222 | ||
221 | if (need_ringbuffer) { | 223 | if (((file->f_flags & O_ACCMODE) == O_RDONLY) || |
222 | if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) | 224 | dmxdev->may_do_mmap) { |
223 | dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx); | 225 | if (dmxdev->may_do_mmap) { |
224 | dvb_vb2_release(&dmxdev->dvr_vb2_ctx); | 226 | if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) |
227 | dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx); | ||
228 | dvb_vb2_release(&dmxdev->dvr_vb2_ctx); | ||
229 | } | ||
225 | dvbdev->readers++; | 230 | dvbdev->readers++; |
226 | if (dmxdev->dvr_buffer.data) { | 231 | if (dmxdev->dvr_buffer.data) { |
227 | void *mem = dmxdev->dvr_buffer.data; | 232 | void *mem = dmxdev->dvr_buffer.data; |
@@ -380,7 +385,8 @@ static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter) | |||
380 | 385 | ||
381 | static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, | 386 | static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, |
382 | const u8 *buffer2, size_t buffer2_len, | 387 | const u8 *buffer2, size_t buffer2_len, |
383 | struct dmx_section_filter *filter) | 388 | struct dmx_section_filter *filter, |
389 | u32 *buffer_flags) | ||
384 | { | 390 | { |
385 | struct dmxdev_filter *dmxdevfilter = filter->priv; | 391 | struct dmxdev_filter *dmxdevfilter = filter->priv; |
386 | int ret; | 392 | int ret; |
@@ -399,10 +405,12 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, | |||
399 | dprintk("section callback %*ph\n", 6, buffer1); | 405 | dprintk("section callback %*ph\n", 6, buffer1); |
400 | if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) { | 406 | if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) { |
401 | ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, | 407 | ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, |
402 | buffer1, buffer1_len); | 408 | buffer1, buffer1_len, |
409 | buffer_flags); | ||
403 | if (ret == buffer1_len) | 410 | if (ret == buffer1_len) |
404 | ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, | 411 | ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, |
405 | buffer2, buffer2_len); | 412 | buffer2, buffer2_len, |
413 | buffer_flags); | ||
406 | } else { | 414 | } else { |
407 | ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, | 415 | ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, |
408 | buffer1, buffer1_len); | 416 | buffer1, buffer1_len); |
@@ -422,11 +430,12 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, | |||
422 | 430 | ||
423 | static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, | 431 | static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, |
424 | const u8 *buffer2, size_t buffer2_len, | 432 | const u8 *buffer2, size_t buffer2_len, |
425 | struct dmx_ts_feed *feed) | 433 | struct dmx_ts_feed *feed, |
434 | u32 *buffer_flags) | ||
426 | { | 435 | { |
427 | struct dmxdev_filter *dmxdevfilter = feed->priv; | 436 | struct dmxdev_filter *dmxdevfilter = feed->priv; |
428 | struct dvb_ringbuffer *buffer; | 437 | struct dvb_ringbuffer *buffer; |
429 | #ifdef DVB_MMAP | 438 | #ifdef CONFIG_DVB_MMAP |
430 | struct dvb_vb2_ctx *ctx; | 439 | struct dvb_vb2_ctx *ctx; |
431 | #endif | 440 | #endif |
432 | int ret; | 441 | int ret; |
@@ -440,20 +449,22 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, | |||
440 | if (dmxdevfilter->params.pes.output == DMX_OUT_TAP || | 449 | if (dmxdevfilter->params.pes.output == DMX_OUT_TAP || |
441 | dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) { | 450 | dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) { |
442 | buffer = &dmxdevfilter->buffer; | 451 | buffer = &dmxdevfilter->buffer; |
443 | #ifdef DVB_MMAP | 452 | #ifdef CONFIG_DVB_MMAP |
444 | ctx = &dmxdevfilter->vb2_ctx; | 453 | ctx = &dmxdevfilter->vb2_ctx; |
445 | #endif | 454 | #endif |
446 | } else { | 455 | } else { |
447 | buffer = &dmxdevfilter->dev->dvr_buffer; | 456 | buffer = &dmxdevfilter->dev->dvr_buffer; |
448 | #ifdef DVB_MMAP | 457 | #ifdef CONFIG_DVB_MMAP |
449 | ctx = &dmxdevfilter->dev->dvr_vb2_ctx; | 458 | ctx = &dmxdevfilter->dev->dvr_vb2_ctx; |
450 | #endif | 459 | #endif |
451 | } | 460 | } |
452 | 461 | ||
453 | if (dvb_vb2_is_streaming(ctx)) { | 462 | if (dvb_vb2_is_streaming(ctx)) { |
454 | ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len); | 463 | ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len, |
464 | buffer_flags); | ||
455 | if (ret == buffer1_len) | 465 | if (ret == buffer1_len) |
456 | ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len); | 466 | ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len, |
467 | buffer_flags); | ||
457 | } else { | 468 | } else { |
458 | if (buffer->error) { | 469 | if (buffer->error) { |
459 | spin_unlock(&dmxdevfilter->dev->lock); | 470 | spin_unlock(&dmxdevfilter->dev->lock); |
@@ -802,6 +813,12 @@ static int dvb_demux_open(struct inode *inode, struct file *file) | |||
802 | mutex_init(&dmxdevfilter->mutex); | 813 | mutex_init(&dmxdevfilter->mutex); |
803 | file->private_data = dmxdevfilter; | 814 | file->private_data = dmxdevfilter; |
804 | 815 | ||
816 | #ifdef CONFIG_DVB_MMAP | ||
817 | dmxdev->may_do_mmap = 1; | ||
818 | #else | ||
819 | dmxdev->may_do_mmap = 0; | ||
820 | #endif | ||
821 | |||
805 | dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); | 822 | dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); |
806 | dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter", | 823 | dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter", |
807 | file->f_flags & O_NONBLOCK); | 824 | file->f_flags & O_NONBLOCK); |
@@ -1111,7 +1128,7 @@ static int dvb_demux_do_ioctl(struct file *file, | |||
1111 | mutex_unlock(&dmxdevfilter->mutex); | 1128 | mutex_unlock(&dmxdevfilter->mutex); |
1112 | break; | 1129 | break; |
1113 | 1130 | ||
1114 | #ifdef DVB_MMAP | 1131 | #ifdef CONFIG_DVB_MMAP |
1115 | case DMX_REQBUFS: | 1132 | case DMX_REQBUFS: |
1116 | if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { | 1133 | if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { |
1117 | mutex_unlock(&dmxdev->mutex); | 1134 | mutex_unlock(&dmxdev->mutex); |
@@ -1160,7 +1177,7 @@ static int dvb_demux_do_ioctl(struct file *file, | |||
1160 | break; | 1177 | break; |
1161 | #endif | 1178 | #endif |
1162 | default: | 1179 | default: |
1163 | ret = -EINVAL; | 1180 | ret = -ENOTTY; |
1164 | break; | 1181 | break; |
1165 | } | 1182 | } |
1166 | mutex_unlock(&dmxdev->mutex); | 1183 | mutex_unlock(&dmxdev->mutex); |
@@ -1199,13 +1216,16 @@ static __poll_t dvb_demux_poll(struct file *file, poll_table *wait) | |||
1199 | return mask; | 1216 | return mask; |
1200 | } | 1217 | } |
1201 | 1218 | ||
1202 | #ifdef DVB_MMAP | 1219 | #ifdef CONFIG_DVB_MMAP |
1203 | static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma) | 1220 | static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma) |
1204 | { | 1221 | { |
1205 | struct dmxdev_filter *dmxdevfilter = file->private_data; | 1222 | struct dmxdev_filter *dmxdevfilter = file->private_data; |
1206 | struct dmxdev *dmxdev = dmxdevfilter->dev; | 1223 | struct dmxdev *dmxdev = dmxdevfilter->dev; |
1207 | int ret; | 1224 | int ret; |
1208 | 1225 | ||
1226 | if (!dmxdev->may_do_mmap) | ||
1227 | return -ENOTTY; | ||
1228 | |||
1209 | if (mutex_lock_interruptible(&dmxdev->mutex)) | 1229 | if (mutex_lock_interruptible(&dmxdev->mutex)) |
1210 | return -ERESTARTSYS; | 1230 | return -ERESTARTSYS; |
1211 | 1231 | ||
@@ -1249,7 +1269,7 @@ static const struct file_operations dvb_demux_fops = { | |||
1249 | .release = dvb_demux_release, | 1269 | .release = dvb_demux_release, |
1250 | .poll = dvb_demux_poll, | 1270 | .poll = dvb_demux_poll, |
1251 | .llseek = default_llseek, | 1271 | .llseek = default_llseek, |
1252 | #ifdef DVB_MMAP | 1272 | #ifdef CONFIG_DVB_MMAP |
1253 | .mmap = dvb_demux_mmap, | 1273 | .mmap = dvb_demux_mmap, |
1254 | #endif | 1274 | #endif |
1255 | }; | 1275 | }; |
@@ -1280,7 +1300,7 @@ static int dvb_dvr_do_ioctl(struct file *file, | |||
1280 | ret = dvb_dvr_set_buffer_size(dmxdev, arg); | 1300 | ret = dvb_dvr_set_buffer_size(dmxdev, arg); |
1281 | break; | 1301 | break; |
1282 | 1302 | ||
1283 | #ifdef DVB_MMAP | 1303 | #ifdef CONFIG_DVB_MMAP |
1284 | case DMX_REQBUFS: | 1304 | case DMX_REQBUFS: |
1285 | ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg); | 1305 | ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg); |
1286 | break; | 1306 | break; |
@@ -1304,7 +1324,7 @@ static int dvb_dvr_do_ioctl(struct file *file, | |||
1304 | break; | 1324 | break; |
1305 | #endif | 1325 | #endif |
1306 | default: | 1326 | default: |
1307 | ret = -EINVAL; | 1327 | ret = -ENOTTY; |
1308 | break; | 1328 | break; |
1309 | } | 1329 | } |
1310 | mutex_unlock(&dmxdev->mutex); | 1330 | mutex_unlock(&dmxdev->mutex); |
@@ -1322,11 +1342,6 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait) | |||
1322 | struct dvb_device *dvbdev = file->private_data; | 1342 | struct dvb_device *dvbdev = file->private_data; |
1323 | struct dmxdev *dmxdev = dvbdev->priv; | 1343 | struct dmxdev *dmxdev = dvbdev->priv; |
1324 | __poll_t mask = 0; | 1344 | __poll_t mask = 0; |
1325 | #ifndef DVB_MMAP | ||
1326 | bool need_ringbuffer = false; | ||
1327 | #else | ||
1328 | const bool need_ringbuffer = true; | ||
1329 | #endif | ||
1330 | 1345 | ||
1331 | dprintk("%s\n", __func__); | 1346 | dprintk("%s\n", __func__); |
1332 | 1347 | ||
@@ -1337,11 +1352,8 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait) | |||
1337 | 1352 | ||
1338 | poll_wait(file, &dmxdev->dvr_buffer.queue, wait); | 1353 | poll_wait(file, &dmxdev->dvr_buffer.queue, wait); |
1339 | 1354 | ||
1340 | #ifndef DVB_MMAP | 1355 | if (((file->f_flags & O_ACCMODE) == O_RDONLY) || |
1341 | if ((file->f_flags & O_ACCMODE) == O_RDONLY) | 1356 | dmxdev->may_do_mmap) { |
1342 | need_ringbuffer = true; | ||
1343 | #endif | ||
1344 | if (need_ringbuffer) { | ||
1345 | if (dmxdev->dvr_buffer.error) | 1357 | if (dmxdev->dvr_buffer.error) |
1346 | mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR); | 1358 | mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR); |
1347 | 1359 | ||
@@ -1353,13 +1365,16 @@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait) | |||
1353 | return mask; | 1365 | return mask; |
1354 | } | 1366 | } |
1355 | 1367 | ||
1356 | #ifdef DVB_MMAP | 1368 | #ifdef CONFIG_DVB_MMAP |
1357 | static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma) | 1369 | static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma) |
1358 | { | 1370 | { |
1359 | struct dvb_device *dvbdev = file->private_data; | 1371 | struct dvb_device *dvbdev = file->private_data; |
1360 | struct dmxdev *dmxdev = dvbdev->priv; | 1372 | struct dmxdev *dmxdev = dvbdev->priv; |
1361 | int ret; | 1373 | int ret; |
1362 | 1374 | ||
1375 | if (!dmxdev->may_do_mmap) | ||
1376 | return -ENOTTY; | ||
1377 | |||
1363 | if (dmxdev->exit) | 1378 | if (dmxdev->exit) |
1364 | return -ENODEV; | 1379 | return -ENODEV; |
1365 | 1380 | ||
@@ -1381,7 +1396,7 @@ static const struct file_operations dvb_dvr_fops = { | |||
1381 | .release = dvb_dvr_release, | 1396 | .release = dvb_dvr_release, |
1382 | .poll = dvb_dvr_poll, | 1397 | .poll = dvb_dvr_poll, |
1383 | .llseek = default_llseek, | 1398 | .llseek = default_llseek, |
1384 | #ifdef DVB_MMAP | 1399 | #ifdef CONFIG_DVB_MMAP |
1385 | .mmap = dvb_dvr_mmap, | 1400 | .mmap = dvb_dvr_mmap, |
1386 | #endif | 1401 | #endif |
1387 | }; | 1402 | }; |
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index 210eed0269b0..f45091246bdc 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c | |||
@@ -55,6 +55,17 @@ MODULE_PARM_DESC(dvb_demux_feed_err_pkts, | |||
55 | dprintk(x); \ | 55 | dprintk(x); \ |
56 | } while (0) | 56 | } while (0) |
57 | 57 | ||
58 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | ||
59 | # define dprintk_sect_loss(x...) dprintk(x) | ||
60 | #else | ||
61 | # define dprintk_sect_loss(x...) | ||
62 | #endif | ||
63 | |||
64 | #define set_buf_flags(__feed, __flag) \ | ||
65 | do { \ | ||
66 | (__feed)->buffer_flags |= (__flag); \ | ||
67 | } while (0) | ||
68 | |||
58 | /****************************************************************************** | 69 | /****************************************************************************** |
59 | * static inlined helper functions | 70 | * static inlined helper functions |
60 | ******************************************************************************/ | 71 | ******************************************************************************/ |
@@ -104,31 +115,30 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed, | |||
104 | { | 115 | { |
105 | int count = payload(buf); | 116 | int count = payload(buf); |
106 | int p; | 117 | int p; |
107 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | ||
108 | int ccok; | 118 | int ccok; |
109 | u8 cc; | 119 | u8 cc; |
110 | #endif | ||
111 | 120 | ||
112 | if (count == 0) | 121 | if (count == 0) |
113 | return -1; | 122 | return -1; |
114 | 123 | ||
115 | p = 188 - count; | 124 | p = 188 - count; |
116 | 125 | ||
117 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | ||
118 | cc = buf[3] & 0x0f; | 126 | cc = buf[3] & 0x0f; |
119 | ccok = ((feed->cc + 1) & 0x0f) == cc; | 127 | ccok = ((feed->cc + 1) & 0x0f) == cc; |
120 | feed->cc = cc; | 128 | feed->cc = cc; |
121 | if (!ccok) | 129 | if (!ccok) { |
122 | dprintk("missed packet: %d instead of %d!\n", | 130 | set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); |
123 | cc, (feed->cc + 1) & 0x0f); | 131 | dprintk_sect_loss("missed packet: %d instead of %d!\n", |
124 | #endif | 132 | cc, (feed->cc + 1) & 0x0f); |
133 | } | ||
125 | 134 | ||
126 | if (buf[1] & 0x40) // PUSI ? | 135 | if (buf[1] & 0x40) // PUSI ? |
127 | feed->peslen = 0xfffa; | 136 | feed->peslen = 0xfffa; |
128 | 137 | ||
129 | feed->peslen += count; | 138 | feed->peslen += count; |
130 | 139 | ||
131 | return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts); | 140 | return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts, |
141 | &feed->buffer_flags); | ||
132 | } | 142 | } |
133 | 143 | ||
134 | static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed, | 144 | static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed, |
@@ -150,7 +160,7 @@ static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed, | |||
150 | return 0; | 160 | return 0; |
151 | 161 | ||
152 | return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen, | 162 | return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen, |
153 | NULL, 0, &f->filter); | 163 | NULL, 0, &f->filter, &feed->buffer_flags); |
154 | } | 164 | } |
155 | 165 | ||
156 | static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed) | 166 | static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed) |
@@ -169,8 +179,10 @@ static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed) | |||
169 | if (sec->check_crc) { | 179 | if (sec->check_crc) { |
170 | section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0); | 180 | section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0); |
171 | if (section_syntax_indicator && | 181 | if (section_syntax_indicator && |
172 | demux->check_crc32(feed, sec->secbuf, sec->seclen)) | 182 | demux->check_crc32(feed, sec->secbuf, sec->seclen)) { |
183 | set_buf_flags(feed, DMX_BUFFER_FLAG_HAD_CRC32_DISCARD); | ||
173 | return -1; | 184 | return -1; |
185 | } | ||
174 | } | 186 | } |
175 | 187 | ||
176 | do { | 188 | do { |
@@ -187,7 +199,6 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed) | |||
187 | { | 199 | { |
188 | struct dmx_section_feed *sec = &feed->feed.sec; | 200 | struct dmx_section_feed *sec = &feed->feed.sec; |
189 | 201 | ||
190 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | ||
191 | if (sec->secbufp < sec->tsfeedp) { | 202 | if (sec->secbufp < sec->tsfeedp) { |
192 | int n = sec->tsfeedp - sec->secbufp; | 203 | int n = sec->tsfeedp - sec->secbufp; |
193 | 204 | ||
@@ -197,12 +208,13 @@ static void dvb_dmx_swfilter_section_new(struct dvb_demux_feed *feed) | |||
197 | * but just first and last. | 208 | * but just first and last. |
198 | */ | 209 | */ |
199 | if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) { | 210 | if (sec->secbuf[0] != 0xff || sec->secbuf[n - 1] != 0xff) { |
200 | dprintk("section ts padding loss: %d/%d\n", | 211 | set_buf_flags(feed, |
201 | n, sec->tsfeedp); | 212 | DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); |
202 | dprintk("pad data: %*ph\n", n, sec->secbuf); | 213 | dprintk_sect_loss("section ts padding loss: %d/%d\n", |
214 | n, sec->tsfeedp); | ||
215 | dprintk_sect_loss("pad data: %*ph\n", n, sec->secbuf); | ||
203 | } | 216 | } |
204 | } | 217 | } |
205 | #endif | ||
206 | 218 | ||
207 | sec->tsfeedp = sec->secbufp = sec->seclen = 0; | 219 | sec->tsfeedp = sec->secbufp = sec->seclen = 0; |
208 | sec->secbuf = sec->secbuf_base; | 220 | sec->secbuf = sec->secbuf_base; |
@@ -237,11 +249,10 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed, | |||
237 | return 0; | 249 | return 0; |
238 | 250 | ||
239 | if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) { | 251 | if (sec->tsfeedp + len > DMX_MAX_SECFEED_SIZE) { |
240 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | 252 | set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); |
241 | dprintk("section buffer full loss: %d/%d\n", | 253 | dprintk_sect_loss("section buffer full loss: %d/%d\n", |
242 | sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE, | 254 | sec->tsfeedp + len - DMX_MAX_SECFEED_SIZE, |
243 | DMX_MAX_SECFEED_SIZE); | 255 | DMX_MAX_SECFEED_SIZE); |
244 | #endif | ||
245 | len = DMX_MAX_SECFEED_SIZE - sec->tsfeedp; | 256 | len = DMX_MAX_SECFEED_SIZE - sec->tsfeedp; |
246 | } | 257 | } |
247 | 258 | ||
@@ -269,12 +280,13 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed, | |||
269 | sec->seclen = seclen; | 280 | sec->seclen = seclen; |
270 | sec->crc_val = ~0; | 281 | sec->crc_val = ~0; |
271 | /* dump [secbuf .. secbuf+seclen) */ | 282 | /* dump [secbuf .. secbuf+seclen) */ |
272 | if (feed->pusi_seen) | 283 | if (feed->pusi_seen) { |
273 | dvb_dmx_swfilter_section_feed(feed); | 284 | dvb_dmx_swfilter_section_feed(feed); |
274 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | 285 | } else { |
275 | else | 286 | set_buf_flags(feed, |
276 | dprintk("pusi not seen, discarding section data\n"); | 287 | DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); |
277 | #endif | 288 | dprintk_sect_loss("pusi not seen, discarding section data\n"); |
289 | } | ||
278 | sec->secbufp += seclen; /* secbufp and secbuf moving together is */ | 290 | sec->secbufp += seclen; /* secbufp and secbuf moving together is */ |
279 | sec->secbuf += seclen; /* redundant but saves pointer arithmetic */ | 291 | sec->secbuf += seclen; /* redundant but saves pointer arithmetic */ |
280 | } | 292 | } |
@@ -307,18 +319,22 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, | |||
307 | } | 319 | } |
308 | 320 | ||
309 | if (!ccok || dc_i) { | 321 | if (!ccok || dc_i) { |
310 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | 322 | if (dc_i) { |
311 | if (dc_i) | 323 | set_buf_flags(feed, |
312 | dprintk("%d frame with disconnect indicator\n", | 324 | DMX_BUFFER_FLAG_DISCONTINUITY_INDICATOR); |
325 | dprintk_sect_loss("%d frame with disconnect indicator\n", | ||
313 | cc); | 326 | cc); |
314 | else | 327 | } else { |
315 | dprintk("discontinuity: %d instead of %d. %d bytes lost\n", | 328 | set_buf_flags(feed, |
329 | DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); | ||
330 | dprintk_sect_loss("discontinuity: %d instead of %d. %d bytes lost\n", | ||
316 | cc, (feed->cc + 1) & 0x0f, count + 4); | 331 | cc, (feed->cc + 1) & 0x0f, count + 4); |
332 | } | ||
317 | /* | 333 | /* |
318 | * those bytes under sume circumstances will again be reported | 334 | * those bytes under some circumstances will again be reported |
319 | * in the following dvb_dmx_swfilter_section_new | 335 | * in the following dvb_dmx_swfilter_section_new |
320 | */ | 336 | */ |
321 | #endif | 337 | |
322 | /* | 338 | /* |
323 | * Discontinuity detected. Reset pusi_seen to | 339 | * Discontinuity detected. Reset pusi_seen to |
324 | * stop feeding of suspicious data until next PUSI=1 arrives | 340 | * stop feeding of suspicious data until next PUSI=1 arrives |
@@ -326,6 +342,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, | |||
326 | * FIXME: does it make sense if the MPEG-TS is the one | 342 | * FIXME: does it make sense if the MPEG-TS is the one |
327 | * reporting discontinuity? | 343 | * reporting discontinuity? |
328 | */ | 344 | */ |
345 | |||
329 | feed->pusi_seen = false; | 346 | feed->pusi_seen = false; |
330 | dvb_dmx_swfilter_section_new(feed); | 347 | dvb_dmx_swfilter_section_new(feed); |
331 | } | 348 | } |
@@ -345,11 +362,11 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, | |||
345 | dvb_dmx_swfilter_section_new(feed); | 362 | dvb_dmx_swfilter_section_new(feed); |
346 | dvb_dmx_swfilter_section_copy_dump(feed, after, | 363 | dvb_dmx_swfilter_section_copy_dump(feed, after, |
347 | after_len); | 364 | after_len); |
365 | } else if (count > 0) { | ||
366 | set_buf_flags(feed, | ||
367 | DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); | ||
368 | dprintk_sect_loss("PUSI=1 but %d bytes lost\n", count); | ||
348 | } | 369 | } |
349 | #ifdef CONFIG_DVB_DEMUX_SECTION_LOSS_LOG | ||
350 | else if (count > 0) | ||
351 | dprintk("PUSI=1 but %d bytes lost\n", count); | ||
352 | #endif | ||
353 | } else { | 370 | } else { |
354 | /* PUSI=0 (is not set), no section boundary */ | 371 | /* PUSI=0 (is not set), no section boundary */ |
355 | dvb_dmx_swfilter_section_copy_dump(feed, &buf[p], count); | 372 | dvb_dmx_swfilter_section_copy_dump(feed, &buf[p], count); |
@@ -369,7 +386,8 @@ static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed, | |||
369 | if (feed->ts_type & TS_PAYLOAD_ONLY) | 386 | if (feed->ts_type & TS_PAYLOAD_ONLY) |
370 | dvb_dmx_swfilter_payload(feed, buf); | 387 | dvb_dmx_swfilter_payload(feed, buf); |
371 | else | 388 | else |
372 | feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts); | 389 | feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, |
390 | &feed->buffer_flags); | ||
373 | } | 391 | } |
374 | /* Used only on full-featured devices */ | 392 | /* Used only on full-featured devices */ |
375 | if (feed->ts_type & TS_DECODER) | 393 | if (feed->ts_type & TS_DECODER) |
@@ -430,6 +448,11 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) | |||
430 | } | 448 | } |
431 | 449 | ||
432 | if (buf[1] & 0x80) { | 450 | if (buf[1] & 0x80) { |
451 | list_for_each_entry(feed, &demux->feed_list, list_head) { | ||
452 | if ((feed->pid != pid) && (feed->pid != 0x2000)) | ||
453 | continue; | ||
454 | set_buf_flags(feed, DMX_BUFFER_FLAG_TEI); | ||
455 | } | ||
433 | dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n", | 456 | dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n", |
434 | pid, buf[1]); | 457 | pid, buf[1]); |
435 | /* data in this packet can't be trusted - drop it unless | 458 | /* data in this packet can't be trusted - drop it unless |
@@ -445,6 +468,13 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) | |||
445 | (demux->cnt_storage[pid] + 1) & 0xf; | 468 | (demux->cnt_storage[pid] + 1) & 0xf; |
446 | 469 | ||
447 | if ((buf[3] & 0xf) != demux->cnt_storage[pid]) { | 470 | if ((buf[3] & 0xf) != demux->cnt_storage[pid]) { |
471 | list_for_each_entry(feed, &demux->feed_list, list_head) { | ||
472 | if ((feed->pid != pid) && (feed->pid != 0x2000)) | ||
473 | continue; | ||
474 | set_buf_flags(feed, | ||
475 | DMX_BUFFER_PKT_COUNTER_MISMATCH); | ||
476 | } | ||
477 | |||
448 | dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n", | 478 | dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n", |
449 | pid, demux->cnt_storage[pid], | 479 | pid, demux->cnt_storage[pid], |
450 | buf[3] & 0xf); | 480 | buf[3] & 0xf); |
@@ -466,7 +496,8 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) | |||
466 | if (feed->pid == pid) | 496 | if (feed->pid == pid) |
467 | dvb_dmx_swfilter_packet_type(feed, buf); | 497 | dvb_dmx_swfilter_packet_type(feed, buf); |
468 | else if (feed->pid == 0x2000) | 498 | else if (feed->pid == 0x2000) |
469 | feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts); | 499 | feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, |
500 | &feed->buffer_flags); | ||
470 | } | 501 | } |
471 | } | 502 | } |
472 | 503 | ||
@@ -585,7 +616,8 @@ void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count) | |||
585 | 616 | ||
586 | spin_lock_irqsave(&demux->lock, flags); | 617 | spin_lock_irqsave(&demux->lock, flags); |
587 | 618 | ||
588 | demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts); | 619 | demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts, |
620 | &demux->feed->buffer_flags); | ||
589 | 621 | ||
590 | spin_unlock_irqrestore(&demux->lock, flags); | 622 | spin_unlock_irqrestore(&demux->lock, flags); |
591 | } | 623 | } |
@@ -785,6 +817,7 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx, | |||
785 | feed->demux = demux; | 817 | feed->demux = demux; |
786 | feed->pid = 0xffff; | 818 | feed->pid = 0xffff; |
787 | feed->peslen = 0xfffa; | 819 | feed->peslen = 0xfffa; |
820 | feed->buffer_flags = 0; | ||
788 | 821 | ||
789 | (*ts_feed) = &feed->feed.ts; | 822 | (*ts_feed) = &feed->feed.ts; |
790 | (*ts_feed)->parent = dmx; | 823 | (*ts_feed)->parent = dmx; |
@@ -1042,6 +1075,7 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux, | |||
1042 | dvbdmxfeed->cb.sec = callback; | 1075 | dvbdmxfeed->cb.sec = callback; |
1043 | dvbdmxfeed->demux = dvbdmx; | 1076 | dvbdmxfeed->demux = dvbdmx; |
1044 | dvbdmxfeed->pid = 0xffff; | 1077 | dvbdmxfeed->pid = 0xffff; |
1078 | dvbdmxfeed->buffer_flags = 0; | ||
1045 | dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base; | 1079 | dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base; |
1046 | dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0; | 1080 | dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0; |
1047 | dvbdmxfeed->feed.sec.tsfeedp = 0; | 1081 | dvbdmxfeed->feed.sec.tsfeedp = 0; |
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index b6c7eec863b9..ba39f9942e1d 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c | |||
@@ -883,7 +883,8 @@ static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len) | |||
883 | 883 | ||
884 | static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len, | 884 | static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len, |
885 | const u8 *buffer2, size_t buffer2_len, | 885 | const u8 *buffer2, size_t buffer2_len, |
886 | struct dmx_ts_feed *feed) | 886 | struct dmx_ts_feed *feed, |
887 | u32 *buffer_flags) | ||
887 | { | 888 | { |
888 | struct net_device *dev = feed->priv; | 889 | struct net_device *dev = feed->priv; |
889 | 890 | ||
@@ -992,7 +993,7 @@ static void dvb_net_sec(struct net_device *dev, | |||
992 | 993 | ||
993 | static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len, | 994 | static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len, |
994 | const u8 *buffer2, size_t buffer2_len, | 995 | const u8 *buffer2, size_t buffer2_len, |
995 | struct dmx_section_filter *filter) | 996 | struct dmx_section_filter *filter, u32 *buffer_flags) |
996 | { | 997 | { |
997 | struct net_device *dev = filter->priv; | 998 | struct net_device *dev = filter->priv; |
998 | 999 | ||
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c index 763145d74e83..b811adf88afa 100644 --- a/drivers/media/dvb-core/dvb_vb2.c +++ b/drivers/media/dvb-core/dvb_vb2.c | |||
@@ -256,7 +256,8 @@ int dvb_vb2_is_streaming(struct dvb_vb2_ctx *ctx) | |||
256 | } | 256 | } |
257 | 257 | ||
258 | int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, | 258 | int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, |
259 | const unsigned char *src, int len) | 259 | const unsigned char *src, int len, |
260 | enum dmx_buffer_flags *buffer_flags) | ||
260 | { | 261 | { |
261 | unsigned long flags = 0; | 262 | unsigned long flags = 0; |
262 | void *vbuf = NULL; | 263 | void *vbuf = NULL; |
@@ -264,15 +265,17 @@ int dvb_vb2_fill_buffer(struct dvb_vb2_ctx *ctx, | |||
264 | unsigned char *psrc = (unsigned char *)src; | 265 | unsigned char *psrc = (unsigned char *)src; |
265 | int ll = 0; | 266 | int ll = 0; |
266 | 267 | ||
267 | dprintk(3, "[%s] %d bytes are rcvd\n", ctx->name, len); | 268 | /* |
268 | if (!src) { | 269 | * normal case: This func is called twice from demux driver |
269 | dprintk(3, "[%s]:NULL pointer src\n", ctx->name); | 270 | * one with valid src pointer, second time with NULL pointer |
270 | /**normal case: This func is called twice from demux driver | 271 | */ |
271 | * once with valid src pointer, second time with NULL pointer | 272 | if (!src || !len) |
272 | */ | ||
273 | return 0; | 273 | return 0; |
274 | } | ||
275 | spin_lock_irqsave(&ctx->slock, flags); | 274 | spin_lock_irqsave(&ctx->slock, flags); |
275 | if (buffer_flags && *buffer_flags) { | ||
276 | ctx->flags |= *buffer_flags; | ||
277 | *buffer_flags = 0; | ||
278 | } | ||
276 | while (todo) { | 279 | while (todo) { |
277 | if (!ctx->buf) { | 280 | if (!ctx->buf) { |
278 | if (list_empty(&ctx->dvb_q)) { | 281 | if (list_empty(&ctx->dvb_q)) { |
@@ -395,6 +398,7 @@ int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) | |||
395 | 398 | ||
396 | int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) | 399 | int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) |
397 | { | 400 | { |
401 | unsigned long flags; | ||
398 | int ret; | 402 | int ret; |
399 | 403 | ||
400 | ret = vb2_core_dqbuf(&ctx->vb_q, &b->index, b, ctx->nonblocking); | 404 | ret = vb2_core_dqbuf(&ctx->vb_q, &b->index, b, ctx->nonblocking); |
@@ -402,7 +406,16 @@ int dvb_vb2_dqbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) | |||
402 | dprintk(1, "[%s] errno=%d\n", ctx->name, ret); | 406 | dprintk(1, "[%s] errno=%d\n", ctx->name, ret); |
403 | return ret; | 407 | return ret; |
404 | } | 408 | } |
405 | dprintk(5, "[%s] index=%d\n", ctx->name, b->index); | 409 | |
410 | spin_lock_irqsave(&ctx->slock, flags); | ||
411 | b->count = ctx->count++; | ||
412 | b->flags = ctx->flags; | ||
413 | ctx->flags = 0; | ||
414 | spin_unlock_irqrestore(&ctx->slock, flags); | ||
415 | |||
416 | dprintk(5, "[%s] index=%d, count=%d, flags=%d\n", | ||
417 | ctx->name, b->index, ctx->count, b->flags); | ||
418 | |||
406 | 419 | ||
407 | return 0; | 420 | return 0; |
408 | } | 421 | } |
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index 50bce68ffd66..65d157fe76d1 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c | |||
@@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan) | |||
1262 | * New users must use I2C client binding directly! | 1262 | * New users must use I2C client binding directly! |
1263 | */ | 1263 | */ |
1264 | struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg, | 1264 | struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg, |
1265 | struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter) | 1265 | struct i2c_adapter *i2c, |
1266 | struct i2c_adapter **tuner_i2c_adapter) | ||
1266 | { | 1267 | { |
1267 | struct i2c_client *client; | 1268 | struct i2c_client *client; |
1268 | struct i2c_board_info board_info; | 1269 | struct i2c_board_info board_info; |
1269 | struct m88ds3103_platform_data pdata; | 1270 | struct m88ds3103_platform_data pdata = {}; |
1270 | 1271 | ||
1271 | pdata.clk = cfg->clock; | 1272 | pdata.clk = cfg->clock; |
1272 | pdata.i2c_wr_max = cfg->i2c_wr_max; | 1273 | pdata.i2c_wr_max = cfg->i2c_wr_max; |
@@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client, | |||
1409 | case M88DS3103_CHIP_ID: | 1410 | case M88DS3103_CHIP_ID: |
1410 | break; | 1411 | break; |
1411 | default: | 1412 | default: |
1413 | ret = -ENODEV; | ||
1414 | dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id); | ||
1412 | goto err_kfree; | 1415 | goto err_kfree; |
1413 | } | 1416 | } |
1414 | 1417 | ||
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c index 3c1851984b90..2476d812f669 100644 --- a/drivers/media/i2c/tvp5150.c +++ b/drivers/media/i2c/tvp5150.c | |||
@@ -505,80 +505,77 @@ static struct i2c_vbi_ram_value vbi_ram_default[] = | |||
505 | /* FIXME: Current api doesn't handle all VBI types, those not | 505 | /* FIXME: Current api doesn't handle all VBI types, those not |
506 | yet supported are placed under #if 0 */ | 506 | yet supported are placed under #if 0 */ |
507 | #if 0 | 507 | #if 0 |
508 | {0x010, /* Teletext, SECAM, WST System A */ | 508 | [0] = {0x010, /* Teletext, SECAM, WST System A */ |
509 | {V4L2_SLICED_TELETEXT_SECAM,6,23,1}, | 509 | {V4L2_SLICED_TELETEXT_SECAM,6,23,1}, |
510 | { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26, | 510 | { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x26, |
511 | 0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 } | 511 | 0xe6, 0xb4, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00 } |
512 | }, | 512 | }, |
513 | #endif | 513 | #endif |
514 | {0x030, /* Teletext, PAL, WST System B */ | 514 | [1] = {0x030, /* Teletext, PAL, WST System B */ |
515 | {V4L2_SLICED_TELETEXT_B,6,22,1}, | 515 | {V4L2_SLICED_TELETEXT_B,6,22,1}, |
516 | { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b, | 516 | { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x2b, |
517 | 0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 } | 517 | 0xa6, 0x72, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00 } |
518 | }, | 518 | }, |
519 | #if 0 | 519 | #if 0 |
520 | {0x050, /* Teletext, PAL, WST System C */ | 520 | [2] = {0x050, /* Teletext, PAL, WST System C */ |
521 | {V4L2_SLICED_TELETEXT_PAL_C,6,22,1}, | 521 | {V4L2_SLICED_TELETEXT_PAL_C,6,22,1}, |
522 | { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, | 522 | { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, |
523 | 0xa6, 0x98, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } | 523 | 0xa6, 0x98, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } |
524 | }, | 524 | }, |
525 | {0x070, /* Teletext, NTSC, WST System B */ | 525 | [3] = {0x070, /* Teletext, NTSC, WST System B */ |
526 | {V4L2_SLICED_TELETEXT_NTSC_B,10,21,1}, | 526 | {V4L2_SLICED_TELETEXT_NTSC_B,10,21,1}, |
527 | { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x23, | 527 | { 0xaa, 0xaa, 0xff, 0xff, 0x27, 0x2e, 0x20, 0x23, |
528 | 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } | 528 | 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } |
529 | }, | 529 | }, |
530 | {0x090, /* Tetetext, NTSC NABTS System C */ | 530 | [4] = {0x090, /* Tetetext, NTSC NABTS System C */ |
531 | {V4L2_SLICED_TELETEXT_NTSC_C,10,21,1}, | 531 | {V4L2_SLICED_TELETEXT_NTSC_C,10,21,1}, |
532 | { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, | 532 | { 0xaa, 0xaa, 0xff, 0xff, 0xe7, 0x2e, 0x20, 0x22, |
533 | 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x15, 0x00 } | 533 | 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x15, 0x00 } |
534 | }, | 534 | }, |
535 | {0x0b0, /* Teletext, NTSC-J, NABTS System D */ | 535 | [5] = {0x0b0, /* Teletext, NTSC-J, NABTS System D */ |
536 | {V4L2_SLICED_TELETEXT_NTSC_D,10,21,1}, | 536 | {V4L2_SLICED_TELETEXT_NTSC_D,10,21,1}, |
537 | { 0xaa, 0xaa, 0xff, 0xff, 0xa7, 0x2e, 0x20, 0x23, | 537 | { 0xaa, 0xaa, 0xff, 0xff, 0xa7, 0x2e, 0x20, 0x23, |
538 | 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } | 538 | 0x69, 0x93, 0x0d, 0x00, 0x00, 0x00, 0x10, 0x00 } |
539 | }, | 539 | }, |
540 | {0x0d0, /* Closed Caption, PAL/SECAM */ | 540 | [6] = {0x0d0, /* Closed Caption, PAL/SECAM */ |
541 | {V4L2_SLICED_CAPTION_625,22,22,1}, | 541 | {V4L2_SLICED_CAPTION_625,22,22,1}, |
542 | { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, | 542 | { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, |
543 | 0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } | 543 | 0xa6, 0x7b, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } |
544 | }, | 544 | }, |
545 | #endif | 545 | #endif |
546 | {0x0f0, /* Closed Caption, NTSC */ | 546 | [7] = {0x0f0, /* Closed Caption, NTSC */ |
547 | {V4L2_SLICED_CAPTION_525,21,21,1}, | 547 | {V4L2_SLICED_CAPTION_525,21,21,1}, |
548 | { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, | 548 | { 0xaa, 0x2a, 0xff, 0x3f, 0x04, 0x51, 0x6e, 0x02, |
549 | 0x69, 0x8c, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } | 549 | 0x69, 0x8c, 0x09, 0x00, 0x00, 0x00, 0x27, 0x00 } |
550 | }, | 550 | }, |
551 | {0x110, /* Wide Screen Signal, PAL/SECAM */ | 551 | [8] = {0x110, /* Wide Screen Signal, PAL/SECAM */ |
552 | {V4L2_SLICED_WSS_625,23,23,1}, | 552 | {V4L2_SLICED_WSS_625,23,23,1}, |
553 | { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42, | 553 | { 0x5b, 0x55, 0xc5, 0xff, 0x00, 0x71, 0x6e, 0x42, |
554 | 0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 } | 554 | 0xa6, 0xcd, 0x0f, 0x00, 0x00, 0x00, 0x3a, 0x00 } |
555 | }, | 555 | }, |
556 | #if 0 | 556 | #if 0 |
557 | {0x130, /* Wide Screen Signal, NTSC C */ | 557 | [9] = {0x130, /* Wide Screen Signal, NTSC C */ |
558 | {V4L2_SLICED_WSS_525,20,20,1}, | 558 | {V4L2_SLICED_WSS_525,20,20,1}, |
559 | { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43, | 559 | { 0x38, 0x00, 0x3f, 0x00, 0x00, 0x71, 0x6e, 0x43, |
560 | 0x69, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x39, 0x00 } | 560 | 0x69, 0x7c, 0x08, 0x00, 0x00, 0x00, 0x39, 0x00 } |
561 | }, | 561 | }, |
562 | {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */ | 562 | [10] = {0x150, /* Vertical Interval Timecode (VITC), PAL/SECAM */ |
563 | {V4l2_SLICED_VITC_625,6,22,0}, | 563 | {V4l2_SLICED_VITC_625,6,22,0}, |
564 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, | 564 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, |
565 | 0xa6, 0x85, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } | 565 | 0xa6, 0x85, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } |
566 | }, | 566 | }, |
567 | {0x170, /* Vertical Interval Timecode (VITC), NTSC */ | 567 | [11] = {0x170, /* Vertical Interval Timecode (VITC), NTSC */ |
568 | {V4l2_SLICED_VITC_525,10,20,0}, | 568 | {V4l2_SLICED_VITC_525,10,20,0}, |
569 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, | 569 | { 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x6d, 0x49, |
570 | 0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } | 570 | 0x69, 0x94, 0x08, 0x00, 0x00, 0x00, 0x4c, 0x00 } |
571 | }, | 571 | }, |
572 | #endif | 572 | #endif |
573 | {0x190, /* Video Program System (VPS), PAL */ | 573 | [12] = {0x190, /* Video Program System (VPS), PAL */ |
574 | {V4L2_SLICED_VPS,16,16,0}, | 574 | {V4L2_SLICED_VPS,16,16,0}, |
575 | { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d, | 575 | { 0xaa, 0xaa, 0xff, 0xff, 0xba, 0xce, 0x2b, 0x0d, |
576 | 0xa6, 0xda, 0x0b, 0x00, 0x00, 0x00, 0x60, 0x00 } | 576 | 0xa6, 0xda, 0x0b, 0x00, 0x00, 0x00, 0x60, 0x00 } |
577 | }, | 577 | }, |
578 | /* 0x1d0 User programmable */ | 578 | /* 0x1d0 User programmable */ |
579 | |||
580 | /* End of struct */ | ||
581 | { (u16)-1 } | ||
582 | }; | 579 | }; |
583 | 580 | ||
584 | static int tvp5150_write_inittab(struct v4l2_subdev *sd, | 581 | static int tvp5150_write_inittab(struct v4l2_subdev *sd, |
@@ -591,10 +588,10 @@ static int tvp5150_write_inittab(struct v4l2_subdev *sd, | |||
591 | return 0; | 588 | return 0; |
592 | } | 589 | } |
593 | 590 | ||
594 | static int tvp5150_vdp_init(struct v4l2_subdev *sd, | 591 | static int tvp5150_vdp_init(struct v4l2_subdev *sd) |
595 | const struct i2c_vbi_ram_value *regs) | ||
596 | { | 592 | { |
597 | unsigned int i; | 593 | unsigned int i; |
594 | int j; | ||
598 | 595 | ||
599 | /* Disable Full Field */ | 596 | /* Disable Full Field */ |
600 | tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0); | 597 | tvp5150_write(sd, TVP5150_FULL_FIELD_ENA, 0); |
@@ -604,14 +601,17 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd, | |||
604 | tvp5150_write(sd, i, 0xff); | 601 | tvp5150_write(sd, i, 0xff); |
605 | 602 | ||
606 | /* Load Ram Table */ | 603 | /* Load Ram Table */ |
607 | while (regs->reg != (u16)-1) { | 604 | for (j = 0; j < ARRAY_SIZE(vbi_ram_default); j++) { |
605 | const struct i2c_vbi_ram_value *regs = &vbi_ram_default[j]; | ||
606 | |||
607 | if (!regs->type.vbi_type) | ||
608 | continue; | ||
609 | |||
608 | tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8); | 610 | tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8); |
609 | tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_LOW, regs->reg); | 611 | tvp5150_write(sd, TVP5150_CONF_RAM_ADDR_LOW, regs->reg); |
610 | 612 | ||
611 | for (i = 0; i < 16; i++) | 613 | for (i = 0; i < 16; i++) |
612 | tvp5150_write(sd, TVP5150_VDP_CONF_RAM_DATA, regs->values[i]); | 614 | tvp5150_write(sd, TVP5150_VDP_CONF_RAM_DATA, regs->values[i]); |
613 | |||
614 | regs++; | ||
615 | } | 615 | } |
616 | return 0; | 616 | return 0; |
617 | } | 617 | } |
@@ -620,19 +620,23 @@ static int tvp5150_vdp_init(struct v4l2_subdev *sd, | |||
620 | static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd, | 620 | static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd, |
621 | struct v4l2_sliced_vbi_cap *cap) | 621 | struct v4l2_sliced_vbi_cap *cap) |
622 | { | 622 | { |
623 | const struct i2c_vbi_ram_value *regs = vbi_ram_default; | 623 | int line, i; |
624 | int line; | ||
625 | 624 | ||
626 | dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n"); | 625 | dev_dbg_lvl(sd->dev, 1, debug, "g_sliced_vbi_cap\n"); |
627 | memset(cap, 0, sizeof *cap); | 626 | memset(cap, 0, sizeof *cap); |
628 | 627 | ||
629 | while (regs->reg != (u16)-1 ) { | 628 | for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) { |
630 | for (line=regs->type.ini_line;line<=regs->type.end_line;line++) { | 629 | const struct i2c_vbi_ram_value *regs = &vbi_ram_default[i]; |
630 | |||
631 | if (!regs->type.vbi_type) | ||
632 | continue; | ||
633 | |||
634 | for (line = regs->type.ini_line; | ||
635 | line <= regs->type.end_line; | ||
636 | line++) { | ||
631 | cap->service_lines[0][line] |= regs->type.vbi_type; | 637 | cap->service_lines[0][line] |= regs->type.vbi_type; |
632 | } | 638 | } |
633 | cap->service_set |= regs->type.vbi_type; | 639 | cap->service_set |= regs->type.vbi_type; |
634 | |||
635 | regs++; | ||
636 | } | 640 | } |
637 | return 0; | 641 | return 0; |
638 | } | 642 | } |
@@ -651,14 +655,13 @@ static int tvp5150_g_sliced_vbi_cap(struct v4l2_subdev *sd, | |||
651 | * MSB = field2 | 655 | * MSB = field2 |
652 | */ | 656 | */ |
653 | static int tvp5150_set_vbi(struct v4l2_subdev *sd, | 657 | static int tvp5150_set_vbi(struct v4l2_subdev *sd, |
654 | const struct i2c_vbi_ram_value *regs, | ||
655 | unsigned int type,u8 flags, int line, | 658 | unsigned int type,u8 flags, int line, |
656 | const int fields) | 659 | const int fields) |
657 | { | 660 | { |
658 | struct tvp5150 *decoder = to_tvp5150(sd); | 661 | struct tvp5150 *decoder = to_tvp5150(sd); |
659 | v4l2_std_id std = decoder->norm; | 662 | v4l2_std_id std = decoder->norm; |
660 | u8 reg; | 663 | u8 reg; |
661 | int pos = 0; | 664 | int i, pos = 0; |
662 | 665 | ||
663 | if (std == V4L2_STD_ALL) { | 666 | if (std == V4L2_STD_ALL) { |
664 | dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); | 667 | dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); |
@@ -671,19 +674,19 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, | |||
671 | if (line < 6 || line > 27) | 674 | if (line < 6 || line > 27) |
672 | return 0; | 675 | return 0; |
673 | 676 | ||
674 | while (regs->reg != (u16)-1) { | 677 | for (i = 0; i < ARRAY_SIZE(vbi_ram_default); i++) { |
678 | const struct i2c_vbi_ram_value *regs = &vbi_ram_default[i]; | ||
679 | |||
680 | if (!regs->type.vbi_type) | ||
681 | continue; | ||
682 | |||
675 | if ((type & regs->type.vbi_type) && | 683 | if ((type & regs->type.vbi_type) && |
676 | (line >= regs->type.ini_line) && | 684 | (line >= regs->type.ini_line) && |
677 | (line <= regs->type.end_line)) | 685 | (line <= regs->type.end_line)) |
678 | break; | 686 | break; |
679 | |||
680 | regs++; | ||
681 | pos++; | 687 | pos++; |
682 | } | 688 | } |
683 | 689 | ||
684 | if (regs->reg == (u16)-1) | ||
685 | return 0; | ||
686 | |||
687 | type = pos | (flags & 0xf0); | 690 | type = pos | (flags & 0xf0); |
688 | reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI; | 691 | reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI; |
689 | 692 | ||
@@ -696,8 +699,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd, | |||
696 | return type; | 699 | return type; |
697 | } | 700 | } |
698 | 701 | ||
699 | static int tvp5150_get_vbi(struct v4l2_subdev *sd, | 702 | static int tvp5150_get_vbi(struct v4l2_subdev *sd, int line) |
700 | const struct i2c_vbi_ram_value *regs, int line) | ||
701 | { | 703 | { |
702 | struct tvp5150 *decoder = to_tvp5150(sd); | 704 | struct tvp5150 *decoder = to_tvp5150(sd); |
703 | v4l2_std_id std = decoder->norm; | 705 | v4l2_std_id std = decoder->norm; |
@@ -726,8 +728,8 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd, | |||
726 | return 0; | 728 | return 0; |
727 | } | 729 | } |
728 | pos = ret & 0x0f; | 730 | pos = ret & 0x0f; |
729 | if (pos < 0x0f) | 731 | if (pos < ARRAY_SIZE(vbi_ram_default)) |
730 | type |= regs[pos].type.vbi_type; | 732 | type |= vbi_ram_default[pos].type.vbi_type; |
731 | } | 733 | } |
732 | 734 | ||
733 | return type; | 735 | return type; |
@@ -788,7 +790,7 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val) | |||
788 | tvp5150_write_inittab(sd, tvp5150_init_default); | 790 | tvp5150_write_inittab(sd, tvp5150_init_default); |
789 | 791 | ||
790 | /* Initializes VDP registers */ | 792 | /* Initializes VDP registers */ |
791 | tvp5150_vdp_init(sd, vbi_ram_default); | 793 | tvp5150_vdp_init(sd); |
792 | 794 | ||
793 | /* Selects decoder input */ | 795 | /* Selects decoder input */ |
794 | tvp5150_selmux(sd); | 796 | tvp5150_selmux(sd); |
@@ -1121,8 +1123,8 @@ static int tvp5150_s_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f | |||
1121 | for (i = 0; i <= 23; i++) { | 1123 | for (i = 0; i <= 23; i++) { |
1122 | svbi->service_lines[1][i] = 0; | 1124 | svbi->service_lines[1][i] = 0; |
1123 | svbi->service_lines[0][i] = | 1125 | svbi->service_lines[0][i] = |
1124 | tvp5150_set_vbi(sd, vbi_ram_default, | 1126 | tvp5150_set_vbi(sd, svbi->service_lines[0][i], |
1125 | svbi->service_lines[0][i], 0xf0, i, 3); | 1127 | 0xf0, i, 3); |
1126 | } | 1128 | } |
1127 | /* Enables FIFO */ | 1129 | /* Enables FIFO */ |
1128 | tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 1); | 1130 | tvp5150_write(sd, TVP5150_FIFO_OUT_CTRL, 1); |
@@ -1148,7 +1150,7 @@ static int tvp5150_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_f | |||
1148 | 1150 | ||
1149 | for (i = 0; i <= 23; i++) { | 1151 | for (i = 0; i <= 23; i++) { |
1150 | svbi->service_lines[0][i] = | 1152 | svbi->service_lines[0][i] = |
1151 | tvp5150_get_vbi(sd, vbi_ram_default, i); | 1153 | tvp5150_get_vbi(sd, i); |
1152 | mask |= svbi->service_lines[0][i]; | 1154 | mask |= svbi->service_lines[0][i]; |
1153 | } | 1155 | } |
1154 | svbi->service_set = mask; | 1156 | svbi->service_set = mask; |
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index dc8e577b2f74..d6816effb878 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c | |||
@@ -324,14 +324,15 @@ static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len, | |||
324 | } | 324 | } |
325 | return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len, | 325 | return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len, |
326 | buffer2, buffer2_len, | 326 | buffer2, buffer2_len, |
327 | &dvbdmxfilter->filter); | 327 | &dvbdmxfilter->filter, NULL); |
328 | case DMX_TYPE_TS: | 328 | case DMX_TYPE_TS: |
329 | if (!(dvbdmxfilter->feed->ts_type & TS_PACKET)) | 329 | if (!(dvbdmxfilter->feed->ts_type & TS_PACKET)) |
330 | return 0; | 330 | return 0; |
331 | if (dvbdmxfilter->feed->ts_type & TS_PAYLOAD_ONLY) | 331 | if (dvbdmxfilter->feed->ts_type & TS_PAYLOAD_ONLY) |
332 | return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len, | 332 | return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len, |
333 | buffer2, buffer2_len, | 333 | buffer2, buffer2_len, |
334 | &dvbdmxfilter->feed->feed.ts); | 334 | &dvbdmxfilter->feed->feed.ts, |
335 | NULL); | ||
335 | else | 336 | else |
336 | av7110_p2t_write(buffer1, buffer1_len, | 337 | av7110_p2t_write(buffer1, buffer1_len, |
337 | dvbdmxfilter->feed->pid, | 338 | dvbdmxfilter->feed->pid, |
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c index 4daba76ec240..ef1bc17cdc4d 100644 --- a/drivers/media/pci/ttpci/av7110_av.c +++ b/drivers/media/pci/ttpci/av7110_av.c | |||
@@ -99,7 +99,7 @@ int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len) | |||
99 | buf[4] = buf[5] = 0; | 99 | buf[4] = buf[5] = 0; |
100 | if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY) | 100 | if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY) |
101 | return dvbdmxfeed->cb.ts(buf, len, NULL, 0, | 101 | return dvbdmxfeed->cb.ts(buf, len, NULL, 0, |
102 | &dvbdmxfeed->feed.ts); | 102 | &dvbdmxfeed->feed.ts, NULL); |
103 | else | 103 | else |
104 | return dvb_filter_pes2ts(p2t, buf, len, 1); | 104 | return dvb_filter_pes2ts(p2t, buf, len, 1); |
105 | } | 105 | } |
@@ -109,7 +109,7 @@ static int dvb_filter_pes2ts_cb(void *priv, unsigned char *data) | |||
109 | struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) priv; | 109 | struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *) priv; |
110 | 110 | ||
111 | dvbdmxfeed->cb.ts(data, 188, NULL, 0, | 111 | dvbdmxfeed->cb.ts(data, 188, NULL, 0, |
112 | &dvbdmxfeed->feed.ts); | 112 | &dvbdmxfeed->feed.ts, NULL); |
113 | return 0; | 113 | return 0; |
114 | } | 114 | } |
115 | 115 | ||
@@ -814,7 +814,7 @@ static void p_to_t(u8 const *buf, long int length, u16 pid, u8 *counter, | |||
814 | memcpy(obuf + l, buf + c, TS_SIZE - l); | 814 | memcpy(obuf + l, buf + c, TS_SIZE - l); |
815 | c = length; | 815 | c = length; |
816 | } | 816 | } |
817 | feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts); | 817 | feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts, NULL); |
818 | pes_start = 0; | 818 | pes_start = 0; |
819 | } | 819 | } |
820 | } | 820 | } |
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c index 92f93a880015..aba488cd0e64 100644 --- a/drivers/media/platform/tegra-cec/tegra_cec.c +++ b/drivers/media/platform/tegra-cec/tegra_cec.c | |||
@@ -172,16 +172,13 @@ static irqreturn_t tegra_cec_irq_handler(int irq, void *data) | |||
172 | } | 172 | } |
173 | } | 173 | } |
174 | 174 | ||
175 | if (status & (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN | | 175 | if (status & TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED) { |
176 | TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED | | ||
177 | TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED | | ||
178 | TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)) { | ||
179 | cec_write(cec, TEGRA_CEC_INT_STAT, | 176 | cec_write(cec, TEGRA_CEC_INT_STAT, |
180 | (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN | | 177 | TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED); |
181 | TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED | | 178 | cec->rx_done = false; |
182 | TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED | | 179 | cec->rx_buf_cnt = 0; |
183 | TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)); | 180 | } |
184 | } else if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) { | 181 | if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) { |
185 | u32 v; | 182 | u32 v; |
186 | 183 | ||
187 | cec_write(cec, TEGRA_CEC_INT_STAT, | 184 | cec_write(cec, TEGRA_CEC_INT_STAT, |
@@ -255,7 +252,7 @@ static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable) | |||
255 | TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED | | 252 | TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED | |
256 | TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED | | 253 | TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED | |
257 | TEGRA_CEC_INT_MASK_RX_REGISTER_FULL | | 254 | TEGRA_CEC_INT_MASK_RX_REGISTER_FULL | |
258 | TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN); | 255 | TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED); |
259 | 256 | ||
260 | cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE); | 257 | cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE); |
261 | return 0; | 258 | return 0; |
diff --git a/drivers/media/usb/au0828/Kconfig b/drivers/media/usb/au0828/Kconfig index 70521e0b4c53..bfaa806633df 100644 --- a/drivers/media/usb/au0828/Kconfig +++ b/drivers/media/usb/au0828/Kconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | 1 | ||
2 | config VIDEO_AU0828 | 2 | config VIDEO_AU0828 |
3 | tristate "Auvitek AU0828 support" | 3 | tristate "Auvitek AU0828 support" |
4 | depends on I2C && INPUT && DVB_CORE && USB | 4 | depends on I2C && INPUT && DVB_CORE && USB && VIDEO_V4L2 |
5 | select I2C_ALGOBIT | 5 | select I2C_ALGOBIT |
6 | select VIDEO_TVEEPROM | 6 | select VIDEO_TVEEPROM |
7 | select VIDEOBUF2_VMALLOC | 7 | select VIDEOBUF2_VMALLOC |
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index a8900f5571f7..44ca66cb9b8f 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c | |||
@@ -428,7 +428,7 @@ static int ttusb_dec_audio_pes2ts_cb(void *priv, unsigned char *data) | |||
428 | struct ttusb_dec *dec = priv; | 428 | struct ttusb_dec *dec = priv; |
429 | 429 | ||
430 | dec->audio_filter->feed->cb.ts(data, 188, NULL, 0, | 430 | dec->audio_filter->feed->cb.ts(data, 188, NULL, 0, |
431 | &dec->audio_filter->feed->feed.ts); | 431 | &dec->audio_filter->feed->feed.ts, NULL); |
432 | 432 | ||
433 | return 0; | 433 | return 0; |
434 | } | 434 | } |
@@ -438,7 +438,7 @@ static int ttusb_dec_video_pes2ts_cb(void *priv, unsigned char *data) | |||
438 | struct ttusb_dec *dec = priv; | 438 | struct ttusb_dec *dec = priv; |
439 | 439 | ||
440 | dec->video_filter->feed->cb.ts(data, 188, NULL, 0, | 440 | dec->video_filter->feed->cb.ts(data, 188, NULL, 0, |
441 | &dec->video_filter->feed->feed.ts); | 441 | &dec->video_filter->feed->feed.ts, NULL); |
442 | 442 | ||
443 | return 0; | 443 | return 0; |
444 | } | 444 | } |
@@ -490,7 +490,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length) | |||
490 | 490 | ||
491 | if (output_pva) { | 491 | if (output_pva) { |
492 | dec->video_filter->feed->cb.ts(pva, length, NULL, 0, | 492 | dec->video_filter->feed->cb.ts(pva, length, NULL, 0, |
493 | &dec->video_filter->feed->feed.ts); | 493 | &dec->video_filter->feed->feed.ts, NULL); |
494 | return; | 494 | return; |
495 | } | 495 | } |
496 | 496 | ||
@@ -551,7 +551,7 @@ static void ttusb_dec_process_pva(struct ttusb_dec *dec, u8 *pva, int length) | |||
551 | case 0x02: /* MainAudioStream */ | 551 | case 0x02: /* MainAudioStream */ |
552 | if (output_pva) { | 552 | if (output_pva) { |
553 | dec->audio_filter->feed->cb.ts(pva, length, NULL, 0, | 553 | dec->audio_filter->feed->cb.ts(pva, length, NULL, 0, |
554 | &dec->audio_filter->feed->feed.ts); | 554 | &dec->audio_filter->feed->feed.ts, NULL); |
555 | return; | 555 | return; |
556 | } | 556 | } |
557 | 557 | ||
@@ -589,7 +589,7 @@ static void ttusb_dec_process_filter(struct ttusb_dec *dec, u8 *packet, | |||
589 | 589 | ||
590 | if (filter) | 590 | if (filter) |
591 | filter->feed->cb.sec(&packet[2], length - 2, NULL, 0, | 591 | filter->feed->cb.sec(&packet[2], length - 2, NULL, 0, |
592 | &filter->filter); | 592 | &filter->filter, NULL); |
593 | } | 593 | } |
594 | 594 | ||
595 | static void ttusb_dec_process_packet(struct ttusb_dec *dec) | 595 | static void ttusb_dec_process_packet(struct ttusb_dec *dec) |
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index bf52fbd07aed..8e37e7c5e0f7 100644 --- a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig | |||
@@ -7,6 +7,7 @@ config VIDEO_V4L2 | |||
7 | tristate | 7 | tristate |
8 | depends on (I2C || I2C=n) && VIDEO_DEV | 8 | depends on (I2C || I2C=n) && VIDEO_DEV |
9 | select RATIONAL | 9 | select RATIONAL |
10 | select VIDEOBUF2_V4L2 if VIDEOBUF2_CORE | ||
10 | default (I2C || I2C=n) && VIDEO_DEV | 11 | default (I2C || I2C=n) && VIDEO_DEV |
11 | 12 | ||
12 | config VIDEO_ADV_DEBUG | 13 | config VIDEO_ADV_DEBUG |
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile index 80de2cb9c476..7df54582e956 100644 --- a/drivers/media/v4l2-core/Makefile +++ b/drivers/media/v4l2-core/Makefile | |||
@@ -13,7 +13,7 @@ ifeq ($(CONFIG_COMPAT),y) | |||
13 | endif | 13 | endif |
14 | obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o | 14 | obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o |
15 | ifeq ($(CONFIG_TRACEPOINTS),y) | 15 | ifeq ($(CONFIG_TRACEPOINTS),y) |
16 | videodev-objs += vb2-trace.o v4l2-trace.o | 16 | videodev-objs += v4l2-trace.o |
17 | endif | 17 | endif |
18 | videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o | 18 | videodev-$(CONFIG_MEDIA_CONTROLLER) += v4l2-mc.o |
19 | 19 | ||
@@ -35,4 +35,3 @@ obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o | |||
35 | 35 | ||
36 | ccflags-y += -I$(srctree)/drivers/media/dvb-frontends | 36 | ccflags-y += -I$(srctree)/drivers/media/dvb-frontends |
37 | ccflags-y += -I$(srctree)/drivers/media/tuners | 37 | ccflags-y += -I$(srctree)/drivers/media/tuners |
38 | |||
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c index 0a7bdbed3a6f..e9c1485c32b9 100644 --- a/drivers/memory/brcmstb_dpfe.c +++ b/drivers/memory/brcmstb_dpfe.c | |||
@@ -45,8 +45,16 @@ | |||
45 | #define REG_TO_DCPU_MBOX 0x10 | 45 | #define REG_TO_DCPU_MBOX 0x10 |
46 | #define REG_TO_HOST_MBOX 0x14 | 46 | #define REG_TO_HOST_MBOX 0x14 |
47 | 47 | ||
48 | /* Macros to process offsets returned by the DCPU */ | ||
49 | #define DRAM_MSG_ADDR_OFFSET 0x0 | ||
50 | #define DRAM_MSG_TYPE_OFFSET 0x1c | ||
51 | #define DRAM_MSG_ADDR_MASK ((1UL << DRAM_MSG_TYPE_OFFSET) - 1) | ||
52 | #define DRAM_MSG_TYPE_MASK ((1UL << \ | ||
53 | (BITS_PER_LONG - DRAM_MSG_TYPE_OFFSET)) - 1) | ||
54 | |||
48 | /* Message RAM */ | 55 | /* Message RAM */ |
49 | #define DCPU_MSG_RAM(x) (0x100 + (x) * sizeof(u32)) | 56 | #define DCPU_MSG_RAM_START 0x100 |
57 | #define DCPU_MSG_RAM(x) (DCPU_MSG_RAM_START + (x) * sizeof(u32)) | ||
50 | 58 | ||
51 | /* DRAM Info Offsets & Masks */ | 59 | /* DRAM Info Offsets & Masks */ |
52 | #define DRAM_INFO_INTERVAL 0x0 | 60 | #define DRAM_INFO_INTERVAL 0x0 |
@@ -255,6 +263,40 @@ static unsigned int get_msg_chksum(const u32 msg[]) | |||
255 | return sum; | 263 | return sum; |
256 | } | 264 | } |
257 | 265 | ||
266 | static void __iomem *get_msg_ptr(struct private_data *priv, u32 response, | ||
267 | char *buf, ssize_t *size) | ||
268 | { | ||
269 | unsigned int msg_type; | ||
270 | unsigned int offset; | ||
271 | void __iomem *ptr = NULL; | ||
272 | |||
273 | msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK; | ||
274 | offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK; | ||
275 | |||
276 | /* | ||
277 | * msg_type == 1: the offset is relative to the message RAM | ||
278 | * msg_type == 0: the offset is relative to the data RAM (this is the | ||
279 | * previous way of passing data) | ||
280 | * msg_type is anything else: there's critical hardware problem | ||
281 | */ | ||
282 | switch (msg_type) { | ||
283 | case 1: | ||
284 | ptr = priv->regs + DCPU_MSG_RAM_START + offset; | ||
285 | break; | ||
286 | case 0: | ||
287 | ptr = priv->dmem + offset; | ||
288 | break; | ||
289 | default: | ||
290 | dev_emerg(priv->dev, "invalid message reply from DCPU: %#x\n", | ||
291 | response); | ||
292 | if (buf && size) | ||
293 | *size = sprintf(buf, | ||
294 | "FATAL: communication error with DCPU\n"); | ||
295 | } | ||
296 | |||
297 | return ptr; | ||
298 | } | ||
299 | |||
258 | static int __send_command(struct private_data *priv, unsigned int cmd, | 300 | static int __send_command(struct private_data *priv, unsigned int cmd, |
259 | u32 result[]) | 301 | u32 result[]) |
260 | { | 302 | { |
@@ -507,7 +549,7 @@ static ssize_t show_info(struct device *dev, struct device_attribute *devattr, | |||
507 | { | 549 | { |
508 | u32 response[MSG_FIELD_MAX]; | 550 | u32 response[MSG_FIELD_MAX]; |
509 | unsigned int info; | 551 | unsigned int info; |
510 | int ret; | 552 | ssize_t ret; |
511 | 553 | ||
512 | ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf); | 554 | ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf); |
513 | if (ret) | 555 | if (ret) |
@@ -528,18 +570,19 @@ static ssize_t show_refresh(struct device *dev, | |||
528 | u32 response[MSG_FIELD_MAX]; | 570 | u32 response[MSG_FIELD_MAX]; |
529 | void __iomem *info; | 571 | void __iomem *info; |
530 | struct private_data *priv; | 572 | struct private_data *priv; |
531 | unsigned int offset; | ||
532 | u8 refresh, sr_abort, ppre, thermal_offs, tuf; | 573 | u8 refresh, sr_abort, ppre, thermal_offs, tuf; |
533 | u32 mr4; | 574 | u32 mr4; |
534 | int ret; | 575 | ssize_t ret; |
535 | 576 | ||
536 | ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf); | 577 | ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf); |
537 | if (ret) | 578 | if (ret) |
538 | return ret; | 579 | return ret; |
539 | 580 | ||
540 | priv = dev_get_drvdata(dev); | 581 | priv = dev_get_drvdata(dev); |
541 | offset = response[MSG_ARG0]; | 582 | |
542 | info = priv->dmem + offset; | 583 | info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret); |
584 | if (!info) | ||
585 | return ret; | ||
543 | 586 | ||
544 | mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK; | 587 | mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK; |
545 | 588 | ||
@@ -561,7 +604,6 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr, | |||
561 | u32 response[MSG_FIELD_MAX]; | 604 | u32 response[MSG_FIELD_MAX]; |
562 | struct private_data *priv; | 605 | struct private_data *priv; |
563 | void __iomem *info; | 606 | void __iomem *info; |
564 | unsigned int offset; | ||
565 | unsigned long val; | 607 | unsigned long val; |
566 | int ret; | 608 | int ret; |
567 | 609 | ||
@@ -574,8 +616,10 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr, | |||
574 | if (ret) | 616 | if (ret) |
575 | return ret; | 617 | return ret; |
576 | 618 | ||
577 | offset = response[MSG_ARG0]; | 619 | info = get_msg_ptr(priv, response[MSG_ARG0], NULL, NULL); |
578 | info = priv->dmem + offset; | 620 | if (!info) |
621 | return -EIO; | ||
622 | |||
579 | writel_relaxed(val, info + DRAM_INFO_INTERVAL); | 623 | writel_relaxed(val, info + DRAM_INFO_INTERVAL); |
580 | 624 | ||
581 | return count; | 625 | return count; |
@@ -587,23 +631,25 @@ static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr, | |||
587 | u32 response[MSG_FIELD_MAX]; | 631 | u32 response[MSG_FIELD_MAX]; |
588 | struct private_data *priv; | 632 | struct private_data *priv; |
589 | void __iomem *info; | 633 | void __iomem *info; |
590 | unsigned int offset; | 634 | ssize_t ret; |
591 | int ret; | ||
592 | 635 | ||
593 | ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf); | 636 | ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf); |
594 | if (ret) | 637 | if (ret) |
595 | return ret; | 638 | return ret; |
596 | 639 | ||
597 | offset = response[MSG_ARG0]; | ||
598 | priv = dev_get_drvdata(dev); | 640 | priv = dev_get_drvdata(dev); |
599 | info = priv->dmem + offset; | 641 | |
642 | info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret); | ||
643 | if (!info) | ||
644 | return ret; | ||
600 | 645 | ||
601 | return sprintf(buf, "%#x %#x %#x %#x %#x\n", | 646 | return sprintf(buf, "%#x %#x %#x %#x %#x\n", |
602 | readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK, | 647 | readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK, |
603 | readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK, | 648 | readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK, |
604 | readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK, | 649 | readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK, |
605 | readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK, | 650 | readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK, |
606 | readl_relaxed(info + DRAM_VENDOR_ERROR)); | 651 | readl_relaxed(info + DRAM_VENDOR_ERROR) & |
652 | DRAM_VENDOR_MASK); | ||
607 | } | 653 | } |
608 | 654 | ||
609 | static int brcmstb_dpfe_resume(struct platform_device *pdev) | 655 | static int brcmstb_dpfe_resume(struct platform_device *pdev) |
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 8d12017b9893..4470630dd545 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c | |||
@@ -2687,6 +2687,8 @@ mptctl_hp_targetinfo(unsigned long arg) | |||
2687 | __FILE__, __LINE__, iocnum); | 2687 | __FILE__, __LINE__, iocnum); |
2688 | return -ENODEV; | 2688 | return -ENODEV; |
2689 | } | 2689 | } |
2690 | if (karg.hdr.id >= MPT_MAX_FC_DEVICES) | ||
2691 | return -EINVAL; | ||
2690 | dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", | 2692 | dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n", |
2691 | ioc->name)); | 2693 | ioc->name)); |
2692 | 2694 | ||
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 3e5eabdae8d9..772d02922529 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c | |||
@@ -548,12 +548,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev) | |||
548 | goto out; | 548 | goto out; |
549 | } | 549 | } |
550 | 550 | ||
551 | if (bus->dev_state == MEI_DEV_POWER_DOWN) { | ||
552 | dev_dbg(bus->dev, "Device is powering down, don't bother with disconnection\n"); | ||
553 | err = 0; | ||
554 | goto out; | ||
555 | } | ||
556 | |||
557 | err = mei_cl_disconnect(cl); | 551 | err = mei_cl_disconnect(cl); |
558 | if (err < 0) | 552 | if (err < 0) |
559 | dev_err(bus->dev, "Could not disconnect from the ME client\n"); | 553 | dev_err(bus->dev, "Could not disconnect from the ME client\n"); |
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index be64969d986a..7e60c1817c31 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
@@ -945,6 +945,12 @@ int mei_cl_disconnect(struct mei_cl *cl) | |||
945 | return 0; | 945 | return 0; |
946 | } | 946 | } |
947 | 947 | ||
948 | if (dev->dev_state == MEI_DEV_POWER_DOWN) { | ||
949 | cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n"); | ||
950 | mei_cl_set_disconnected(cl); | ||
951 | return 0; | ||
952 | } | ||
953 | |||
948 | rets = pm_runtime_get(dev->dev); | 954 | rets = pm_runtime_get(dev->dev); |
949 | if (rets < 0 && rets != -EINPROGRESS) { | 955 | if (rets < 0 && rets != -EINPROGRESS) { |
950 | pm_runtime_put_noidle(dev->dev); | 956 | pm_runtime_put_noidle(dev->dev); |
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 0ccccbaf530d..e4b10b2d1a08 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h | |||
@@ -132,6 +132,11 @@ | |||
132 | #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ | 132 | #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ |
133 | #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ | 133 | #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ |
134 | 134 | ||
135 | #define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */ | ||
136 | #define MEI_DEV_ID_CNP_LP_4 0x9DE4 /* Cannon Point LP 4 (iTouch) */ | ||
137 | #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ | ||
138 | #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ | ||
139 | |||
135 | /* | 140 | /* |
136 | * MEI HW Section | 141 | * MEI HW Section |
137 | */ | 142 | */ |
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 4a0ccda4d04b..ea4e152270a3 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
@@ -98,6 +98,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = { | |||
98 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, | 98 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, |
99 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, | 99 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, |
100 | 100 | ||
101 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH8_CFG)}, | ||
102 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)}, | ||
103 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)}, | ||
104 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, | ||
105 | |||
101 | /* required last entry */ | 106 | /* required last entry */ |
102 | {0, } | 107 | {0, } |
103 | }; | 108 | }; |
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index d9aa407db06a..038509e5d031 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c | |||
@@ -102,10 +102,32 @@ static long afu_ioctl_attach(struct ocxl_context *ctx, | |||
102 | return rc; | 102 | return rc; |
103 | } | 103 | } |
104 | 104 | ||
105 | static long afu_ioctl_get_metadata(struct ocxl_context *ctx, | ||
106 | struct ocxl_ioctl_metadata __user *uarg) | ||
107 | { | ||
108 | struct ocxl_ioctl_metadata arg; | ||
109 | |||
110 | memset(&arg, 0, sizeof(arg)); | ||
111 | |||
112 | arg.version = 0; | ||
113 | |||
114 | arg.afu_version_major = ctx->afu->config.version_major; | ||
115 | arg.afu_version_minor = ctx->afu->config.version_minor; | ||
116 | arg.pasid = ctx->pasid; | ||
117 | arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride; | ||
118 | arg.global_mmio_size = ctx->afu->config.global_mmio_size; | ||
119 | |||
120 | if (copy_to_user(uarg, &arg, sizeof(arg))) | ||
121 | return -EFAULT; | ||
122 | |||
123 | return 0; | ||
124 | } | ||
125 | |||
105 | #define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \ | 126 | #define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \ |
106 | x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \ | 127 | x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \ |
107 | x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \ | 128 | x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \ |
108 | x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \ | 129 | x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \ |
130 | x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" : \ | ||
109 | "UNKNOWN") | 131 | "UNKNOWN") |
110 | 132 | ||
111 | static long afu_ioctl(struct file *file, unsigned int cmd, | 133 | static long afu_ioctl(struct file *file, unsigned int cmd, |
@@ -133,8 +155,10 @@ static long afu_ioctl(struct file *file, unsigned int cmd, | |||
133 | if (!rc) { | 155 | if (!rc) { |
134 | rc = copy_to_user((u64 __user *) args, &irq_offset, | 156 | rc = copy_to_user((u64 __user *) args, &irq_offset, |
135 | sizeof(irq_offset)); | 157 | sizeof(irq_offset)); |
136 | if (rc) | 158 | if (rc) { |
137 | ocxl_afu_irq_free(ctx, irq_offset); | 159 | ocxl_afu_irq_free(ctx, irq_offset); |
160 | return -EFAULT; | ||
161 | } | ||
138 | } | 162 | } |
139 | break; | 163 | break; |
140 | 164 | ||
@@ -157,6 +181,11 @@ static long afu_ioctl(struct file *file, unsigned int cmd, | |||
157 | irq_fd.eventfd); | 181 | irq_fd.eventfd); |
158 | break; | 182 | break; |
159 | 183 | ||
184 | case OCXL_IOCTL_GET_METADATA: | ||
185 | rc = afu_ioctl_get_metadata(ctx, | ||
186 | (struct ocxl_ioctl_metadata __user *) args); | ||
187 | break; | ||
188 | |||
160 | default: | 189 | default: |
161 | rc = -EINVAL; | 190 | rc = -EINVAL; |
162 | } | 191 | } |
@@ -277,7 +306,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count, | |||
277 | struct ocxl_context *ctx = file->private_data; | 306 | struct ocxl_context *ctx = file->private_data; |
278 | struct ocxl_kernel_event_header header; | 307 | struct ocxl_kernel_event_header header; |
279 | ssize_t rc; | 308 | ssize_t rc; |
280 | size_t used = 0; | 309 | ssize_t used = 0; |
281 | DEFINE_WAIT(event_wait); | 310 | DEFINE_WAIT(event_wait); |
282 | 311 | ||
283 | memset(&header, 0, sizeof(header)); | 312 | memset(&header, 0, sizeof(header)); |
@@ -329,7 +358,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count, | |||
329 | 358 | ||
330 | used += sizeof(header); | 359 | used += sizeof(header); |
331 | 360 | ||
332 | rc = (ssize_t) used; | 361 | rc = used; |
333 | return rc; | 362 | return rc; |
334 | } | 363 | } |
335 | 364 | ||
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 20135a5de748..2cfb963d9f37 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
@@ -72,6 +72,7 @@ MODULE_ALIAS("mmc:block"); | |||
72 | #define MMC_BLK_TIMEOUT_MS (10 * 1000) | 72 | #define MMC_BLK_TIMEOUT_MS (10 * 1000) |
73 | #define MMC_SANITIZE_REQ_TIMEOUT 240000 | 73 | #define MMC_SANITIZE_REQ_TIMEOUT 240000 |
74 | #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) | 74 | #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) |
75 | #define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8) | ||
75 | 76 | ||
76 | #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ | 77 | #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ |
77 | (rq_data_dir(req) == WRITE)) | 78 | (rq_data_dir(req) == WRITE)) |
@@ -587,6 +588,24 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, | |||
587 | } | 588 | } |
588 | 589 | ||
589 | /* | 590 | /* |
591 | * Make sure the cache of the PARTITION_CONFIG register and | ||
592 | * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write | ||
593 | * changed it successfully. | ||
594 | */ | ||
595 | if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) && | ||
596 | (cmd.opcode == MMC_SWITCH)) { | ||
597 | struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); | ||
598 | u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg); | ||
599 | |||
600 | /* | ||
601 | * Update cache so the next mmc_blk_part_switch call operates | ||
602 | * on up-to-date data. | ||
603 | */ | ||
604 | card->ext_csd.part_config = value; | ||
605 | main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK; | ||
606 | } | ||
607 | |||
608 | /* | ||
590 | * According to the SD specs, some commands require a delay after | 609 | * According to the SD specs, some commands require a delay after |
591 | * issuing the command. | 610 | * issuing the command. |
592 | */ | 611 | */ |
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h index 79a5b985ccf5..9c821eedd156 100644 --- a/drivers/mmc/core/card.h +++ b/drivers/mmc/core/card.h | |||
@@ -82,6 +82,7 @@ struct mmc_fixup { | |||
82 | #define CID_MANFID_APACER 0x27 | 82 | #define CID_MANFID_APACER 0x27 |
83 | #define CID_MANFID_KINGSTON 0x70 | 83 | #define CID_MANFID_KINGSTON 0x70 |
84 | #define CID_MANFID_HYNIX 0x90 | 84 | #define CID_MANFID_HYNIX 0x90 |
85 | #define CID_MANFID_NUMONYX 0xFE | ||
85 | 86 | ||
86 | #define END_FIXUP { NULL } | 87 | #define END_FIXUP { NULL } |
87 | 88 | ||
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 908e4db03535..42d6aa89a48a 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c | |||
@@ -848,7 +848,6 @@ int mmc_interrupt_hpi(struct mmc_card *card) | |||
848 | return 1; | 848 | return 1; |
849 | } | 849 | } |
850 | 850 | ||
851 | mmc_claim_host(card->host); | ||
852 | err = mmc_send_status(card, &status); | 851 | err = mmc_send_status(card, &status); |
853 | if (err) { | 852 | if (err) { |
854 | pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); | 853 | pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); |
@@ -890,7 +889,6 @@ int mmc_interrupt_hpi(struct mmc_card *card) | |||
890 | } while (!err); | 889 | } while (!err); |
891 | 890 | ||
892 | out: | 891 | out: |
893 | mmc_release_host(card->host); | ||
894 | return err; | 892 | return err; |
895 | } | 893 | } |
896 | 894 | ||
@@ -932,9 +930,7 @@ static int mmc_read_bkops_status(struct mmc_card *card) | |||
932 | int err; | 930 | int err; |
933 | u8 *ext_csd; | 931 | u8 *ext_csd; |
934 | 932 | ||
935 | mmc_claim_host(card->host); | ||
936 | err = mmc_get_ext_csd(card, &ext_csd); | 933 | err = mmc_get_ext_csd(card, &ext_csd); |
937 | mmc_release_host(card->host); | ||
938 | if (err) | 934 | if (err) |
939 | return err; | 935 | return err; |
940 | 936 | ||
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index 75d317623852..5153577754f0 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h | |||
@@ -109,6 +109,12 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = { | |||
109 | */ | 109 | */ |
110 | MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX, | 110 | MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX, |
111 | 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5), | 111 | 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5), |
112 | /* | ||
113 | * Certain Micron (Numonyx) eMMC 4.5 cards might get broken when HPI | ||
114 | * feature is used so disable the HPI feature for such buggy cards. | ||
115 | */ | ||
116 | MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX, | ||
117 | 0x014e, add_quirk, MMC_QUIRK_BROKEN_HPI, 6), | ||
112 | 118 | ||
113 | END_FIXUP | 119 | END_FIXUP |
114 | }; | 120 | }; |
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 229dc18f0581..768972af8b85 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c | |||
@@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host) | |||
1265 | char pio_limit_string[20]; | 1265 | char pio_limit_string[20]; |
1266 | int ret; | 1266 | int ret; |
1267 | 1267 | ||
1268 | mmc->f_max = host->max_clk; | 1268 | if (!mmc->f_max || mmc->f_max > host->max_clk) |
1269 | mmc->f_max = host->max_clk; | ||
1269 | mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV; | 1270 | mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV; |
1270 | 1271 | ||
1271 | mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000); | 1272 | mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000); |
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 35026795be28..a84aa3f1ae85 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c | |||
@@ -165,9 +165,15 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing) | |||
165 | static int dw_mci_exynos_runtime_resume(struct device *dev) | 165 | static int dw_mci_exynos_runtime_resume(struct device *dev) |
166 | { | 166 | { |
167 | struct dw_mci *host = dev_get_drvdata(dev); | 167 | struct dw_mci *host = dev_get_drvdata(dev); |
168 | int ret; | ||
169 | |||
170 | ret = dw_mci_runtime_resume(dev); | ||
171 | if (ret) | ||
172 | return ret; | ||
168 | 173 | ||
169 | dw_mci_exynos_config_smu(host); | 174 | dw_mci_exynos_config_smu(host); |
170 | return dw_mci_runtime_resume(dev); | 175 | |
176 | return ret; | ||
171 | } | 177 | } |
172 | 178 | ||
173 | /** | 179 | /** |
@@ -487,6 +493,7 @@ static unsigned long exynos_dwmmc_caps[4] = { | |||
487 | 493 | ||
488 | static const struct dw_mci_drv_data exynos_drv_data = { | 494 | static const struct dw_mci_drv_data exynos_drv_data = { |
489 | .caps = exynos_dwmmc_caps, | 495 | .caps = exynos_dwmmc_caps, |
496 | .num_caps = ARRAY_SIZE(exynos_dwmmc_caps), | ||
490 | .init = dw_mci_exynos_priv_init, | 497 | .init = dw_mci_exynos_priv_init, |
491 | .set_ios = dw_mci_exynos_set_ios, | 498 | .set_ios = dw_mci_exynos_set_ios, |
492 | .parse_dt = dw_mci_exynos_parse_dt, | 499 | .parse_dt = dw_mci_exynos_parse_dt, |
diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c index 73fd75c3c824..89cdb3d533bb 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ b/drivers/mmc/host/dw_mmc-k3.c | |||
@@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host) | |||
135 | if (priv->ctrl_id < 0) | 135 | if (priv->ctrl_id < 0) |
136 | priv->ctrl_id = 0; | 136 | priv->ctrl_id = 0; |
137 | 137 | ||
138 | if (priv->ctrl_id >= TIMING_MODE) | ||
139 | return -EINVAL; | ||
140 | |||
138 | host->priv = priv; | 141 | host->priv = priv; |
139 | return 0; | 142 | return 0; |
140 | } | 143 | } |
@@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode) | |||
207 | 210 | ||
208 | static const struct dw_mci_drv_data hi6220_data = { | 211 | static const struct dw_mci_drv_data hi6220_data = { |
209 | .caps = dw_mci_hi6220_caps, | 212 | .caps = dw_mci_hi6220_caps, |
213 | .num_caps = ARRAY_SIZE(dw_mci_hi6220_caps), | ||
210 | .switch_voltage = dw_mci_hi6220_switch_voltage, | 214 | .switch_voltage = dw_mci_hi6220_switch_voltage, |
211 | .set_ios = dw_mci_hi6220_set_ios, | 215 | .set_ios = dw_mci_hi6220_set_ios, |
212 | .parse_dt = dw_mci_hi6220_parse_dt, | 216 | .parse_dt = dw_mci_hi6220_parse_dt, |
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c index a3f1c2b30145..339295212935 100644 --- a/drivers/mmc/host/dw_mmc-rockchip.c +++ b/drivers/mmc/host/dw_mmc-rockchip.c | |||
@@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = { | |||
319 | 319 | ||
320 | static const struct dw_mci_drv_data rk3288_drv_data = { | 320 | static const struct dw_mci_drv_data rk3288_drv_data = { |
321 | .caps = dw_mci_rk3288_dwmmc_caps, | 321 | .caps = dw_mci_rk3288_dwmmc_caps, |
322 | .num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps), | ||
322 | .set_ios = dw_mci_rk3288_set_ios, | 323 | .set_ios = dw_mci_rk3288_set_ios, |
323 | .execute_tuning = dw_mci_rk3288_execute_tuning, | 324 | .execute_tuning = dw_mci_rk3288_execute_tuning, |
324 | .parse_dt = dw_mci_rk3288_parse_dt, | 325 | .parse_dt = dw_mci_rk3288_parse_dt, |
diff --git a/drivers/mmc/host/dw_mmc-zx.c b/drivers/mmc/host/dw_mmc-zx.c index d38e94ae2b85..c06b5393312f 100644 --- a/drivers/mmc/host/dw_mmc-zx.c +++ b/drivers/mmc/host/dw_mmc-zx.c | |||
@@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = { | |||
195 | 195 | ||
196 | static const struct dw_mci_drv_data zx_drv_data = { | 196 | static const struct dw_mci_drv_data zx_drv_data = { |
197 | .caps = zx_dwmmc_caps, | 197 | .caps = zx_dwmmc_caps, |
198 | .num_caps = ARRAY_SIZE(zx_dwmmc_caps), | ||
198 | .execute_tuning = dw_mci_zx_execute_tuning, | 199 | .execute_tuning = dw_mci_zx_execute_tuning, |
199 | .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning, | 200 | .prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning, |
200 | .parse_dt = dw_mci_zx_parse_dt, | 201 | .parse_dt = dw_mci_zx_parse_dt, |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 0aa39975f33b..06d47414d0c1 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v) | |||
165 | { | 165 | { |
166 | struct dw_mci *host = s->private; | 166 | struct dw_mci *host = s->private; |
167 | 167 | ||
168 | pm_runtime_get_sync(host->dev); | ||
169 | |||
168 | seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); | 170 | seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS)); |
169 | seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); | 171 | seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS)); |
170 | seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); | 172 | seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD)); |
@@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v) | |||
172 | seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); | 174 | seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK)); |
173 | seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); | 175 | seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA)); |
174 | 176 | ||
177 | pm_runtime_put_autosuspend(host->dev); | ||
178 | |||
175 | return 0; | 179 | return 0; |
176 | } | 180 | } |
177 | 181 | ||
@@ -409,7 +413,9 @@ static inline void dw_mci_set_cto(struct dw_mci *host) | |||
409 | cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; | 413 | cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; |
410 | if (cto_div == 0) | 414 | if (cto_div == 0) |
411 | cto_div = 1; | 415 | cto_div = 1; |
412 | cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz); | 416 | |
417 | cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div, | ||
418 | host->bus_hz); | ||
413 | 419 | ||
414 | /* add a bit spare time */ | 420 | /* add a bit spare time */ |
415 | cto_ms += 10; | 421 | cto_ms += 10; |
@@ -558,6 +564,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) | |||
558 | (sizeof(struct idmac_desc_64addr) * | 564 | (sizeof(struct idmac_desc_64addr) * |
559 | (i + 1))) >> 32; | 565 | (i + 1))) >> 32; |
560 | /* Initialize reserved and buffer size fields to "0" */ | 566 | /* Initialize reserved and buffer size fields to "0" */ |
567 | p->des0 = 0; | ||
561 | p->des1 = 0; | 568 | p->des1 = 0; |
562 | p->des2 = 0; | 569 | p->des2 = 0; |
563 | p->des3 = 0; | 570 | p->des3 = 0; |
@@ -580,6 +587,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) | |||
580 | i++, p++) { | 587 | i++, p++) { |
581 | p->des3 = cpu_to_le32(host->sg_dma + | 588 | p->des3 = cpu_to_le32(host->sg_dma + |
582 | (sizeof(struct idmac_desc) * (i + 1))); | 589 | (sizeof(struct idmac_desc) * (i + 1))); |
590 | p->des0 = 0; | ||
583 | p->des1 = 0; | 591 | p->des1 = 0; |
584 | } | 592 | } |
585 | 593 | ||
@@ -1795,8 +1803,8 @@ static bool dw_mci_reset(struct dw_mci *host) | |||
1795 | } | 1803 | } |
1796 | 1804 | ||
1797 | if (host->use_dma == TRANS_MODE_IDMAC) | 1805 | if (host->use_dma == TRANS_MODE_IDMAC) |
1798 | /* It is also recommended that we reset and reprogram idmac */ | 1806 | /* It is also required that we reinit idmac */ |
1799 | dw_mci_idmac_reset(host); | 1807 | dw_mci_idmac_init(host); |
1800 | 1808 | ||
1801 | ret = true; | 1809 | ret = true; |
1802 | 1810 | ||
@@ -1944,8 +1952,9 @@ static void dw_mci_set_drto(struct dw_mci *host) | |||
1944 | drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; | 1952 | drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; |
1945 | if (drto_div == 0) | 1953 | if (drto_div == 0) |
1946 | drto_div = 1; | 1954 | drto_div = 1; |
1947 | drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div, | 1955 | |
1948 | host->bus_hz); | 1956 | drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div, |
1957 | host->bus_hz); | ||
1949 | 1958 | ||
1950 | /* add a bit spare time */ | 1959 | /* add a bit spare time */ |
1951 | drto_ms += 10; | 1960 | drto_ms += 10; |
@@ -2778,12 +2787,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) | |||
2778 | return IRQ_HANDLED; | 2787 | return IRQ_HANDLED; |
2779 | } | 2788 | } |
2780 | 2789 | ||
2790 | static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) | ||
2791 | { | ||
2792 | struct dw_mci *host = slot->host; | ||
2793 | const struct dw_mci_drv_data *drv_data = host->drv_data; | ||
2794 | struct mmc_host *mmc = slot->mmc; | ||
2795 | int ctrl_id; | ||
2796 | |||
2797 | if (host->pdata->caps) | ||
2798 | mmc->caps = host->pdata->caps; | ||
2799 | |||
2800 | /* | ||
2801 | * Support MMC_CAP_ERASE by default. | ||
2802 | * It needs to use trim/discard/erase commands. | ||
2803 | */ | ||
2804 | mmc->caps |= MMC_CAP_ERASE; | ||
2805 | |||
2806 | if (host->pdata->pm_caps) | ||
2807 | mmc->pm_caps = host->pdata->pm_caps; | ||
2808 | |||
2809 | if (host->dev->of_node) { | ||
2810 | ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); | ||
2811 | if (ctrl_id < 0) | ||
2812 | ctrl_id = 0; | ||
2813 | } else { | ||
2814 | ctrl_id = to_platform_device(host->dev)->id; | ||
2815 | } | ||
2816 | |||
2817 | if (drv_data && drv_data->caps) { | ||
2818 | if (ctrl_id >= drv_data->num_caps) { | ||
2819 | dev_err(host->dev, "invalid controller id %d\n", | ||
2820 | ctrl_id); | ||
2821 | return -EINVAL; | ||
2822 | } | ||
2823 | mmc->caps |= drv_data->caps[ctrl_id]; | ||
2824 | } | ||
2825 | |||
2826 | if (host->pdata->caps2) | ||
2827 | mmc->caps2 = host->pdata->caps2; | ||
2828 | |||
2829 | /* Process SDIO IRQs through the sdio_irq_work. */ | ||
2830 | if (mmc->caps & MMC_CAP_SDIO_IRQ) | ||
2831 | mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; | ||
2832 | |||
2833 | return 0; | ||
2834 | } | ||
2835 | |||
2781 | static int dw_mci_init_slot(struct dw_mci *host) | 2836 | static int dw_mci_init_slot(struct dw_mci *host) |
2782 | { | 2837 | { |
2783 | struct mmc_host *mmc; | 2838 | struct mmc_host *mmc; |
2784 | struct dw_mci_slot *slot; | 2839 | struct dw_mci_slot *slot; |
2785 | const struct dw_mci_drv_data *drv_data = host->drv_data; | 2840 | int ret; |
2786 | int ctrl_id, ret; | ||
2787 | u32 freq[2]; | 2841 | u32 freq[2]; |
2788 | 2842 | ||
2789 | mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); | 2843 | mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); |
@@ -2817,38 +2871,13 @@ static int dw_mci_init_slot(struct dw_mci *host) | |||
2817 | if (!mmc->ocr_avail) | 2871 | if (!mmc->ocr_avail) |
2818 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | 2872 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; |
2819 | 2873 | ||
2820 | if (host->pdata->caps) | ||
2821 | mmc->caps = host->pdata->caps; | ||
2822 | |||
2823 | /* | ||
2824 | * Support MMC_CAP_ERASE by default. | ||
2825 | * It needs to use trim/discard/erase commands. | ||
2826 | */ | ||
2827 | mmc->caps |= MMC_CAP_ERASE; | ||
2828 | |||
2829 | if (host->pdata->pm_caps) | ||
2830 | mmc->pm_caps = host->pdata->pm_caps; | ||
2831 | |||
2832 | if (host->dev->of_node) { | ||
2833 | ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); | ||
2834 | if (ctrl_id < 0) | ||
2835 | ctrl_id = 0; | ||
2836 | } else { | ||
2837 | ctrl_id = to_platform_device(host->dev)->id; | ||
2838 | } | ||
2839 | if (drv_data && drv_data->caps) | ||
2840 | mmc->caps |= drv_data->caps[ctrl_id]; | ||
2841 | |||
2842 | if (host->pdata->caps2) | ||
2843 | mmc->caps2 = host->pdata->caps2; | ||
2844 | |||
2845 | ret = mmc_of_parse(mmc); | 2874 | ret = mmc_of_parse(mmc); |
2846 | if (ret) | 2875 | if (ret) |
2847 | goto err_host_allocated; | 2876 | goto err_host_allocated; |
2848 | 2877 | ||
2849 | /* Process SDIO IRQs through the sdio_irq_work. */ | 2878 | ret = dw_mci_init_slot_caps(slot); |
2850 | if (mmc->caps & MMC_CAP_SDIO_IRQ) | 2879 | if (ret) |
2851 | mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; | 2880 | goto err_host_allocated; |
2852 | 2881 | ||
2853 | /* Useful defaults if platform data is unset. */ | 2882 | /* Useful defaults if platform data is unset. */ |
2854 | if (host->use_dma == TRANS_MODE_IDMAC) { | 2883 | if (host->use_dma == TRANS_MODE_IDMAC) { |
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index e3124f06a47e..1424bd490dd1 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h | |||
@@ -543,6 +543,7 @@ struct dw_mci_slot { | |||
543 | /** | 543 | /** |
544 | * dw_mci driver data - dw-mshc implementation specific driver data. | 544 | * dw_mci driver data - dw-mshc implementation specific driver data. |
545 | * @caps: mmc subsystem specified capabilities of the controller(s). | 545 | * @caps: mmc subsystem specified capabilities of the controller(s). |
546 | * @num_caps: number of capabilities specified by @caps. | ||
546 | * @init: early implementation specific initialization. | 547 | * @init: early implementation specific initialization. |
547 | * @set_ios: handle bus specific extensions. | 548 | * @set_ios: handle bus specific extensions. |
548 | * @parse_dt: parse implementation specific device tree properties. | 549 | * @parse_dt: parse implementation specific device tree properties. |
@@ -554,6 +555,7 @@ struct dw_mci_slot { | |||
554 | */ | 555 | */ |
555 | struct dw_mci_drv_data { | 556 | struct dw_mci_drv_data { |
556 | unsigned long *caps; | 557 | unsigned long *caps; |
558 | u32 num_caps; | ||
557 | int (*init)(struct dw_mci *host); | 559 | int (*init)(struct dw_mci *host); |
558 | void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); | 560 | void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); |
559 | int (*parse_dt)(struct dw_mci *host); | 561 | int (*parse_dt)(struct dw_mci *host); |
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 22438ebfe4e6..4f972b879fe6 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c | |||
@@ -717,22 +717,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, | |||
717 | static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) | 717 | static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) |
718 | { | 718 | { |
719 | struct meson_host *host = mmc_priv(mmc); | 719 | struct meson_host *host = mmc_priv(mmc); |
720 | int ret; | ||
721 | |||
722 | /* | ||
723 | * If this is the initial tuning, try to get a sane Rx starting | ||
724 | * phase before doing the actual tuning. | ||
725 | */ | ||
726 | if (!mmc->doing_retune) { | ||
727 | ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); | ||
728 | |||
729 | if (ret) | ||
730 | return ret; | ||
731 | } | ||
732 | |||
733 | ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk); | ||
734 | if (ret) | ||
735 | return ret; | ||
736 | 720 | ||
737 | return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); | 721 | return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); |
738 | } | 722 | } |
@@ -763,9 +747,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
763 | if (!IS_ERR(mmc->supply.vmmc)) | 747 | if (!IS_ERR(mmc->supply.vmmc)) |
764 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); | 748 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); |
765 | 749 | ||
766 | /* Reset phases */ | 750 | /* Reset rx phase */ |
767 | clk_set_phase(host->rx_clk, 0); | 751 | clk_set_phase(host->rx_clk, 0); |
768 | clk_set_phase(host->tx_clk, 270); | ||
769 | 752 | ||
770 | break; | 753 | break; |
771 | 754 | ||
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 4065da58789d..32321bd596d8 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c | |||
@@ -680,7 +680,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) | |||
680 | host->hw_name = "ACPI"; | 680 | host->hw_name = "ACPI"; |
681 | host->ops = &sdhci_acpi_ops_dflt; | 681 | host->ops = &sdhci_acpi_ops_dflt; |
682 | host->irq = platform_get_irq(pdev, 0); | 682 | host->irq = platform_get_irq(pdev, 0); |
683 | if (host->irq <= 0) { | 683 | if (host->irq < 0) { |
684 | err = -EINVAL; | 684 | err = -EINVAL; |
685 | goto err_free; | 685 | goto err_free; |
686 | } | 686 | } |
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 6d1a983e6227..82c4f05f91d8 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c | |||
@@ -654,9 +654,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot) | |||
654 | slot->chip->rpm_retune = intel_host->d3_retune; | 654 | slot->chip->rpm_retune = intel_host->d3_retune; |
655 | } | 655 | } |
656 | 656 | ||
657 | static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) | 657 | static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode) |
658 | { | ||
659 | int err = sdhci_execute_tuning(mmc, opcode); | ||
660 | struct sdhci_host *host = mmc_priv(mmc); | ||
661 | |||
662 | if (err) | ||
663 | return err; | ||
664 | |||
665 | /* | ||
666 | * Tuning can leave the IP in an active state (Buffer Read Enable bit | ||
667 | * set) which prevents the entry to low power states (i.e. S0i3). Data | ||
668 | * reset will clear it. | ||
669 | */ | ||
670 | sdhci_reset(host, SDHCI_RESET_DATA); | ||
671 | |||
672 | return 0; | ||
673 | } | ||
674 | |||
675 | static void byt_probe_slot(struct sdhci_pci_slot *slot) | ||
658 | { | 676 | { |
677 | struct mmc_host_ops *ops = &slot->host->mmc_host_ops; | ||
678 | |||
659 | byt_read_dsm(slot); | 679 | byt_read_dsm(slot); |
680 | |||
681 | ops->execute_tuning = intel_execute_tuning; | ||
682 | } | ||
683 | |||
684 | static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) | ||
685 | { | ||
686 | byt_probe_slot(slot); | ||
660 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | | 687 | slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | |
661 | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | | 688 | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | |
662 | MMC_CAP_CMD_DURING_TFR | | 689 | MMC_CAP_CMD_DURING_TFR | |
@@ -779,7 +806,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) | |||
779 | { | 806 | { |
780 | int err; | 807 | int err; |
781 | 808 | ||
782 | byt_read_dsm(slot); | 809 | byt_probe_slot(slot); |
783 | 810 | ||
784 | err = ni_set_max_freq(slot); | 811 | err = ni_set_max_freq(slot); |
785 | if (err) | 812 | if (err) |
@@ -792,7 +819,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot) | |||
792 | 819 | ||
793 | static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) | 820 | static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) |
794 | { | 821 | { |
795 | byt_read_dsm(slot); | 822 | byt_probe_slot(slot); |
796 | slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | | 823 | slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE | |
797 | MMC_CAP_WAIT_WHILE_BUSY; | 824 | MMC_CAP_WAIT_WHILE_BUSY; |
798 | return 0; | 825 | return 0; |
@@ -800,7 +827,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) | |||
800 | 827 | ||
801 | static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) | 828 | static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) |
802 | { | 829 | { |
803 | byt_read_dsm(slot); | 830 | byt_probe_slot(slot); |
804 | slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | | 831 | slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | |
805 | MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE; | 832 | MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE; |
806 | slot->cd_idx = 0; | 833 | slot->cd_idx = 0; |
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index de8c902059b8..7d80a8bb96fe 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c | |||
@@ -479,7 +479,7 @@ static int shrink_ecclayout(struct mtd_info *mtd, | |||
479 | for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) { | 479 | for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) { |
480 | u32 eccpos; | 480 | u32 eccpos; |
481 | 481 | ||
482 | ret = mtd_ooblayout_ecc(mtd, section, &oobregion); | 482 | ret = mtd_ooblayout_ecc(mtd, section++, &oobregion); |
483 | if (ret < 0) { | 483 | if (ret < 0) { |
484 | if (ret != -ERANGE) | 484 | if (ret != -ERANGE) |
485 | return ret; | 485 | return ret; |
@@ -526,7 +526,7 @@ static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to) | |||
526 | for (i = 0; i < ARRAY_SIZE(to->eccpos);) { | 526 | for (i = 0; i < ARRAY_SIZE(to->eccpos);) { |
527 | u32 eccpos; | 527 | u32 eccpos; |
528 | 528 | ||
529 | ret = mtd_ooblayout_ecc(mtd, section, &oobregion); | 529 | ret = mtd_ooblayout_ecc(mtd, section++, &oobregion); |
530 | if (ret < 0) { | 530 | if (ret < 0) { |
531 | if (ret != -ERANGE) | 531 | if (ret != -ERANGE) |
532 | return ret; | 532 | return ret; |
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index e6b8c59f2c0d..736ac887303c 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -328,7 +328,7 @@ config MTD_NAND_MARVELL | |||
328 | tristate "NAND controller support on Marvell boards" | 328 | tristate "NAND controller support on Marvell boards" |
329 | depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \ | 329 | depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \ |
330 | COMPILE_TEST | 330 | COMPILE_TEST |
331 | depends on HAS_IOMEM | 331 | depends on HAS_IOMEM && HAS_DMA |
332 | help | 332 | help |
333 | This enables the NAND flash controller driver for Marvell boards, | 333 | This enables the NAND flash controller driver for Marvell boards, |
334 | including: | 334 | including: |
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 4872a7ba6503..5a9c2f0020c2 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c | |||
@@ -173,14 +173,9 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) | |||
173 | 173 | ||
174 | /* returns nonzero if entire page is blank */ | 174 | /* returns nonzero if entire page is blank */ |
175 | static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl, | 175 | static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl, |
176 | u32 *eccstat, unsigned int bufnum) | 176 | u32 eccstat, unsigned int bufnum) |
177 | { | 177 | { |
178 | u32 reg = eccstat[bufnum / 4]; | 178 | return (eccstat >> ((3 - bufnum % 4) * 8)) & 15; |
179 | int errors; | ||
180 | |||
181 | errors = (reg >> ((3 - bufnum % 4) * 8)) & 15; | ||
182 | |||
183 | return errors; | ||
184 | } | 179 | } |
185 | 180 | ||
186 | /* | 181 | /* |
@@ -193,7 +188,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) | |||
193 | struct fsl_ifc_ctrl *ctrl = priv->ctrl; | 188 | struct fsl_ifc_ctrl *ctrl = priv->ctrl; |
194 | struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; | 189 | struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; |
195 | struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; | 190 | struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; |
196 | u32 eccstat[4]; | 191 | u32 eccstat; |
197 | int i; | 192 | int i; |
198 | 193 | ||
199 | /* set the chip select for NAND Transaction */ | 194 | /* set the chip select for NAND Transaction */ |
@@ -228,19 +223,17 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) | |||
228 | if (nctrl->eccread) { | 223 | if (nctrl->eccread) { |
229 | int errors; | 224 | int errors; |
230 | int bufnum = nctrl->page & priv->bufnum_mask; | 225 | int bufnum = nctrl->page & priv->bufnum_mask; |
231 | int sector = bufnum * chip->ecc.steps; | 226 | int sector_start = bufnum * chip->ecc.steps; |
232 | int sector_end = sector + chip->ecc.steps - 1; | 227 | int sector_end = sector_start + chip->ecc.steps - 1; |
233 | __be32 *eccstat_regs; | 228 | __be32 *eccstat_regs; |
234 | 229 | ||
235 | if (ctrl->version >= FSL_IFC_VERSION_2_0_0) | 230 | eccstat_regs = ifc->ifc_nand.nand_eccstat; |
236 | eccstat_regs = ifc->ifc_nand.v2_nand_eccstat; | 231 | eccstat = ifc_in32(&eccstat_regs[sector_start / 4]); |
237 | else | ||
238 | eccstat_regs = ifc->ifc_nand.v1_nand_eccstat; | ||
239 | 232 | ||
240 | for (i = sector / 4; i <= sector_end / 4; i++) | 233 | for (i = sector_start; i <= sector_end; i++) { |
241 | eccstat[i] = ifc_in32(&eccstat_regs[i]); | 234 | if (i != sector_start && !(i % 4)) |
235 | eccstat = ifc_in32(&eccstat_regs[i / 4]); | ||
242 | 236 | ||
243 | for (i = sector; i <= sector_end; i++) { | ||
244 | errors = check_read_ecc(mtd, ctrl, eccstat, i); | 237 | errors = check_read_ecc(mtd, ctrl, eccstat, i); |
245 | 238 | ||
246 | if (errors == 15) { | 239 | if (errors == 15) { |
@@ -626,6 +619,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) | |||
626 | struct fsl_ifc_ctrl *ctrl = priv->ctrl; | 619 | struct fsl_ifc_ctrl *ctrl = priv->ctrl; |
627 | struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; | 620 | struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; |
628 | u32 nand_fsr; | 621 | u32 nand_fsr; |
622 | int status; | ||
629 | 623 | ||
630 | /* Use READ_STATUS command, but wait for the device to be ready */ | 624 | /* Use READ_STATUS command, but wait for the device to be ready */ |
631 | ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | | 625 | ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | |
@@ -640,12 +634,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) | |||
640 | fsl_ifc_run_command(mtd); | 634 | fsl_ifc_run_command(mtd); |
641 | 635 | ||
642 | nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); | 636 | nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); |
643 | 637 | status = nand_fsr >> 24; | |
644 | /* | 638 | /* |
645 | * The chip always seems to report that it is | 639 | * The chip always seems to report that it is |
646 | * write-protected, even when it is not. | 640 | * write-protected, even when it is not. |
647 | */ | 641 | */ |
648 | return nand_fsr | NAND_STATUS_WP; | 642 | return status | NAND_STATUS_WP; |
649 | } | 643 | } |
650 | 644 | ||
651 | /* | 645 | /* |
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c index 80d31a58e558..f367144f3c6f 100644 --- a/drivers/mtd/nand/vf610_nfc.c +++ b/drivers/mtd/nand/vf610_nfc.c | |||
@@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev) | |||
752 | if (mtd->oobsize > 64) | 752 | if (mtd->oobsize > 64) |
753 | mtd->oobsize = 64; | 753 | mtd->oobsize = 64; |
754 | 754 | ||
755 | /* | 755 | /* Use default large page ECC layout defined in NAND core */ |
756 | * mtd->ecclayout is not specified here because we're using the | 756 | mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); |
757 | * default large page ECC layout defined in NAND core. | ||
758 | */ | ||
759 | if (chip->ecc.strength == 32) { | 757 | if (chip->ecc.strength == 32) { |
760 | nfc->ecc_mode = ECC_60_BYTE; | 758 | nfc->ecc_mode = ECC_60_BYTE; |
761 | chip->ecc.bytes = 60; | 759 | chip->ecc.bytes = 60; |
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index 1e37313054f3..6da69af103e6 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c | |||
@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev, | |||
390 | return 0; | 390 | return 0; |
391 | } | 391 | } |
392 | 392 | ||
393 | static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) | 393 | static void cc770_tx(struct net_device *dev, int mo) |
394 | { | 394 | { |
395 | struct cc770_priv *priv = netdev_priv(dev); | 395 | struct cc770_priv *priv = netdev_priv(dev); |
396 | struct net_device_stats *stats = &dev->stats; | 396 | struct can_frame *cf = (struct can_frame *)priv->tx_skb->data; |
397 | struct can_frame *cf = (struct can_frame *)skb->data; | ||
398 | unsigned int mo = obj2msgobj(CC770_OBJ_TX); | ||
399 | u8 dlc, rtr; | 397 | u8 dlc, rtr; |
400 | u32 id; | 398 | u32 id; |
401 | int i; | 399 | int i; |
402 | 400 | ||
403 | if (can_dropped_invalid_skb(dev, skb)) | ||
404 | return NETDEV_TX_OK; | ||
405 | |||
406 | if ((cc770_read_reg(priv, | ||
407 | msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { | ||
408 | netdev_err(dev, "TX register is still occupied!\n"); | ||
409 | return NETDEV_TX_BUSY; | ||
410 | } | ||
411 | |||
412 | netif_stop_queue(dev); | ||
413 | |||
414 | dlc = cf->can_dlc; | 401 | dlc = cf->can_dlc; |
415 | id = cf->can_id; | 402 | id = cf->can_id; |
416 | if (cf->can_id & CAN_RTR_FLAG) | 403 | rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR; |
417 | rtr = 0; | 404 | |
418 | else | 405 | cc770_write_reg(priv, msgobj[mo].ctrl0, |
419 | rtr = MSGCFG_DIR; | 406 | MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); |
420 | cc770_write_reg(priv, msgobj[mo].ctrl1, | 407 | cc770_write_reg(priv, msgobj[mo].ctrl1, |
421 | RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); | 408 | RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); |
422 | cc770_write_reg(priv, msgobj[mo].ctrl0, | 409 | |
423 | MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES); | ||
424 | if (id & CAN_EFF_FLAG) { | 410 | if (id & CAN_EFF_FLAG) { |
425 | id &= CAN_EFF_MASK; | 411 | id &= CAN_EFF_MASK; |
426 | cc770_write_reg(priv, msgobj[mo].config, | 412 | cc770_write_reg(priv, msgobj[mo].config, |
@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
439 | for (i = 0; i < dlc; i++) | 425 | for (i = 0; i < dlc; i++) |
440 | cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); | 426 | cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); |
441 | 427 | ||
442 | /* Store echo skb before starting the transfer */ | ||
443 | can_put_echo_skb(skb, dev, 0); | ||
444 | |||
445 | cc770_write_reg(priv, msgobj[mo].ctrl1, | 428 | cc770_write_reg(priv, msgobj[mo].ctrl1, |
446 | RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); | 429 | RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); |
430 | cc770_write_reg(priv, msgobj[mo].ctrl0, | ||
431 | MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC); | ||
432 | } | ||
447 | 433 | ||
448 | stats->tx_bytes += dlc; | 434 | static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) |
435 | { | ||
436 | struct cc770_priv *priv = netdev_priv(dev); | ||
437 | unsigned int mo = obj2msgobj(CC770_OBJ_TX); | ||
449 | 438 | ||
439 | if (can_dropped_invalid_skb(dev, skb)) | ||
440 | return NETDEV_TX_OK; | ||
450 | 441 | ||
451 | /* | 442 | netif_stop_queue(dev); |
452 | * HM: We had some cases of repeated IRQs so make sure the | 443 | |
453 | * INT is acknowledged I know it's already further up, but | 444 | if ((cc770_read_reg(priv, |
454 | * doing again fixed the issue | 445 | msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { |
455 | */ | 446 | netdev_err(dev, "TX register is still occupied!\n"); |
456 | cc770_write_reg(priv, msgobj[mo].ctrl0, | 447 | return NETDEV_TX_BUSY; |
457 | MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); | 448 | } |
449 | |||
450 | priv->tx_skb = skb; | ||
451 | cc770_tx(dev, mo); | ||
458 | 452 | ||
459 | return NETDEV_TX_OK; | 453 | return NETDEV_TX_OK; |
460 | } | 454 | } |
@@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) | |||
680 | struct cc770_priv *priv = netdev_priv(dev); | 674 | struct cc770_priv *priv = netdev_priv(dev); |
681 | struct net_device_stats *stats = &dev->stats; | 675 | struct net_device_stats *stats = &dev->stats; |
682 | unsigned int mo = obj2msgobj(o); | 676 | unsigned int mo = obj2msgobj(o); |
677 | struct can_frame *cf; | ||
678 | u8 ctrl1; | ||
679 | |||
680 | ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); | ||
683 | 681 | ||
684 | /* Nothing more to send, switch off interrupts */ | ||
685 | cc770_write_reg(priv, msgobj[mo].ctrl0, | 682 | cc770_write_reg(priv, msgobj[mo].ctrl0, |
686 | MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); | 683 | MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); |
687 | /* | 684 | cc770_write_reg(priv, msgobj[mo].ctrl1, |
688 | * We had some cases of repeated IRQ so make sure the | 685 | RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES); |
689 | * INT is acknowledged | 686 | |
687 | if (unlikely(!priv->tx_skb)) { | ||
688 | netdev_err(dev, "missing tx skb in tx interrupt\n"); | ||
689 | return; | ||
690 | } | ||
691 | |||
692 | if (unlikely(ctrl1 & MSGLST_SET)) { | ||
693 | stats->rx_over_errors++; | ||
694 | stats->rx_errors++; | ||
695 | } | ||
696 | |||
697 | /* When the CC770 is sending an RTR message and it receives a regular | ||
698 | * message that matches the id of the RTR message, it will overwrite the | ||
699 | * outgoing message in the TX register. When this happens we must | ||
700 | * process the received message and try to transmit the outgoing skb | ||
701 | * again. | ||
690 | */ | 702 | */ |
691 | cc770_write_reg(priv, msgobj[mo].ctrl0, | 703 | if (unlikely(ctrl1 & NEWDAT_SET)) { |
692 | MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); | 704 | cc770_rx(dev, mo, ctrl1); |
705 | cc770_tx(dev, mo); | ||
706 | return; | ||
707 | } | ||
693 | 708 | ||
709 | cf = (struct can_frame *)priv->tx_skb->data; | ||
710 | stats->tx_bytes += cf->can_dlc; | ||
694 | stats->tx_packets++; | 711 | stats->tx_packets++; |
712 | |||
713 | can_put_echo_skb(priv->tx_skb, dev, 0); | ||
695 | can_get_echo_skb(dev, 0); | 714 | can_get_echo_skb(dev, 0); |
715 | priv->tx_skb = NULL; | ||
716 | |||
696 | netif_wake_queue(dev); | 717 | netif_wake_queue(dev); |
697 | } | 718 | } |
698 | 719 | ||
@@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv) | |||
804 | priv->can.do_set_bittiming = cc770_set_bittiming; | 825 | priv->can.do_set_bittiming = cc770_set_bittiming; |
805 | priv->can.do_set_mode = cc770_set_mode; | 826 | priv->can.do_set_mode = cc770_set_mode; |
806 | priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; | 827 | priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; |
828 | priv->tx_skb = NULL; | ||
807 | 829 | ||
808 | memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); | 830 | memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); |
809 | 831 | ||
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h index a1739db98d91..95752e1d1283 100644 --- a/drivers/net/can/cc770/cc770.h +++ b/drivers/net/can/cc770/cc770.h | |||
@@ -193,6 +193,8 @@ struct cc770_priv { | |||
193 | u8 cpu_interface; /* CPU interface register */ | 193 | u8 cpu_interface; /* CPU interface register */ |
194 | u8 clkout; /* Clock out register */ | 194 | u8 clkout; /* Clock out register */ |
195 | u8 bus_config; /* Bus conffiguration register */ | 195 | u8 bus_config; /* Bus conffiguration register */ |
196 | |||
197 | struct sk_buff *tx_skb; | ||
196 | }; | 198 | }; |
197 | 199 | ||
198 | struct net_device *alloc_cc770dev(int sizeof_priv); | 200 | struct net_device *alloc_cc770dev(int sizeof_priv); |
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 2772d05ff11c..fedd927ba6ed 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2) | 30 | #define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2) |
31 | #define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3) | 31 | #define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3) |
32 | #define IFI_CANFD_STCMD_BUSOFF BIT(4) | 32 | #define IFI_CANFD_STCMD_BUSOFF BIT(4) |
33 | #define IFI_CANFD_STCMD_ERROR_WARNING BIT(5) | ||
33 | #define IFI_CANFD_STCMD_BUSMONITOR BIT(16) | 34 | #define IFI_CANFD_STCMD_BUSMONITOR BIT(16) |
34 | #define IFI_CANFD_STCMD_LOOPBACK BIT(18) | 35 | #define IFI_CANFD_STCMD_LOOPBACK BIT(18) |
35 | #define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24) | 36 | #define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24) |
@@ -52,7 +53,10 @@ | |||
52 | #define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13) | 53 | #define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13) |
53 | 54 | ||
54 | #define IFI_CANFD_INTERRUPT 0xc | 55 | #define IFI_CANFD_INTERRUPT 0xc |
56 | #define IFI_CANFD_INTERRUPT_ERROR_BUSOFF BIT(0) | ||
55 | #define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1) | 57 | #define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1) |
58 | #define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG BIT(2) | ||
59 | #define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC BIT(3) | ||
56 | #define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10) | 60 | #define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10) |
57 | #define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16) | 61 | #define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16) |
58 | #define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22) | 62 | #define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22) |
@@ -61,6 +65,10 @@ | |||
61 | #define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31)) | 65 | #define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31)) |
62 | 66 | ||
63 | #define IFI_CANFD_IRQMASK 0x10 | 67 | #define IFI_CANFD_IRQMASK 0x10 |
68 | #define IFI_CANFD_IRQMASK_ERROR_BUSOFF BIT(0) | ||
69 | #define IFI_CANFD_IRQMASK_ERROR_WARNING BIT(1) | ||
70 | #define IFI_CANFD_IRQMASK_ERROR_STATE_CHG BIT(2) | ||
71 | #define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC BIT(3) | ||
64 | #define IFI_CANFD_IRQMASK_SET_ERR BIT(7) | 72 | #define IFI_CANFD_IRQMASK_SET_ERR BIT(7) |
65 | #define IFI_CANFD_IRQMASK_SET_TS BIT(15) | 73 | #define IFI_CANFD_IRQMASK_SET_TS BIT(15) |
66 | #define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16) | 74 | #define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16) |
@@ -136,6 +144,8 @@ | |||
136 | #define IFI_CANFD_SYSCLOCK 0x50 | 144 | #define IFI_CANFD_SYSCLOCK 0x50 |
137 | 145 | ||
138 | #define IFI_CANFD_VER 0x54 | 146 | #define IFI_CANFD_VER 0x54 |
147 | #define IFI_CANFD_VER_REV_MASK 0xff | ||
148 | #define IFI_CANFD_VER_REV_MIN_SUPPORTED 0x15 | ||
139 | 149 | ||
140 | #define IFI_CANFD_IP_ID 0x58 | 150 | #define IFI_CANFD_IP_ID 0x58 |
141 | #define IFI_CANFD_IP_ID_VALUE 0xD073CAFD | 151 | #define IFI_CANFD_IP_ID_VALUE 0xD073CAFD |
@@ -220,7 +230,10 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable) | |||
220 | 230 | ||
221 | if (enable) { | 231 | if (enable) { |
222 | enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY | | 232 | enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY | |
223 | IFI_CANFD_IRQMASK_RXFIFO_NEMPTY; | 233 | IFI_CANFD_IRQMASK_RXFIFO_NEMPTY | |
234 | IFI_CANFD_IRQMASK_ERROR_STATE_CHG | | ||
235 | IFI_CANFD_IRQMASK_ERROR_WARNING | | ||
236 | IFI_CANFD_IRQMASK_ERROR_BUSOFF; | ||
224 | if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) | 237 | if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) |
225 | enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER; | 238 | enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER; |
226 | } | 239 | } |
@@ -361,12 +374,13 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev) | |||
361 | return 1; | 374 | return 1; |
362 | } | 375 | } |
363 | 376 | ||
364 | static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr) | 377 | static int ifi_canfd_handle_lec_err(struct net_device *ndev) |
365 | { | 378 | { |
366 | struct ifi_canfd_priv *priv = netdev_priv(ndev); | 379 | struct ifi_canfd_priv *priv = netdev_priv(ndev); |
367 | struct net_device_stats *stats = &ndev->stats; | 380 | struct net_device_stats *stats = &ndev->stats; |
368 | struct can_frame *cf; | 381 | struct can_frame *cf; |
369 | struct sk_buff *skb; | 382 | struct sk_buff *skb; |
383 | u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); | ||
370 | const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST | | 384 | const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST | |
371 | IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST | | 385 | IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST | |
372 | IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST | | 386 | IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST | |
@@ -449,6 +463,11 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, | |||
449 | 463 | ||
450 | switch (new_state) { | 464 | switch (new_state) { |
451 | case CAN_STATE_ERROR_ACTIVE: | 465 | case CAN_STATE_ERROR_ACTIVE: |
466 | /* error active state */ | ||
467 | priv->can.can_stats.error_warning++; | ||
468 | priv->can.state = CAN_STATE_ERROR_ACTIVE; | ||
469 | break; | ||
470 | case CAN_STATE_ERROR_WARNING: | ||
452 | /* error warning state */ | 471 | /* error warning state */ |
453 | priv->can.can_stats.error_warning++; | 472 | priv->can.can_stats.error_warning++; |
454 | priv->can.state = CAN_STATE_ERROR_WARNING; | 473 | priv->can.state = CAN_STATE_ERROR_WARNING; |
@@ -477,7 +496,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, | |||
477 | ifi_canfd_get_berr_counter(ndev, &bec); | 496 | ifi_canfd_get_berr_counter(ndev, &bec); |
478 | 497 | ||
479 | switch (new_state) { | 498 | switch (new_state) { |
480 | case CAN_STATE_ERROR_ACTIVE: | 499 | case CAN_STATE_ERROR_WARNING: |
481 | /* error warning state */ | 500 | /* error warning state */ |
482 | cf->can_id |= CAN_ERR_CRTL; | 501 | cf->can_id |= CAN_ERR_CRTL; |
483 | cf->data[1] = (bec.txerr > bec.rxerr) ? | 502 | cf->data[1] = (bec.txerr > bec.rxerr) ? |
@@ -510,22 +529,21 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, | |||
510 | return 1; | 529 | return 1; |
511 | } | 530 | } |
512 | 531 | ||
513 | static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd) | 532 | static int ifi_canfd_handle_state_errors(struct net_device *ndev) |
514 | { | 533 | { |
515 | struct ifi_canfd_priv *priv = netdev_priv(ndev); | 534 | struct ifi_canfd_priv *priv = netdev_priv(ndev); |
535 | u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); | ||
516 | int work_done = 0; | 536 | int work_done = 0; |
517 | u32 isr; | ||
518 | 537 | ||
519 | /* | 538 | if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) && |
520 | * The ErrWarn condition is a little special, since the bit is | 539 | (priv->can.state != CAN_STATE_ERROR_ACTIVE)) { |
521 | * located in the INTERRUPT register instead of STCMD register. | 540 | netdev_dbg(ndev, "Error, entered active state\n"); |
522 | */ | 541 | work_done += ifi_canfd_handle_state_change(ndev, |
523 | isr = readl(priv->base + IFI_CANFD_INTERRUPT); | 542 | CAN_STATE_ERROR_ACTIVE); |
524 | if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) && | 543 | } |
544 | |||
545 | if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) && | ||
525 | (priv->can.state != CAN_STATE_ERROR_WARNING)) { | 546 | (priv->can.state != CAN_STATE_ERROR_WARNING)) { |
526 | /* Clear the interrupt */ | ||
527 | writel(IFI_CANFD_INTERRUPT_ERROR_WARNING, | ||
528 | priv->base + IFI_CANFD_INTERRUPT); | ||
529 | netdev_dbg(ndev, "Error, entered warning state\n"); | 547 | netdev_dbg(ndev, "Error, entered warning state\n"); |
530 | work_done += ifi_canfd_handle_state_change(ndev, | 548 | work_done += ifi_canfd_handle_state_change(ndev, |
531 | CAN_STATE_ERROR_WARNING); | 549 | CAN_STATE_ERROR_WARNING); |
@@ -552,18 +570,11 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) | |||
552 | { | 570 | { |
553 | struct net_device *ndev = napi->dev; | 571 | struct net_device *ndev = napi->dev; |
554 | struct ifi_canfd_priv *priv = netdev_priv(ndev); | 572 | struct ifi_canfd_priv *priv = netdev_priv(ndev); |
555 | const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE | | ||
556 | IFI_CANFD_STCMD_BUSOFF; | ||
557 | int work_done = 0; | ||
558 | |||
559 | u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); | ||
560 | u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD); | 573 | u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD); |
561 | u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); | 574 | int work_done = 0; |
562 | 575 | ||
563 | /* Handle bus state changes */ | 576 | /* Handle bus state changes */ |
564 | if ((stcmd & stcmd_state_mask) || | 577 | work_done += ifi_canfd_handle_state_errors(ndev); |
565 | ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0)) | ||
566 | work_done += ifi_canfd_handle_state_errors(ndev, stcmd); | ||
567 | 578 | ||
568 | /* Handle lost messages on RX */ | 579 | /* Handle lost messages on RX */ |
569 | if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW) | 580 | if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW) |
@@ -571,7 +582,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota) | |||
571 | 582 | ||
572 | /* Handle lec errors on the bus */ | 583 | /* Handle lec errors on the bus */ |
573 | if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) | 584 | if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) |
574 | work_done += ifi_canfd_handle_lec_err(ndev, errctr); | 585 | work_done += ifi_canfd_handle_lec_err(ndev); |
575 | 586 | ||
576 | /* Handle normal messages on RX */ | 587 | /* Handle normal messages on RX */ |
577 | if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY)) | 588 | if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY)) |
@@ -592,12 +603,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) | |||
592 | struct net_device_stats *stats = &ndev->stats; | 603 | struct net_device_stats *stats = &ndev->stats; |
593 | const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY | | 604 | const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY | |
594 | IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER | | 605 | IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER | |
606 | IFI_CANFD_INTERRUPT_ERROR_COUNTER | | ||
607 | IFI_CANFD_INTERRUPT_ERROR_STATE_CHG | | ||
595 | IFI_CANFD_INTERRUPT_ERROR_WARNING | | 608 | IFI_CANFD_INTERRUPT_ERROR_WARNING | |
596 | IFI_CANFD_INTERRUPT_ERROR_COUNTER; | 609 | IFI_CANFD_INTERRUPT_ERROR_BUSOFF; |
597 | const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY | | 610 | const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY | |
598 | IFI_CANFD_INTERRUPT_TXFIFO_REMOVE; | 611 | IFI_CANFD_INTERRUPT_TXFIFO_REMOVE; |
599 | const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ | | 612 | const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ); |
600 | IFI_CANFD_INTERRUPT_ERROR_WARNING)); | ||
601 | u32 isr; | 613 | u32 isr; |
602 | 614 | ||
603 | isr = readl(priv->base + IFI_CANFD_INTERRUPT); | 615 | isr = readl(priv->base + IFI_CANFD_INTERRUPT); |
@@ -933,7 +945,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) | |||
933 | struct resource *res; | 945 | struct resource *res; |
934 | void __iomem *addr; | 946 | void __iomem *addr; |
935 | int irq, ret; | 947 | int irq, ret; |
936 | u32 id; | 948 | u32 id, rev; |
937 | 949 | ||
938 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 950 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
939 | addr = devm_ioremap_resource(dev, res); | 951 | addr = devm_ioremap_resource(dev, res); |
@@ -947,6 +959,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) | |||
947 | return -EINVAL; | 959 | return -EINVAL; |
948 | } | 960 | } |
949 | 961 | ||
962 | rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK; | ||
963 | if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) { | ||
964 | dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n", | ||
965 | rev, IFI_CANFD_VER_REV_MIN_SUPPORTED); | ||
966 | return -EINVAL; | ||
967 | } | ||
968 | |||
950 | ndev = alloc_candev(sizeof(*priv), 1); | 969 | ndev = alloc_candev(sizeof(*priv), 1); |
951 | if (!ndev) | 970 | if (!ndev) |
952 | return -ENOMEM; | 971 | return -ENOMEM; |
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 2594f7779c6f..b397a33f3d32 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/pm_runtime.h> | 26 | #include <linux/pm_runtime.h> |
27 | #include <linux/iopoll.h> | 27 | #include <linux/iopoll.h> |
28 | #include <linux/can/dev.h> | 28 | #include <linux/can/dev.h> |
29 | #include <linux/pinctrl/consumer.h> | ||
29 | 30 | ||
30 | /* napi related */ | 31 | /* napi related */ |
31 | #define M_CAN_NAPI_WEIGHT 64 | 32 | #define M_CAN_NAPI_WEIGHT 64 |
@@ -253,7 +254,7 @@ enum m_can_mram_cfg { | |||
253 | 254 | ||
254 | /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ | 255 | /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ |
255 | #define RXFC_FWM_SHIFT 24 | 256 | #define RXFC_FWM_SHIFT 24 |
256 | #define RXFC_FWM_MASK (0x7f < RXFC_FWM_SHIFT) | 257 | #define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT) |
257 | #define RXFC_FS_SHIFT 16 | 258 | #define RXFC_FS_SHIFT 16 |
258 | #define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT) | 259 | #define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT) |
259 | 260 | ||
@@ -1700,6 +1701,8 @@ static __maybe_unused int m_can_suspend(struct device *dev) | |||
1700 | m_can_clk_stop(priv); | 1701 | m_can_clk_stop(priv); |
1701 | } | 1702 | } |
1702 | 1703 | ||
1704 | pinctrl_pm_select_sleep_state(dev); | ||
1705 | |||
1703 | priv->can.state = CAN_STATE_SLEEPING; | 1706 | priv->can.state = CAN_STATE_SLEEPING; |
1704 | 1707 | ||
1705 | return 0; | 1708 | return 0; |
@@ -1710,6 +1713,8 @@ static __maybe_unused int m_can_resume(struct device *dev) | |||
1710 | struct net_device *ndev = dev_get_drvdata(dev); | 1713 | struct net_device *ndev = dev_get_drvdata(dev); |
1711 | struct m_can_priv *priv = netdev_priv(ndev); | 1714 | struct m_can_priv *priv = netdev_priv(ndev); |
1712 | 1715 | ||
1716 | pinctrl_pm_select_default_state(dev); | ||
1717 | |||
1713 | m_can_init_ram(priv); | 1718 | m_can_init_ram(priv); |
1714 | 1719 | ||
1715 | priv->can.state = CAN_STATE_ERROR_ACTIVE; | 1720 | priv->can.state = CAN_STATE_ERROR_ACTIVE; |
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 55513411a82e..ed8561d4a90f 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c | |||
@@ -262,7 +262,6 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, | |||
262 | 262 | ||
263 | spin_lock_irqsave(&priv->echo_lock, flags); | 263 | spin_lock_irqsave(&priv->echo_lock, flags); |
264 | can_get_echo_skb(priv->ndev, msg->client); | 264 | can_get_echo_skb(priv->ndev, msg->client); |
265 | spin_unlock_irqrestore(&priv->echo_lock, flags); | ||
266 | 265 | ||
267 | /* count bytes of the echo instead of skb */ | 266 | /* count bytes of the echo instead of skb */ |
268 | stats->tx_bytes += cf_len; | 267 | stats->tx_bytes += cf_len; |
@@ -271,6 +270,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, | |||
271 | /* restart tx queue (a slot is free) */ | 270 | /* restart tx queue (a slot is free) */ |
272 | netif_wake_queue(priv->ndev); | 271 | netif_wake_queue(priv->ndev); |
273 | 272 | ||
273 | spin_unlock_irqrestore(&priv->echo_lock, flags); | ||
274 | return 0; | 274 | return 0; |
275 | } | 275 | } |
276 | 276 | ||
@@ -333,7 +333,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, | |||
333 | 333 | ||
334 | /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ | 334 | /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ |
335 | if (pucan_status_is_rx_barrier(msg)) { | 335 | if (pucan_status_is_rx_barrier(msg)) { |
336 | unsigned long flags; | ||
337 | 336 | ||
338 | if (priv->enable_tx_path) { | 337 | if (priv->enable_tx_path) { |
339 | int err = priv->enable_tx_path(priv); | 338 | int err = priv->enable_tx_path(priv); |
@@ -342,16 +341,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, | |||
342 | return err; | 341 | return err; |
343 | } | 342 | } |
344 | 343 | ||
345 | /* restart network queue only if echo skb array is free */ | 344 | /* start network queue (echo_skb array is empty) */ |
346 | spin_lock_irqsave(&priv->echo_lock, flags); | 345 | netif_start_queue(ndev); |
347 | |||
348 | if (!priv->can.echo_skb[priv->echo_idx]) { | ||
349 | spin_unlock_irqrestore(&priv->echo_lock, flags); | ||
350 | |||
351 | netif_wake_queue(ndev); | ||
352 | } else { | ||
353 | spin_unlock_irqrestore(&priv->echo_lock, flags); | ||
354 | } | ||
355 | 346 | ||
356 | return 0; | 347 | return 0; |
357 | } | 348 | } |
@@ -726,11 +717,6 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, | |||
726 | */ | 717 | */ |
727 | should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]); | 718 | should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]); |
728 | 719 | ||
729 | spin_unlock_irqrestore(&priv->echo_lock, flags); | ||
730 | |||
731 | /* write the skb on the interface */ | ||
732 | priv->write_tx_msg(priv, msg); | ||
733 | |||
734 | /* stop network tx queue if not enough room to save one more msg too */ | 720 | /* stop network tx queue if not enough room to save one more msg too */ |
735 | if (priv->can.ctrlmode & CAN_CTRLMODE_FD) | 721 | if (priv->can.ctrlmode & CAN_CTRLMODE_FD) |
736 | should_stop_tx_queue |= (room_left < | 722 | should_stop_tx_queue |= (room_left < |
@@ -742,6 +728,11 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, | |||
742 | if (should_stop_tx_queue) | 728 | if (should_stop_tx_queue) |
743 | netif_stop_queue(ndev); | 729 | netif_stop_queue(ndev); |
744 | 730 | ||
731 | spin_unlock_irqrestore(&priv->echo_lock, flags); | ||
732 | |||
733 | /* write the skb on the interface */ | ||
734 | priv->write_tx_msg(priv, msg); | ||
735 | |||
745 | return NETDEV_TX_OK; | 736 | return NETDEV_TX_OK; |
746 | } | 737 | } |
747 | 738 | ||
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c index 788c3464a3b0..3c51a884db87 100644 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c | |||
@@ -349,8 +349,12 @@ static irqreturn_t pciefd_irq_handler(int irq, void *arg) | |||
349 | priv->tx_pages_free++; | 349 | priv->tx_pages_free++; |
350 | spin_unlock_irqrestore(&priv->tx_lock, flags); | 350 | spin_unlock_irqrestore(&priv->tx_lock, flags); |
351 | 351 | ||
352 | /* wake producer up */ | 352 | /* wake producer up (only if enough room in echo_skb array) */ |
353 | netif_wake_queue(priv->ucan.ndev); | 353 | spin_lock_irqsave(&priv->ucan.echo_lock, flags); |
354 | if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx]) | ||
355 | netif_wake_queue(priv->ucan.ndev); | ||
356 | |||
357 | spin_unlock_irqrestore(&priv->ucan.echo_lock, flags); | ||
354 | } | 358 | } |
355 | 359 | ||
356 | /* re-enable Rx DMA transfer for this CAN */ | 360 | /* re-enable Rx DMA transfer for this CAN */ |
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index d040aeb45172..15c2a831edf1 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile | |||
@@ -1,7 +1,10 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o | 2 | obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o |
3 | bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o | 3 | bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o |
4 | obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o | 4 | obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o |
5 | ifdef CONFIG_NET_DSA_LOOP | ||
6 | obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o | ||
7 | endif | ||
5 | obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o | 8 | obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o |
6 | obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o | 9 | obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o |
7 | obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o | 10 | obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o |
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index db830a1141d9..63e02a54d537 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c | |||
@@ -814,8 +814,8 @@ void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) | |||
814 | unsigned int i; | 814 | unsigned int i; |
815 | 815 | ||
816 | for (i = 0; i < mib_size; i++) | 816 | for (i = 0; i < mib_size; i++) |
817 | memcpy(data + i * ETH_GSTRING_LEN, | 817 | strlcpy(data + i * ETH_GSTRING_LEN, |
818 | mibs[i].name, ETH_GSTRING_LEN); | 818 | mibs[i].name, ETH_GSTRING_LEN); |
819 | } | 819 | } |
820 | EXPORT_SYMBOL(b53_get_strings); | 820 | EXPORT_SYMBOL(b53_get_strings); |
821 | 821 | ||
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index 29c3075bfb05..fdc673484add 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | config NET_VENDOR_8390 | 5 | config NET_VENDOR_8390 |
6 | bool "National Semi-conductor 8390 devices" | 6 | bool "National Semiconductor 8390 devices" |
7 | default y | 7 | default y |
8 | depends on NET_VENDOR_NATSEMI | 8 | depends on NET_VENDOR_NATSEMI |
9 | ---help--- | 9 | ---help--- |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 3e5833cf1fab..eb23f9ba1a9a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c | |||
@@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev) | |||
426 | struct net_device *netdev = pdata->netdev; | 426 | struct net_device *netdev = pdata->netdev; |
427 | int ret = 0; | 427 | int ret = 0; |
428 | 428 | ||
429 | XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff); | ||
430 | |||
429 | pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER; | 431 | pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER; |
430 | XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl); | 432 | XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl); |
431 | 433 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 0b49f1aeebd3..fc7383106946 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | |||
@@ -36,6 +36,8 @@ | |||
36 | #define AQ_CFG_TX_FRAME_MAX (16U * 1024U) | 36 | #define AQ_CFG_TX_FRAME_MAX (16U * 1024U) |
37 | #define AQ_CFG_RX_FRAME_MAX (4U * 1024U) | 37 | #define AQ_CFG_RX_FRAME_MAX (4U * 1024U) |
38 | 38 | ||
39 | #define AQ_CFG_TX_CLEAN_BUDGET 256U | ||
40 | |||
39 | /* LRO */ | 41 | /* LRO */ |
40 | #define AQ_CFG_IS_LRO_DEF 1U | 42 | #define AQ_CFG_IS_LRO_DEF 1U |
41 | 43 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index ebbaf63eaf47..c96a92118b8b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c | |||
@@ -247,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self) | |||
247 | self->ndev->hw_features |= aq_hw_caps->hw_features; | 247 | self->ndev->hw_features |= aq_hw_caps->hw_features; |
248 | self->ndev->features = aq_hw_caps->hw_features; | 248 | self->ndev->features = aq_hw_caps->hw_features; |
249 | self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; | 249 | self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; |
250 | self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | ||
251 | |||
250 | self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; | 252 | self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; |
251 | self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN; | 253 | self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN; |
252 | 254 | ||
@@ -937,3 +939,23 @@ err_exit: | |||
937 | out: | 939 | out: |
938 | return err; | 940 | return err; |
939 | } | 941 | } |
942 | |||
943 | void aq_nic_shutdown(struct aq_nic_s *self) | ||
944 | { | ||
945 | int err = 0; | ||
946 | |||
947 | if (!self->ndev) | ||
948 | return; | ||
949 | |||
950 | rtnl_lock(); | ||
951 | |||
952 | netif_device_detach(self->ndev); | ||
953 | |||
954 | err = aq_nic_stop(self); | ||
955 | if (err < 0) | ||
956 | goto err_exit; | ||
957 | aq_nic_deinit(self); | ||
958 | |||
959 | err_exit: | ||
960 | rtnl_unlock(); | ||
961 | } \ No newline at end of file | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index d16b0f1a95aa..219b550d1665 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h | |||
@@ -118,5 +118,6 @@ struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); | |||
118 | u32 aq_nic_get_fw_version(struct aq_nic_s *self); | 118 | u32 aq_nic_get_fw_version(struct aq_nic_s *self); |
119 | int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); | 119 | int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); |
120 | int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self); | 120 | int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self); |
121 | void aq_nic_shutdown(struct aq_nic_s *self); | ||
121 | 122 | ||
122 | #endif /* AQ_NIC_H */ | 123 | #endif /* AQ_NIC_H */ |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 22889fc158f2..ecc6306f940f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | |||
@@ -226,6 +226,10 @@ static int aq_pci_probe(struct pci_dev *pdev, | |||
226 | goto err_ioremap; | 226 | goto err_ioremap; |
227 | 227 | ||
228 | self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL); | 228 | self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL); |
229 | if (!self->aq_hw) { | ||
230 | err = -ENOMEM; | ||
231 | goto err_ioremap; | ||
232 | } | ||
229 | self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self); | 233 | self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self); |
230 | 234 | ||
231 | for (bar = 0; bar < 4; ++bar) { | 235 | for (bar = 0; bar < 4; ++bar) { |
@@ -235,19 +239,19 @@ static int aq_pci_probe(struct pci_dev *pdev, | |||
235 | mmio_pa = pci_resource_start(pdev, bar); | 239 | mmio_pa = pci_resource_start(pdev, bar); |
236 | if (mmio_pa == 0U) { | 240 | if (mmio_pa == 0U) { |
237 | err = -EIO; | 241 | err = -EIO; |
238 | goto err_ioremap; | 242 | goto err_free_aq_hw; |
239 | } | 243 | } |
240 | 244 | ||
241 | reg_sz = pci_resource_len(pdev, bar); | 245 | reg_sz = pci_resource_len(pdev, bar); |
242 | if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { | 246 | if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) { |
243 | err = -EIO; | 247 | err = -EIO; |
244 | goto err_ioremap; | 248 | goto err_free_aq_hw; |
245 | } | 249 | } |
246 | 250 | ||
247 | self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz); | 251 | self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz); |
248 | if (!self->aq_hw->mmio) { | 252 | if (!self->aq_hw->mmio) { |
249 | err = -EIO; | 253 | err = -EIO; |
250 | goto err_ioremap; | 254 | goto err_free_aq_hw; |
251 | } | 255 | } |
252 | break; | 256 | break; |
253 | } | 257 | } |
@@ -255,7 +259,7 @@ static int aq_pci_probe(struct pci_dev *pdev, | |||
255 | 259 | ||
256 | if (bar == 4) { | 260 | if (bar == 4) { |
257 | err = -EIO; | 261 | err = -EIO; |
258 | goto err_ioremap; | 262 | goto err_free_aq_hw; |
259 | } | 263 | } |
260 | 264 | ||
261 | numvecs = min((u8)AQ_CFG_VECS_DEF, | 265 | numvecs = min((u8)AQ_CFG_VECS_DEF, |
@@ -290,6 +294,8 @@ err_register: | |||
290 | aq_pci_free_irq_vectors(self); | 294 | aq_pci_free_irq_vectors(self); |
291 | err_hwinit: | 295 | err_hwinit: |
292 | iounmap(self->aq_hw->mmio); | 296 | iounmap(self->aq_hw->mmio); |
297 | err_free_aq_hw: | ||
298 | kfree(self->aq_hw); | ||
293 | err_ioremap: | 299 | err_ioremap: |
294 | free_netdev(ndev); | 300 | free_netdev(ndev); |
295 | err_pci_func: | 301 | err_pci_func: |
@@ -317,6 +323,20 @@ static void aq_pci_remove(struct pci_dev *pdev) | |||
317 | pci_disable_device(pdev); | 323 | pci_disable_device(pdev); |
318 | } | 324 | } |
319 | 325 | ||
326 | static void aq_pci_shutdown(struct pci_dev *pdev) | ||
327 | { | ||
328 | struct aq_nic_s *self = pci_get_drvdata(pdev); | ||
329 | |||
330 | aq_nic_shutdown(self); | ||
331 | |||
332 | pci_disable_device(pdev); | ||
333 | |||
334 | if (system_state == SYSTEM_POWER_OFF) { | ||
335 | pci_wake_from_d3(pdev, false); | ||
336 | pci_set_power_state(pdev, PCI_D3hot); | ||
337 | } | ||
338 | } | ||
339 | |||
320 | static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) | 340 | static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) |
321 | { | 341 | { |
322 | struct aq_nic_s *self = pci_get_drvdata(pdev); | 342 | struct aq_nic_s *self = pci_get_drvdata(pdev); |
@@ -339,6 +359,7 @@ static struct pci_driver aq_pci_ops = { | |||
339 | .remove = aq_pci_remove, | 359 | .remove = aq_pci_remove, |
340 | .suspend = aq_pci_suspend, | 360 | .suspend = aq_pci_suspend, |
341 | .resume = aq_pci_resume, | 361 | .resume = aq_pci_resume, |
362 | .shutdown = aq_pci_shutdown, | ||
342 | }; | 363 | }; |
343 | 364 | ||
344 | module_pci_driver(aq_pci_ops); | 365 | module_pci_driver(aq_pci_ops); |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 0be6a11370bb..b5f1f62e8e25 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c | |||
@@ -136,11 +136,12 @@ void aq_ring_queue_stop(struct aq_ring_s *ring) | |||
136 | netif_stop_subqueue(ndev, ring->idx); | 136 | netif_stop_subqueue(ndev, ring->idx); |
137 | } | 137 | } |
138 | 138 | ||
139 | void aq_ring_tx_clean(struct aq_ring_s *self) | 139 | bool aq_ring_tx_clean(struct aq_ring_s *self) |
140 | { | 140 | { |
141 | struct device *dev = aq_nic_get_dev(self->aq_nic); | 141 | struct device *dev = aq_nic_get_dev(self->aq_nic); |
142 | unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET; | ||
142 | 143 | ||
143 | for (; self->sw_head != self->hw_head; | 144 | for (; self->sw_head != self->hw_head && budget--; |
144 | self->sw_head = aq_ring_next_dx(self, self->sw_head)) { | 145 | self->sw_head = aq_ring_next_dx(self, self->sw_head)) { |
145 | struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; | 146 | struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; |
146 | 147 | ||
@@ -167,6 +168,8 @@ void aq_ring_tx_clean(struct aq_ring_s *self) | |||
167 | buff->pa = 0U; | 168 | buff->pa = 0U; |
168 | buff->eop_index = 0xffffU; | 169 | buff->eop_index = 0xffffU; |
169 | } | 170 | } |
171 | |||
172 | return !!budget; | ||
170 | } | 173 | } |
171 | 174 | ||
172 | #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) | 175 | #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 965fae0fb6e0..ac1329f4051d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h | |||
@@ -153,7 +153,7 @@ void aq_ring_free(struct aq_ring_s *self); | |||
153 | void aq_ring_update_queue_state(struct aq_ring_s *ring); | 153 | void aq_ring_update_queue_state(struct aq_ring_s *ring); |
154 | void aq_ring_queue_wake(struct aq_ring_s *ring); | 154 | void aq_ring_queue_wake(struct aq_ring_s *ring); |
155 | void aq_ring_queue_stop(struct aq_ring_s *ring); | 155 | void aq_ring_queue_stop(struct aq_ring_s *ring); |
156 | void aq_ring_tx_clean(struct aq_ring_s *self); | 156 | bool aq_ring_tx_clean(struct aq_ring_s *self); |
157 | int aq_ring_rx_clean(struct aq_ring_s *self, | 157 | int aq_ring_rx_clean(struct aq_ring_s *self, |
158 | struct napi_struct *napi, | 158 | struct napi_struct *napi, |
159 | int *work_done, | 159 | int *work_done, |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index f890b8a5a862..d335c334fa56 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c | |||
@@ -35,12 +35,12 @@ struct aq_vec_s { | |||
35 | static int aq_vec_poll(struct napi_struct *napi, int budget) | 35 | static int aq_vec_poll(struct napi_struct *napi, int budget) |
36 | { | 36 | { |
37 | struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); | 37 | struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); |
38 | unsigned int sw_tail_old = 0U; | ||
38 | struct aq_ring_s *ring = NULL; | 39 | struct aq_ring_s *ring = NULL; |
40 | bool was_tx_cleaned = true; | ||
41 | unsigned int i = 0U; | ||
39 | int work_done = 0; | 42 | int work_done = 0; |
40 | int err = 0; | 43 | int err = 0; |
41 | unsigned int i = 0U; | ||
42 | unsigned int sw_tail_old = 0U; | ||
43 | bool was_tx_cleaned = false; | ||
44 | 44 | ||
45 | if (!self) { | 45 | if (!self) { |
46 | err = -EINVAL; | 46 | err = -EINVAL; |
@@ -57,9 +57,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) | |||
57 | 57 | ||
58 | if (ring[AQ_VEC_TX_ID].sw_head != | 58 | if (ring[AQ_VEC_TX_ID].sw_head != |
59 | ring[AQ_VEC_TX_ID].hw_head) { | 59 | ring[AQ_VEC_TX_ID].hw_head) { |
60 | aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); | 60 | was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); |
61 | aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]); | 61 | aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]); |
62 | was_tx_cleaned = true; | ||
63 | } | 62 | } |
64 | 63 | ||
65 | err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw, | 64 | err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw, |
@@ -90,7 +89,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) | |||
90 | } | 89 | } |
91 | } | 90 | } |
92 | 91 | ||
93 | if (was_tx_cleaned) | 92 | if (!was_tx_cleaned) |
94 | work_done = budget; | 93 | work_done = budget; |
95 | 94 | ||
96 | if (work_done < budget) { | 95 | if (work_done < budget) { |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 967f0fd07fcf..d3b847ec7465 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | |||
@@ -21,6 +21,10 @@ | |||
21 | 21 | ||
22 | #define HW_ATL_UCP_0X370_REG 0x0370U | 22 | #define HW_ATL_UCP_0X370_REG 0x0370U |
23 | 23 | ||
24 | #define HW_ATL_MIF_CMD 0x0200U | ||
25 | #define HW_ATL_MIF_ADDR 0x0208U | ||
26 | #define HW_ATL_MIF_VAL 0x020CU | ||
27 | |||
24 | #define HW_ATL_FW_SM_RAM 0x2U | 28 | #define HW_ATL_FW_SM_RAM 0x2U |
25 | #define HW_ATL_MPI_FW_VERSION 0x18 | 29 | #define HW_ATL_MPI_FW_VERSION 0x18 |
26 | #define HW_ATL_MPI_CONTROL_ADR 0x0368U | 30 | #define HW_ATL_MPI_CONTROL_ADR 0x0368U |
@@ -79,16 +83,15 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) | |||
79 | 83 | ||
80 | static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) | 84 | static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) |
81 | { | 85 | { |
86 | u32 gsr, val; | ||
82 | int k = 0; | 87 | int k = 0; |
83 | u32 gsr; | ||
84 | 88 | ||
85 | aq_hw_write_reg(self, 0x404, 0x40e1); | 89 | aq_hw_write_reg(self, 0x404, 0x40e1); |
86 | AQ_HW_SLEEP(50); | 90 | AQ_HW_SLEEP(50); |
87 | 91 | ||
88 | /* Cleanup SPI */ | 92 | /* Cleanup SPI */ |
89 | aq_hw_write_reg(self, 0x534, 0xA0); | 93 | val = aq_hw_read_reg(self, 0x53C); |
90 | aq_hw_write_reg(self, 0x100, 0x9F); | 94 | aq_hw_write_reg(self, 0x53C, val | 0x10); |
91 | aq_hw_write_reg(self, 0x100, 0x809F); | ||
92 | 95 | ||
93 | gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR); | 96 | gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR); |
94 | aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000); | 97 | aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000); |
@@ -97,7 +100,14 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) | |||
97 | aq_hw_write_reg(self, 0x404, 0x80e0); | 100 | aq_hw_write_reg(self, 0x404, 0x80e0); |
98 | aq_hw_write_reg(self, 0x32a8, 0x0); | 101 | aq_hw_write_reg(self, 0x32a8, 0x0); |
99 | aq_hw_write_reg(self, 0x520, 0x1); | 102 | aq_hw_write_reg(self, 0x520, 0x1); |
103 | |||
104 | /* Reset SPI again because of possible interrupted SPI burst */ | ||
105 | val = aq_hw_read_reg(self, 0x53C); | ||
106 | aq_hw_write_reg(self, 0x53C, val | 0x10); | ||
100 | AQ_HW_SLEEP(10); | 107 | AQ_HW_SLEEP(10); |
108 | /* Clear SPI reset state */ | ||
109 | aq_hw_write_reg(self, 0x53C, val & ~0x10); | ||
110 | |||
101 | aq_hw_write_reg(self, 0x404, 0x180e0); | 111 | aq_hw_write_reg(self, 0x404, 0x180e0); |
102 | 112 | ||
103 | for (k = 0; k < 1000; k++) { | 113 | for (k = 0; k < 1000; k++) { |
@@ -141,13 +151,15 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) | |||
141 | aq_pr_err("FW kickstart failed\n"); | 151 | aq_pr_err("FW kickstart failed\n"); |
142 | return -EIO; | 152 | return -EIO; |
143 | } | 153 | } |
154 | /* Old FW requires fixed delay after init */ | ||
155 | AQ_HW_SLEEP(15); | ||
144 | 156 | ||
145 | return 0; | 157 | return 0; |
146 | } | 158 | } |
147 | 159 | ||
148 | static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) | 160 | static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) |
149 | { | 161 | { |
150 | u32 gsr, rbl_status; | 162 | u32 gsr, val, rbl_status; |
151 | int k; | 163 | int k; |
152 | 164 | ||
153 | aq_hw_write_reg(self, 0x404, 0x40e1); | 165 | aq_hw_write_reg(self, 0x404, 0x40e1); |
@@ -157,6 +169,10 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) | |||
157 | /* Alter RBL status */ | 169 | /* Alter RBL status */ |
158 | aq_hw_write_reg(self, 0x388, 0xDEAD); | 170 | aq_hw_write_reg(self, 0x388, 0xDEAD); |
159 | 171 | ||
172 | /* Cleanup SPI */ | ||
173 | val = aq_hw_read_reg(self, 0x53C); | ||
174 | aq_hw_write_reg(self, 0x53C, val | 0x10); | ||
175 | |||
160 | /* Global software reset*/ | 176 | /* Global software reset*/ |
161 | hw_atl_rx_rx_reg_res_dis_set(self, 0U); | 177 | hw_atl_rx_rx_reg_res_dis_set(self, 0U); |
162 | hw_atl_tx_tx_reg_res_dis_set(self, 0U); | 178 | hw_atl_tx_tx_reg_res_dis_set(self, 0U); |
@@ -204,6 +220,8 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) | |||
204 | aq_pr_err("FW kickstart failed\n"); | 220 | aq_pr_err("FW kickstart failed\n"); |
205 | return -EIO; | 221 | return -EIO; |
206 | } | 222 | } |
223 | /* Old FW requires fixed delay after init */ | ||
224 | AQ_HW_SLEEP(15); | ||
207 | 225 | ||
208 | return 0; | 226 | return 0; |
209 | } | 227 | } |
@@ -255,18 +273,22 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, | |||
255 | } | 273 | } |
256 | } | 274 | } |
257 | 275 | ||
258 | aq_hw_write_reg(self, 0x00000208U, a); | 276 | aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a); |
259 | |||
260 | for (++cnt; --cnt;) { | ||
261 | u32 i = 0U; | ||
262 | 277 | ||
263 | aq_hw_write_reg(self, 0x00000200U, 0x00008000U); | 278 | for (++cnt; --cnt && !err;) { |
279 | aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U); | ||
264 | 280 | ||
265 | for (i = 1024U; | 281 | if (IS_CHIP_FEATURE(REVISION_B1)) |
266 | (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { | 282 | AQ_HW_WAIT_FOR(a != aq_hw_read_reg(self, |
267 | } | 283 | HW_ATL_MIF_ADDR), |
284 | 1, 1000U); | ||
285 | else | ||
286 | AQ_HW_WAIT_FOR(!(0x100 & aq_hw_read_reg(self, | ||
287 | HW_ATL_MIF_CMD)), | ||
288 | 1, 1000U); | ||
268 | 289 | ||
269 | *(p++) = aq_hw_read_reg(self, 0x0000020CU); | 290 | *(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL); |
291 | a += 4; | ||
270 | } | 292 | } |
271 | 293 | ||
272 | hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); | 294 | hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); |
@@ -662,14 +684,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p) | |||
662 | u32 val = hw_atl_reg_glb_mif_id_get(self); | 684 | u32 val = hw_atl_reg_glb_mif_id_get(self); |
663 | u32 mif_rev = val & 0xFFU; | 685 | u32 mif_rev = val & 0xFFU; |
664 | 686 | ||
665 | if ((3U & mif_rev) == 1U) { | 687 | if ((0xFU & mif_rev) == 1U) { |
666 | chip_features |= | 688 | chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 | |
667 | HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 | | ||
668 | HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | | 689 | HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | |
669 | HAL_ATLANTIC_UTILS_CHIP_MIPS; | 690 | HAL_ATLANTIC_UTILS_CHIP_MIPS; |
670 | } else if ((3U & mif_rev) == 2U) { | 691 | } else if ((0xFU & mif_rev) == 2U) { |
671 | chip_features |= | 692 | chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | |
672 | HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | | 693 | HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | |
694 | HAL_ATLANTIC_UTILS_CHIP_MIPS | | ||
695 | HAL_ATLANTIC_UTILS_CHIP_TPO2 | | ||
696 | HAL_ATLANTIC_UTILS_CHIP_RPF2; | ||
697 | } else if ((0xFU & mif_rev) == 0xAU) { | ||
698 | chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 | | ||
673 | HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | | 699 | HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | |
674 | HAL_ATLANTIC_UTILS_CHIP_MIPS | | 700 | HAL_ATLANTIC_UTILS_CHIP_MIPS | |
675 | HAL_ATLANTIC_UTILS_CHIP_TPO2 | | 701 | HAL_ATLANTIC_UTILS_CHIP_TPO2 | |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index 2c690947910a..cd8f18f39c61 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | |||
@@ -161,6 +161,7 @@ struct __packed hw_aq_atl_utils_mbox { | |||
161 | #define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U | 161 | #define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U |
162 | #define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U | 162 | #define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U |
163 | #define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U | 163 | #define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U |
164 | #define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 0x04000000U | ||
164 | 165 | ||
165 | #define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \ | 166 | #define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \ |
166 | self->chip_features) | 167 | self->chip_features) |
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h index 5265b937677b..a445de6837a6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/ver.h +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h | |||
@@ -13,7 +13,7 @@ | |||
13 | #define NIC_MAJOR_DRIVER_VERSION 2 | 13 | #define NIC_MAJOR_DRIVER_VERSION 2 |
14 | #define NIC_MINOR_DRIVER_VERSION 0 | 14 | #define NIC_MINOR_DRIVER_VERSION 0 |
15 | #define NIC_BUILD_DRIVER_VERSION 2 | 15 | #define NIC_BUILD_DRIVER_VERSION 2 |
16 | #define NIC_REVISION_DRIVER_VERSION 0 | 16 | #define NIC_REVISION_DRIVER_VERSION 1 |
17 | 17 | ||
18 | #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" | 18 | #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" |
19 | 19 | ||
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index 16f9bee992fe..0f6576802607 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c | |||
@@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev) | |||
169 | /* Optional regulator for PHY */ | 169 | /* Optional regulator for PHY */ |
170 | priv->regulator = devm_regulator_get_optional(dev, "phy"); | 170 | priv->regulator = devm_regulator_get_optional(dev, "phy"); |
171 | if (IS_ERR(priv->regulator)) { | 171 | if (IS_ERR(priv->regulator)) { |
172 | if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) | 172 | if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) { |
173 | return -EPROBE_DEFER; | 173 | err = -EPROBE_DEFER; |
174 | goto out_clk_disable; | ||
175 | } | ||
174 | dev_err(dev, "no regulator found\n"); | 176 | dev_err(dev, "no regulator found\n"); |
175 | priv->regulator = NULL; | 177 | priv->regulator = NULL; |
176 | } | 178 | } |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f15a8fc6dfc9..3fc549b88c43 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, | |||
855 | static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, | 855 | static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, |
856 | struct bcm_sysport_tx_ring *ring) | 856 | struct bcm_sysport_tx_ring *ring) |
857 | { | 857 | { |
858 | unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; | ||
859 | unsigned int pkts_compl = 0, bytes_compl = 0; | 858 | unsigned int pkts_compl = 0, bytes_compl = 0; |
860 | struct net_device *ndev = priv->netdev; | 859 | struct net_device *ndev = priv->netdev; |
860 | unsigned int txbds_processed = 0; | ||
861 | struct bcm_sysport_cb *cb; | 861 | struct bcm_sysport_cb *cb; |
862 | unsigned int txbds_ready; | ||
863 | unsigned int c_index; | ||
862 | u32 hw_ind; | 864 | u32 hw_ind; |
863 | 865 | ||
864 | /* Clear status before servicing to reduce spurious interrupts */ | 866 | /* Clear status before servicing to reduce spurious interrupts */ |
@@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, | |||
871 | /* Compute how many descriptors have been processed since last call */ | 873 | /* Compute how many descriptors have been processed since last call */ |
872 | hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); | 874 | hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); |
873 | c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; | 875 | c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; |
874 | ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); | 876 | txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK; |
875 | |||
876 | last_c_index = ring->c_index; | ||
877 | num_tx_cbs = ring->size; | ||
878 | |||
879 | c_index &= (num_tx_cbs - 1); | ||
880 | |||
881 | if (c_index >= last_c_index) | ||
882 | last_tx_cn = c_index - last_c_index; | ||
883 | else | ||
884 | last_tx_cn = num_tx_cbs - last_c_index + c_index; | ||
885 | 877 | ||
886 | netif_dbg(priv, tx_done, ndev, | 878 | netif_dbg(priv, tx_done, ndev, |
887 | "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", | 879 | "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", |
888 | ring->index, c_index, last_tx_cn, last_c_index); | 880 | ring->index, ring->c_index, c_index, txbds_ready); |
889 | 881 | ||
890 | while (last_tx_cn-- > 0) { | 882 | while (txbds_processed < txbds_ready) { |
891 | cb = ring->cbs + last_c_index; | 883 | cb = &ring->cbs[ring->clean_index]; |
892 | bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); | 884 | bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); |
893 | 885 | ||
894 | ring->desc_count++; | 886 | ring->desc_count++; |
895 | last_c_index++; | 887 | txbds_processed++; |
896 | last_c_index &= (num_tx_cbs - 1); | 888 | |
889 | if (likely(ring->clean_index < ring->size - 1)) | ||
890 | ring->clean_index++; | ||
891 | else | ||
892 | ring->clean_index = 0; | ||
897 | } | 893 | } |
898 | 894 | ||
899 | u64_stats_update_begin(&priv->syncp); | 895 | u64_stats_update_begin(&priv->syncp); |
@@ -1394,6 +1390,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, | |||
1394 | netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); | 1390 | netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); |
1395 | ring->index = index; | 1391 | ring->index = index; |
1396 | ring->size = size; | 1392 | ring->size = size; |
1393 | ring->clean_index = 0; | ||
1397 | ring->alloc_size = ring->size; | 1394 | ring->alloc_size = ring->size; |
1398 | ring->desc_cpu = p; | 1395 | ring->desc_cpu = p; |
1399 | ring->desc_count = ring->size; | 1396 | ring->desc_count = ring->size; |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index f5a984c1c986..19c91c76e327 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h | |||
@@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring { | |||
706 | unsigned int desc_count; /* Number of descriptors */ | 706 | unsigned int desc_count; /* Number of descriptors */ |
707 | unsigned int curr_desc; /* Current descriptor */ | 707 | unsigned int curr_desc; /* Current descriptor */ |
708 | unsigned int c_index; /* Last consumer index */ | 708 | unsigned int c_index; /* Last consumer index */ |
709 | unsigned int p_index; /* Current producer index */ | 709 | unsigned int clean_index; /* Current clean index */ |
710 | struct bcm_sysport_cb *cbs; /* Transmit control blocks */ | 710 | struct bcm_sysport_cb *cbs; /* Transmit control blocks */ |
711 | struct dma_desc *desc_cpu; /* CPU view of the descriptor */ | 711 | struct dma_desc *desc_cpu; /* CPU view of the descriptor */ |
712 | struct bcm_sysport_priv *priv; /* private context backpointer */ | 712 | struct bcm_sysport_priv *priv; /* private context backpointer */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 74fc9af4aadb..b8388e93520a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -13913,7 +13913,7 @@ static void bnx2x_register_phc(struct bnx2x *bp) | |||
13913 | bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); | 13913 | bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); |
13914 | if (IS_ERR(bp->ptp_clock)) { | 13914 | if (IS_ERR(bp->ptp_clock)) { |
13915 | bp->ptp_clock = NULL; | 13915 | bp->ptp_clock = NULL; |
13916 | BNX2X_ERR("PTP clock registeration failed\n"); | 13916 | BNX2X_ERR("PTP clock registration failed\n"); |
13917 | } | 13917 | } |
13918 | } | 13918 | } |
13919 | 13919 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1500243b9886..c7e5e6f09647 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -1439,7 +1439,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, | |||
1439 | (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { | 1439 | (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
1440 | u16 vlan_proto = tpa_info->metadata >> | 1440 | u16 vlan_proto = tpa_info->metadata >> |
1441 | RX_CMP_FLAGS2_METADATA_TPID_SFT; | 1441 | RX_CMP_FLAGS2_METADATA_TPID_SFT; |
1442 | u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; | 1442 | u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; |
1443 | 1443 | ||
1444 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); | 1444 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); |
1445 | } | 1445 | } |
@@ -1623,7 +1623,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, | |||
1623 | cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && | 1623 | cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && |
1624 | (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { | 1624 | (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
1625 | u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); | 1625 | u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); |
1626 | u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; | 1626 | u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; |
1627 | u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; | 1627 | u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; |
1628 | 1628 | ||
1629 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); | 1629 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); |
@@ -3847,6 +3847,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) | |||
3847 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; | 3847 | struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; |
3848 | struct hwrm_vnic_tpa_cfg_input req = {0}; | 3848 | struct hwrm_vnic_tpa_cfg_input req = {0}; |
3849 | 3849 | ||
3850 | if (vnic->fw_vnic_id == INVALID_HW_RING_ID) | ||
3851 | return 0; | ||
3852 | |||
3850 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); | 3853 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); |
3851 | 3854 | ||
3852 | if (tpa_flags) { | 3855 | if (tpa_flags) { |
@@ -4558,18 +4561,17 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) | |||
4558 | return rc; | 4561 | return rc; |
4559 | } | 4562 | } |
4560 | 4563 | ||
4561 | static int | 4564 | static void |
4562 | bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, | 4565 | __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, |
4563 | int ring_grps, int cp_rings, int vnics) | 4566 | int tx_rings, int rx_rings, int ring_grps, |
4567 | int cp_rings, int vnics) | ||
4564 | { | 4568 | { |
4565 | struct hwrm_func_cfg_input req = {0}; | ||
4566 | u32 enables = 0; | 4569 | u32 enables = 0; |
4567 | int rc; | ||
4568 | 4570 | ||
4569 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); | 4571 | bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); |
4570 | req.fid = cpu_to_le16(0xffff); | 4572 | req->fid = cpu_to_le16(0xffff); |
4571 | enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; | 4573 | enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; |
4572 | req.num_tx_rings = cpu_to_le16(tx_rings); | 4574 | req->num_tx_rings = cpu_to_le16(tx_rings); |
4573 | if (bp->flags & BNXT_FLAG_NEW_RM) { | 4575 | if (bp->flags & BNXT_FLAG_NEW_RM) { |
4574 | enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; | 4576 | enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; |
4575 | enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | | 4577 | enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | |
@@ -4578,16 +4580,53 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, | |||
4578 | FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; | 4580 | FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; |
4579 | enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; | 4581 | enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; |
4580 | 4582 | ||
4581 | req.num_rx_rings = cpu_to_le16(rx_rings); | 4583 | req->num_rx_rings = cpu_to_le16(rx_rings); |
4582 | req.num_hw_ring_grps = cpu_to_le16(ring_grps); | 4584 | req->num_hw_ring_grps = cpu_to_le16(ring_grps); |
4583 | req.num_cmpl_rings = cpu_to_le16(cp_rings); | 4585 | req->num_cmpl_rings = cpu_to_le16(cp_rings); |
4584 | req.num_stat_ctxs = req.num_cmpl_rings; | 4586 | req->num_stat_ctxs = req->num_cmpl_rings; |
4585 | req.num_vnics = cpu_to_le16(vnics); | 4587 | req->num_vnics = cpu_to_le16(vnics); |
4586 | } | 4588 | } |
4587 | if (!enables) | 4589 | req->enables = cpu_to_le32(enables); |
4590 | } | ||
4591 | |||
4592 | static void | ||
4593 | __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, | ||
4594 | struct hwrm_func_vf_cfg_input *req, int tx_rings, | ||
4595 | int rx_rings, int ring_grps, int cp_rings, | ||
4596 | int vnics) | ||
4597 | { | ||
4598 | u32 enables = 0; | ||
4599 | |||
4600 | bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); | ||
4601 | enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; | ||
4602 | enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; | ||
4603 | enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | | ||
4604 | FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; | ||
4605 | enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; | ||
4606 | enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; | ||
4607 | |||
4608 | req->num_tx_rings = cpu_to_le16(tx_rings); | ||
4609 | req->num_rx_rings = cpu_to_le16(rx_rings); | ||
4610 | req->num_hw_ring_grps = cpu_to_le16(ring_grps); | ||
4611 | req->num_cmpl_rings = cpu_to_le16(cp_rings); | ||
4612 | req->num_stat_ctxs = req->num_cmpl_rings; | ||
4613 | req->num_vnics = cpu_to_le16(vnics); | ||
4614 | |||
4615 | req->enables = cpu_to_le32(enables); | ||
4616 | } | ||
4617 | |||
4618 | static int | ||
4619 | bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, | ||
4620 | int ring_grps, int cp_rings, int vnics) | ||
4621 | { | ||
4622 | struct hwrm_func_cfg_input req = {0}; | ||
4623 | int rc; | ||
4624 | |||
4625 | __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, | ||
4626 | cp_rings, vnics); | ||
4627 | if (!req.enables) | ||
4588 | return 0; | 4628 | return 0; |
4589 | 4629 | ||
4590 | req.enables = cpu_to_le32(enables); | ||
4591 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 4630 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
4592 | if (rc) | 4631 | if (rc) |
4593 | return -ENOMEM; | 4632 | return -ENOMEM; |
@@ -4604,7 +4643,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, | |||
4604 | int ring_grps, int cp_rings, int vnics) | 4643 | int ring_grps, int cp_rings, int vnics) |
4605 | { | 4644 | { |
4606 | struct hwrm_func_vf_cfg_input req = {0}; | 4645 | struct hwrm_func_vf_cfg_input req = {0}; |
4607 | u32 enables = 0; | ||
4608 | int rc; | 4646 | int rc; |
4609 | 4647 | ||
4610 | if (!(bp->flags & BNXT_FLAG_NEW_RM)) { | 4648 | if (!(bp->flags & BNXT_FLAG_NEW_RM)) { |
@@ -4612,22 +4650,8 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, | |||
4612 | return 0; | 4650 | return 0; |
4613 | } | 4651 | } |
4614 | 4652 | ||
4615 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); | 4653 | __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, |
4616 | enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; | 4654 | cp_rings, vnics); |
4617 | enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; | ||
4618 | enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | | ||
4619 | FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; | ||
4620 | enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; | ||
4621 | enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; | ||
4622 | |||
4623 | req.num_tx_rings = cpu_to_le16(tx_rings); | ||
4624 | req.num_rx_rings = cpu_to_le16(rx_rings); | ||
4625 | req.num_hw_ring_grps = cpu_to_le16(ring_grps); | ||
4626 | req.num_cmpl_rings = cpu_to_le16(cp_rings); | ||
4627 | req.num_stat_ctxs = req.num_cmpl_rings; | ||
4628 | req.num_vnics = cpu_to_le16(vnics); | ||
4629 | |||
4630 | req.enables = cpu_to_le32(enables); | ||
4631 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 4655 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
4632 | if (rc) | 4656 | if (rc) |
4633 | return -ENOMEM; | 4657 | return -ENOMEM; |
@@ -4743,39 +4767,25 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) | |||
4743 | } | 4767 | } |
4744 | 4768 | ||
4745 | static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, | 4769 | static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, |
4746 | int ring_grps, int cp_rings) | 4770 | int ring_grps, int cp_rings, int vnics) |
4747 | { | 4771 | { |
4748 | struct hwrm_func_vf_cfg_input req = {0}; | 4772 | struct hwrm_func_vf_cfg_input req = {0}; |
4749 | u32 flags, enables; | 4773 | u32 flags; |
4750 | int rc; | 4774 | int rc; |
4751 | 4775 | ||
4752 | if (!(bp->flags & BNXT_FLAG_NEW_RM)) | 4776 | if (!(bp->flags & BNXT_FLAG_NEW_RM)) |
4753 | return 0; | 4777 | return 0; |
4754 | 4778 | ||
4755 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); | 4779 | __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, |
4780 | cp_rings, vnics); | ||
4756 | flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | | 4781 | flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | |
4757 | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | | 4782 | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | |
4758 | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | | 4783 | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | |
4759 | FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | | 4784 | FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | |
4760 | FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | | 4785 | FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | |
4761 | FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; | 4786 | FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; |
4762 | enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS | | ||
4763 | FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | | ||
4764 | FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | | ||
4765 | FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | | ||
4766 | FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS | | ||
4767 | FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS; | ||
4768 | 4787 | ||
4769 | req.flags = cpu_to_le32(flags); | 4788 | req.flags = cpu_to_le32(flags); |
4770 | req.enables = cpu_to_le32(enables); | ||
4771 | req.num_tx_rings = cpu_to_le16(tx_rings); | ||
4772 | req.num_rx_rings = cpu_to_le16(rx_rings); | ||
4773 | req.num_cmpl_rings = cpu_to_le16(cp_rings); | ||
4774 | req.num_hw_ring_grps = cpu_to_le16(ring_grps); | ||
4775 | req.num_stat_ctxs = cpu_to_le16(cp_rings); | ||
4776 | req.num_vnics = cpu_to_le16(1); | ||
4777 | if (bp->flags & BNXT_FLAG_RFS) | ||
4778 | req.num_vnics = cpu_to_le16(rx_rings + 1); | ||
4779 | rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 4789 | rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
4780 | if (rc) | 4790 | if (rc) |
4781 | return -ENOMEM; | 4791 | return -ENOMEM; |
@@ -4783,38 +4793,23 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, | |||
4783 | } | 4793 | } |
4784 | 4794 | ||
4785 | static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, | 4795 | static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, |
4786 | int ring_grps, int cp_rings) | 4796 | int ring_grps, int cp_rings, int vnics) |
4787 | { | 4797 | { |
4788 | struct hwrm_func_cfg_input req = {0}; | 4798 | struct hwrm_func_cfg_input req = {0}; |
4789 | u32 flags, enables; | 4799 | u32 flags; |
4790 | int rc; | 4800 | int rc; |
4791 | 4801 | ||
4792 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); | 4802 | __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, |
4793 | req.fid = cpu_to_le16(0xffff); | 4803 | cp_rings, vnics); |
4794 | flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; | 4804 | flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; |
4795 | enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS; | 4805 | if (bp->flags & BNXT_FLAG_NEW_RM) |
4796 | req.num_tx_rings = cpu_to_le16(tx_rings); | ||
4797 | if (bp->flags & BNXT_FLAG_NEW_RM) { | ||
4798 | flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | | 4806 | flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | |
4799 | FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | | 4807 | FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | |
4800 | FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | | 4808 | FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | |
4801 | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | | 4809 | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | |
4802 | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; | 4810 | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; |
4803 | enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | | 4811 | |
4804 | FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | | ||
4805 | FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | | ||
4806 | FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | | ||
4807 | FUNC_CFG_REQ_ENABLES_NUM_VNICS; | ||
4808 | req.num_rx_rings = cpu_to_le16(rx_rings); | ||
4809 | req.num_cmpl_rings = cpu_to_le16(cp_rings); | ||
4810 | req.num_hw_ring_grps = cpu_to_le16(ring_grps); | ||
4811 | req.num_stat_ctxs = cpu_to_le16(cp_rings); | ||
4812 | req.num_vnics = cpu_to_le16(1); | ||
4813 | if (bp->flags & BNXT_FLAG_RFS) | ||
4814 | req.num_vnics = cpu_to_le16(rx_rings + 1); | ||
4815 | } | ||
4816 | req.flags = cpu_to_le32(flags); | 4812 | req.flags = cpu_to_le32(flags); |
4817 | req.enables = cpu_to_le32(enables); | ||
4818 | rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 4813 | rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
4819 | if (rc) | 4814 | if (rc) |
4820 | return -ENOMEM; | 4815 | return -ENOMEM; |
@@ -4822,17 +4817,17 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, | |||
4822 | } | 4817 | } |
4823 | 4818 | ||
4824 | static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, | 4819 | static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, |
4825 | int ring_grps, int cp_rings) | 4820 | int ring_grps, int cp_rings, int vnics) |
4826 | { | 4821 | { |
4827 | if (bp->hwrm_spec_code < 0x10801) | 4822 | if (bp->hwrm_spec_code < 0x10801) |
4828 | return 0; | 4823 | return 0; |
4829 | 4824 | ||
4830 | if (BNXT_PF(bp)) | 4825 | if (BNXT_PF(bp)) |
4831 | return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, | 4826 | return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, |
4832 | ring_grps, cp_rings); | 4827 | ring_grps, cp_rings, vnics); |
4833 | 4828 | ||
4834 | return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, | 4829 | return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, |
4835 | cp_rings); | 4830 | cp_rings, vnics); |
4836 | } | 4831 | } |
4837 | 4832 | ||
4838 | static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, | 4833 | static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, |
@@ -5865,7 +5860,6 @@ static int bnxt_init_msix(struct bnxt *bp) | |||
5865 | if (rc) | 5860 | if (rc) |
5866 | goto msix_setup_exit; | 5861 | goto msix_setup_exit; |
5867 | 5862 | ||
5868 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; | ||
5869 | bp->cp_nr_rings = (min == 1) ? | 5863 | bp->cp_nr_rings = (min == 1) ? |
5870 | max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : | 5864 | max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : |
5871 | bp->tx_nr_rings + bp->rx_nr_rings; | 5865 | bp->tx_nr_rings + bp->rx_nr_rings; |
@@ -5897,7 +5891,6 @@ static int bnxt_init_inta(struct bnxt *bp) | |||
5897 | bp->rx_nr_rings = 1; | 5891 | bp->rx_nr_rings = 1; |
5898 | bp->tx_nr_rings = 1; | 5892 | bp->tx_nr_rings = 1; |
5899 | bp->cp_nr_rings = 1; | 5893 | bp->cp_nr_rings = 1; |
5900 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; | ||
5901 | bp->flags |= BNXT_FLAG_SHARED_RINGS; | 5894 | bp->flags |= BNXT_FLAG_SHARED_RINGS; |
5902 | bp->irq_tbl[0].vector = bp->pdev->irq; | 5895 | bp->irq_tbl[0].vector = bp->pdev->irq; |
5903 | return 0; | 5896 | return 0; |
@@ -7531,7 +7524,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, | |||
7531 | int max_rx, max_tx, tx_sets = 1; | 7524 | int max_rx, max_tx, tx_sets = 1; |
7532 | int tx_rings_needed; | 7525 | int tx_rings_needed; |
7533 | int rx_rings = rx; | 7526 | int rx_rings = rx; |
7534 | int cp, rc; | 7527 | int cp, vnics, rc; |
7535 | 7528 | ||
7536 | if (tcs) | 7529 | if (tcs) |
7537 | tx_sets = tcs; | 7530 | tx_sets = tcs; |
@@ -7547,10 +7540,15 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, | |||
7547 | if (max_tx < tx_rings_needed) | 7540 | if (max_tx < tx_rings_needed) |
7548 | return -ENOMEM; | 7541 | return -ENOMEM; |
7549 | 7542 | ||
7543 | vnics = 1; | ||
7544 | if (bp->flags & BNXT_FLAG_RFS) | ||
7545 | vnics += rx_rings; | ||
7546 | |||
7550 | if (bp->flags & BNXT_FLAG_AGG_RINGS) | 7547 | if (bp->flags & BNXT_FLAG_AGG_RINGS) |
7551 | rx_rings <<= 1; | 7548 | rx_rings <<= 1; |
7552 | cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; | 7549 | cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; |
7553 | return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp); | 7550 | return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, |
7551 | vnics); | ||
7554 | } | 7552 | } |
7555 | 7553 | ||
7556 | static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) | 7554 | static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) |
@@ -8437,13 +8435,20 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp) | |||
8437 | return 0; | 8435 | return 0; |
8438 | 8436 | ||
8439 | bnxt_hwrm_func_qcaps(bp); | 8437 | bnxt_hwrm_func_qcaps(bp); |
8440 | __bnxt_close_nic(bp, true, false); | 8438 | |
8439 | if (netif_running(bp->dev)) | ||
8440 | __bnxt_close_nic(bp, true, false); | ||
8441 | |||
8441 | bnxt_clear_int_mode(bp); | 8442 | bnxt_clear_int_mode(bp); |
8442 | rc = bnxt_init_int_mode(bp); | 8443 | rc = bnxt_init_int_mode(bp); |
8443 | if (rc) | 8444 | |
8444 | dev_close(bp->dev); | 8445 | if (netif_running(bp->dev)) { |
8445 | else | 8446 | if (rc) |
8446 | rc = bnxt_open_nic(bp, true, false); | 8447 | dev_close(bp->dev); |
8448 | else | ||
8449 | rc = bnxt_open_nic(bp, true, false); | ||
8450 | } | ||
8451 | |||
8447 | return rc; | 8452 | return rc; |
8448 | } | 8453 | } |
8449 | 8454 | ||
@@ -8664,6 +8669,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
8664 | if (rc) | 8669 | if (rc) |
8665 | goto init_err_pci_clean; | 8670 | goto init_err_pci_clean; |
8666 | 8671 | ||
8672 | /* No TC has been set yet and rings may have been trimmed due to | ||
8673 | * limited MSIX, so we re-initialize the TX rings per TC. | ||
8674 | */ | ||
8675 | bp->tx_nr_rings_per_tc = bp->tx_nr_rings; | ||
8676 | |||
8667 | bnxt_get_wol_settings(bp); | 8677 | bnxt_get_wol_settings(bp); |
8668 | if (bp->flags & BNXT_FLAG_WOL_CAP) | 8678 | if (bp->flags & BNXT_FLAG_WOL_CAP) |
8669 | device_set_wakeup_enable(&pdev->dev, bp->wol); | 8679 | device_set_wakeup_enable(&pdev->dev, bp->wol); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 1989c470172c..5e3d62189cab 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
@@ -189,6 +189,7 @@ struct rx_cmp_ext { | |||
189 | #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) | 189 | #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) |
190 | #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4) | 190 | #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4) |
191 | __le32 rx_cmp_meta_data; | 191 | __le32 rx_cmp_meta_data; |
192 | #define RX_CMP_FLAGS2_METADATA_TCI_MASK 0xffff | ||
192 | #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff | 193 | #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff |
193 | #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000 | 194 | #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000 |
194 | #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16 | 195 | #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16 |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index fbe6e208e17b..65c2cee35766 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | |||
@@ -349,6 +349,9 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle) | |||
349 | if (rc) | 349 | if (rc) |
350 | netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d", | 350 | netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d", |
351 | __func__, flow_handle, rc); | 351 | __func__, flow_handle, rc); |
352 | |||
353 | if (rc) | ||
354 | rc = -EIO; | ||
352 | return rc; | 355 | return rc; |
353 | } | 356 | } |
354 | 357 | ||
@@ -484,13 +487,15 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, | |||
484 | req.action_flags = cpu_to_le16(action_flags); | 487 | req.action_flags = cpu_to_le16(action_flags); |
485 | 488 | ||
486 | mutex_lock(&bp->hwrm_cmd_lock); | 489 | mutex_lock(&bp->hwrm_cmd_lock); |
487 | |||
488 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 490 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
489 | if (!rc) | 491 | if (!rc) |
490 | *flow_handle = resp->flow_handle; | 492 | *flow_handle = resp->flow_handle; |
491 | |||
492 | mutex_unlock(&bp->hwrm_cmd_lock); | 493 | mutex_unlock(&bp->hwrm_cmd_lock); |
493 | 494 | ||
495 | if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) | ||
496 | rc = -ENOSPC; | ||
497 | else if (rc) | ||
498 | rc = -EIO; | ||
494 | return rc; | 499 | return rc; |
495 | } | 500 | } |
496 | 501 | ||
@@ -561,6 +566,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, | |||
561 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); | 566 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); |
562 | mutex_unlock(&bp->hwrm_cmd_lock); | 567 | mutex_unlock(&bp->hwrm_cmd_lock); |
563 | 568 | ||
569 | if (rc) | ||
570 | rc = -EIO; | ||
564 | return rc; | 571 | return rc; |
565 | } | 572 | } |
566 | 573 | ||
@@ -576,6 +583,9 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp, | |||
576 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 583 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
577 | if (rc) | 584 | if (rc) |
578 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); | 585 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); |
586 | |||
587 | if (rc) | ||
588 | rc = -EIO; | ||
579 | return rc; | 589 | return rc; |
580 | } | 590 | } |
581 | 591 | ||
@@ -624,6 +634,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, | |||
624 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); | 634 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); |
625 | mutex_unlock(&bp->hwrm_cmd_lock); | 635 | mutex_unlock(&bp->hwrm_cmd_lock); |
626 | 636 | ||
637 | if (rc) | ||
638 | rc = -EIO; | ||
627 | return rc; | 639 | return rc; |
628 | } | 640 | } |
629 | 641 | ||
@@ -639,6 +651,9 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp, | |||
639 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 651 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
640 | if (rc) | 652 | if (rc) |
641 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); | 653 | netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); |
654 | |||
655 | if (rc) | ||
656 | rc = -EIO; | ||
642 | return rc; | 657 | return rc; |
643 | } | 658 | } |
644 | 659 | ||
@@ -1269,11 +1284,8 @@ static int bnxt_tc_del_flow(struct bnxt *bp, | |||
1269 | flow_node = rhashtable_lookup_fast(&tc_info->flow_table, | 1284 | flow_node = rhashtable_lookup_fast(&tc_info->flow_table, |
1270 | &tc_flow_cmd->cookie, | 1285 | &tc_flow_cmd->cookie, |
1271 | tc_info->flow_ht_params); | 1286 | tc_info->flow_ht_params); |
1272 | if (!flow_node) { | 1287 | if (!flow_node) |
1273 | netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx", | ||
1274 | tc_flow_cmd->cookie); | ||
1275 | return -EINVAL; | 1288 | return -EINVAL; |
1276 | } | ||
1277 | 1289 | ||
1278 | return __bnxt_tc_del_flow(bp, flow_node); | 1290 | return __bnxt_tc_del_flow(bp, flow_node); |
1279 | } | 1291 | } |
@@ -1290,11 +1302,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, | |||
1290 | flow_node = rhashtable_lookup_fast(&tc_info->flow_table, | 1302 | flow_node = rhashtable_lookup_fast(&tc_info->flow_table, |
1291 | &tc_flow_cmd->cookie, | 1303 | &tc_flow_cmd->cookie, |
1292 | tc_info->flow_ht_params); | 1304 | tc_info->flow_ht_params); |
1293 | if (!flow_node) { | 1305 | if (!flow_node) |
1294 | netdev_info(bp->dev, "Error: no flow_node for cookie %lx", | ||
1295 | tc_flow_cmd->cookie); | ||
1296 | return -1; | 1306 | return -1; |
1297 | } | ||
1298 | 1307 | ||
1299 | flow = &flow_node->flow; | 1308 | flow = &flow_node->flow; |
1300 | curr_stats = &flow->stats; | 1309 | curr_stats = &flow->stats; |
@@ -1344,8 +1353,10 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, | |||
1344 | } else { | 1353 | } else { |
1345 | netdev_info(bp->dev, "error rc=%d", rc); | 1354 | netdev_info(bp->dev, "error rc=%d", rc); |
1346 | } | 1355 | } |
1347 | |||
1348 | mutex_unlock(&bp->hwrm_cmd_lock); | 1356 | mutex_unlock(&bp->hwrm_cmd_lock); |
1357 | |||
1358 | if (rc) | ||
1359 | rc = -EIO; | ||
1349 | return rc; | 1360 | return rc; |
1350 | } | 1361 | } |
1351 | 1362 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index a77ee2f8fb8d..f2593978ae75 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -922,8 +922,8 @@ static int tg3_ape_send_event(struct tg3 *tp, u32 event) | |||
922 | if (!(apedata & APE_FW_STATUS_READY)) | 922 | if (!(apedata & APE_FW_STATUS_READY)) |
923 | return -EAGAIN; | 923 | return -EAGAIN; |
924 | 924 | ||
925 | /* Wait for up to 1 millisecond for APE to service previous event. */ | 925 | /* Wait for up to 20 millisecond for APE to service previous event. */ |
926 | err = tg3_ape_event_lock(tp, 1000); | 926 | err = tg3_ape_event_lock(tp, 20000); |
927 | if (err) | 927 | if (err) |
928 | return err; | 928 | return err; |
929 | 929 | ||
@@ -946,6 +946,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) | |||
946 | 946 | ||
947 | switch (kind) { | 947 | switch (kind) { |
948 | case RESET_KIND_INIT: | 948 | case RESET_KIND_INIT: |
949 | tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); | ||
949 | tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, | 950 | tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, |
950 | APE_HOST_SEG_SIG_MAGIC); | 951 | APE_HOST_SEG_SIG_MAGIC); |
951 | tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, | 952 | tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, |
@@ -962,13 +963,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) | |||
962 | event = APE_EVENT_STATUS_STATE_START; | 963 | event = APE_EVENT_STATUS_STATE_START; |
963 | break; | 964 | break; |
964 | case RESET_KIND_SHUTDOWN: | 965 | case RESET_KIND_SHUTDOWN: |
965 | /* With the interface we are currently using, | ||
966 | * APE does not track driver state. Wiping | ||
967 | * out the HOST SEGMENT SIGNATURE forces | ||
968 | * the APE to assume OS absent status. | ||
969 | */ | ||
970 | tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); | ||
971 | |||
972 | if (device_may_wakeup(&tp->pdev->dev) && | 966 | if (device_may_wakeup(&tp->pdev->dev) && |
973 | tg3_flag(tp, WOL_ENABLE)) { | 967 | tg3_flag(tp, WOL_ENABLE)) { |
974 | tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, | 968 | tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED, |
@@ -990,6 +984,18 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind) | |||
990 | tg3_ape_send_event(tp, event); | 984 | tg3_ape_send_event(tp, event); |
991 | } | 985 | } |
992 | 986 | ||
987 | static void tg3_send_ape_heartbeat(struct tg3 *tp, | ||
988 | unsigned long interval) | ||
989 | { | ||
990 | /* Check if hb interval has exceeded */ | ||
991 | if (!tg3_flag(tp, ENABLE_APE) || | ||
992 | time_before(jiffies, tp->ape_hb_jiffies + interval)) | ||
993 | return; | ||
994 | |||
995 | tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++); | ||
996 | tp->ape_hb_jiffies = jiffies; | ||
997 | } | ||
998 | |||
993 | static void tg3_disable_ints(struct tg3 *tp) | 999 | static void tg3_disable_ints(struct tg3 *tp) |
994 | { | 1000 | { |
995 | int i; | 1001 | int i; |
@@ -7262,6 +7268,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) | |||
7262 | } | 7268 | } |
7263 | } | 7269 | } |
7264 | 7270 | ||
7271 | tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); | ||
7265 | return work_done; | 7272 | return work_done; |
7266 | 7273 | ||
7267 | tx_recovery: | 7274 | tx_recovery: |
@@ -7344,6 +7351,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) | |||
7344 | } | 7351 | } |
7345 | } | 7352 | } |
7346 | 7353 | ||
7354 | tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1); | ||
7347 | return work_done; | 7355 | return work_done; |
7348 | 7356 | ||
7349 | tx_recovery: | 7357 | tx_recovery: |
@@ -10732,7 +10740,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) | |||
10732 | if (tg3_flag(tp, ENABLE_APE)) | 10740 | if (tg3_flag(tp, ENABLE_APE)) |
10733 | /* Write our heartbeat update interval to APE. */ | 10741 | /* Write our heartbeat update interval to APE. */ |
10734 | tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, | 10742 | tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS, |
10735 | APE_HOST_HEARTBEAT_INT_DISABLE); | 10743 | APE_HOST_HEARTBEAT_INT_5SEC); |
10736 | 10744 | ||
10737 | tg3_write_sig_post_reset(tp, RESET_KIND_INIT); | 10745 | tg3_write_sig_post_reset(tp, RESET_KIND_INIT); |
10738 | 10746 | ||
@@ -11077,6 +11085,9 @@ static void tg3_timer(struct timer_list *t) | |||
11077 | tp->asf_counter = tp->asf_multiplier; | 11085 | tp->asf_counter = tp->asf_multiplier; |
11078 | } | 11086 | } |
11079 | 11087 | ||
11088 | /* Update the APE heartbeat every 5 seconds.*/ | ||
11089 | tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL); | ||
11090 | |||
11080 | spin_unlock(&tp->lock); | 11091 | spin_unlock(&tp->lock); |
11081 | 11092 | ||
11082 | restart_timer: | 11093 | restart_timer: |
@@ -16653,6 +16664,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) | |||
16653 | pci_state_reg); | 16664 | pci_state_reg); |
16654 | 16665 | ||
16655 | tg3_ape_lock_init(tp); | 16666 | tg3_ape_lock_init(tp); |
16667 | tp->ape_hb_interval = | ||
16668 | msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC); | ||
16656 | } | 16669 | } |
16657 | 16670 | ||
16658 | /* Set up tp->grc_local_ctrl before calling | 16671 | /* Set up tp->grc_local_ctrl before calling |
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 47f51cc0566d..1d61aa3efda1 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h | |||
@@ -2508,6 +2508,7 @@ | |||
2508 | #define TG3_APE_LOCK_PHY3 5 | 2508 | #define TG3_APE_LOCK_PHY3 5 |
2509 | #define TG3_APE_LOCK_GPIO 7 | 2509 | #define TG3_APE_LOCK_GPIO 7 |
2510 | 2510 | ||
2511 | #define TG3_APE_HB_INTERVAL (tp->ape_hb_interval) | ||
2511 | #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 | 2512 | #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 |
2512 | 2513 | ||
2513 | 2514 | ||
@@ -3423,6 +3424,10 @@ struct tg3 { | |||
3423 | struct device *hwmon_dev; | 3424 | struct device *hwmon_dev; |
3424 | bool link_up; | 3425 | bool link_up; |
3425 | bool pcierr_recovery; | 3426 | bool pcierr_recovery; |
3427 | |||
3428 | u32 ape_hb; | ||
3429 | unsigned long ape_hb_interval; | ||
3430 | unsigned long ape_hb_jiffies; | ||
3426 | }; | 3431 | }; |
3427 | 3432 | ||
3428 | /* Accessor macros for chip and asic attributes | 3433 | /* Accessor macros for chip and asic attributes |
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c index c87c9c684a33..d59497a7bdce 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c | |||
@@ -75,6 +75,8 @@ EXPORT_SYMBOL(cavium_ptp_get); | |||
75 | 75 | ||
76 | void cavium_ptp_put(struct cavium_ptp *ptp) | 76 | void cavium_ptp_put(struct cavium_ptp *ptp) |
77 | { | 77 | { |
78 | if (!ptp) | ||
79 | return; | ||
78 | pci_dev_put(ptp->pdev); | 80 | pci_dev_put(ptp->pdev); |
79 | } | 81 | } |
80 | EXPORT_SYMBOL(cavium_ptp_put); | 82 | EXPORT_SYMBOL(cavium_ptp_put); |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index b68cde9f17d2..7d9c5ffbd041 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
@@ -67,11 +67,6 @@ module_param(cpi_alg, int, S_IRUGO); | |||
67 | MODULE_PARM_DESC(cpi_alg, | 67 | MODULE_PARM_DESC(cpi_alg, |
68 | "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); | 68 | "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); |
69 | 69 | ||
70 | struct nicvf_xdp_tx { | ||
71 | u64 dma_addr; | ||
72 | u8 qidx; | ||
73 | }; | ||
74 | |||
75 | static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) | 70 | static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) |
76 | { | 71 | { |
77 | if (nic->sqs_mode) | 72 | if (nic->sqs_mode) |
@@ -507,29 +502,14 @@ static int nicvf_init_resources(struct nicvf *nic) | |||
507 | return 0; | 502 | return 0; |
508 | } | 503 | } |
509 | 504 | ||
510 | static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr) | ||
511 | { | ||
512 | /* Check if it's a recycled page, if not unmap the DMA mapping. | ||
513 | * Recycled page holds an extra reference. | ||
514 | */ | ||
515 | if (page_ref_count(page) == 1) { | ||
516 | dma_addr &= PAGE_MASK; | ||
517 | dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, | ||
518 | RCV_FRAG_LEN + XDP_HEADROOM, | ||
519 | DMA_FROM_DEVICE, | ||
520 | DMA_ATTR_SKIP_CPU_SYNC); | ||
521 | } | ||
522 | } | ||
523 | |||
524 | static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, | 505 | static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, |
525 | struct cqe_rx_t *cqe_rx, struct snd_queue *sq, | 506 | struct cqe_rx_t *cqe_rx, struct snd_queue *sq, |
526 | struct rcv_queue *rq, struct sk_buff **skb) | 507 | struct rcv_queue *rq, struct sk_buff **skb) |
527 | { | 508 | { |
528 | struct xdp_buff xdp; | 509 | struct xdp_buff xdp; |
529 | struct page *page; | 510 | struct page *page; |
530 | struct nicvf_xdp_tx *xdp_tx = NULL; | ||
531 | u32 action; | 511 | u32 action; |
532 | u16 len, err, offset = 0; | 512 | u16 len, offset = 0; |
533 | u64 dma_addr, cpu_addr; | 513 | u64 dma_addr, cpu_addr; |
534 | void *orig_data; | 514 | void *orig_data; |
535 | 515 | ||
@@ -543,7 +523,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, | |||
543 | cpu_addr = (u64)phys_to_virt(cpu_addr); | 523 | cpu_addr = (u64)phys_to_virt(cpu_addr); |
544 | page = virt_to_page((void *)cpu_addr); | 524 | page = virt_to_page((void *)cpu_addr); |
545 | 525 | ||
546 | xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM; | 526 | xdp.data_hard_start = page_address(page); |
547 | xdp.data = (void *)cpu_addr; | 527 | xdp.data = (void *)cpu_addr; |
548 | xdp_set_data_meta_invalid(&xdp); | 528 | xdp_set_data_meta_invalid(&xdp); |
549 | xdp.data_end = xdp.data + len; | 529 | xdp.data_end = xdp.data + len; |
@@ -563,7 +543,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, | |||
563 | 543 | ||
564 | switch (action) { | 544 | switch (action) { |
565 | case XDP_PASS: | 545 | case XDP_PASS: |
566 | nicvf_unmap_page(nic, page, dma_addr); | 546 | /* Check if it's a recycled page, if not |
547 | * unmap the DMA mapping. | ||
548 | * | ||
549 | * Recycled page holds an extra reference. | ||
550 | */ | ||
551 | if (page_ref_count(page) == 1) { | ||
552 | dma_addr &= PAGE_MASK; | ||
553 | dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, | ||
554 | RCV_FRAG_LEN + XDP_PACKET_HEADROOM, | ||
555 | DMA_FROM_DEVICE, | ||
556 | DMA_ATTR_SKIP_CPU_SYNC); | ||
557 | } | ||
567 | 558 | ||
568 | /* Build SKB and pass on packet to network stack */ | 559 | /* Build SKB and pass on packet to network stack */ |
569 | *skb = build_skb(xdp.data, | 560 | *skb = build_skb(xdp.data, |
@@ -576,20 +567,6 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, | |||
576 | case XDP_TX: | 567 | case XDP_TX: |
577 | nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len); | 568 | nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len); |
578 | return true; | 569 | return true; |
579 | case XDP_REDIRECT: | ||
580 | /* Save DMA address for use while transmitting */ | ||
581 | xdp_tx = (struct nicvf_xdp_tx *)page_address(page); | ||
582 | xdp_tx->dma_addr = dma_addr; | ||
583 | xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx); | ||
584 | |||
585 | err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog); | ||
586 | if (!err) | ||
587 | return true; | ||
588 | |||
589 | /* Free the page on error */ | ||
590 | nicvf_unmap_page(nic, page, dma_addr); | ||
591 | put_page(page); | ||
592 | break; | ||
593 | default: | 570 | default: |
594 | bpf_warn_invalid_xdp_action(action); | 571 | bpf_warn_invalid_xdp_action(action); |
595 | /* fall through */ | 572 | /* fall through */ |
@@ -597,7 +574,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, | |||
597 | trace_xdp_exception(nic->netdev, prog, action); | 574 | trace_xdp_exception(nic->netdev, prog, action); |
598 | /* fall through */ | 575 | /* fall through */ |
599 | case XDP_DROP: | 576 | case XDP_DROP: |
600 | nicvf_unmap_page(nic, page, dma_addr); | 577 | /* Check if it's a recycled page, if not |
578 | * unmap the DMA mapping. | ||
579 | * | ||
580 | * Recycled page holds an extra reference. | ||
581 | */ | ||
582 | if (page_ref_count(page) == 1) { | ||
583 | dma_addr &= PAGE_MASK; | ||
584 | dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, | ||
585 | RCV_FRAG_LEN + XDP_PACKET_HEADROOM, | ||
586 | DMA_FROM_DEVICE, | ||
587 | DMA_ATTR_SKIP_CPU_SYNC); | ||
588 | } | ||
601 | put_page(page); | 589 | put_page(page); |
602 | return true; | 590 | return true; |
603 | } | 591 | } |
@@ -1864,50 +1852,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) | |||
1864 | } | 1852 | } |
1865 | } | 1853 | } |
1866 | 1854 | ||
1867 | static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp) | ||
1868 | { | ||
1869 | struct nicvf *nic = netdev_priv(netdev); | ||
1870 | struct nicvf *snic = nic; | ||
1871 | struct nicvf_xdp_tx *xdp_tx; | ||
1872 | struct snd_queue *sq; | ||
1873 | struct page *page; | ||
1874 | int err, qidx; | ||
1875 | |||
1876 | if (!netif_running(netdev) || !nic->xdp_prog) | ||
1877 | return -EINVAL; | ||
1878 | |||
1879 | page = virt_to_page(xdp->data); | ||
1880 | xdp_tx = (struct nicvf_xdp_tx *)page_address(page); | ||
1881 | qidx = xdp_tx->qidx; | ||
1882 | |||
1883 | if (xdp_tx->qidx >= nic->xdp_tx_queues) | ||
1884 | return -EINVAL; | ||
1885 | |||
1886 | /* Get secondary Qset's info */ | ||
1887 | if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) { | ||
1888 | qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS; | ||
1889 | snic = (struct nicvf *)nic->snicvf[qidx - 1]; | ||
1890 | if (!snic) | ||
1891 | return -EINVAL; | ||
1892 | qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS; | ||
1893 | } | ||
1894 | |||
1895 | sq = &snic->qs->sq[qidx]; | ||
1896 | err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data, | ||
1897 | xdp_tx->dma_addr, | ||
1898 | xdp->data_end - xdp->data); | ||
1899 | if (err) | ||
1900 | return -ENOMEM; | ||
1901 | |||
1902 | nicvf_xdp_sq_doorbell(snic, sq, qidx); | ||
1903 | return 0; | ||
1904 | } | ||
1905 | |||
1906 | static void nicvf_xdp_flush(struct net_device *dev) | ||
1907 | { | ||
1908 | return; | ||
1909 | } | ||
1910 | |||
1911 | static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) | 1855 | static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) |
1912 | { | 1856 | { |
1913 | struct hwtstamp_config config; | 1857 | struct hwtstamp_config config; |
@@ -1986,8 +1930,6 @@ static const struct net_device_ops nicvf_netdev_ops = { | |||
1986 | .ndo_fix_features = nicvf_fix_features, | 1930 | .ndo_fix_features = nicvf_fix_features, |
1987 | .ndo_set_features = nicvf_set_features, | 1931 | .ndo_set_features = nicvf_set_features, |
1988 | .ndo_bpf = nicvf_xdp, | 1932 | .ndo_bpf = nicvf_xdp, |
1989 | .ndo_xdp_xmit = nicvf_xdp_xmit, | ||
1990 | .ndo_xdp_flush = nicvf_xdp_flush, | ||
1991 | .ndo_do_ioctl = nicvf_ioctl, | 1933 | .ndo_do_ioctl = nicvf_ioctl, |
1992 | }; | 1934 | }; |
1993 | 1935 | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 3eae9ff9b53a..d42704d07484 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c | |||
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr, | |||
204 | 204 | ||
205 | /* Reserve space for header modifications by BPF program */ | 205 | /* Reserve space for header modifications by BPF program */ |
206 | if (rbdr->is_xdp) | 206 | if (rbdr->is_xdp) |
207 | buf_len += XDP_HEADROOM; | 207 | buf_len += XDP_PACKET_HEADROOM; |
208 | 208 | ||
209 | /* Check if it's recycled */ | 209 | /* Check if it's recycled */ |
210 | if (pgcache) | 210 | if (pgcache) |
@@ -224,9 +224,8 @@ ret: | |||
224 | nic->rb_page = NULL; | 224 | nic->rb_page = NULL; |
225 | return -ENOMEM; | 225 | return -ENOMEM; |
226 | } | 226 | } |
227 | |||
228 | if (pgcache) | 227 | if (pgcache) |
229 | pgcache->dma_addr = *rbuf + XDP_HEADROOM; | 228 | pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM; |
230 | nic->rb_page_offset += buf_len; | 229 | nic->rb_page_offset += buf_len; |
231 | } | 230 | } |
232 | 231 | ||
@@ -1244,7 +1243,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq, | |||
1244 | int qentry; | 1243 | int qentry; |
1245 | 1244 | ||
1246 | if (subdesc_cnt > sq->xdp_free_cnt) | 1245 | if (subdesc_cnt > sq->xdp_free_cnt) |
1247 | return -1; | 1246 | return 0; |
1248 | 1247 | ||
1249 | qentry = nicvf_get_sq_desc(sq, subdesc_cnt); | 1248 | qentry = nicvf_get_sq_desc(sq, subdesc_cnt); |
1250 | 1249 | ||
@@ -1255,7 +1254,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq, | |||
1255 | 1254 | ||
1256 | sq->xdp_desc_cnt += subdesc_cnt; | 1255 | sq->xdp_desc_cnt += subdesc_cnt; |
1257 | 1256 | ||
1258 | return 0; | 1257 | return 1; |
1259 | } | 1258 | } |
1260 | 1259 | ||
1261 | /* Calculate no of SQ subdescriptors needed to transmit all | 1260 | /* Calculate no of SQ subdescriptors needed to transmit all |
@@ -1656,7 +1655,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr, | |||
1656 | if (page_ref_count(page) != 1) | 1655 | if (page_ref_count(page) != 1) |
1657 | return; | 1656 | return; |
1658 | 1657 | ||
1659 | len += XDP_HEADROOM; | 1658 | len += XDP_PACKET_HEADROOM; |
1660 | /* Receive buffers in XDP mode are mapped from page start */ | 1659 | /* Receive buffers in XDP mode are mapped from page start */ |
1661 | dma_addr &= PAGE_MASK; | 1660 | dma_addr &= PAGE_MASK; |
1662 | } | 1661 | } |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index ce1eed7a6d63..5e9a03cf1b4d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h | |||
@@ -11,7 +11,6 @@ | |||
11 | 11 | ||
12 | #include <linux/netdevice.h> | 12 | #include <linux/netdevice.h> |
13 | #include <linux/iommu.h> | 13 | #include <linux/iommu.h> |
14 | #include <linux/bpf.h> | ||
15 | #include <net/xdp.h> | 14 | #include <net/xdp.h> |
16 | #include "q_struct.h" | 15 | #include "q_struct.h" |
17 | 16 | ||
@@ -94,9 +93,6 @@ | |||
94 | #define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ | 93 | #define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \ |
95 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) | 94 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) |
96 | 95 | ||
97 | #define RCV_BUF_HEADROOM 128 /* To store dma address for XDP redirect */ | ||
98 | #define XDP_HEADROOM (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM) | ||
99 | |||
100 | #define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ | 96 | #define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ |
101 | MAX_CQE_PER_PKT_XMIT) | 97 | MAX_CQE_PER_PKT_XMIT) |
102 | 98 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 557fd8bfd54e..00a1d2d13169 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | |||
@@ -472,7 +472,7 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init, | |||
472 | 472 | ||
473 | if (is_t6(padap->params.chip)) { | 473 | if (is_t6(padap->params.chip)) { |
474 | size = padap->params.cim_la_size / 10 + 1; | 474 | size = padap->params.cim_la_size / 10 + 1; |
475 | size *= 11 * sizeof(u32); | 475 | size *= 10 * sizeof(u32); |
476 | } else { | 476 | } else { |
477 | size = padap->params.cim_la_size / 8; | 477 | size = padap->params.cim_la_size / 8; |
478 | size *= 8 * sizeof(u32); | 478 | size *= 8 * sizeof(u32); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 30485f9a598f..143686c60234 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | |||
@@ -102,7 +102,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) | |||
102 | case CUDBG_CIM_LA: | 102 | case CUDBG_CIM_LA: |
103 | if (is_t6(adap->params.chip)) { | 103 | if (is_t6(adap->params.chip)) { |
104 | len = adap->params.cim_la_size / 10 + 1; | 104 | len = adap->params.cim_la_size / 10 + 1; |
105 | len *= 11 * sizeof(u32); | 105 | len *= 10 * sizeof(u32); |
106 | } else { | 106 | } else { |
107 | len = adap->params.cim_la_size / 8; | 107 | len = adap->params.cim_la_size / 8; |
108 | len *= 8 * sizeof(u32); | 108 | len *= 8 * sizeof(u32); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 56bc626ef006..61022b5f6743 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -4970,7 +4970,6 @@ static void cxgb4_mgmt_setup(struct net_device *dev) | |||
4970 | /* Initialize the device structure. */ | 4970 | /* Initialize the device structure. */ |
4971 | dev->netdev_ops = &cxgb4_mgmt_netdev_ops; | 4971 | dev->netdev_ops = &cxgb4_mgmt_netdev_ops; |
4972 | dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; | 4972 | dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; |
4973 | dev->needs_free_netdev = true; | ||
4974 | } | 4973 | } |
4975 | 4974 | ||
4976 | static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) | 4975 | static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) |
@@ -4982,9 +4981,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) | |||
4982 | 4981 | ||
4983 | pcie_fw = readl(adap->regs + PCIE_FW_A); | 4982 | pcie_fw = readl(adap->regs + PCIE_FW_A); |
4984 | /* Check if cxgb4 is the MASTER and fw is initialized */ | 4983 | /* Check if cxgb4 is the MASTER and fw is initialized */ |
4985 | if (!(pcie_fw & PCIE_FW_INIT_F) || | 4984 | if (num_vfs && |
4985 | (!(pcie_fw & PCIE_FW_INIT_F) || | ||
4986 | !(pcie_fw & PCIE_FW_MASTER_VLD_F) || | 4986 | !(pcie_fw & PCIE_FW_MASTER_VLD_F) || |
4987 | PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) { | 4987 | PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) { |
4988 | dev_warn(&pdev->dev, | 4988 | dev_warn(&pdev->dev, |
4989 | "cxgb4 driver needs to be MASTER to support SRIOV\n"); | 4989 | "cxgb4 driver needs to be MASTER to support SRIOV\n"); |
4990 | return -EOPNOTSUPP; | 4990 | return -EOPNOTSUPP; |
@@ -5180,6 +5180,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5180 | adapter->name = pci_name(pdev); | 5180 | adapter->name = pci_name(pdev); |
5181 | adapter->mbox = func; | 5181 | adapter->mbox = func; |
5182 | adapter->pf = func; | 5182 | adapter->pf = func; |
5183 | adapter->params.chip = chip; | ||
5184 | adapter->adap_idx = adap_idx; | ||
5183 | adapter->msg_enable = DFLT_MSG_ENABLE; | 5185 | adapter->msg_enable = DFLT_MSG_ENABLE; |
5184 | adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + | 5186 | adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + |
5185 | (sizeof(struct mbox_cmd) * | 5187 | (sizeof(struct mbox_cmd) * |
@@ -5599,24 +5601,24 @@ static void remove_one(struct pci_dev *pdev) | |||
5599 | #if IS_ENABLED(CONFIG_IPV6) | 5601 | #if IS_ENABLED(CONFIG_IPV6) |
5600 | t4_cleanup_clip_tbl(adapter); | 5602 | t4_cleanup_clip_tbl(adapter); |
5601 | #endif | 5603 | #endif |
5602 | iounmap(adapter->regs); | ||
5603 | if (!is_t4(adapter->params.chip)) | 5604 | if (!is_t4(adapter->params.chip)) |
5604 | iounmap(adapter->bar2); | 5605 | iounmap(adapter->bar2); |
5605 | pci_disable_pcie_error_reporting(pdev); | ||
5606 | if ((adapter->flags & DEV_ENABLED)) { | ||
5607 | pci_disable_device(pdev); | ||
5608 | adapter->flags &= ~DEV_ENABLED; | ||
5609 | } | ||
5610 | pci_release_regions(pdev); | ||
5611 | kfree(adapter->mbox_log); | ||
5612 | synchronize_rcu(); | ||
5613 | kfree(adapter); | ||
5614 | } | 5606 | } |
5615 | #ifdef CONFIG_PCI_IOV | 5607 | #ifdef CONFIG_PCI_IOV |
5616 | else { | 5608 | else { |
5617 | cxgb4_iov_configure(adapter->pdev, 0); | 5609 | cxgb4_iov_configure(adapter->pdev, 0); |
5618 | } | 5610 | } |
5619 | #endif | 5611 | #endif |
5612 | iounmap(adapter->regs); | ||
5613 | pci_disable_pcie_error_reporting(pdev); | ||
5614 | if ((adapter->flags & DEV_ENABLED)) { | ||
5615 | pci_disable_device(pdev); | ||
5616 | adapter->flags &= ~DEV_ENABLED; | ||
5617 | } | ||
5618 | pci_release_regions(pdev); | ||
5619 | kfree(adapter->mbox_log); | ||
5620 | synchronize_rcu(); | ||
5621 | kfree(adapter); | ||
5620 | } | 5622 | } |
5621 | 5623 | ||
5622 | /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt | 5624 | /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 047609ef0515..920bccd6bc40 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -2637,7 +2637,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) | |||
2637 | } | 2637 | } |
2638 | 2638 | ||
2639 | #define EEPROM_STAT_ADDR 0x7bfc | 2639 | #define EEPROM_STAT_ADDR 0x7bfc |
2640 | #define VPD_SIZE 0x800 | ||
2641 | #define VPD_BASE 0x400 | 2640 | #define VPD_BASE 0x400 |
2642 | #define VPD_BASE_OLD 0 | 2641 | #define VPD_BASE_OLD 0 |
2643 | #define VPD_LEN 1024 | 2642 | #define VPD_LEN 1024 |
@@ -2704,15 +2703,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p) | |||
2704 | if (!vpd) | 2703 | if (!vpd) |
2705 | return -ENOMEM; | 2704 | return -ENOMEM; |
2706 | 2705 | ||
2707 | /* We have two VPD data structures stored in the adapter VPD area. | ||
2708 | * By default, Linux calculates the size of the VPD area by traversing | ||
2709 | * the first VPD area at offset 0x0, so we need to tell the OS what | ||
2710 | * our real VPD size is. | ||
2711 | */ | ||
2712 | ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE); | ||
2713 | if (ret < 0) | ||
2714 | goto out; | ||
2715 | |||
2716 | /* Card information normally starts at VPD_BASE but early cards had | 2706 | /* Card information normally starts at VPD_BASE but early cards had |
2717 | * it at 0. | 2707 | * it at 0. |
2718 | */ | 2708 | */ |
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 5eb999af2c40..bd3f6e4d1341 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c | |||
@@ -540,6 +540,7 @@ static int gmac_setup_txqs(struct net_device *netdev) | |||
540 | 540 | ||
541 | if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { | 541 | if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { |
542 | dev_warn(geth->dev, "TX queue base it not aligned\n"); | 542 | dev_warn(geth->dev, "TX queue base it not aligned\n"); |
543 | kfree(skb_tab); | ||
543 | return -ENOMEM; | 544 | return -ENOMEM; |
544 | } | 545 | } |
545 | 546 | ||
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 7caa8da48421..e4ec32a9ca15 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | |||
@@ -2008,7 +2008,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv, | |||
2008 | } | 2008 | } |
2009 | 2009 | ||
2010 | if (unlikely(err < 0)) { | 2010 | if (unlikely(err < 0)) { |
2011 | percpu_stats->tx_errors++; | ||
2012 | percpu_stats->tx_fifo_errors++; | 2011 | percpu_stats->tx_fifo_errors++; |
2013 | return err; | 2012 | return err; |
2014 | } | 2013 | } |
@@ -2278,7 +2277,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, | |||
2278 | vaddr = phys_to_virt(addr); | 2277 | vaddr = phys_to_virt(addr); |
2279 | prefetch(vaddr + qm_fd_get_offset(fd)); | 2278 | prefetch(vaddr + qm_fd_get_offset(fd)); |
2280 | 2279 | ||
2281 | fd_format = qm_fd_get_format(fd); | ||
2282 | /* The only FD types that we may receive are contig and S/G */ | 2280 | /* The only FD types that we may receive are contig and S/G */ |
2283 | WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); | 2281 | WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); |
2284 | 2282 | ||
@@ -2311,8 +2309,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, | |||
2311 | 2309 | ||
2312 | skb_len = skb->len; | 2310 | skb_len = skb->len; |
2313 | 2311 | ||
2314 | if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) | 2312 | if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) { |
2313 | percpu_stats->rx_dropped++; | ||
2315 | return qman_cb_dqrr_consume; | 2314 | return qman_cb_dqrr_consume; |
2315 | } | ||
2316 | 2316 | ||
2317 | percpu_stats->rx_packets++; | 2317 | percpu_stats->rx_packets++; |
2318 | percpu_stats->rx_bytes += skb_len; | 2318 | percpu_stats->rx_bytes += skb_len; |
@@ -2860,7 +2860,7 @@ static int dpaa_remove(struct platform_device *pdev) | |||
2860 | struct device *dev; | 2860 | struct device *dev; |
2861 | int err; | 2861 | int err; |
2862 | 2862 | ||
2863 | dev = &pdev->dev; | 2863 | dev = pdev->dev.parent; |
2864 | net_dev = dev_get_drvdata(dev); | 2864 | net_dev = dev_get_drvdata(dev); |
2865 | 2865 | ||
2866 | priv = netdev_priv(net_dev); | 2866 | priv = netdev_priv(net_dev); |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 7a7f3a42b2aa..d4604bc8eb5b 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -3600,6 +3600,8 @@ fec_drv_remove(struct platform_device *pdev) | |||
3600 | fec_enet_mii_remove(fep); | 3600 | fec_enet_mii_remove(fep); |
3601 | if (fep->reg_phy) | 3601 | if (fep->reg_phy) |
3602 | regulator_disable(fep->reg_phy); | 3602 | regulator_disable(fep->reg_phy); |
3603 | pm_runtime_put(&pdev->dev); | ||
3604 | pm_runtime_disable(&pdev->dev); | ||
3603 | if (of_phy_is_fixed_link(np)) | 3605 | if (of_phy_is_fixed_link(np)) |
3604 | of_phy_deregister_fixed_link(np); | 3606 | of_phy_deregister_fixed_link(np); |
3605 | of_node_put(fep->phy_node); | 3607 | of_node_put(fep->phy_node); |
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index ea43b4974149..7af31ddd093f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c | |||
@@ -1100,7 +1100,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) | |||
1100 | set_bucket(dtsec->regs, bucket, true); | 1100 | set_bucket(dtsec->regs, bucket, true); |
1101 | 1101 | ||
1102 | /* Create element to be added to the driver hash table */ | 1102 | /* Create element to be added to the driver hash table */ |
1103 | hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); | 1103 | hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC); |
1104 | if (!hash_entry) | 1104 | if (!hash_entry) |
1105 | return -ENOMEM; | 1105 | return -ENOMEM; |
1106 | hash_entry->addr = addr; | 1106 | hash_entry->addr = addr; |
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3bdeb295514b..f27f9bae1a4a 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -2934,29 +2934,17 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, | |||
2934 | { | 2934 | { |
2935 | int size = lstatus & BD_LENGTH_MASK; | 2935 | int size = lstatus & BD_LENGTH_MASK; |
2936 | struct page *page = rxb->page; | 2936 | struct page *page = rxb->page; |
2937 | bool last = !!(lstatus & BD_LFLAG(RXBD_LAST)); | ||
2938 | |||
2939 | /* Remove the FCS from the packet length */ | ||
2940 | if (last) | ||
2941 | size -= ETH_FCS_LEN; | ||
2942 | 2937 | ||
2943 | if (likely(first)) { | 2938 | if (likely(first)) { |
2944 | skb_put(skb, size); | 2939 | skb_put(skb, size); |
2945 | } else { | 2940 | } else { |
2946 | /* the last fragments' length contains the full frame length */ | 2941 | /* the last fragments' length contains the full frame length */ |
2947 | if (last) | 2942 | if (lstatus & BD_LFLAG(RXBD_LAST)) |
2948 | size -= skb->len; | 2943 | size -= skb->len; |
2949 | 2944 | ||
2950 | /* Add the last fragment if it contains something other than | 2945 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, |
2951 | * the FCS, otherwise drop it and trim off any part of the FCS | 2946 | rxb->page_offset + RXBUF_ALIGNMENT, |
2952 | * that was already received. | 2947 | size, GFAR_RXB_TRUESIZE); |
2953 | */ | ||
2954 | if (size > 0) | ||
2955 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, | ||
2956 | rxb->page_offset + RXBUF_ALIGNMENT, | ||
2957 | size, GFAR_RXB_TRUESIZE); | ||
2958 | else if (size < 0) | ||
2959 | pskb_trim(skb, skb->len + size); | ||
2960 | } | 2948 | } |
2961 | 2949 | ||
2962 | /* try reuse page */ | 2950 | /* try reuse page */ |
@@ -3069,12 +3057,12 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) | |||
3069 | if (priv->padding) | 3057 | if (priv->padding) |
3070 | skb_pull(skb, priv->padding); | 3058 | skb_pull(skb, priv->padding); |
3071 | 3059 | ||
3060 | /* Trim off the FCS */ | ||
3061 | pskb_trim(skb, skb->len - ETH_FCS_LEN); | ||
3062 | |||
3072 | if (ndev->features & NETIF_F_RXCSUM) | 3063 | if (ndev->features & NETIF_F_RXCSUM) |
3073 | gfar_rx_checksum(skb, fcb); | 3064 | gfar_rx_checksum(skb, fcb); |
3074 | 3065 | ||
3075 | /* Tell the skb what kind of packet this is */ | ||
3076 | skb->protocol = eth_type_trans(skb, ndev); | ||
3077 | |||
3078 | /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. | 3066 | /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. |
3079 | * Even if vlan rx accel is disabled, on some chips | 3067 | * Even if vlan rx accel is disabled, on some chips |
3080 | * RXFCB_VLN is pseudo randomly set. | 3068 | * RXFCB_VLN is pseudo randomly set. |
@@ -3145,13 +3133,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) | |||
3145 | continue; | 3133 | continue; |
3146 | } | 3134 | } |
3147 | 3135 | ||
3136 | gfar_process_frame(ndev, skb); | ||
3137 | |||
3148 | /* Increment the number of packets */ | 3138 | /* Increment the number of packets */ |
3149 | total_pkts++; | 3139 | total_pkts++; |
3150 | total_bytes += skb->len; | 3140 | total_bytes += skb->len; |
3151 | 3141 | ||
3152 | skb_record_rx_queue(skb, rx_queue->qindex); | 3142 | skb_record_rx_queue(skb, rx_queue->qindex); |
3153 | 3143 | ||
3154 | gfar_process_frame(ndev, skb); | 3144 | skb->protocol = eth_type_trans(skb, ndev); |
3155 | 3145 | ||
3156 | /* Send the packet up the stack */ | 3146 | /* Send the packet up the stack */ |
3157 | napi_gro_receive(&rx_queue->grp->napi_rx, skb); | 3147 | napi_gro_receive(&rx_queue->grp->napi_rx, skb); |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 86944bc3b273..74bd260ca02a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | |||
@@ -666,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data) | |||
666 | 666 | ||
667 | static int hns_gmac_get_sset_count(int stringset) | 667 | static int hns_gmac_get_sset_count(int stringset) |
668 | { | 668 | { |
669 | if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) | 669 | if (stringset == ETH_SS_STATS) |
670 | return ARRAY_SIZE(g_gmac_stats_string); | 670 | return ARRAY_SIZE(g_gmac_stats_string); |
671 | 671 | ||
672 | return 0; | 672 | return 0; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index b62816c1574e..93e71e27401b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | |||
@@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb) | |||
422 | 422 | ||
423 | int hns_ppe_get_sset_count(int stringset) | 423 | int hns_ppe_get_sset_count(int stringset) |
424 | { | 424 | { |
425 | if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) | 425 | if (stringset == ETH_SS_STATS) |
426 | return ETH_PPE_STATIC_NUM; | 426 | return ETH_PPE_STATIC_NUM; |
427 | return 0; | 427 | return 0; |
428 | } | 428 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 6f3570cfb501..e2e28532e4dc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | |||
@@ -876,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data) | |||
876 | */ | 876 | */ |
877 | int hns_rcb_get_ring_sset_count(int stringset) | 877 | int hns_rcb_get_ring_sset_count(int stringset) |
878 | { | 878 | { |
879 | if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) | 879 | if (stringset == ETH_SS_STATS) |
880 | return HNS_RING_STATIC_REG_NUM; | 880 | return HNS_RING_STATIC_REG_NUM; |
881 | 881 | ||
882 | return 0; | 882 | return 0; |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 7ea7f8a4aa2a..2e14a3ae1d8b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | |||
@@ -993,8 +993,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset) | |||
993 | cnt--; | 993 | cnt--; |
994 | 994 | ||
995 | return cnt; | 995 | return cnt; |
996 | } else { | 996 | } else if (stringset == ETH_SS_STATS) { |
997 | return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset)); | 997 | return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset)); |
998 | } else { | ||
999 | return -EOPNOTSUPP; | ||
998 | } | 1000 | } |
999 | } | 1001 | } |
1000 | 1002 | ||
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 27447260215d..1b3cc8bb0705 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -791,6 +791,18 @@ static int ibmvnic_login(struct net_device *netdev) | |||
791 | return 0; | 791 | return 0; |
792 | } | 792 | } |
793 | 793 | ||
794 | static void release_login_buffer(struct ibmvnic_adapter *adapter) | ||
795 | { | ||
796 | kfree(adapter->login_buf); | ||
797 | adapter->login_buf = NULL; | ||
798 | } | ||
799 | |||
800 | static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) | ||
801 | { | ||
802 | kfree(adapter->login_rsp_buf); | ||
803 | adapter->login_rsp_buf = NULL; | ||
804 | } | ||
805 | |||
794 | static void release_resources(struct ibmvnic_adapter *adapter) | 806 | static void release_resources(struct ibmvnic_adapter *adapter) |
795 | { | 807 | { |
796 | int i; | 808 | int i; |
@@ -813,6 +825,10 @@ static void release_resources(struct ibmvnic_adapter *adapter) | |||
813 | } | 825 | } |
814 | } | 826 | } |
815 | } | 827 | } |
828 | kfree(adapter->napi); | ||
829 | adapter->napi = NULL; | ||
830 | |||
831 | release_login_rsp_buffer(adapter); | ||
816 | } | 832 | } |
817 | 833 | ||
818 | static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) | 834 | static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) |
@@ -1057,6 +1073,35 @@ static int ibmvnic_open(struct net_device *netdev) | |||
1057 | return rc; | 1073 | return rc; |
1058 | } | 1074 | } |
1059 | 1075 | ||
1076 | static void clean_rx_pools(struct ibmvnic_adapter *adapter) | ||
1077 | { | ||
1078 | struct ibmvnic_rx_pool *rx_pool; | ||
1079 | u64 rx_entries; | ||
1080 | int rx_scrqs; | ||
1081 | int i, j; | ||
1082 | |||
1083 | if (!adapter->rx_pool) | ||
1084 | return; | ||
1085 | |||
1086 | rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); | ||
1087 | rx_entries = adapter->req_rx_add_entries_per_subcrq; | ||
1088 | |||
1089 | /* Free any remaining skbs in the rx buffer pools */ | ||
1090 | for (i = 0; i < rx_scrqs; i++) { | ||
1091 | rx_pool = &adapter->rx_pool[i]; | ||
1092 | if (!rx_pool) | ||
1093 | continue; | ||
1094 | |||
1095 | netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); | ||
1096 | for (j = 0; j < rx_entries; j++) { | ||
1097 | if (rx_pool->rx_buff[j].skb) { | ||
1098 | dev_kfree_skb_any(rx_pool->rx_buff[j].skb); | ||
1099 | rx_pool->rx_buff[j].skb = NULL; | ||
1100 | } | ||
1101 | } | ||
1102 | } | ||
1103 | } | ||
1104 | |||
1060 | static void clean_tx_pools(struct ibmvnic_adapter *adapter) | 1105 | static void clean_tx_pools(struct ibmvnic_adapter *adapter) |
1061 | { | 1106 | { |
1062 | struct ibmvnic_tx_pool *tx_pool; | 1107 | struct ibmvnic_tx_pool *tx_pool; |
@@ -1134,7 +1179,7 @@ static int __ibmvnic_close(struct net_device *netdev) | |||
1134 | } | 1179 | } |
1135 | } | 1180 | } |
1136 | } | 1181 | } |
1137 | 1182 | clean_rx_pools(adapter); | |
1138 | clean_tx_pools(adapter); | 1183 | clean_tx_pools(adapter); |
1139 | adapter->state = VNIC_CLOSED; | 1184 | adapter->state = VNIC_CLOSED; |
1140 | return rc; | 1185 | return rc; |
@@ -1670,8 +1715,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
1670 | return 0; | 1715 | return 0; |
1671 | } | 1716 | } |
1672 | 1717 | ||
1673 | netif_carrier_on(netdev); | ||
1674 | |||
1675 | /* kick napi */ | 1718 | /* kick napi */ |
1676 | for (i = 0; i < adapter->req_rx_queues; i++) | 1719 | for (i = 0; i < adapter->req_rx_queues; i++) |
1677 | napi_schedule(&adapter->napi[i]); | 1720 | napi_schedule(&adapter->napi[i]); |
@@ -1679,6 +1722,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
1679 | if (adapter->reset_reason != VNIC_RESET_FAILOVER) | 1722 | if (adapter->reset_reason != VNIC_RESET_FAILOVER) |
1680 | netdev_notify_peers(netdev); | 1723 | netdev_notify_peers(netdev); |
1681 | 1724 | ||
1725 | netif_carrier_on(netdev); | ||
1726 | |||
1682 | return 0; | 1727 | return 0; |
1683 | } | 1728 | } |
1684 | 1729 | ||
@@ -1853,6 +1898,12 @@ restart_poll: | |||
1853 | be16_to_cpu(next->rx_comp.rc)); | 1898 | be16_to_cpu(next->rx_comp.rc)); |
1854 | /* free the entry */ | 1899 | /* free the entry */ |
1855 | next->rx_comp.first = 0; | 1900 | next->rx_comp.first = 0; |
1901 | dev_kfree_skb_any(rx_buff->skb); | ||
1902 | remove_buff_from_pool(adapter, rx_buff); | ||
1903 | continue; | ||
1904 | } else if (!rx_buff->skb) { | ||
1905 | /* free the entry */ | ||
1906 | next->rx_comp.first = 0; | ||
1856 | remove_buff_from_pool(adapter, rx_buff); | 1907 | remove_buff_from_pool(adapter, rx_buff); |
1857 | continue; | 1908 | continue; |
1858 | } | 1909 | } |
@@ -3013,6 +3064,7 @@ static void send_login(struct ibmvnic_adapter *adapter) | |||
3013 | struct vnic_login_client_data *vlcd; | 3064 | struct vnic_login_client_data *vlcd; |
3014 | int i; | 3065 | int i; |
3015 | 3066 | ||
3067 | release_login_rsp_buffer(adapter); | ||
3016 | client_data_len = vnic_client_data_len(adapter); | 3068 | client_data_len = vnic_client_data_len(adapter); |
3017 | 3069 | ||
3018 | buffer_size = | 3070 | buffer_size = |
@@ -3738,6 +3790,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, | |||
3738 | ibmvnic_remove(adapter->vdev); | 3790 | ibmvnic_remove(adapter->vdev); |
3739 | return -EIO; | 3791 | return -EIO; |
3740 | } | 3792 | } |
3793 | release_login_buffer(adapter); | ||
3741 | complete(&adapter->init_done); | 3794 | complete(&adapter->init_done); |
3742 | 3795 | ||
3743 | return 0; | 3796 | return 0; |
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index afb7ebe20b24..824fd44e25f0 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h | |||
@@ -400,6 +400,10 @@ | |||
400 | #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ | 400 | #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ |
401 | #define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */ | 401 | #define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */ |
402 | #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ | 402 | #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ |
403 | #define E1000_ICR_MDAC 0x00000200 /* MDIO Access Complete */ | ||
404 | #define E1000_ICR_SRPD 0x00010000 /* Small Receive Packet Detected */ | ||
405 | #define E1000_ICR_ACK 0x00020000 /* Receive ACK Frame Detected */ | ||
406 | #define E1000_ICR_MNG 0x00040000 /* Manageability Event Detected */ | ||
403 | #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ | 407 | #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ |
404 | /* If this bit asserted, the driver should claim the interrupt */ | 408 | /* If this bit asserted, the driver should claim the interrupt */ |
405 | #define E1000_ICR_INT_ASSERTED 0x80000000 | 409 | #define E1000_ICR_INT_ASSERTED 0x80000000 |
@@ -407,7 +411,7 @@ | |||
407 | #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ | 411 | #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ |
408 | #define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ | 412 | #define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ |
409 | #define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ | 413 | #define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ |
410 | #define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ | 414 | #define E1000_ICR_OTHER 0x01000000 /* Other Interrupt */ |
411 | 415 | ||
412 | /* PBA ECC Register */ | 416 | /* PBA ECC Register */ |
413 | #define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ | 417 | #define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ |
@@ -431,12 +435,27 @@ | |||
431 | E1000_IMS_RXSEQ | \ | 435 | E1000_IMS_RXSEQ | \ |
432 | E1000_IMS_LSC) | 436 | E1000_IMS_LSC) |
433 | 437 | ||
438 | /* These are all of the events related to the OTHER interrupt. | ||
439 | */ | ||
440 | #define IMS_OTHER_MASK ( \ | ||
441 | E1000_IMS_LSC | \ | ||
442 | E1000_IMS_RXO | \ | ||
443 | E1000_IMS_MDAC | \ | ||
444 | E1000_IMS_SRPD | \ | ||
445 | E1000_IMS_ACK | \ | ||
446 | E1000_IMS_MNG) | ||
447 | |||
434 | /* Interrupt Mask Set */ | 448 | /* Interrupt Mask Set */ |
435 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ | 449 | #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ |
436 | #define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ | 450 | #define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ |
437 | #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ | 451 | #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ |
438 | #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ | 452 | #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ |
453 | #define E1000_IMS_RXO E1000_ICR_RXO /* Receiver Overrun */ | ||
439 | #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ | 454 | #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ |
455 | #define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO Access Complete */ | ||
456 | #define E1000_IMS_SRPD E1000_ICR_SRPD /* Small Receive Packet */ | ||
457 | #define E1000_IMS_ACK E1000_ICR_ACK /* Receive ACK Frame Detected */ | ||
458 | #define E1000_IMS_MNG E1000_ICR_MNG /* Manageability Event */ | ||
440 | #define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ | 459 | #define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ |
441 | #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ | 460 | #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ |
442 | #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ | 461 | #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ |
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 31277d3bb7dc..1dddfb7b2de6 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c | |||
@@ -1367,9 +1367,6 @@ out: | |||
1367 | * Checks to see of the link status of the hardware has changed. If a | 1367 | * Checks to see of the link status of the hardware has changed. If a |
1368 | * change in link status has been detected, then we read the PHY registers | 1368 | * change in link status has been detected, then we read the PHY registers |
1369 | * to get the current speed/duplex if link exists. | 1369 | * to get the current speed/duplex if link exists. |
1370 | * | ||
1371 | * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link | ||
1372 | * up). | ||
1373 | **/ | 1370 | **/ |
1374 | static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | 1371 | static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) |
1375 | { | 1372 | { |
@@ -1385,7 +1382,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1385 | * Change or Rx Sequence Error interrupt. | 1382 | * Change or Rx Sequence Error interrupt. |
1386 | */ | 1383 | */ |
1387 | if (!mac->get_link_status) | 1384 | if (!mac->get_link_status) |
1388 | return 1; | 1385 | return 0; |
1386 | mac->get_link_status = false; | ||
1389 | 1387 | ||
1390 | /* First we want to see if the MII Status Register reports | 1388 | /* First we want to see if the MII Status Register reports |
1391 | * link. If so, then we want to get the current speed/duplex | 1389 | * link. If so, then we want to get the current speed/duplex |
@@ -1393,12 +1391,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1393 | */ | 1391 | */ |
1394 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | 1392 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); |
1395 | if (ret_val) | 1393 | if (ret_val) |
1396 | return ret_val; | 1394 | goto out; |
1397 | 1395 | ||
1398 | if (hw->mac.type == e1000_pchlan) { | 1396 | if (hw->mac.type == e1000_pchlan) { |
1399 | ret_val = e1000_k1_gig_workaround_hv(hw, link); | 1397 | ret_val = e1000_k1_gig_workaround_hv(hw, link); |
1400 | if (ret_val) | 1398 | if (ret_val) |
1401 | return ret_val; | 1399 | goto out; |
1402 | } | 1400 | } |
1403 | 1401 | ||
1404 | /* When connected at 10Mbps half-duplex, some parts are excessively | 1402 | /* When connected at 10Mbps half-duplex, some parts are excessively |
@@ -1431,7 +1429,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1431 | 1429 | ||
1432 | ret_val = hw->phy.ops.acquire(hw); | 1430 | ret_val = hw->phy.ops.acquire(hw); |
1433 | if (ret_val) | 1431 | if (ret_val) |
1434 | return ret_val; | 1432 | goto out; |
1435 | 1433 | ||
1436 | if (hw->mac.type == e1000_pch2lan) | 1434 | if (hw->mac.type == e1000_pch2lan) |
1437 | emi_addr = I82579_RX_CONFIG; | 1435 | emi_addr = I82579_RX_CONFIG; |
@@ -1453,7 +1451,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1453 | hw->phy.ops.release(hw); | 1451 | hw->phy.ops.release(hw); |
1454 | 1452 | ||
1455 | if (ret_val) | 1453 | if (ret_val) |
1456 | return ret_val; | 1454 | goto out; |
1457 | 1455 | ||
1458 | if (hw->mac.type >= e1000_pch_spt) { | 1456 | if (hw->mac.type >= e1000_pch_spt) { |
1459 | u16 data; | 1457 | u16 data; |
@@ -1462,14 +1460,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1462 | if (speed == SPEED_1000) { | 1460 | if (speed == SPEED_1000) { |
1463 | ret_val = hw->phy.ops.acquire(hw); | 1461 | ret_val = hw->phy.ops.acquire(hw); |
1464 | if (ret_val) | 1462 | if (ret_val) |
1465 | return ret_val; | 1463 | goto out; |
1466 | 1464 | ||
1467 | ret_val = e1e_rphy_locked(hw, | 1465 | ret_val = e1e_rphy_locked(hw, |
1468 | PHY_REG(776, 20), | 1466 | PHY_REG(776, 20), |
1469 | &data); | 1467 | &data); |
1470 | if (ret_val) { | 1468 | if (ret_val) { |
1471 | hw->phy.ops.release(hw); | 1469 | hw->phy.ops.release(hw); |
1472 | return ret_val; | 1470 | goto out; |
1473 | } | 1471 | } |
1474 | 1472 | ||
1475 | ptr_gap = (data & (0x3FF << 2)) >> 2; | 1473 | ptr_gap = (data & (0x3FF << 2)) >> 2; |
@@ -1483,18 +1481,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1483 | } | 1481 | } |
1484 | hw->phy.ops.release(hw); | 1482 | hw->phy.ops.release(hw); |
1485 | if (ret_val) | 1483 | if (ret_val) |
1486 | return ret_val; | 1484 | goto out; |
1487 | } else { | 1485 | } else { |
1488 | ret_val = hw->phy.ops.acquire(hw); | 1486 | ret_val = hw->phy.ops.acquire(hw); |
1489 | if (ret_val) | 1487 | if (ret_val) |
1490 | return ret_val; | 1488 | goto out; |
1491 | 1489 | ||
1492 | ret_val = e1e_wphy_locked(hw, | 1490 | ret_val = e1e_wphy_locked(hw, |
1493 | PHY_REG(776, 20), | 1491 | PHY_REG(776, 20), |
1494 | 0xC023); | 1492 | 0xC023); |
1495 | hw->phy.ops.release(hw); | 1493 | hw->phy.ops.release(hw); |
1496 | if (ret_val) | 1494 | if (ret_val) |
1497 | return ret_val; | 1495 | goto out; |
1498 | 1496 | ||
1499 | } | 1497 | } |
1500 | } | 1498 | } |
@@ -1521,7 +1519,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1521 | (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { | 1519 | (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { |
1522 | ret_val = e1000_k1_workaround_lpt_lp(hw, link); | 1520 | ret_val = e1000_k1_workaround_lpt_lp(hw, link); |
1523 | if (ret_val) | 1521 | if (ret_val) |
1524 | return ret_val; | 1522 | goto out; |
1525 | } | 1523 | } |
1526 | if (hw->mac.type >= e1000_pch_lpt) { | 1524 | if (hw->mac.type >= e1000_pch_lpt) { |
1527 | /* Set platform power management values for | 1525 | /* Set platform power management values for |
@@ -1529,7 +1527,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1529 | */ | 1527 | */ |
1530 | ret_val = e1000_platform_pm_pch_lpt(hw, link); | 1528 | ret_val = e1000_platform_pm_pch_lpt(hw, link); |
1531 | if (ret_val) | 1529 | if (ret_val) |
1532 | return ret_val; | 1530 | goto out; |
1533 | } | 1531 | } |
1534 | 1532 | ||
1535 | /* Clear link partner's EEE ability */ | 1533 | /* Clear link partner's EEE ability */ |
@@ -1552,9 +1550,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1552 | } | 1550 | } |
1553 | 1551 | ||
1554 | if (!link) | 1552 | if (!link) |
1555 | return 0; /* No link detected */ | 1553 | goto out; |
1556 | |||
1557 | mac->get_link_status = false; | ||
1558 | 1554 | ||
1559 | switch (hw->mac.type) { | 1555 | switch (hw->mac.type) { |
1560 | case e1000_pch2lan: | 1556 | case e1000_pch2lan: |
@@ -1616,12 +1612,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) | |||
1616 | * different link partner. | 1612 | * different link partner. |
1617 | */ | 1613 | */ |
1618 | ret_val = e1000e_config_fc_after_link_up(hw); | 1614 | ret_val = e1000e_config_fc_after_link_up(hw); |
1619 | if (ret_val) { | 1615 | if (ret_val) |
1620 | e_dbg("Error configuring flow control\n"); | 1616 | e_dbg("Error configuring flow control\n"); |
1621 | return ret_val; | ||
1622 | } | ||
1623 | 1617 | ||
1624 | return 1; | 1618 | return ret_val; |
1619 | |||
1620 | out: | ||
1621 | mac->get_link_status = true; | ||
1622 | return ret_val; | ||
1625 | } | 1623 | } |
1626 | 1624 | ||
1627 | static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) | 1625 | static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) |
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index f457c5703d0c..5bdc3a2d4fd7 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c | |||
@@ -410,9 +410,6 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) | |||
410 | * Checks to see of the link status of the hardware has changed. If a | 410 | * Checks to see of the link status of the hardware has changed. If a |
411 | * change in link status has been detected, then we read the PHY registers | 411 | * change in link status has been detected, then we read the PHY registers |
412 | * to get the current speed/duplex if link exists. | 412 | * to get the current speed/duplex if link exists. |
413 | * | ||
414 | * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link | ||
415 | * up). | ||
416 | **/ | 413 | **/ |
417 | s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | 414 | s32 e1000e_check_for_copper_link(struct e1000_hw *hw) |
418 | { | 415 | { |
@@ -426,20 +423,16 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
426 | * Change or Rx Sequence Error interrupt. | 423 | * Change or Rx Sequence Error interrupt. |
427 | */ | 424 | */ |
428 | if (!mac->get_link_status) | 425 | if (!mac->get_link_status) |
429 | return 1; | 426 | return 0; |
427 | mac->get_link_status = false; | ||
430 | 428 | ||
431 | /* First we want to see if the MII Status Register reports | 429 | /* First we want to see if the MII Status Register reports |
432 | * link. If so, then we want to get the current speed/duplex | 430 | * link. If so, then we want to get the current speed/duplex |
433 | * of the PHY. | 431 | * of the PHY. |
434 | */ | 432 | */ |
435 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); | 433 | ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); |
436 | if (ret_val) | 434 | if (ret_val || !link) |
437 | return ret_val; | 435 | goto out; |
438 | |||
439 | if (!link) | ||
440 | return 0; /* No link detected */ | ||
441 | |||
442 | mac->get_link_status = false; | ||
443 | 436 | ||
444 | /* Check if there was DownShift, must be checked | 437 | /* Check if there was DownShift, must be checked |
445 | * immediately after link-up | 438 | * immediately after link-up |
@@ -464,12 +457,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) | |||
464 | * different link partner. | 457 | * different link partner. |
465 | */ | 458 | */ |
466 | ret_val = e1000e_config_fc_after_link_up(hw); | 459 | ret_val = e1000e_config_fc_after_link_up(hw); |
467 | if (ret_val) { | 460 | if (ret_val) |
468 | e_dbg("Error configuring flow control\n"); | 461 | e_dbg("Error configuring flow control\n"); |
469 | return ret_val; | ||
470 | } | ||
471 | 462 | ||
472 | return 1; | 463 | return ret_val; |
464 | |||
465 | out: | ||
466 | mac->get_link_status = true; | ||
467 | return ret_val; | ||
473 | } | 468 | } |
474 | 469 | ||
475 | /** | 470 | /** |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 1298b69f990b..dc853b0863af 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -1914,30 +1914,20 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) | |||
1914 | struct net_device *netdev = data; | 1914 | struct net_device *netdev = data; |
1915 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1915 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1916 | struct e1000_hw *hw = &adapter->hw; | 1916 | struct e1000_hw *hw = &adapter->hw; |
1917 | u32 icr; | 1917 | u32 icr = er32(ICR); |
1918 | bool enable = true; | 1918 | |
1919 | 1919 | if (icr & adapter->eiac_mask) | |
1920 | icr = er32(ICR); | 1920 | ew32(ICS, (icr & adapter->eiac_mask)); |
1921 | if (icr & E1000_ICR_RXO) { | 1921 | |
1922 | ew32(ICR, E1000_ICR_RXO); | ||
1923 | enable = false; | ||
1924 | /* napi poll will re-enable Other, make sure it runs */ | ||
1925 | if (napi_schedule_prep(&adapter->napi)) { | ||
1926 | adapter->total_rx_bytes = 0; | ||
1927 | adapter->total_rx_packets = 0; | ||
1928 | __napi_schedule(&adapter->napi); | ||
1929 | } | ||
1930 | } | ||
1931 | if (icr & E1000_ICR_LSC) { | 1922 | if (icr & E1000_ICR_LSC) { |
1932 | ew32(ICR, E1000_ICR_LSC); | ||
1933 | hw->mac.get_link_status = true; | 1923 | hw->mac.get_link_status = true; |
1934 | /* guard against interrupt when we're going down */ | 1924 | /* guard against interrupt when we're going down */ |
1935 | if (!test_bit(__E1000_DOWN, &adapter->state)) | 1925 | if (!test_bit(__E1000_DOWN, &adapter->state)) |
1936 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | 1926 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
1937 | } | 1927 | } |
1938 | 1928 | ||
1939 | if (enable && !test_bit(__E1000_DOWN, &adapter->state)) | 1929 | if (!test_bit(__E1000_DOWN, &adapter->state)) |
1940 | ew32(IMS, E1000_IMS_OTHER); | 1930 | ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK); |
1941 | 1931 | ||
1942 | return IRQ_HANDLED; | 1932 | return IRQ_HANDLED; |
1943 | } | 1933 | } |
@@ -2040,7 +2030,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) | |||
2040 | hw->hw_addr + E1000_EITR_82574(vector)); | 2030 | hw->hw_addr + E1000_EITR_82574(vector)); |
2041 | else | 2031 | else |
2042 | writel(1, hw->hw_addr + E1000_EITR_82574(vector)); | 2032 | writel(1, hw->hw_addr + E1000_EITR_82574(vector)); |
2043 | adapter->eiac_mask |= E1000_IMS_OTHER; | ||
2044 | 2033 | ||
2045 | /* Cause Tx interrupts on every write back */ | 2034 | /* Cause Tx interrupts on every write back */ |
2046 | ivar |= BIT(31); | 2035 | ivar |= BIT(31); |
@@ -2265,7 +2254,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) | |||
2265 | 2254 | ||
2266 | if (adapter->msix_entries) { | 2255 | if (adapter->msix_entries) { |
2267 | ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); | 2256 | ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); |
2268 | ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC); | 2257 | ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | |
2258 | IMS_OTHER_MASK); | ||
2269 | } else if (hw->mac.type >= e1000_pch_lpt) { | 2259 | } else if (hw->mac.type >= e1000_pch_lpt) { |
2270 | ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); | 2260 | ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); |
2271 | } else { | 2261 | } else { |
@@ -2333,8 +2323,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, | |||
2333 | { | 2323 | { |
2334 | struct pci_dev *pdev = adapter->pdev; | 2324 | struct pci_dev *pdev = adapter->pdev; |
2335 | 2325 | ||
2336 | ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, | 2326 | ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, |
2337 | GFP_KERNEL); | 2327 | GFP_KERNEL); |
2338 | if (!ring->desc) | 2328 | if (!ring->desc) |
2339 | return -ENOMEM; | 2329 | return -ENOMEM; |
2340 | 2330 | ||
@@ -2707,8 +2697,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight) | |||
2707 | napi_complete_done(napi, work_done); | 2697 | napi_complete_done(napi, work_done); |
2708 | if (!test_bit(__E1000_DOWN, &adapter->state)) { | 2698 | if (!test_bit(__E1000_DOWN, &adapter->state)) { |
2709 | if (adapter->msix_entries) | 2699 | if (adapter->msix_entries) |
2710 | ew32(IMS, adapter->rx_ring->ims_val | | 2700 | ew32(IMS, adapter->rx_ring->ims_val); |
2711 | E1000_IMS_OTHER); | ||
2712 | else | 2701 | else |
2713 | e1000_irq_enable(adapter); | 2702 | e1000_irq_enable(adapter); |
2714 | } | 2703 | } |
@@ -5101,7 +5090,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) | |||
5101 | case e1000_media_type_copper: | 5090 | case e1000_media_type_copper: |
5102 | if (hw->mac.get_link_status) { | 5091 | if (hw->mac.get_link_status) { |
5103 | ret_val = hw->mac.ops.check_for_link(hw); | 5092 | ret_val = hw->mac.ops.check_for_link(hw); |
5104 | link_active = ret_val > 0; | 5093 | link_active = !hw->mac.get_link_status; |
5105 | } else { | 5094 | } else { |
5106 | link_active = true; | 5095 | link_active = true; |
5107 | } | 5096 | } |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0da5aa2c8aba..9fc063af233c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -1888,6 +1888,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, | |||
1888 | ixgbe_rx_pg_size(rx_ring), | 1888 | ixgbe_rx_pg_size(rx_ring), |
1889 | DMA_FROM_DEVICE, | 1889 | DMA_FROM_DEVICE, |
1890 | IXGBE_RX_DMA_ATTR); | 1890 | IXGBE_RX_DMA_ATTR); |
1891 | } else if (ring_uses_build_skb(rx_ring)) { | ||
1892 | unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; | ||
1893 | |||
1894 | dma_sync_single_range_for_cpu(rx_ring->dev, | ||
1895 | IXGBE_CB(skb)->dma, | ||
1896 | offset, | ||
1897 | skb_headlen(skb), | ||
1898 | DMA_FROM_DEVICE); | ||
1891 | } else { | 1899 | } else { |
1892 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; | 1900 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; |
1893 | 1901 | ||
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index a1d7b88cf083..5a1668cdb461 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
@@ -7137,6 +7137,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev) | |||
7137 | int id = port->id; | 7137 | int id = port->id; |
7138 | bool allmulti = dev->flags & IFF_ALLMULTI; | 7138 | bool allmulti = dev->flags & IFF_ALLMULTI; |
7139 | 7139 | ||
7140 | retry: | ||
7140 | mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); | 7141 | mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC); |
7141 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); | 7142 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti); |
7142 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); | 7143 | mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti); |
@@ -7144,9 +7145,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev) | |||
7144 | /* Remove all port->id's mcast enries */ | 7145 | /* Remove all port->id's mcast enries */ |
7145 | mvpp2_prs_mcast_del_all(priv, id); | 7146 | mvpp2_prs_mcast_del_all(priv, id); |
7146 | 7147 | ||
7147 | if (allmulti && !netdev_mc_empty(dev)) { | 7148 | if (!allmulti) { |
7148 | netdev_for_each_mc_addr(ha, dev) | 7149 | netdev_for_each_mc_addr(ha, dev) { |
7149 | mvpp2_prs_mac_da_accept(priv, id, ha->addr, true); | 7150 | if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) { |
7151 | allmulti = true; | ||
7152 | goto retry; | ||
7153 | } | ||
7154 | } | ||
7150 | } | 7155 | } |
7151 | } | 7156 | } |
7152 | 7157 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index 0be4575b58a2..fd509160c8f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | |||
@@ -96,10 +96,10 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p, | |||
96 | "%pI4"); | 96 | "%pI4"); |
97 | } else if (ethertype.v == ETH_P_IPV6) { | 97 | } else if (ethertype.v == ETH_P_IPV6) { |
98 | static const struct in6_addr full_ones = { | 98 | static const struct in6_addr full_ones = { |
99 | .in6_u.u6_addr32 = {htonl(0xffffffff), | 99 | .in6_u.u6_addr32 = {__constant_htonl(0xffffffff), |
100 | htonl(0xffffffff), | 100 | __constant_htonl(0xffffffff), |
101 | htonl(0xffffffff), | 101 | __constant_htonl(0xffffffff), |
102 | htonl(0xffffffff)}, | 102 | __constant_htonl(0xffffffff)}, |
103 | }; | 103 | }; |
104 | DECLARE_MASK_VAL(struct in6_addr, src_ipv6); | 104 | DECLARE_MASK_VAL(struct in6_addr, src_ipv6); |
105 | DECLARE_MASK_VAL(struct in6_addr, dst_ipv6); | 105 | DECLARE_MASK_VAL(struct in6_addr, dst_ipv6); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 47bab842c5ee..da94c8cba5ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -1768,13 +1768,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, | |||
1768 | param->wq.linear = 1; | 1768 | param->wq.linear = 1; |
1769 | } | 1769 | } |
1770 | 1770 | ||
1771 | static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) | 1771 | static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev, |
1772 | struct mlx5e_rq_param *param) | ||
1772 | { | 1773 | { |
1773 | void *rqc = param->rqc; | 1774 | void *rqc = param->rqc; |
1774 | void *wq = MLX5_ADDR_OF(rqc, rqc, wq); | 1775 | void *wq = MLX5_ADDR_OF(rqc, rqc, wq); |
1775 | 1776 | ||
1776 | MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); | 1777 | MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); |
1777 | MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); | 1778 | MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); |
1779 | |||
1780 | param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); | ||
1778 | } | 1781 | } |
1779 | 1782 | ||
1780 | static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, | 1783 | static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, |
@@ -2634,6 +2637,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, | |||
2634 | struct mlx5e_cq *cq, | 2637 | struct mlx5e_cq *cq, |
2635 | struct mlx5e_cq_param *param) | 2638 | struct mlx5e_cq_param *param) |
2636 | { | 2639 | { |
2640 | param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); | ||
2641 | param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev); | ||
2642 | |||
2637 | return mlx5e_alloc_cq_common(mdev, param, cq); | 2643 | return mlx5e_alloc_cq_common(mdev, param, cq); |
2638 | } | 2644 | } |
2639 | 2645 | ||
@@ -2645,7 +2651,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev, | |||
2645 | struct mlx5e_cq *cq = &drop_rq->cq; | 2651 | struct mlx5e_cq *cq = &drop_rq->cq; |
2646 | int err; | 2652 | int err; |
2647 | 2653 | ||
2648 | mlx5e_build_drop_rq_param(&rq_param); | 2654 | mlx5e_build_drop_rq_param(mdev, &rq_param); |
2649 | 2655 | ||
2650 | err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); | 2656 | err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); |
2651 | if (err) | 2657 | if (err) |
@@ -2994,8 +3000,8 @@ static int mlx5e_setup_tc_block(struct net_device *dev, | |||
2994 | } | 3000 | } |
2995 | #endif | 3001 | #endif |
2996 | 3002 | ||
2997 | int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, | 3003 | static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, |
2998 | void *type_data) | 3004 | void *type_data) |
2999 | { | 3005 | { |
3000 | switch (type) { | 3006 | switch (type) { |
3001 | #ifdef CONFIG_MLX5_ESWITCH | 3007 | #ifdef CONFIG_MLX5_ESWITCH |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0d4bb0688faa..e5c3ab46a24a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/tcp.h> | 36 | #include <linux/tcp.h> |
37 | #include <linux/bpf_trace.h> | 37 | #include <linux/bpf_trace.h> |
38 | #include <net/busy_poll.h> | 38 | #include <net/busy_poll.h> |
39 | #include <net/ip6_checksum.h> | ||
39 | #include "en.h" | 40 | #include "en.h" |
40 | #include "en_tc.h" | 41 | #include "en_tc.h" |
41 | #include "eswitch.h" | 42 | #include "eswitch.h" |
@@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) | |||
546 | return true; | 547 | return true; |
547 | } | 548 | } |
548 | 549 | ||
550 | static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp) | ||
551 | { | ||
552 | u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); | ||
553 | u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || | ||
554 | (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); | ||
555 | |||
556 | tcp->check = 0; | ||
557 | tcp->psh = get_cqe_lro_tcppsh(cqe); | ||
558 | |||
559 | if (tcp_ack) { | ||
560 | tcp->ack = 1; | ||
561 | tcp->ack_seq = cqe->lro_ack_seq_num; | ||
562 | tcp->window = cqe->lro_tcp_win; | ||
563 | } | ||
564 | } | ||
565 | |||
549 | static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, | 566 | static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, |
550 | u32 cqe_bcnt) | 567 | u32 cqe_bcnt) |
551 | { | 568 | { |
552 | struct ethhdr *eth = (struct ethhdr *)(skb->data); | 569 | struct ethhdr *eth = (struct ethhdr *)(skb->data); |
553 | struct tcphdr *tcp; | 570 | struct tcphdr *tcp; |
554 | int network_depth = 0; | 571 | int network_depth = 0; |
572 | __wsum check; | ||
555 | __be16 proto; | 573 | __be16 proto; |
556 | u16 tot_len; | 574 | u16 tot_len; |
557 | void *ip_p; | 575 | void *ip_p; |
558 | 576 | ||
559 | u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); | ||
560 | u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || | ||
561 | (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); | ||
562 | |||
563 | proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); | 577 | proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); |
564 | 578 | ||
565 | tot_len = cqe_bcnt - network_depth; | 579 | tot_len = cqe_bcnt - network_depth; |
@@ -576,23 +590,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, | |||
576 | ipv4->check = 0; | 590 | ipv4->check = 0; |
577 | ipv4->check = ip_fast_csum((unsigned char *)ipv4, | 591 | ipv4->check = ip_fast_csum((unsigned char *)ipv4, |
578 | ipv4->ihl); | 592 | ipv4->ihl); |
593 | |||
594 | mlx5e_lro_update_tcp_hdr(cqe, tcp); | ||
595 | check = csum_partial(tcp, tcp->doff * 4, | ||
596 | csum_unfold((__force __sum16)cqe->check_sum)); | ||
597 | /* Almost done, don't forget the pseudo header */ | ||
598 | tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr, | ||
599 | tot_len - sizeof(struct iphdr), | ||
600 | IPPROTO_TCP, check); | ||
579 | } else { | 601 | } else { |
602 | u16 payload_len = tot_len - sizeof(struct ipv6hdr); | ||
580 | struct ipv6hdr *ipv6 = ip_p; | 603 | struct ipv6hdr *ipv6 = ip_p; |
581 | 604 | ||
582 | tcp = ip_p + sizeof(struct ipv6hdr); | 605 | tcp = ip_p + sizeof(struct ipv6hdr); |
583 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | 606 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; |
584 | 607 | ||
585 | ipv6->hop_limit = cqe->lro_min_ttl; | 608 | ipv6->hop_limit = cqe->lro_min_ttl; |
586 | ipv6->payload_len = cpu_to_be16(tot_len - | 609 | ipv6->payload_len = cpu_to_be16(payload_len); |
587 | sizeof(struct ipv6hdr)); | 610 | |
588 | } | 611 | mlx5e_lro_update_tcp_hdr(cqe, tcp); |
589 | 612 | check = csum_partial(tcp, tcp->doff * 4, | |
590 | tcp->psh = get_cqe_lro_tcppsh(cqe); | 613 | csum_unfold((__force __sum16)cqe->check_sum)); |
591 | 614 | /* Almost done, don't forget the pseudo header */ | |
592 | if (tcp_ack) { | 615 | tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len, |
593 | tcp->ack = 1; | 616 | IPPROTO_TCP, check); |
594 | tcp->ack_seq = cqe->lro_ack_seq_num; | ||
595 | tcp->window = cqe->lro_tcp_win; | ||
596 | } | 617 | } |
597 | } | 618 | } |
598 | 619 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 5a4608281f38..707976482c09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | |||
@@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb, | |||
216 | if (iph->protocol != IPPROTO_UDP) | 216 | if (iph->protocol != IPPROTO_UDP) |
217 | goto out; | 217 | goto out; |
218 | 218 | ||
219 | udph = udp_hdr(skb); | 219 | /* Don't assume skb_transport_header() was set */ |
220 | udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl); | ||
220 | if (udph->dest != htons(9)) | 221 | if (udph->dest != htons(9)) |
221 | goto out; | 222 | goto out; |
222 | 223 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index fd98b0dc610f..fa86a1466718 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -2529,7 +2529,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
2529 | if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { | 2529 | if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { |
2530 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; | 2530 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; |
2531 | } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { | 2531 | } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { |
2532 | if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) | 2532 | if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || |
2533 | tcf_vlan_push_prio(a)) | ||
2533 | return -EOPNOTSUPP; | 2534 | return -EOPNOTSUPP; |
2534 | 2535 | ||
2535 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; | 2536 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 569b42a01026..11b4f1089d1c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
@@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, | |||
176 | default: | 176 | default: |
177 | hlen = mlx5e_skb_l2_header_offset(skb); | 177 | hlen = mlx5e_skb_l2_header_offset(skb); |
178 | } | 178 | } |
179 | return min_t(u16, hlen, skb->len); | 179 | return min_t(u16, hlen, skb_headlen(skb)); |
180 | } | 180 | } |
181 | 181 | ||
182 | static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, | 182 | static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 5ecf2cddc16d..c2b1d7d351fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
@@ -1529,6 +1529,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, | |||
1529 | 1529 | ||
1530 | esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); | 1530 | esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); |
1531 | 1531 | ||
1532 | /* Create steering drop counters for ingress and egress ACLs */ | ||
1533 | if (vport_num && esw->mode == SRIOV_LEGACY) | ||
1534 | esw_vport_create_drop_counters(vport); | ||
1535 | |||
1532 | /* Restore old vport configuration */ | 1536 | /* Restore old vport configuration */ |
1533 | esw_apply_vport_conf(esw, vport); | 1537 | esw_apply_vport_conf(esw, vport); |
1534 | 1538 | ||
@@ -1545,10 +1549,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, | |||
1545 | if (!vport_num) | 1549 | if (!vport_num) |
1546 | vport->info.trusted = true; | 1550 | vport->info.trusted = true; |
1547 | 1551 | ||
1548 | /* create steering drop counters for ingress and egress ACLs */ | ||
1549 | if (vport_num && esw->mode == SRIOV_LEGACY) | ||
1550 | esw_vport_create_drop_counters(vport); | ||
1551 | |||
1552 | esw_vport_change_handle_locked(vport); | 1552 | esw_vport_change_handle_locked(vport); |
1553 | 1553 | ||
1554 | esw->enabled_vports++; | 1554 | esw->enabled_vports++; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c025c98700e4..31fc2cfac3b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
@@ -1429,7 +1429,8 @@ static bool check_conflicting_actions(u32 action1, u32 action2) | |||
1429 | 1429 | ||
1430 | if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP | | 1430 | if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP | |
1431 | MLX5_FLOW_CONTEXT_ACTION_ENCAP | | 1431 | MLX5_FLOW_CONTEXT_ACTION_ENCAP | |
1432 | MLX5_FLOW_CONTEXT_ACTION_DECAP)) | 1432 | MLX5_FLOW_CONTEXT_ACTION_DECAP | |
1433 | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) | ||
1433 | return true; | 1434 | return true; |
1434 | 1435 | ||
1435 | return false; | 1436 | return false; |
@@ -1758,8 +1759,11 @@ search_again_locked: | |||
1758 | 1759 | ||
1759 | /* Collect all fgs which has a matching match_criteria */ | 1760 | /* Collect all fgs which has a matching match_criteria */ |
1760 | err = build_match_list(&match_head, ft, spec); | 1761 | err = build_match_list(&match_head, ft, spec); |
1761 | if (err) | 1762 | if (err) { |
1763 | if (take_write) | ||
1764 | up_write_ref_node(&ft->node); | ||
1762 | return ERR_PTR(err); | 1765 | return ERR_PTR(err); |
1766 | } | ||
1763 | 1767 | ||
1764 | if (!take_write) | 1768 | if (!take_write) |
1765 | up_read_ref_node(&ft->node); | 1769 | up_read_ref_node(&ft->node); |
@@ -1768,8 +1772,11 @@ search_again_locked: | |||
1768 | dest_num, version); | 1772 | dest_num, version); |
1769 | free_match_list(&match_head); | 1773 | free_match_list(&match_head); |
1770 | if (!IS_ERR(rule) || | 1774 | if (!IS_ERR(rule) || |
1771 | (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) | 1775 | (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) { |
1776 | if (take_write) | ||
1777 | up_write_ref_node(&ft->node); | ||
1772 | return rule; | 1778 | return rule; |
1779 | } | ||
1773 | 1780 | ||
1774 | if (!take_write) { | 1781 | if (!take_write) { |
1775 | nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT); | 1782 | nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 21d29f7936f6..d39b0b7011b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -124,7 +124,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) | |||
124 | trigger_cmd_completions(dev); | 124 | trigger_cmd_completions(dev); |
125 | } | 125 | } |
126 | 126 | ||
127 | mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); | 127 | mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1); |
128 | mlx5_core_err(dev, "end\n"); | 128 | mlx5_core_err(dev, "end\n"); |
129 | 129 | ||
130 | unlock: | 130 | unlock: |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index e159243e0fcf..857035583ccd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/highmem.h> | 34 | #include <linux/highmem.h> |
35 | #include <rdma/mlx5-abi.h> | 35 | #include <rdma/mlx5-abi.h> |
36 | #include "en.h" | 36 | #include "en.h" |
37 | #include "clock.h" | ||
37 | 38 | ||
38 | enum { | 39 | enum { |
39 | MLX5_CYCLES_SHIFT = 23 | 40 | MLX5_CYCLES_SHIFT = 23 |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 2ef641c91c26..ae391e4b7070 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -551,7 +551,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) | |||
551 | MLX5_SET(cmd_hca_cap, | 551 | MLX5_SET(cmd_hca_cap, |
552 | set_hca_cap, | 552 | set_hca_cap, |
553 | cache_line_128byte, | 553 | cache_line_128byte, |
554 | cache_line_size() == 128 ? 1 : 0); | 554 | cache_line_size() >= 128 ? 1 : 0); |
555 | 555 | ||
556 | if (MLX5_CAP_GEN_MAX(dev, dct)) | 556 | if (MLX5_CAP_GEN_MAX(dev, dct)) |
557 | MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1); | 557 | MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index b698fb481b2e..996dc099cd58 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | |||
@@ -443,6 +443,17 @@ int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id) | |||
443 | } | 443 | } |
444 | EXPORT_SYMBOL(mlxsw_afa_block_jump); | 444 | EXPORT_SYMBOL(mlxsw_afa_block_jump); |
445 | 445 | ||
446 | int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block) | ||
447 | { | ||
448 | if (block->finished) | ||
449 | return -EINVAL; | ||
450 | mlxsw_afa_set_goto_set(block->cur_set, | ||
451 | MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0); | ||
452 | block->finished = true; | ||
453 | return 0; | ||
454 | } | ||
455 | EXPORT_SYMBOL(mlxsw_afa_block_terminate); | ||
456 | |||
446 | static struct mlxsw_afa_fwd_entry * | 457 | static struct mlxsw_afa_fwd_entry * |
447 | mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port) | 458 | mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port) |
448 | { | 459 | { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 43132293475c..b91f2b0829b0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | |||
@@ -65,6 +65,7 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block); | |||
65 | u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); | 65 | u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); |
66 | int mlxsw_afa_block_continue(struct mlxsw_afa_block *block); | 66 | int mlxsw_afa_block_continue(struct mlxsw_afa_block *block); |
67 | int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); | 67 | int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); |
68 | int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block); | ||
68 | int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); | 69 | int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); |
69 | int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id); | 70 | int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id); |
70 | int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, | 71 | int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index f6963b0b4a55..122506daa586 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | |||
@@ -107,20 +107,20 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { | |||
107 | MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), | 107 | MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), |
108 | MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), | 108 | MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), |
109 | MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), | 109 | MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), |
110 | MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8), | ||
111 | MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2), | ||
112 | MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6), | ||
113 | MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), | ||
114 | MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), | ||
115 | MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), | ||
116 | MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8), | ||
117 | MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8), | ||
118 | MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8), | ||
119 | MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), | 110 | MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16), |
120 | MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), | 111 | MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16), |
112 | MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), | ||
113 | MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), | ||
114 | MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), | ||
115 | MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x20, 0, 32), | ||
116 | MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x24, 0, 32), | ||
117 | MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x20, 8), | ||
118 | MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x28, 8), | ||
119 | MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x30, 8), | ||
120 | MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x38, 8), | ||
121 | }; | 121 | }; |
122 | 122 | ||
123 | #define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38 | 123 | #define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40 |
124 | 124 | ||
125 | struct mlxsw_afk_element_inst { /* element instance in actual block */ | 125 | struct mlxsw_afk_element_inst { /* element instance in actual block */ |
126 | const struct mlxsw_afk_element_info *info; | 126 | const struct mlxsw_afk_element_info *info; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3dcc58d61506..bf400c75fcc8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -655,13 +655,17 @@ static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu) | |||
655 | } | 655 | } |
656 | 656 | ||
657 | static struct mlxsw_sp_span_inspected_port * | 657 | static struct mlxsw_sp_span_inspected_port * |
658 | mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port, | 658 | mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry, |
659 | struct mlxsw_sp_span_entry *span_entry) | 659 | enum mlxsw_sp_span_type type, |
660 | struct mlxsw_sp_port *port, | ||
661 | bool bind) | ||
660 | { | 662 | { |
661 | struct mlxsw_sp_span_inspected_port *p; | 663 | struct mlxsw_sp_span_inspected_port *p; |
662 | 664 | ||
663 | list_for_each_entry(p, &span_entry->bound_ports_list, list) | 665 | list_for_each_entry(p, &span_entry->bound_ports_list, list) |
664 | if (port->local_port == p->local_port) | 666 | if (type == p->type && |
667 | port->local_port == p->local_port && | ||
668 | bind == p->bound) | ||
665 | return p; | 669 | return p; |
666 | return NULL; | 670 | return NULL; |
667 | } | 671 | } |
@@ -691,8 +695,22 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port, | |||
691 | struct mlxsw_sp_span_inspected_port *inspected_port; | 695 | struct mlxsw_sp_span_inspected_port *inspected_port; |
692 | struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; | 696 | struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; |
693 | char sbib_pl[MLXSW_REG_SBIB_LEN]; | 697 | char sbib_pl[MLXSW_REG_SBIB_LEN]; |
698 | int i; | ||
694 | int err; | 699 | int err; |
695 | 700 | ||
701 | /* A given (source port, direction) can only be bound to one analyzer, | ||
702 | * so if a binding is requested, check for conflicts. | ||
703 | */ | ||
704 | if (bind) | ||
705 | for (i = 0; i < mlxsw_sp->span.entries_count; i++) { | ||
706 | struct mlxsw_sp_span_entry *curr = | ||
707 | &mlxsw_sp->span.entries[i]; | ||
708 | |||
709 | if (mlxsw_sp_span_entry_bound_port_find(curr, type, | ||
710 | port, bind)) | ||
711 | return -EEXIST; | ||
712 | } | ||
713 | |||
696 | /* if it is an egress SPAN, bind a shared buffer to it */ | 714 | /* if it is an egress SPAN, bind a shared buffer to it */ |
697 | if (type == MLXSW_SP_SPAN_EGRESS) { | 715 | if (type == MLXSW_SP_SPAN_EGRESS) { |
698 | u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, | 716 | u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, |
@@ -720,6 +738,7 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port, | |||
720 | } | 738 | } |
721 | inspected_port->local_port = port->local_port; | 739 | inspected_port->local_port = port->local_port; |
722 | inspected_port->type = type; | 740 | inspected_port->type = type; |
741 | inspected_port->bound = bind; | ||
723 | list_add_tail(&inspected_port->list, &span_entry->bound_ports_list); | 742 | list_add_tail(&inspected_port->list, &span_entry->bound_ports_list); |
724 | 743 | ||
725 | return 0; | 744 | return 0; |
@@ -746,7 +765,8 @@ mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port, | |||
746 | struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; | 765 | struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; |
747 | char sbib_pl[MLXSW_REG_SBIB_LEN]; | 766 | char sbib_pl[MLXSW_REG_SBIB_LEN]; |
748 | 767 | ||
749 | inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry); | 768 | inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type, |
769 | port, bind); | ||
750 | if (!inspected_port) | 770 | if (!inspected_port) |
751 | return; | 771 | return; |
752 | 772 | ||
@@ -1459,6 +1479,7 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) | |||
1459 | } | 1479 | } |
1460 | 1480 | ||
1461 | mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; | 1481 | mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; |
1482 | mlxsw_sp_port_vlan->ref_count = 1; | ||
1462 | mlxsw_sp_port_vlan->vid = vid; | 1483 | mlxsw_sp_port_vlan->vid = vid; |
1463 | list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); | 1484 | list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); |
1464 | 1485 | ||
@@ -1486,8 +1507,10 @@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) | |||
1486 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; | 1507 | struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; |
1487 | 1508 | ||
1488 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); | 1509 | mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); |
1489 | if (mlxsw_sp_port_vlan) | 1510 | if (mlxsw_sp_port_vlan) { |
1511 | mlxsw_sp_port_vlan->ref_count++; | ||
1490 | return mlxsw_sp_port_vlan; | 1512 | return mlxsw_sp_port_vlan; |
1513 | } | ||
1491 | 1514 | ||
1492 | return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid); | 1515 | return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid); |
1493 | } | 1516 | } |
@@ -1496,6 +1519,9 @@ void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) | |||
1496 | { | 1519 | { |
1497 | struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; | 1520 | struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; |
1498 | 1521 | ||
1522 | if (--mlxsw_sp_port_vlan->ref_count != 0) | ||
1523 | return; | ||
1524 | |||
1499 | if (mlxsw_sp_port_vlan->bridge_port) | 1525 | if (mlxsw_sp_port_vlan->bridge_port) |
1500 | mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); | 1526 | mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); |
1501 | else if (fid) | 1527 | else if (fid) |
@@ -4207,13 +4233,12 @@ static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = { | |||
4207 | .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate, | 4233 | .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate, |
4208 | }; | 4234 | }; |
4209 | 4235 | ||
4210 | static struct devlink_resource_size_params mlxsw_sp_kvd_size_params; | ||
4211 | static struct devlink_resource_size_params mlxsw_sp_linear_size_params; | ||
4212 | static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params; | ||
4213 | static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params; | ||
4214 | |||
4215 | static void | 4236 | static void |
4216 | mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core) | 4237 | mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core, |
4238 | struct devlink_resource_size_params *kvd_size_params, | ||
4239 | struct devlink_resource_size_params *linear_size_params, | ||
4240 | struct devlink_resource_size_params *hash_double_size_params, | ||
4241 | struct devlink_resource_size_params *hash_single_size_params) | ||
4217 | { | 4242 | { |
4218 | u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, | 4243 | u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core, |
4219 | KVD_SINGLE_MIN_SIZE); | 4244 | KVD_SINGLE_MIN_SIZE); |
@@ -4222,37 +4247,35 @@ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core) | |||
4222 | u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); | 4247 | u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); |
4223 | u32 linear_size_min = 0; | 4248 | u32 linear_size_min = 0; |
4224 | 4249 | ||
4225 | /* KVD top resource */ | 4250 | devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size, |
4226 | mlxsw_sp_kvd_size_params.size_min = kvd_size; | 4251 | MLXSW_SP_KVD_GRANULARITY, |
4227 | mlxsw_sp_kvd_size_params.size_max = kvd_size; | 4252 | DEVLINK_RESOURCE_UNIT_ENTRY); |
4228 | mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; | 4253 | devlink_resource_size_params_init(linear_size_params, linear_size_min, |
4229 | mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; | 4254 | kvd_size - single_size_min - |
4230 | 4255 | double_size_min, | |
4231 | /* Linear part init */ | 4256 | MLXSW_SP_KVD_GRANULARITY, |
4232 | mlxsw_sp_linear_size_params.size_min = linear_size_min; | 4257 | DEVLINK_RESOURCE_UNIT_ENTRY); |
4233 | mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min - | 4258 | devlink_resource_size_params_init(hash_double_size_params, |
4234 | double_size_min; | 4259 | double_size_min, |
4235 | mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; | 4260 | kvd_size - single_size_min - |
4236 | mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; | 4261 | linear_size_min, |
4237 | 4262 | MLXSW_SP_KVD_GRANULARITY, | |
4238 | /* Hash double part init */ | 4263 | DEVLINK_RESOURCE_UNIT_ENTRY); |
4239 | mlxsw_sp_hash_double_size_params.size_min = double_size_min; | 4264 | devlink_resource_size_params_init(hash_single_size_params, |
4240 | mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min - | 4265 | single_size_min, |
4241 | linear_size_min; | 4266 | kvd_size - double_size_min - |
4242 | mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; | 4267 | linear_size_min, |
4243 | mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; | 4268 | MLXSW_SP_KVD_GRANULARITY, |
4244 | 4269 | DEVLINK_RESOURCE_UNIT_ENTRY); | |
4245 | /* Hash single part init */ | ||
4246 | mlxsw_sp_hash_single_size_params.size_min = single_size_min; | ||
4247 | mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min - | ||
4248 | linear_size_min; | ||
4249 | mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY; | ||
4250 | mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY; | ||
4251 | } | 4270 | } |
4252 | 4271 | ||
4253 | static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) | 4272 | static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) |
4254 | { | 4273 | { |
4255 | struct devlink *devlink = priv_to_devlink(mlxsw_core); | 4274 | struct devlink *devlink = priv_to_devlink(mlxsw_core); |
4275 | struct devlink_resource_size_params hash_single_size_params; | ||
4276 | struct devlink_resource_size_params hash_double_size_params; | ||
4277 | struct devlink_resource_size_params linear_size_params; | ||
4278 | struct devlink_resource_size_params kvd_size_params; | ||
4256 | u32 kvd_size, single_size, double_size, linear_size; | 4279 | u32 kvd_size, single_size, double_size, linear_size; |
4257 | const struct mlxsw_config_profile *profile; | 4280 | const struct mlxsw_config_profile *profile; |
4258 | int err; | 4281 | int err; |
@@ -4261,13 +4284,17 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) | |||
4261 | if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) | 4284 | if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE)) |
4262 | return -EIO; | 4285 | return -EIO; |
4263 | 4286 | ||
4264 | mlxsw_sp_resource_size_params_prepare(mlxsw_core); | 4287 | mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params, |
4288 | &linear_size_params, | ||
4289 | &hash_double_size_params, | ||
4290 | &hash_single_size_params); | ||
4291 | |||
4265 | kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); | 4292 | kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE); |
4266 | err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, | 4293 | err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD, |
4267 | true, kvd_size, | 4294 | true, kvd_size, |
4268 | MLXSW_SP_RESOURCE_KVD, | 4295 | MLXSW_SP_RESOURCE_KVD, |
4269 | DEVLINK_RESOURCE_ID_PARENT_TOP, | 4296 | DEVLINK_RESOURCE_ID_PARENT_TOP, |
4270 | &mlxsw_sp_kvd_size_params, | 4297 | &kvd_size_params, |
4271 | &mlxsw_sp_resource_kvd_ops); | 4298 | &mlxsw_sp_resource_kvd_ops); |
4272 | if (err) | 4299 | if (err) |
4273 | return err; | 4300 | return err; |
@@ -4277,7 +4304,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) | |||
4277 | false, linear_size, | 4304 | false, linear_size, |
4278 | MLXSW_SP_RESOURCE_KVD_LINEAR, | 4305 | MLXSW_SP_RESOURCE_KVD_LINEAR, |
4279 | MLXSW_SP_RESOURCE_KVD, | 4306 | MLXSW_SP_RESOURCE_KVD, |
4280 | &mlxsw_sp_linear_size_params, | 4307 | &linear_size_params, |
4281 | &mlxsw_sp_resource_kvd_linear_ops); | 4308 | &mlxsw_sp_resource_kvd_linear_ops); |
4282 | if (err) | 4309 | if (err) |
4283 | return err; | 4310 | return err; |
@@ -4291,7 +4318,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) | |||
4291 | false, double_size, | 4318 | false, double_size, |
4292 | MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, | 4319 | MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE, |
4293 | MLXSW_SP_RESOURCE_KVD, | 4320 | MLXSW_SP_RESOURCE_KVD, |
4294 | &mlxsw_sp_hash_double_size_params, | 4321 | &hash_double_size_params, |
4295 | &mlxsw_sp_resource_kvd_hash_double_ops); | 4322 | &mlxsw_sp_resource_kvd_hash_double_ops); |
4296 | if (err) | 4323 | if (err) |
4297 | return err; | 4324 | return err; |
@@ -4301,7 +4328,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core) | |||
4301 | false, single_size, | 4328 | false, single_size, |
4302 | MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, | 4329 | MLXSW_SP_RESOURCE_KVD_HASH_SINGLE, |
4303 | MLXSW_SP_RESOURCE_KVD, | 4330 | MLXSW_SP_RESOURCE_KVD, |
4304 | &mlxsw_sp_hash_single_size_params, | 4331 | &hash_single_size_params, |
4305 | &mlxsw_sp_resource_kvd_hash_single_ops); | 4332 | &mlxsw_sp_resource_kvd_hash_single_ops); |
4306 | if (err) | 4333 | if (err) |
4307 | return err; | 4334 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index bdd8f94a452c..92064db2ae44 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
@@ -120,6 +120,9 @@ struct mlxsw_sp_span_inspected_port { | |||
120 | struct list_head list; | 120 | struct list_head list; |
121 | enum mlxsw_sp_span_type type; | 121 | enum mlxsw_sp_span_type type; |
122 | u8 local_port; | 122 | u8 local_port; |
123 | |||
124 | /* Whether this is a directly bound mirror (port-to-port) or an ACL. */ | ||
125 | bool bound; | ||
123 | }; | 126 | }; |
124 | 127 | ||
125 | struct mlxsw_sp_span_entry { | 128 | struct mlxsw_sp_span_entry { |
@@ -211,6 +214,7 @@ struct mlxsw_sp_port_vlan { | |||
211 | struct list_head list; | 214 | struct list_head list; |
212 | struct mlxsw_sp_port *mlxsw_sp_port; | 215 | struct mlxsw_sp_port *mlxsw_sp_port; |
213 | struct mlxsw_sp_fid *fid; | 216 | struct mlxsw_sp_fid *fid; |
217 | unsigned int ref_count; | ||
214 | u16 vid; | 218 | u16 vid; |
215 | struct mlxsw_sp_bridge_port *bridge_port; | 219 | struct mlxsw_sp_bridge_port *bridge_port; |
216 | struct list_head bridge_vlan_node; | 220 | struct list_head bridge_vlan_node; |
@@ -552,6 +556,7 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei, | |||
552 | int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei); | 556 | int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei); |
553 | int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, | 557 | int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, |
554 | u16 group_id); | 558 | u16 group_id); |
559 | int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei); | ||
555 | int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei); | 560 | int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei); |
556 | int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei); | 561 | int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei); |
557 | int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, | 562 | int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 0897a5435cc2..92d90ed7207e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | |||
@@ -528,6 +528,11 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, | |||
528 | return mlxsw_afa_block_jump(rulei->act_block, group_id); | 528 | return mlxsw_afa_block_jump(rulei->act_block, group_id); |
529 | } | 529 | } |
530 | 530 | ||
531 | int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei) | ||
532 | { | ||
533 | return mlxsw_afa_block_terminate(rulei->act_block); | ||
534 | } | ||
535 | |||
531 | int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei) | 536 | int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei) |
532 | { | 537 | { |
533 | return mlxsw_afa_block_append_drop(rulei->act_block); | 538 | return mlxsw_afa_block_append_drop(rulei->act_block); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 93728c694e6d..0a9adc5962fb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | |||
@@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { | |||
385 | 385 | ||
386 | static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { | 386 | static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { |
387 | MLXSW_SP_CPU_PORT_SB_CM, | 387 | MLXSW_SP_CPU_PORT_SB_CM, |
388 | MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), | ||
389 | MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), | ||
390 | MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), | ||
391 | MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), | ||
392 | MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), | ||
388 | MLXSW_SP_CPU_PORT_SB_CM, | 393 | MLXSW_SP_CPU_PORT_SB_CM, |
389 | MLXSW_SP_CPU_PORT_SB_CM, | 394 | MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0), |
390 | MLXSW_SP_CPU_PORT_SB_CM, | ||
391 | MLXSW_SP_CPU_PORT_SB_CM, | ||
392 | MLXSW_SP_CPU_PORT_SB_CM, | ||
393 | MLXSW_SP_CPU_PORT_SB_CM, | ||
394 | MLXSW_SP_SB_CM(10000, 0, 0), | ||
395 | MLXSW_SP_CPU_PORT_SB_CM, | 395 | MLXSW_SP_CPU_PORT_SB_CM, |
396 | MLXSW_SP_CPU_PORT_SB_CM, | 396 | MLXSW_SP_CPU_PORT_SB_CM, |
397 | MLXSW_SP_CPU_PORT_SB_CM, | 397 | MLXSW_SP_CPU_PORT_SB_CM, |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index bbd238e50f05..54262af4e98f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | |||
@@ -112,11 +112,11 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { | |||
112 | [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, | 112 | [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, |
113 | [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, | 113 | [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, |
114 | [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, | 114 | [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, |
115 | [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, | ||
115 | }; | 116 | }; |
116 | 117 | ||
117 | static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { | 118 | static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { |
118 | [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, | 119 | [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, |
119 | [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, | ||
120 | }; | 120 | }; |
121 | 121 | ||
122 | static const int *mlxsw_sp_packet_type_sfgc_types[] = { | 122 | static const int *mlxsw_sp_packet_type_sfgc_types[] = { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 6ce00e28d4ea..89dbf569dff5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | |||
@@ -65,7 +65,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, | |||
65 | tcf_exts_to_list(exts, &actions); | 65 | tcf_exts_to_list(exts, &actions); |
66 | list_for_each_entry(a, &actions, list) { | 66 | list_for_each_entry(a, &actions, list) { |
67 | if (is_tcf_gact_ok(a)) { | 67 | if (is_tcf_gact_ok(a)) { |
68 | err = mlxsw_sp_acl_rulei_act_continue(rulei); | 68 | err = mlxsw_sp_acl_rulei_act_terminate(rulei); |
69 | if (err) | 69 | if (err) |
70 | return err; | 70 | return err; |
71 | } else if (is_tcf_gact_shot(a)) { | 71 | } else if (is_tcf_gact_shot(a)) { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index f0b25baba09a..f7948e983637 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -788,6 +788,9 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, | |||
788 | u32 tb_id, | 788 | u32 tb_id, |
789 | struct netlink_ext_ack *extack) | 789 | struct netlink_ext_ack *extack) |
790 | { | 790 | { |
791 | struct mlxsw_sp_mr_table *mr4_table; | ||
792 | struct mlxsw_sp_fib *fib4; | ||
793 | struct mlxsw_sp_fib *fib6; | ||
791 | struct mlxsw_sp_vr *vr; | 794 | struct mlxsw_sp_vr *vr; |
792 | int err; | 795 | int err; |
793 | 796 | ||
@@ -796,29 +799,30 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, | |||
796 | NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers"); | 799 | NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers"); |
797 | return ERR_PTR(-EBUSY); | 800 | return ERR_PTR(-EBUSY); |
798 | } | 801 | } |
799 | vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); | 802 | fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); |
800 | if (IS_ERR(vr->fib4)) | 803 | if (IS_ERR(fib4)) |
801 | return ERR_CAST(vr->fib4); | 804 | return ERR_CAST(fib4); |
802 | vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); | 805 | fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); |
803 | if (IS_ERR(vr->fib6)) { | 806 | if (IS_ERR(fib6)) { |
804 | err = PTR_ERR(vr->fib6); | 807 | err = PTR_ERR(fib6); |
805 | goto err_fib6_create; | 808 | goto err_fib6_create; |
806 | } | 809 | } |
807 | vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, | 810 | mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, |
808 | MLXSW_SP_L3_PROTO_IPV4); | 811 | MLXSW_SP_L3_PROTO_IPV4); |
809 | if (IS_ERR(vr->mr4_table)) { | 812 | if (IS_ERR(mr4_table)) { |
810 | err = PTR_ERR(vr->mr4_table); | 813 | err = PTR_ERR(mr4_table); |
811 | goto err_mr_table_create; | 814 | goto err_mr_table_create; |
812 | } | 815 | } |
816 | vr->fib4 = fib4; | ||
817 | vr->fib6 = fib6; | ||
818 | vr->mr4_table = mr4_table; | ||
813 | vr->tb_id = tb_id; | 819 | vr->tb_id = tb_id; |
814 | return vr; | 820 | return vr; |
815 | 821 | ||
816 | err_mr_table_create: | 822 | err_mr_table_create: |
817 | mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6); | 823 | mlxsw_sp_fib_destroy(mlxsw_sp, fib6); |
818 | vr->fib6 = NULL; | ||
819 | err_fib6_create: | 824 | err_fib6_create: |
820 | mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4); | 825 | mlxsw_sp_fib_destroy(mlxsw_sp, fib4); |
821 | vr->fib4 = NULL; | ||
822 | return ERR_PTR(err); | 826 | return ERR_PTR(err); |
823 | } | 827 | } |
824 | 828 | ||
@@ -3790,6 +3794,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) | |||
3790 | struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; | 3794 | struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; |
3791 | int i; | 3795 | int i; |
3792 | 3796 | ||
3797 | if (!list_is_singular(&nh_grp->fib_list)) | ||
3798 | return; | ||
3799 | |||
3793 | for (i = 0; i < nh_grp->count; i++) { | 3800 | for (i = 0; i < nh_grp->count; i++) { |
3794 | struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; | 3801 | struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; |
3795 | 3802 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 593ad31be749..161bcdc012f0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
@@ -1203,6 +1203,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
1203 | bool dynamic) | 1203 | bool dynamic) |
1204 | { | 1204 | { |
1205 | char *sfd_pl; | 1205 | char *sfd_pl; |
1206 | u8 num_rec; | ||
1206 | int err; | 1207 | int err; |
1207 | 1208 | ||
1208 | sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); | 1209 | sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); |
@@ -1212,9 +1213,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
1212 | mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); | 1213 | mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); |
1213 | mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), | 1214 | mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), |
1214 | mac, fid, action, local_port); | 1215 | mac, fid, action, local_port); |
1216 | num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); | ||
1215 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); | 1217 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); |
1216 | kfree(sfd_pl); | 1218 | if (err) |
1219 | goto out; | ||
1220 | |||
1221 | if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) | ||
1222 | err = -EBUSY; | ||
1217 | 1223 | ||
1224 | out: | ||
1225 | kfree(sfd_pl); | ||
1218 | return err; | 1226 | return err; |
1219 | } | 1227 | } |
1220 | 1228 | ||
@@ -1239,6 +1247,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, | |||
1239 | bool adding, bool dynamic) | 1247 | bool adding, bool dynamic) |
1240 | { | 1248 | { |
1241 | char *sfd_pl; | 1249 | char *sfd_pl; |
1250 | u8 num_rec; | ||
1242 | int err; | 1251 | int err; |
1243 | 1252 | ||
1244 | sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); | 1253 | sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); |
@@ -1249,9 +1258,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, | |||
1249 | mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), | 1258 | mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), |
1250 | mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, | 1259 | mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, |
1251 | lag_vid, lag_id); | 1260 | lag_vid, lag_id); |
1261 | num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); | ||
1252 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); | 1262 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); |
1253 | kfree(sfd_pl); | 1263 | if (err) |
1264 | goto out; | ||
1265 | |||
1266 | if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) | ||
1267 | err = -EBUSY; | ||
1254 | 1268 | ||
1269 | out: | ||
1270 | kfree(sfd_pl); | ||
1255 | return err; | 1271 | return err; |
1256 | } | 1272 | } |
1257 | 1273 | ||
@@ -1296,6 +1312,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, | |||
1296 | u16 fid, u16 mid_idx, bool adding) | 1312 | u16 fid, u16 mid_idx, bool adding) |
1297 | { | 1313 | { |
1298 | char *sfd_pl; | 1314 | char *sfd_pl; |
1315 | u8 num_rec; | ||
1299 | int err; | 1316 | int err; |
1300 | 1317 | ||
1301 | sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); | 1318 | sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL); |
@@ -1305,7 +1322,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, | |||
1305 | mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); | 1322 | mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); |
1306 | mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, | 1323 | mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, |
1307 | MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); | 1324 | MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); |
1325 | num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); | ||
1308 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); | 1326 | err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); |
1327 | if (err) | ||
1328 | goto out; | ||
1329 | |||
1330 | if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl)) | ||
1331 | err = -EBUSY; | ||
1332 | |||
1333 | out: | ||
1309 | kfree(sfd_pl); | 1334 | kfree(sfd_pl); |
1310 | return err; | 1335 | return err; |
1311 | } | 1336 | } |
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig index a10ef50e4f12..017fb2322589 100644 --- a/drivers/net/ethernet/natsemi/Kconfig +++ b/drivers/net/ethernet/natsemi/Kconfig | |||
@@ -1,16 +1,16 @@ | |||
1 | # | 1 | # |
2 | # National Semi-conductor device configuration | 2 | # National Semiconductor device configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | config NET_VENDOR_NATSEMI | 5 | config NET_VENDOR_NATSEMI |
6 | bool "National Semi-conductor devices" | 6 | bool "National Semiconductor devices" |
7 | default y | 7 | default y |
8 | ---help--- | 8 | ---help--- |
9 | If you have a network (Ethernet) card belonging to this class, say Y. | 9 | If you have a network (Ethernet) card belonging to this class, say Y. |
10 | 10 | ||
11 | Note that the answer to this question doesn't directly affect the | 11 | Note that the answer to this question doesn't directly affect the |
12 | kernel: saying N will just cause the configurator to skip all | 12 | kernel: saying N will just cause the configurator to skip all |
13 | the questions about National Semi-conductor devices. If you say Y, | 13 | the questions about National Semiconductor devices. If you say Y, |
14 | you will be asked for your specific card in the following questions. | 14 | you will be asked for your specific card in the following questions. |
15 | 15 | ||
16 | if NET_VENDOR_NATSEMI | 16 | if NET_VENDOR_NATSEMI |
diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile index cc664977596e..a759aa09ef59 100644 --- a/drivers/net/ethernet/natsemi/Makefile +++ b/drivers/net/ethernet/natsemi/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | # | 2 | # |
3 | # Makefile for the National Semi-conductor Sonic devices. | 3 | # Makefile for the National Semiconductor Sonic devices. |
4 | # | 4 | # |
5 | 5 | ||
6 | obj-$(CONFIG_MACSONIC) += macsonic.o | 6 | obj-$(CONFIG_MACSONIC) += macsonic.o |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 6f546e869d8d..00f41c145d4d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c | |||
@@ -2480,7 +2480,10 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto) | |||
2480 | if (rc) | 2480 | if (rc) |
2481 | return rc; | 2481 | return rc; |
2482 | 2482 | ||
2483 | /* Free Task CXT */ | 2483 | /* Free Task CXT ( Intentionally RoCE as task-id is shared between |
2484 | * RoCE and iWARP ) | ||
2485 | */ | ||
2486 | proto = PROTOCOLID_ROCE; | ||
2484 | rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0, | 2487 | rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0, |
2485 | qed_cxt_get_proto_tid_count(p_hwfn, proto)); | 2488 | qed_cxt_get_proto_tid_count(p_hwfn, proto)); |
2486 | if (rc) | 2489 | if (rc) |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index ca4a81dc1ace..d5d02be72947 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c | |||
@@ -1703,6 +1703,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, | |||
1703 | iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); | 1703 | iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); |
1704 | 1704 | ||
1705 | if (eth_type == ETH_P_IP) { | 1705 | if (eth_type == ETH_P_IP) { |
1706 | if (iph->protocol != IPPROTO_TCP) { | ||
1707 | DP_NOTICE(p_hwfn, | ||
1708 | "Unexpected ip protocol on ll2 %x\n", | ||
1709 | iph->protocol); | ||
1710 | return -EINVAL; | ||
1711 | } | ||
1712 | |||
1706 | cm_info->local_ip[0] = ntohl(iph->daddr); | 1713 | cm_info->local_ip[0] = ntohl(iph->daddr); |
1707 | cm_info->remote_ip[0] = ntohl(iph->saddr); | 1714 | cm_info->remote_ip[0] = ntohl(iph->saddr); |
1708 | cm_info->ip_version = TCP_IPV4; | 1715 | cm_info->ip_version = TCP_IPV4; |
@@ -1711,6 +1718,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, | |||
1711 | *payload_len = ntohs(iph->tot_len) - ip_hlen; | 1718 | *payload_len = ntohs(iph->tot_len) - ip_hlen; |
1712 | } else if (eth_type == ETH_P_IPV6) { | 1719 | } else if (eth_type == ETH_P_IPV6) { |
1713 | ip6h = (struct ipv6hdr *)iph; | 1720 | ip6h = (struct ipv6hdr *)iph; |
1721 | |||
1722 | if (ip6h->nexthdr != IPPROTO_TCP) { | ||
1723 | DP_NOTICE(p_hwfn, | ||
1724 | "Unexpected ip protocol on ll2 %x\n", | ||
1725 | iph->protocol); | ||
1726 | return -EINVAL; | ||
1727 | } | ||
1728 | |||
1714 | for (i = 0; i < 4; i++) { | 1729 | for (i = 0; i < 4; i++) { |
1715 | cm_info->local_ip[i] = | 1730 | cm_info->local_ip[i] = |
1716 | ntohl(ip6h->daddr.in6_u.u6_addr32[i]); | 1731 | ntohl(ip6h->daddr.in6_u.u6_addr32[i]); |
@@ -1928,8 +1943,8 @@ qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn, | |||
1928 | /* Missing lower byte is now available */ | 1943 | /* Missing lower byte is now available */ |
1929 | mpa_len = fpdu->fpdu_length | *mpa_data; | 1944 | mpa_len = fpdu->fpdu_length | *mpa_data; |
1930 | fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); | 1945 | fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); |
1931 | fpdu->mpa_frag_len = fpdu->fpdu_length; | ||
1932 | /* one byte of hdr */ | 1946 | /* one byte of hdr */ |
1947 | fpdu->mpa_frag_len = 1; | ||
1933 | fpdu->incomplete_bytes = fpdu->fpdu_length - 1; | 1948 | fpdu->incomplete_bytes = fpdu->fpdu_length - 1; |
1934 | DP_VERBOSE(p_hwfn, | 1949 | DP_VERBOSE(p_hwfn, |
1935 | QED_MSG_RDMA, | 1950 | QED_MSG_RDMA, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 5d040b873137..a411f9c702a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c | |||
@@ -379,6 +379,7 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn) | |||
379 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); | 379 | DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); |
380 | 380 | ||
381 | qed_rdma_free_reserved_lkey(p_hwfn); | 381 | qed_rdma_free_reserved_lkey(p_hwfn); |
382 | qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto); | ||
382 | qed_rdma_resc_free(p_hwfn); | 383 | qed_rdma_resc_free(p_hwfn); |
383 | } | 384 | } |
384 | 385 | ||
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 2db70eabddfe..a01e7d6e5442 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
@@ -288,7 +288,7 @@ int __init qede_init(void) | |||
288 | } | 288 | } |
289 | 289 | ||
290 | /* Must register notifier before pci ops, since we might miss | 290 | /* Must register notifier before pci ops, since we might miss |
291 | * interface rename after pci probe and netdev registeration. | 291 | * interface rename after pci probe and netdev registration. |
292 | */ | 292 | */ |
293 | ret = register_netdevice_notifier(&qede_netdev_notifier); | 293 | ret = register_netdevice_notifier(&qede_netdev_notifier); |
294 | if (ret) { | 294 | if (ret) { |
@@ -988,7 +988,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, | |||
988 | if (rc) | 988 | if (rc) |
989 | goto err3; | 989 | goto err3; |
990 | 990 | ||
991 | /* Prepare the lock prior to the registeration of the netdev, | 991 | /* Prepare the lock prior to the registration of the netdev, |
992 | * as once it's registered we might reach flows requiring it | 992 | * as once it's registered we might reach flows requiring it |
993 | * [it's even possible to reach a flow needing it directly | 993 | * [it's even possible to reach a flow needing it directly |
994 | * from there, although it's unlikely]. | 994 | * from there, although it's unlikely]. |
@@ -2067,8 +2067,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, | |||
2067 | link_params.link_up = true; | 2067 | link_params.link_up = true; |
2068 | edev->ops->common->set_link(edev->cdev, &link_params); | 2068 | edev->ops->common->set_link(edev->cdev, &link_params); |
2069 | 2069 | ||
2070 | qede_rdma_dev_event_open(edev); | ||
2071 | |||
2072 | edev->state = QEDE_STATE_OPEN; | 2070 | edev->state = QEDE_STATE_OPEN; |
2073 | 2071 | ||
2074 | DP_INFO(edev, "Ending successfully qede load\n"); | 2072 | DP_INFO(edev, "Ending successfully qede load\n"); |
@@ -2169,12 +2167,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link) | |||
2169 | DP_NOTICE(edev, "Link is up\n"); | 2167 | DP_NOTICE(edev, "Link is up\n"); |
2170 | netif_tx_start_all_queues(edev->ndev); | 2168 | netif_tx_start_all_queues(edev->ndev); |
2171 | netif_carrier_on(edev->ndev); | 2169 | netif_carrier_on(edev->ndev); |
2170 | qede_rdma_dev_event_open(edev); | ||
2172 | } | 2171 | } |
2173 | } else { | 2172 | } else { |
2174 | if (netif_carrier_ok(edev->ndev)) { | 2173 | if (netif_carrier_ok(edev->ndev)) { |
2175 | DP_NOTICE(edev, "Link is down\n"); | 2174 | DP_NOTICE(edev, "Link is down\n"); |
2176 | netif_tx_disable(edev->ndev); | 2175 | netif_tx_disable(edev->ndev); |
2177 | netif_carrier_off(edev->ndev); | 2176 | netif_carrier_off(edev->ndev); |
2177 | qede_rdma_dev_event_close(edev); | ||
2178 | } | 2178 | } |
2179 | } | 2179 | } |
2180 | } | 2180 | } |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 9b2280badaf7..02adb513f475 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c | |||
@@ -485,7 +485,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc) | |||
485 | ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); | 485 | ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); |
486 | if (IS_ERR(ptp->clock)) { | 486 | if (IS_ERR(ptp->clock)) { |
487 | rc = -EINVAL; | 487 | rc = -EINVAL; |
488 | DP_ERR(edev, "PTP clock registeration failed\n"); | 488 | DP_ERR(edev, "PTP clock registration failed\n"); |
489 | goto err2; | 489 | goto err2; |
490 | } | 490 | } |
491 | 491 | ||
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 9cbb27263742..d5a32b7c7dc5 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c | |||
@@ -1194,9 +1194,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q) | |||
1194 | while (tx_q->tpd.consume_idx != hw_consume_idx) { | 1194 | while (tx_q->tpd.consume_idx != hw_consume_idx) { |
1195 | tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx); | 1195 | tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx); |
1196 | if (tpbuf->dma_addr) { | 1196 | if (tpbuf->dma_addr) { |
1197 | dma_unmap_single(adpt->netdev->dev.parent, | 1197 | dma_unmap_page(adpt->netdev->dev.parent, |
1198 | tpbuf->dma_addr, tpbuf->length, | 1198 | tpbuf->dma_addr, tpbuf->length, |
1199 | DMA_TO_DEVICE); | 1199 | DMA_TO_DEVICE); |
1200 | tpbuf->dma_addr = 0; | 1200 | tpbuf->dma_addr = 0; |
1201 | } | 1201 | } |
1202 | 1202 | ||
@@ -1353,9 +1353,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, | |||
1353 | 1353 | ||
1354 | tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); | 1354 | tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); |
1355 | tpbuf->length = mapped_len; | 1355 | tpbuf->length = mapped_len; |
1356 | tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, | 1356 | tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, |
1357 | skb->data, tpbuf->length, | 1357 | virt_to_page(skb->data), |
1358 | DMA_TO_DEVICE); | 1358 | offset_in_page(skb->data), |
1359 | tpbuf->length, | ||
1360 | DMA_TO_DEVICE); | ||
1359 | ret = dma_mapping_error(adpt->netdev->dev.parent, | 1361 | ret = dma_mapping_error(adpt->netdev->dev.parent, |
1360 | tpbuf->dma_addr); | 1362 | tpbuf->dma_addr); |
1361 | if (ret) | 1363 | if (ret) |
@@ -1371,9 +1373,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt, | |||
1371 | if (mapped_len < len) { | 1373 | if (mapped_len < len) { |
1372 | tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); | 1374 | tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); |
1373 | tpbuf->length = len - mapped_len; | 1375 | tpbuf->length = len - mapped_len; |
1374 | tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, | 1376 | tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, |
1375 | skb->data + mapped_len, | 1377 | virt_to_page(skb->data + |
1376 | tpbuf->length, DMA_TO_DEVICE); | 1378 | mapped_len), |
1379 | offset_in_page(skb->data + | ||
1380 | mapped_len), | ||
1381 | tpbuf->length, DMA_TO_DEVICE); | ||
1377 | ret = dma_mapping_error(adpt->netdev->dev.parent, | 1382 | ret = dma_mapping_error(adpt->netdev->dev.parent, |
1378 | tpbuf->dma_addr); | 1383 | tpbuf->dma_addr); |
1379 | if (ret) | 1384 | if (ret) |
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 7e7704daf5f1..c4949183eef3 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | |||
@@ -43,12 +43,6 @@ | |||
43 | 43 | ||
44 | /* Local Definitions and Declarations */ | 44 | /* Local Definitions and Declarations */ |
45 | 45 | ||
46 | struct rmnet_walk_data { | ||
47 | struct net_device *real_dev; | ||
48 | struct list_head *head; | ||
49 | struct rmnet_port *port; | ||
50 | }; | ||
51 | |||
52 | static int rmnet_is_real_dev_registered(const struct net_device *real_dev) | 46 | static int rmnet_is_real_dev_registered(const struct net_device *real_dev) |
53 | { | 47 | { |
54 | return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; | 48 | return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; |
@@ -112,17 +106,14 @@ static int rmnet_register_real_device(struct net_device *real_dev) | |||
112 | static void rmnet_unregister_bridge(struct net_device *dev, | 106 | static void rmnet_unregister_bridge(struct net_device *dev, |
113 | struct rmnet_port *port) | 107 | struct rmnet_port *port) |
114 | { | 108 | { |
115 | struct net_device *rmnet_dev, *bridge_dev; | ||
116 | struct rmnet_port *bridge_port; | 109 | struct rmnet_port *bridge_port; |
110 | struct net_device *bridge_dev; | ||
117 | 111 | ||
118 | if (port->rmnet_mode != RMNET_EPMODE_BRIDGE) | 112 | if (port->rmnet_mode != RMNET_EPMODE_BRIDGE) |
119 | return; | 113 | return; |
120 | 114 | ||
121 | /* bridge slave handling */ | 115 | /* bridge slave handling */ |
122 | if (!port->nr_rmnet_devs) { | 116 | if (!port->nr_rmnet_devs) { |
123 | rmnet_dev = netdev_master_upper_dev_get_rcu(dev); | ||
124 | netdev_upper_dev_unlink(dev, rmnet_dev); | ||
125 | |||
126 | bridge_dev = port->bridge_ep; | 117 | bridge_dev = port->bridge_ep; |
127 | 118 | ||
128 | bridge_port = rmnet_get_port_rtnl(bridge_dev); | 119 | bridge_port = rmnet_get_port_rtnl(bridge_dev); |
@@ -132,9 +123,6 @@ static void rmnet_unregister_bridge(struct net_device *dev, | |||
132 | bridge_dev = port->bridge_ep; | 123 | bridge_dev = port->bridge_ep; |
133 | 124 | ||
134 | bridge_port = rmnet_get_port_rtnl(bridge_dev); | 125 | bridge_port = rmnet_get_port_rtnl(bridge_dev); |
135 | rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev); | ||
136 | netdev_upper_dev_unlink(bridge_dev, rmnet_dev); | ||
137 | |||
138 | rmnet_unregister_real_device(bridge_dev, bridge_port); | 126 | rmnet_unregister_real_device(bridge_dev, bridge_port); |
139 | } | 127 | } |
140 | } | 128 | } |
@@ -173,10 +161,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, | |||
173 | if (err) | 161 | if (err) |
174 | goto err1; | 162 | goto err1; |
175 | 163 | ||
176 | err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack); | ||
177 | if (err) | ||
178 | goto err2; | ||
179 | |||
180 | port->rmnet_mode = mode; | 164 | port->rmnet_mode = mode; |
181 | 165 | ||
182 | hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); | 166 | hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); |
@@ -193,8 +177,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, | |||
193 | 177 | ||
194 | return 0; | 178 | return 0; |
195 | 179 | ||
196 | err2: | ||
197 | rmnet_vnd_dellink(mux_id, port, ep); | ||
198 | err1: | 180 | err1: |
199 | rmnet_unregister_real_device(real_dev, port); | 181 | rmnet_unregister_real_device(real_dev, port); |
200 | err0: | 182 | err0: |
@@ -204,14 +186,13 @@ err0: | |||
204 | 186 | ||
205 | static void rmnet_dellink(struct net_device *dev, struct list_head *head) | 187 | static void rmnet_dellink(struct net_device *dev, struct list_head *head) |
206 | { | 188 | { |
189 | struct rmnet_priv *priv = netdev_priv(dev); | ||
207 | struct net_device *real_dev; | 190 | struct net_device *real_dev; |
208 | struct rmnet_endpoint *ep; | 191 | struct rmnet_endpoint *ep; |
209 | struct rmnet_port *port; | 192 | struct rmnet_port *port; |
210 | u8 mux_id; | 193 | u8 mux_id; |
211 | 194 | ||
212 | rcu_read_lock(); | 195 | real_dev = priv->real_dev; |
213 | real_dev = netdev_master_upper_dev_get_rcu(dev); | ||
214 | rcu_read_unlock(); | ||
215 | 196 | ||
216 | if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) | 197 | if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) |
217 | return; | 198 | return; |
@@ -219,7 +200,6 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) | |||
219 | port = rmnet_get_port_rtnl(real_dev); | 200 | port = rmnet_get_port_rtnl(real_dev); |
220 | 201 | ||
221 | mux_id = rmnet_vnd_get_mux(dev); | 202 | mux_id = rmnet_vnd_get_mux(dev); |
222 | netdev_upper_dev_unlink(dev, real_dev); | ||
223 | 203 | ||
224 | ep = rmnet_get_endpoint(port, mux_id); | 204 | ep = rmnet_get_endpoint(port, mux_id); |
225 | if (ep) { | 205 | if (ep) { |
@@ -233,30 +213,13 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) | |||
233 | unregister_netdevice_queue(dev, head); | 213 | unregister_netdevice_queue(dev, head); |
234 | } | 214 | } |
235 | 215 | ||
236 | static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data) | ||
237 | { | ||
238 | struct rmnet_walk_data *d = data; | ||
239 | struct rmnet_endpoint *ep; | ||
240 | u8 mux_id; | ||
241 | |||
242 | mux_id = rmnet_vnd_get_mux(rmnet_dev); | ||
243 | ep = rmnet_get_endpoint(d->port, mux_id); | ||
244 | if (ep) { | ||
245 | hlist_del_init_rcu(&ep->hlnode); | ||
246 | rmnet_vnd_dellink(mux_id, d->port, ep); | ||
247 | kfree(ep); | ||
248 | } | ||
249 | netdev_upper_dev_unlink(rmnet_dev, d->real_dev); | ||
250 | unregister_netdevice_queue(rmnet_dev, d->head); | ||
251 | |||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | static void rmnet_force_unassociate_device(struct net_device *dev) | 216 | static void rmnet_force_unassociate_device(struct net_device *dev) |
256 | { | 217 | { |
257 | struct net_device *real_dev = dev; | 218 | struct net_device *real_dev = dev; |
258 | struct rmnet_walk_data d; | 219 | struct hlist_node *tmp_ep; |
220 | struct rmnet_endpoint *ep; | ||
259 | struct rmnet_port *port; | 221 | struct rmnet_port *port; |
222 | unsigned long bkt_ep; | ||
260 | LIST_HEAD(list); | 223 | LIST_HEAD(list); |
261 | 224 | ||
262 | if (!rmnet_is_real_dev_registered(real_dev)) | 225 | if (!rmnet_is_real_dev_registered(real_dev)) |
@@ -264,16 +227,19 @@ static void rmnet_force_unassociate_device(struct net_device *dev) | |||
264 | 227 | ||
265 | ASSERT_RTNL(); | 228 | ASSERT_RTNL(); |
266 | 229 | ||
267 | d.real_dev = real_dev; | ||
268 | d.head = &list; | ||
269 | |||
270 | port = rmnet_get_port_rtnl(dev); | 230 | port = rmnet_get_port_rtnl(dev); |
271 | d.port = port; | ||
272 | 231 | ||
273 | rcu_read_lock(); | 232 | rcu_read_lock(); |
274 | rmnet_unregister_bridge(dev, port); | 233 | rmnet_unregister_bridge(dev, port); |
275 | 234 | ||
276 | netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d); | 235 | hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { |
236 | unregister_netdevice_queue(ep->egress_dev, &list); | ||
237 | rmnet_vnd_dellink(ep->mux_id, port, ep); | ||
238 | |||
239 | hlist_del_init_rcu(&ep->hlnode); | ||
240 | kfree(ep); | ||
241 | } | ||
242 | |||
277 | rcu_read_unlock(); | 243 | rcu_read_unlock(); |
278 | unregister_netdevice_many(&list); | 244 | unregister_netdevice_many(&list); |
279 | 245 | ||
@@ -422,11 +388,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, | |||
422 | if (err) | 388 | if (err) |
423 | return -EBUSY; | 389 | return -EBUSY; |
424 | 390 | ||
425 | err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL, | ||
426 | extack); | ||
427 | if (err) | ||
428 | return -EINVAL; | ||
429 | |||
430 | slave_port = rmnet_get_port(slave_dev); | 391 | slave_port = rmnet_get_port(slave_dev); |
431 | slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; | 392 | slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; |
432 | slave_port->bridge_ep = real_dev; | 393 | slave_port->bridge_ep = real_dev; |
@@ -449,7 +410,6 @@ int rmnet_del_bridge(struct net_device *rmnet_dev, | |||
449 | port->rmnet_mode = RMNET_EPMODE_VND; | 410 | port->rmnet_mode = RMNET_EPMODE_VND; |
450 | port->bridge_ep = NULL; | 411 | port->bridge_ep = NULL; |
451 | 412 | ||
452 | netdev_upper_dev_unlink(slave_dev, rmnet_dev); | ||
453 | slave_port = rmnet_get_port(slave_dev); | 413 | slave_port = rmnet_get_port(slave_dev); |
454 | rmnet_unregister_real_device(slave_dev, slave_port); | 414 | rmnet_unregister_real_device(slave_dev, slave_port); |
455 | 415 | ||
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 6bc328fb88e1..b0dbca070c00 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c | |||
@@ -38,6 +38,11 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb, | |||
38 | } | 38 | } |
39 | 39 | ||
40 | ep = rmnet_get_endpoint(port, mux_id); | 40 | ep = rmnet_get_endpoint(port, mux_id); |
41 | if (!ep) { | ||
42 | kfree_skb(skb); | ||
43 | return RX_HANDLER_CONSUMED; | ||
44 | } | ||
45 | |||
41 | vnd = ep->egress_dev; | 46 | vnd = ep->egress_dev; |
42 | 47 | ||
43 | ip_family = cmd->flow_control.ip_family; | 48 | ip_family = cmd->flow_control.ip_family; |
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 570a227acdd8..346d310914df 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | |||
@@ -121,7 +121,7 @@ static void rmnet_get_stats64(struct net_device *dev, | |||
121 | memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats)); | 121 | memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats)); |
122 | 122 | ||
123 | for_each_possible_cpu(cpu) { | 123 | for_each_possible_cpu(cpu) { |
124 | pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); | 124 | pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu); |
125 | 125 | ||
126 | do { | 126 | do { |
127 | start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); | 127 | start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); |
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c87f57ca4437..a95fbd5510d9 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
@@ -2255,9 +2255,6 @@ static int ravb_wol_setup(struct net_device *ndev) | |||
2255 | /* Enable MagicPacket */ | 2255 | /* Enable MagicPacket */ |
2256 | ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); | 2256 | ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); |
2257 | 2257 | ||
2258 | /* Increased clock usage so device won't be suspended */ | ||
2259 | clk_enable(priv->clk); | ||
2260 | |||
2261 | return enable_irq_wake(priv->emac_irq); | 2258 | return enable_irq_wake(priv->emac_irq); |
2262 | } | 2259 | } |
2263 | 2260 | ||
@@ -2276,9 +2273,6 @@ static int ravb_wol_restore(struct net_device *ndev) | |||
2276 | if (ret < 0) | 2273 | if (ret < 0) |
2277 | return ret; | 2274 | return ret; |
2278 | 2275 | ||
2279 | /* Restore clock usage count */ | ||
2280 | clk_disable(priv->clk); | ||
2281 | |||
2282 | return disable_irq_wake(priv->emac_irq); | 2276 | return disable_irq_wake(priv->emac_irq); |
2283 | } | 2277 | } |
2284 | 2278 | ||
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index a197e11f3a56..14c839bb09e7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -40,7 +40,6 @@ | |||
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/ethtool.h> | 41 | #include <linux/ethtool.h> |
42 | #include <linux/if_vlan.h> | 42 | #include <linux/if_vlan.h> |
43 | #include <linux/clk.h> | ||
44 | #include <linux/sh_eth.h> | 43 | #include <linux/sh_eth.h> |
45 | #include <linux/of_mdio.h> | 44 | #include <linux/of_mdio.h> |
46 | 45 | ||
@@ -440,6 +439,17 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, | |||
440 | enum_index); | 439 | enum_index); |
441 | } | 440 | } |
442 | 441 | ||
442 | static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, | ||
443 | int enum_index) | ||
444 | { | ||
445 | iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); | ||
446 | } | ||
447 | |||
448 | static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) | ||
449 | { | ||
450 | return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); | ||
451 | } | ||
452 | |||
443 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) | 453 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) |
444 | { | 454 | { |
445 | return mdp->reg_offset == sh_eth_offset_gigabit; | 455 | return mdp->reg_offset == sh_eth_offset_gigabit; |
@@ -2304,7 +2314,7 @@ static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) | |||
2304 | wol->supported = 0; | 2314 | wol->supported = 0; |
2305 | wol->wolopts = 0; | 2315 | wol->wolopts = 0; |
2306 | 2316 | ||
2307 | if (mdp->cd->magic && mdp->clk) { | 2317 | if (mdp->cd->magic) { |
2308 | wol->supported = WAKE_MAGIC; | 2318 | wol->supported = WAKE_MAGIC; |
2309 | wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; | 2319 | wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; |
2310 | } | 2320 | } |
@@ -2314,7 +2324,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) | |||
2314 | { | 2324 | { |
2315 | struct sh_eth_private *mdp = netdev_priv(ndev); | 2325 | struct sh_eth_private *mdp = netdev_priv(ndev); |
2316 | 2326 | ||
2317 | if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC) | 2327 | if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC) |
2318 | return -EOPNOTSUPP; | 2328 | return -EOPNOTSUPP; |
2319 | 2329 | ||
2320 | mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); | 2330 | mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); |
@@ -3153,11 +3163,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
3153 | goto out_release; | 3163 | goto out_release; |
3154 | } | 3164 | } |
3155 | 3165 | ||
3156 | /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */ | ||
3157 | mdp->clk = devm_clk_get(&pdev->dev, NULL); | ||
3158 | if (IS_ERR(mdp->clk)) | ||
3159 | mdp->clk = NULL; | ||
3160 | |||
3161 | ndev->base_addr = res->start; | 3166 | ndev->base_addr = res->start; |
3162 | 3167 | ||
3163 | spin_lock_init(&mdp->lock); | 3168 | spin_lock_init(&mdp->lock); |
@@ -3278,7 +3283,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
3278 | if (ret) | 3283 | if (ret) |
3279 | goto out_napi_del; | 3284 | goto out_napi_del; |
3280 | 3285 | ||
3281 | if (mdp->cd->magic && mdp->clk) | 3286 | if (mdp->cd->magic) |
3282 | device_set_wakeup_capable(&pdev->dev, 1); | 3287 | device_set_wakeup_capable(&pdev->dev, 1); |
3283 | 3288 | ||
3284 | /* print device information */ | 3289 | /* print device information */ |
@@ -3331,9 +3336,6 @@ static int sh_eth_wol_setup(struct net_device *ndev) | |||
3331 | /* Enable MagicPacket */ | 3336 | /* Enable MagicPacket */ |
3332 | sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); | 3337 | sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); |
3333 | 3338 | ||
3334 | /* Increased clock usage so device won't be suspended */ | ||
3335 | clk_enable(mdp->clk); | ||
3336 | |||
3337 | return enable_irq_wake(ndev->irq); | 3339 | return enable_irq_wake(ndev->irq); |
3338 | } | 3340 | } |
3339 | 3341 | ||
@@ -3359,9 +3361,6 @@ static int sh_eth_wol_restore(struct net_device *ndev) | |||
3359 | if (ret < 0) | 3361 | if (ret < 0) |
3360 | return ret; | 3362 | return ret; |
3361 | 3363 | ||
3362 | /* Restore clock usage count */ | ||
3363 | clk_disable(mdp->clk); | ||
3364 | |||
3365 | return disable_irq_wake(ndev->irq); | 3364 | return disable_irq_wake(ndev->irq); |
3366 | } | 3365 | } |
3367 | 3366 | ||
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a6753ccba711..e5fe70134690 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -567,15 +567,4 @@ static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, | |||
567 | return mdp->tsu_addr + mdp->reg_offset[enum_index]; | 567 | return mdp->tsu_addr + mdp->reg_offset[enum_index]; |
568 | } | 568 | } |
569 | 569 | ||
570 | static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, | ||
571 | int enum_index) | ||
572 | { | ||
573 | iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); | ||
574 | } | ||
575 | |||
576 | static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) | ||
577 | { | ||
578 | return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); | ||
579 | } | ||
580 | |||
581 | #endif /* #ifndef __SH_ETH_H__ */ | 570 | #endif /* #ifndef __SH_ETH_H__ */ |
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index 63aca9f847e1..4c2f612e4414 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig | |||
@@ -20,7 +20,7 @@ if NET_VENDOR_SMSC | |||
20 | 20 | ||
21 | config SMC9194 | 21 | config SMC9194 |
22 | tristate "SMC 9194 support" | 22 | tristate "SMC 9194 support" |
23 | depends on (ISA || MAC && BROKEN) | 23 | depends on ISA |
24 | select CRC32 | 24 | select CRC32 |
25 | ---help--- | 25 | ---help--- |
26 | This is support for the SMC9xxx based Ethernet cards. Choose this | 26 | This is support for the SMC9xxx based Ethernet cards. Choose this |
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 012fb66eed8d..f0afb88d7bc2 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c | |||
@@ -2335,14 +2335,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev) | |||
2335 | pdata = netdev_priv(dev); | 2335 | pdata = netdev_priv(dev); |
2336 | BUG_ON(!pdata); | 2336 | BUG_ON(!pdata); |
2337 | BUG_ON(!pdata->ioaddr); | 2337 | BUG_ON(!pdata->ioaddr); |
2338 | WARN_ON(dev->phydev); | ||
2339 | 2338 | ||
2340 | SMSC_TRACE(pdata, ifdown, "Stopping driver"); | 2339 | SMSC_TRACE(pdata, ifdown, "Stopping driver"); |
2341 | 2340 | ||
2341 | unregister_netdev(dev); | ||
2342 | |||
2342 | mdiobus_unregister(pdata->mii_bus); | 2343 | mdiobus_unregister(pdata->mii_bus); |
2343 | mdiobus_free(pdata->mii_bus); | 2344 | mdiobus_free(pdata->mii_bus); |
2344 | 2345 | ||
2345 | unregister_netdev(dev); | ||
2346 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | 2346 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, |
2347 | "smsc911x-memory"); | 2347 | "smsc911x-memory"); |
2348 | if (!res) | 2348 | if (!res) |
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 111e7ca9df56..f5c5984afefb 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c | |||
@@ -1295,7 +1295,7 @@ static int ave_open(struct net_device *ndev) | |||
1295 | val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16); | 1295 | val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16); |
1296 | writel(val, priv->base + AVE_IIRQC); | 1296 | writel(val, priv->base + AVE_IIRQC); |
1297 | 1297 | ||
1298 | val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX; | 1298 | val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP; |
1299 | ave_irq_restore(ndev, val); | 1299 | ave_irq_restore(ndev, val); |
1300 | 1300 | ||
1301 | napi_enable(&priv->napi_rx); | 1301 | napi_enable(&priv->napi_rx); |
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 63d3d6b215f3..a94f50442613 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c | |||
@@ -312,7 +312,7 @@ static struct vnet *vnet_new(const u64 *local_mac, | |||
312 | dev->ethtool_ops = &vnet_ethtool_ops; | 312 | dev->ethtool_ops = &vnet_ethtool_ops; |
313 | dev->watchdog_timeo = VNET_TX_TIMEOUT; | 313 | dev->watchdog_timeo = VNET_TX_TIMEOUT; |
314 | 314 | ||
315 | dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | | 315 | dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO | |
316 | NETIF_F_HW_CSUM | NETIF_F_SG; | 316 | NETIF_F_HW_CSUM | NETIF_F_SG; |
317 | dev->features = dev->hw_features; | 317 | dev->features = dev->hw_features; |
318 | 318 | ||
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 1b1b78fdc138..b2b30c9df037 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -1014,7 +1014,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, | |||
1014 | /* set speed_in input in case RMII mode is used in 100Mbps */ | 1014 | /* set speed_in input in case RMII mode is used in 100Mbps */ |
1015 | if (phy->speed == 100) | 1015 | if (phy->speed == 100) |
1016 | mac_control |= BIT(15); | 1016 | mac_control |= BIT(15); |
1017 | else if (phy->speed == 10) | 1017 | /* in band mode only works in 10Mbps RGMII mode */ |
1018 | else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) | ||
1018 | mac_control |= BIT(18); /* In Band mode */ | 1019 | mac_control |= BIT(18); /* In Band mode */ |
1019 | 1020 | ||
1020 | if (priv->rx_pause) | 1021 | if (priv->rx_pause) |
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 0db3bd1ea06f..32861036c3fc 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -173,6 +173,7 @@ struct rndis_device { | |||
173 | struct list_head req_list; | 173 | struct list_head req_list; |
174 | 174 | ||
175 | struct work_struct mcast_work; | 175 | struct work_struct mcast_work; |
176 | u32 filter; | ||
176 | 177 | ||
177 | bool link_state; /* 0 - link up, 1 - link down */ | 178 | bool link_state; /* 0 - link up, 1 - link down */ |
178 | 179 | ||
@@ -211,7 +212,6 @@ void netvsc_channel_cb(void *context); | |||
211 | int netvsc_poll(struct napi_struct *napi, int budget); | 212 | int netvsc_poll(struct napi_struct *napi, int budget); |
212 | 213 | ||
213 | void rndis_set_subchannel(struct work_struct *w); | 214 | void rndis_set_subchannel(struct work_struct *w); |
214 | bool rndis_filter_opened(const struct netvsc_device *nvdev); | ||
215 | int rndis_filter_open(struct netvsc_device *nvdev); | 215 | int rndis_filter_open(struct netvsc_device *nvdev); |
216 | int rndis_filter_close(struct netvsc_device *nvdev); | 216 | int rndis_filter_close(struct netvsc_device *nvdev); |
217 | struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, | 217 | struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 17e529af79dc..7472172823f3 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -90,6 +90,11 @@ static void free_netvsc_device(struct rcu_head *head) | |||
90 | = container_of(head, struct netvsc_device, rcu); | 90 | = container_of(head, struct netvsc_device, rcu); |
91 | int i; | 91 | int i; |
92 | 92 | ||
93 | kfree(nvdev->extension); | ||
94 | vfree(nvdev->recv_buf); | ||
95 | vfree(nvdev->send_buf); | ||
96 | kfree(nvdev->send_section_map); | ||
97 | |||
93 | for (i = 0; i < VRSS_CHANNEL_MAX; i++) | 98 | for (i = 0; i < VRSS_CHANNEL_MAX; i++) |
94 | vfree(nvdev->chan_table[i].mrc.slots); | 99 | vfree(nvdev->chan_table[i].mrc.slots); |
95 | 100 | ||
@@ -211,12 +216,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device, | |||
211 | net_device->recv_buf_gpadl_handle = 0; | 216 | net_device->recv_buf_gpadl_handle = 0; |
212 | } | 217 | } |
213 | 218 | ||
214 | if (net_device->recv_buf) { | ||
215 | /* Free up the receive buffer */ | ||
216 | vfree(net_device->recv_buf); | ||
217 | net_device->recv_buf = NULL; | ||
218 | } | ||
219 | |||
220 | if (net_device->send_buf_gpadl_handle) { | 219 | if (net_device->send_buf_gpadl_handle) { |
221 | ret = vmbus_teardown_gpadl(device->channel, | 220 | ret = vmbus_teardown_gpadl(device->channel, |
222 | net_device->send_buf_gpadl_handle); | 221 | net_device->send_buf_gpadl_handle); |
@@ -231,12 +230,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device, | |||
231 | } | 230 | } |
232 | net_device->send_buf_gpadl_handle = 0; | 231 | net_device->send_buf_gpadl_handle = 0; |
233 | } | 232 | } |
234 | if (net_device->send_buf) { | ||
235 | /* Free up the send buffer */ | ||
236 | vfree(net_device->send_buf); | ||
237 | net_device->send_buf = NULL; | ||
238 | } | ||
239 | kfree(net_device->send_section_map); | ||
240 | } | 233 | } |
241 | 234 | ||
242 | int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) | 235 | int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) |
@@ -562,26 +555,29 @@ void netvsc_device_remove(struct hv_device *device) | |||
562 | = rtnl_dereference(net_device_ctx->nvdev); | 555 | = rtnl_dereference(net_device_ctx->nvdev); |
563 | int i; | 556 | int i; |
564 | 557 | ||
565 | cancel_work_sync(&net_device->subchan_work); | ||
566 | |||
567 | netvsc_revoke_buf(device, net_device); | 558 | netvsc_revoke_buf(device, net_device); |
568 | 559 | ||
569 | RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); | 560 | RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); |
570 | 561 | ||
562 | /* And disassociate NAPI context from device */ | ||
563 | for (i = 0; i < net_device->num_chn; i++) | ||
564 | netif_napi_del(&net_device->chan_table[i].napi); | ||
565 | |||
571 | /* | 566 | /* |
572 | * At this point, no one should be accessing net_device | 567 | * At this point, no one should be accessing net_device |
573 | * except in here | 568 | * except in here |
574 | */ | 569 | */ |
575 | netdev_dbg(ndev, "net device safe to remove\n"); | 570 | netdev_dbg(ndev, "net device safe to remove\n"); |
576 | 571 | ||
572 | /* older versions require that buffer be revoked before close */ | ||
573 | if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4) | ||
574 | netvsc_teardown_gpadl(device, net_device); | ||
575 | |||
577 | /* Now, we can close the channel safely */ | 576 | /* Now, we can close the channel safely */ |
578 | vmbus_close(device->channel); | 577 | vmbus_close(device->channel); |
579 | 578 | ||
580 | netvsc_teardown_gpadl(device, net_device); | 579 | if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4) |
581 | 580 | netvsc_teardown_gpadl(device, net_device); | |
582 | /* And dissassociate NAPI context from device */ | ||
583 | for (i = 0; i < net_device->num_chn; i++) | ||
584 | netif_napi_del(&net_device->chan_table[i].napi); | ||
585 | 581 | ||
586 | /* Release all resources */ | 582 | /* Release all resources */ |
587 | free_netvsc_device_rcu(net_device); | 583 | free_netvsc_device_rcu(net_device); |
@@ -645,14 +641,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, | |||
645 | queue_sends = | 641 | queue_sends = |
646 | atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); | 642 | atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); |
647 | 643 | ||
648 | if (net_device->destroy && queue_sends == 0) | 644 | if (unlikely(net_device->destroy)) { |
649 | wake_up(&net_device->wait_drain); | 645 | if (queue_sends == 0) |
646 | wake_up(&net_device->wait_drain); | ||
647 | } else { | ||
648 | struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); | ||
650 | 649 | ||
651 | if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && | 650 | if (netif_tx_queue_stopped(txq) && |
652 | (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || | 651 | (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || |
653 | queue_sends < 1)) { | 652 | queue_sends < 1)) { |
654 | netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx)); | 653 | netif_tx_wake_queue(txq); |
655 | ndev_ctx->eth_stats.wake_queue++; | 654 | ndev_ctx->eth_stats.wake_queue++; |
655 | } | ||
656 | } | 656 | } |
657 | } | 657 | } |
658 | 658 | ||
@@ -852,13 +852,6 @@ int netvsc_send(struct net_device *ndev, | |||
852 | if (unlikely(!net_device || net_device->destroy)) | 852 | if (unlikely(!net_device || net_device->destroy)) |
853 | return -ENODEV; | 853 | return -ENODEV; |
854 | 854 | ||
855 | /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get | ||
856 | * here before the negotiation with the host is finished and | ||
857 | * send_section_map may not be allocated yet. | ||
858 | */ | ||
859 | if (unlikely(!net_device->send_section_map)) | ||
860 | return -EAGAIN; | ||
861 | |||
862 | nvchan = &net_device->chan_table[packet->q_idx]; | 855 | nvchan = &net_device->chan_table[packet->q_idx]; |
863 | packet->send_buf_index = NETVSC_INVALID_INDEX; | 856 | packet->send_buf_index = NETVSC_INVALID_INDEX; |
864 | packet->cp_partial = false; | 857 | packet->cp_partial = false; |
@@ -866,10 +859,8 @@ int netvsc_send(struct net_device *ndev, | |||
866 | /* Send control message directly without accessing msd (Multi-Send | 859 | /* Send control message directly without accessing msd (Multi-Send |
867 | * Data) field which may be changed during data packet processing. | 860 | * Data) field which may be changed during data packet processing. |
868 | */ | 861 | */ |
869 | if (!skb) { | 862 | if (!skb) |
870 | cur_send = packet; | 863 | return netvsc_send_pkt(device, packet, net_device, pb, skb); |
871 | goto send_now; | ||
872 | } | ||
873 | 864 | ||
874 | /* batch packets in send buffer if possible */ | 865 | /* batch packets in send buffer if possible */ |
875 | msdp = &nvchan->msd; | 866 | msdp = &nvchan->msd; |
@@ -953,7 +944,6 @@ int netvsc_send(struct net_device *ndev, | |||
953 | } | 944 | } |
954 | } | 945 | } |
955 | 946 | ||
956 | send_now: | ||
957 | if (cur_send) | 947 | if (cur_send) |
958 | ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb); | 948 | ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb); |
959 | 949 | ||
@@ -1217,9 +1207,10 @@ int netvsc_poll(struct napi_struct *napi, int budget) | |||
1217 | if (send_recv_completions(ndev, net_device, nvchan) == 0 && | 1207 | if (send_recv_completions(ndev, net_device, nvchan) == 0 && |
1218 | work_done < budget && | 1208 | work_done < budget && |
1219 | napi_complete_done(napi, work_done) && | 1209 | napi_complete_done(napi, work_done) && |
1220 | hv_end_read(&channel->inbound)) { | 1210 | hv_end_read(&channel->inbound) && |
1211 | napi_schedule_prep(napi)) { | ||
1221 | hv_begin_read(&channel->inbound); | 1212 | hv_begin_read(&channel->inbound); |
1222 | napi_reschedule(napi); | 1213 | __napi_schedule(napi); |
1223 | } | 1214 | } |
1224 | 1215 | ||
1225 | /* Driver may overshoot since multiple packets per descriptor */ | 1216 | /* Driver may overshoot since multiple packets per descriptor */ |
@@ -1242,7 +1233,7 @@ void netvsc_channel_cb(void *context) | |||
1242 | /* disable interupts from host */ | 1233 | /* disable interupts from host */ |
1243 | hv_begin_read(rbi); | 1234 | hv_begin_read(rbi); |
1244 | 1235 | ||
1245 | __napi_schedule(&nvchan->napi); | 1236 | __napi_schedule_irqoff(&nvchan->napi); |
1246 | } | 1237 | } |
1247 | } | 1238 | } |
1248 | 1239 | ||
@@ -1296,7 +1287,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, | |||
1296 | netvsc_channel_cb, net_device->chan_table); | 1287 | netvsc_channel_cb, net_device->chan_table); |
1297 | 1288 | ||
1298 | if (ret != 0) { | 1289 | if (ret != 0) { |
1299 | netif_napi_del(&net_device->chan_table[0].napi); | ||
1300 | netdev_err(ndev, "unable to open channel: %d\n", ret); | 1290 | netdev_err(ndev, "unable to open channel: %d\n", ret); |
1301 | goto cleanup; | 1291 | goto cleanup; |
1302 | } | 1292 | } |
@@ -1306,11 +1296,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, | |||
1306 | 1296 | ||
1307 | napi_enable(&net_device->chan_table[0].napi); | 1297 | napi_enable(&net_device->chan_table[0].napi); |
1308 | 1298 | ||
1309 | /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is | ||
1310 | * populated. | ||
1311 | */ | ||
1312 | rcu_assign_pointer(net_device_ctx->nvdev, net_device); | ||
1313 | |||
1314 | /* Connect with the NetVsp */ | 1299 | /* Connect with the NetVsp */ |
1315 | ret = netvsc_connect_vsp(device, net_device, device_info); | 1300 | ret = netvsc_connect_vsp(device, net_device, device_info); |
1316 | if (ret != 0) { | 1301 | if (ret != 0) { |
@@ -1319,6 +1304,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, | |||
1319 | goto close; | 1304 | goto close; |
1320 | } | 1305 | } |
1321 | 1306 | ||
1307 | /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is | ||
1308 | * populated. | ||
1309 | */ | ||
1310 | rcu_assign_pointer(net_device_ctx->nvdev, net_device); | ||
1311 | |||
1322 | return net_device; | 1312 | return net_device; |
1323 | 1313 | ||
1324 | close: | 1314 | close: |
@@ -1329,6 +1319,7 @@ close: | |||
1329 | vmbus_close(device->channel); | 1319 | vmbus_close(device->channel); |
1330 | 1320 | ||
1331 | cleanup: | 1321 | cleanup: |
1322 | netif_napi_del(&net_device->chan_table[0].napi); | ||
1332 | free_netvsc_device(&net_device->rcu); | 1323 | free_netvsc_device(&net_device->rcu); |
1333 | 1324 | ||
1334 | return ERR_PTR(ret); | 1325 | return ERR_PTR(ret); |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c5584c2d440e..f28c85d212ce 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -46,7 +46,10 @@ | |||
46 | 46 | ||
47 | #include "hyperv_net.h" | 47 | #include "hyperv_net.h" |
48 | 48 | ||
49 | #define RING_SIZE_MIN 64 | 49 | #define RING_SIZE_MIN 64 |
50 | #define RETRY_US_LO 5000 | ||
51 | #define RETRY_US_HI 10000 | ||
52 | #define RETRY_MAX 2000 /* >10 sec */ | ||
50 | 53 | ||
51 | #define LINKCHANGE_INT (2 * HZ) | 54 | #define LINKCHANGE_INT (2 * HZ) |
52 | #define VF_TAKEOVER_INT (HZ / 10) | 55 | #define VF_TAKEOVER_INT (HZ / 10) |
@@ -66,12 +69,43 @@ static int debug = -1; | |||
66 | module_param(debug, int, S_IRUGO); | 69 | module_param(debug, int, S_IRUGO); |
67 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | 70 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); |
68 | 71 | ||
69 | static void netvsc_set_multicast_list(struct net_device *net) | 72 | static void netvsc_change_rx_flags(struct net_device *net, int change) |
70 | { | 73 | { |
71 | struct net_device_context *net_device_ctx = netdev_priv(net); | 74 | struct net_device_context *ndev_ctx = netdev_priv(net); |
72 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); | 75 | struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); |
76 | int inc; | ||
77 | |||
78 | if (!vf_netdev) | ||
79 | return; | ||
80 | |||
81 | if (change & IFF_PROMISC) { | ||
82 | inc = (net->flags & IFF_PROMISC) ? 1 : -1; | ||
83 | dev_set_promiscuity(vf_netdev, inc); | ||
84 | } | ||
85 | |||
86 | if (change & IFF_ALLMULTI) { | ||
87 | inc = (net->flags & IFF_ALLMULTI) ? 1 : -1; | ||
88 | dev_set_allmulti(vf_netdev, inc); | ||
89 | } | ||
90 | } | ||
73 | 91 | ||
74 | rndis_filter_update(nvdev); | 92 | static void netvsc_set_rx_mode(struct net_device *net) |
93 | { | ||
94 | struct net_device_context *ndev_ctx = netdev_priv(net); | ||
95 | struct net_device *vf_netdev; | ||
96 | struct netvsc_device *nvdev; | ||
97 | |||
98 | rcu_read_lock(); | ||
99 | vf_netdev = rcu_dereference(ndev_ctx->vf_netdev); | ||
100 | if (vf_netdev) { | ||
101 | dev_uc_sync(vf_netdev, net); | ||
102 | dev_mc_sync(vf_netdev, net); | ||
103 | } | ||
104 | |||
105 | nvdev = rcu_dereference(ndev_ctx->nvdev); | ||
106 | if (nvdev) | ||
107 | rndis_filter_update(nvdev); | ||
108 | rcu_read_unlock(); | ||
75 | } | 109 | } |
76 | 110 | ||
77 | static int netvsc_open(struct net_device *net) | 111 | static int netvsc_open(struct net_device *net) |
@@ -91,10 +125,7 @@ static int netvsc_open(struct net_device *net) | |||
91 | return ret; | 125 | return ret; |
92 | } | 126 | } |
93 | 127 | ||
94 | netif_tx_wake_all_queues(net); | ||
95 | |||
96 | rdev = nvdev->extension; | 128 | rdev = nvdev->extension; |
97 | |||
98 | if (!rdev->link_state) | 129 | if (!rdev->link_state) |
99 | netif_carrier_on(net); | 130 | netif_carrier_on(net); |
100 | 131 | ||
@@ -112,36 +143,25 @@ static int netvsc_open(struct net_device *net) | |||
112 | return 0; | 143 | return 0; |
113 | } | 144 | } |
114 | 145 | ||
115 | static int netvsc_close(struct net_device *net) | 146 | static int netvsc_wait_until_empty(struct netvsc_device *nvdev) |
116 | { | 147 | { |
117 | struct net_device_context *net_device_ctx = netdev_priv(net); | 148 | unsigned int retry = 0; |
118 | struct net_device *vf_netdev | 149 | int i; |
119 | = rtnl_dereference(net_device_ctx->vf_netdev); | ||
120 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); | ||
121 | int ret = 0; | ||
122 | u32 aread, i, msec = 10, retry = 0, retry_max = 20; | ||
123 | struct vmbus_channel *chn; | ||
124 | |||
125 | netif_tx_disable(net); | ||
126 | |||
127 | /* No need to close rndis filter if it is removed already */ | ||
128 | if (!nvdev) | ||
129 | goto out; | ||
130 | |||
131 | ret = rndis_filter_close(nvdev); | ||
132 | if (ret != 0) { | ||
133 | netdev_err(net, "unable to close device (ret %d).\n", ret); | ||
134 | return ret; | ||
135 | } | ||
136 | 150 | ||
137 | /* Ensure pending bytes in ring are read */ | 151 | /* Ensure pending bytes in ring are read */ |
138 | while (true) { | 152 | for (;;) { |
139 | aread = 0; | 153 | u32 aread = 0; |
154 | |||
140 | for (i = 0; i < nvdev->num_chn; i++) { | 155 | for (i = 0; i < nvdev->num_chn; i++) { |
141 | chn = nvdev->chan_table[i].channel; | 156 | struct vmbus_channel *chn |
157 | = nvdev->chan_table[i].channel; | ||
158 | |||
142 | if (!chn) | 159 | if (!chn) |
143 | continue; | 160 | continue; |
144 | 161 | ||
162 | /* make sure receive not running now */ | ||
163 | napi_synchronize(&nvdev->chan_table[i].napi); | ||
164 | |||
145 | aread = hv_get_bytes_to_read(&chn->inbound); | 165 | aread = hv_get_bytes_to_read(&chn->inbound); |
146 | if (aread) | 166 | if (aread) |
147 | break; | 167 | break; |
@@ -151,22 +171,40 @@ static int netvsc_close(struct net_device *net) | |||
151 | break; | 171 | break; |
152 | } | 172 | } |
153 | 173 | ||
154 | retry++; | 174 | if (aread == 0) |
155 | if (retry > retry_max || aread == 0) | 175 | return 0; |
156 | break; | ||
157 | 176 | ||
158 | msleep(msec); | 177 | if (++retry > RETRY_MAX) |
178 | return -ETIMEDOUT; | ||
159 | 179 | ||
160 | if (msec < 1000) | 180 | usleep_range(RETRY_US_LO, RETRY_US_HI); |
161 | msec *= 2; | ||
162 | } | 181 | } |
182 | } | ||
163 | 183 | ||
164 | if (aread) { | 184 | static int netvsc_close(struct net_device *net) |
165 | netdev_err(net, "Ring buffer not empty after closing rndis\n"); | 185 | { |
166 | ret = -ETIMEDOUT; | 186 | struct net_device_context *net_device_ctx = netdev_priv(net); |
187 | struct net_device *vf_netdev | ||
188 | = rtnl_dereference(net_device_ctx->vf_netdev); | ||
189 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); | ||
190 | int ret; | ||
191 | |||
192 | netif_tx_disable(net); | ||
193 | |||
194 | /* No need to close rndis filter if it is removed already */ | ||
195 | if (!nvdev) | ||
196 | return 0; | ||
197 | |||
198 | ret = rndis_filter_close(nvdev); | ||
199 | if (ret != 0) { | ||
200 | netdev_err(net, "unable to close device (ret %d).\n", ret); | ||
201 | return ret; | ||
167 | } | 202 | } |
168 | 203 | ||
169 | out: | 204 | ret = netvsc_wait_until_empty(nvdev); |
205 | if (ret) | ||
206 | netdev_err(net, "Ring buffer not empty after closing rndis\n"); | ||
207 | |||
170 | if (vf_netdev) | 208 | if (vf_netdev) |
171 | dev_close(vf_netdev); | 209 | dev_close(vf_netdev); |
172 | 210 | ||
@@ -299,8 +337,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, | |||
299 | rcu_read_lock(); | 337 | rcu_read_lock(); |
300 | vf_netdev = rcu_dereference(ndc->vf_netdev); | 338 | vf_netdev = rcu_dereference(ndc->vf_netdev); |
301 | if (vf_netdev) { | 339 | if (vf_netdev) { |
302 | txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; | 340 | const struct net_device_ops *vf_ops = vf_netdev->netdev_ops; |
303 | qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; | 341 | |
342 | if (vf_ops->ndo_select_queue) | ||
343 | txq = vf_ops->ndo_select_queue(vf_netdev, skb, | ||
344 | accel_priv, fallback); | ||
345 | else | ||
346 | txq = fallback(vf_netdev, skb); | ||
347 | |||
348 | /* Record the queue selected by VF so that it can be | ||
349 | * used for common case where VF has more queues than | ||
350 | * the synthetic device. | ||
351 | */ | ||
352 | qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq; | ||
304 | } else { | 353 | } else { |
305 | txq = netvsc_pick_tx(ndev, skb); | 354 | txq = netvsc_pick_tx(ndev, skb); |
306 | } | 355 | } |
@@ -804,16 +853,81 @@ static void netvsc_get_channels(struct net_device *net, | |||
804 | } | 853 | } |
805 | } | 854 | } |
806 | 855 | ||
856 | static int netvsc_detach(struct net_device *ndev, | ||
857 | struct netvsc_device *nvdev) | ||
858 | { | ||
859 | struct net_device_context *ndev_ctx = netdev_priv(ndev); | ||
860 | struct hv_device *hdev = ndev_ctx->device_ctx; | ||
861 | int ret; | ||
862 | |||
863 | /* Don't try continuing to try and setup sub channels */ | ||
864 | if (cancel_work_sync(&nvdev->subchan_work)) | ||
865 | nvdev->num_chn = 1; | ||
866 | |||
867 | /* If device was up (receiving) then shutdown */ | ||
868 | if (netif_running(ndev)) { | ||
869 | netif_tx_disable(ndev); | ||
870 | |||
871 | ret = rndis_filter_close(nvdev); | ||
872 | if (ret) { | ||
873 | netdev_err(ndev, | ||
874 | "unable to close device (ret %d).\n", ret); | ||
875 | return ret; | ||
876 | } | ||
877 | |||
878 | ret = netvsc_wait_until_empty(nvdev); | ||
879 | if (ret) { | ||
880 | netdev_err(ndev, | ||
881 | "Ring buffer not empty after closing rndis\n"); | ||
882 | return ret; | ||
883 | } | ||
884 | } | ||
885 | |||
886 | netif_device_detach(ndev); | ||
887 | |||
888 | rndis_filter_device_remove(hdev, nvdev); | ||
889 | |||
890 | return 0; | ||
891 | } | ||
892 | |||
893 | static int netvsc_attach(struct net_device *ndev, | ||
894 | struct netvsc_device_info *dev_info) | ||
895 | { | ||
896 | struct net_device_context *ndev_ctx = netdev_priv(ndev); | ||
897 | struct hv_device *hdev = ndev_ctx->device_ctx; | ||
898 | struct netvsc_device *nvdev; | ||
899 | struct rndis_device *rdev; | ||
900 | int ret; | ||
901 | |||
902 | nvdev = rndis_filter_device_add(hdev, dev_info); | ||
903 | if (IS_ERR(nvdev)) | ||
904 | return PTR_ERR(nvdev); | ||
905 | |||
906 | /* Note: enable and attach happen when sub-channels setup */ | ||
907 | |||
908 | netif_carrier_off(ndev); | ||
909 | |||
910 | if (netif_running(ndev)) { | ||
911 | ret = rndis_filter_open(nvdev); | ||
912 | if (ret) | ||
913 | return ret; | ||
914 | |||
915 | rdev = nvdev->extension; | ||
916 | if (!rdev->link_state) | ||
917 | netif_carrier_on(ndev); | ||
918 | } | ||
919 | |||
920 | return 0; | ||
921 | } | ||
922 | |||
807 | static int netvsc_set_channels(struct net_device *net, | 923 | static int netvsc_set_channels(struct net_device *net, |
808 | struct ethtool_channels *channels) | 924 | struct ethtool_channels *channels) |
809 | { | 925 | { |
810 | struct net_device_context *net_device_ctx = netdev_priv(net); | 926 | struct net_device_context *net_device_ctx = netdev_priv(net); |
811 | struct hv_device *dev = net_device_ctx->device_ctx; | ||
812 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); | 927 | struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); |
813 | unsigned int orig, count = channels->combined_count; | 928 | unsigned int orig, count = channels->combined_count; |
814 | struct netvsc_device_info device_info; | 929 | struct netvsc_device_info device_info; |
815 | bool was_opened; | 930 | int ret; |
816 | int ret = 0; | ||
817 | 931 | ||
818 | /* We do not support separate count for rx, tx, or other */ | 932 | /* We do not support separate count for rx, tx, or other */ |
819 | if (count == 0 || | 933 | if (count == 0 || |
@@ -830,9 +944,6 @@ static int netvsc_set_channels(struct net_device *net, | |||
830 | return -EINVAL; | 944 | return -EINVAL; |
831 | 945 | ||
832 | orig = nvdev->num_chn; | 946 | orig = nvdev->num_chn; |
833 | was_opened = rndis_filter_opened(nvdev); | ||
834 | if (was_opened) | ||
835 | rndis_filter_close(nvdev); | ||
836 | 947 | ||
837 | memset(&device_info, 0, sizeof(device_info)); | 948 | memset(&device_info, 0, sizeof(device_info)); |
838 | device_info.num_chn = count; | 949 | device_info.num_chn = count; |
@@ -841,28 +952,17 @@ static int netvsc_set_channels(struct net_device *net, | |||
841 | device_info.recv_sections = nvdev->recv_section_cnt; | 952 | device_info.recv_sections = nvdev->recv_section_cnt; |
842 | device_info.recv_section_size = nvdev->recv_section_size; | 953 | device_info.recv_section_size = nvdev->recv_section_size; |
843 | 954 | ||
844 | rndis_filter_device_remove(dev, nvdev); | 955 | ret = netvsc_detach(net, nvdev); |
956 | if (ret) | ||
957 | return ret; | ||
845 | 958 | ||
846 | nvdev = rndis_filter_device_add(dev, &device_info); | 959 | ret = netvsc_attach(net, &device_info); |
847 | if (IS_ERR(nvdev)) { | 960 | if (ret) { |
848 | ret = PTR_ERR(nvdev); | ||
849 | device_info.num_chn = orig; | 961 | device_info.num_chn = orig; |
850 | nvdev = rndis_filter_device_add(dev, &device_info); | 962 | if (netvsc_attach(net, &device_info)) |
851 | 963 | netdev_err(net, "restoring channel setting failed\n"); | |
852 | if (IS_ERR(nvdev)) { | ||
853 | netdev_err(net, "restoring channel setting failed: %ld\n", | ||
854 | PTR_ERR(nvdev)); | ||
855 | return ret; | ||
856 | } | ||
857 | } | 964 | } |
858 | 965 | ||
859 | if (was_opened) | ||
860 | rndis_filter_open(nvdev); | ||
861 | |||
862 | /* We may have missed link change notifications */ | ||
863 | net_device_ctx->last_reconfig = 0; | ||
864 | schedule_delayed_work(&net_device_ctx->dwork, 0); | ||
865 | |||
866 | return ret; | 966 | return ret; |
867 | } | 967 | } |
868 | 968 | ||
@@ -928,10 +1028,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) | |||
928 | struct net_device_context *ndevctx = netdev_priv(ndev); | 1028 | struct net_device_context *ndevctx = netdev_priv(ndev); |
929 | struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); | 1029 | struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); |
930 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); | 1030 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
931 | struct hv_device *hdev = ndevctx->device_ctx; | ||
932 | int orig_mtu = ndev->mtu; | 1031 | int orig_mtu = ndev->mtu; |
933 | struct netvsc_device_info device_info; | 1032 | struct netvsc_device_info device_info; |
934 | bool was_opened; | ||
935 | int ret = 0; | 1033 | int ret = 0; |
936 | 1034 | ||
937 | if (!nvdev || nvdev->destroy) | 1035 | if (!nvdev || nvdev->destroy) |
@@ -944,11 +1042,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) | |||
944 | return ret; | 1042 | return ret; |
945 | } | 1043 | } |
946 | 1044 | ||
947 | netif_device_detach(ndev); | ||
948 | was_opened = rndis_filter_opened(nvdev); | ||
949 | if (was_opened) | ||
950 | rndis_filter_close(nvdev); | ||
951 | |||
952 | memset(&device_info, 0, sizeof(device_info)); | 1045 | memset(&device_info, 0, sizeof(device_info)); |
953 | device_info.num_chn = nvdev->num_chn; | 1046 | device_info.num_chn = nvdev->num_chn; |
954 | device_info.send_sections = nvdev->send_section_cnt; | 1047 | device_info.send_sections = nvdev->send_section_cnt; |
@@ -956,35 +1049,27 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) | |||
956 | device_info.recv_sections = nvdev->recv_section_cnt; | 1049 | device_info.recv_sections = nvdev->recv_section_cnt; |
957 | device_info.recv_section_size = nvdev->recv_section_size; | 1050 | device_info.recv_section_size = nvdev->recv_section_size; |
958 | 1051 | ||
959 | rndis_filter_device_remove(hdev, nvdev); | 1052 | ret = netvsc_detach(ndev, nvdev); |
1053 | if (ret) | ||
1054 | goto rollback_vf; | ||
960 | 1055 | ||
961 | ndev->mtu = mtu; | 1056 | ndev->mtu = mtu; |
962 | 1057 | ||
963 | nvdev = rndis_filter_device_add(hdev, &device_info); | 1058 | ret = netvsc_attach(ndev, &device_info); |
964 | if (IS_ERR(nvdev)) { | 1059 | if (ret) |
965 | ret = PTR_ERR(nvdev); | 1060 | goto rollback; |
966 | |||
967 | /* Attempt rollback to original MTU */ | ||
968 | ndev->mtu = orig_mtu; | ||
969 | nvdev = rndis_filter_device_add(hdev, &device_info); | ||
970 | |||
971 | if (vf_netdev) | ||
972 | dev_set_mtu(vf_netdev, orig_mtu); | ||
973 | |||
974 | if (IS_ERR(nvdev)) { | ||
975 | netdev_err(ndev, "restoring mtu failed: %ld\n", | ||
976 | PTR_ERR(nvdev)); | ||
977 | return ret; | ||
978 | } | ||
979 | } | ||
980 | 1061 | ||
981 | if (was_opened) | 1062 | return 0; |
982 | rndis_filter_open(nvdev); | ||
983 | 1063 | ||
984 | netif_device_attach(ndev); | 1064 | rollback: |
1065 | /* Attempt rollback to original MTU */ | ||
1066 | ndev->mtu = orig_mtu; | ||
985 | 1067 | ||
986 | /* We may have missed link change notifications */ | 1068 | if (netvsc_attach(ndev, &device_info)) |
987 | schedule_delayed_work(&ndevctx->dwork, 0); | 1069 | netdev_err(ndev, "restoring mtu failed\n"); |
1070 | rollback_vf: | ||
1071 | if (vf_netdev) | ||
1072 | dev_set_mtu(vf_netdev, orig_mtu); | ||
988 | 1073 | ||
989 | return ret; | 1074 | return ret; |
990 | } | 1075 | } |
@@ -1490,11 +1575,9 @@ static int netvsc_set_ringparam(struct net_device *ndev, | |||
1490 | { | 1575 | { |
1491 | struct net_device_context *ndevctx = netdev_priv(ndev); | 1576 | struct net_device_context *ndevctx = netdev_priv(ndev); |
1492 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); | 1577 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); |
1493 | struct hv_device *hdev = ndevctx->device_ctx; | ||
1494 | struct netvsc_device_info device_info; | 1578 | struct netvsc_device_info device_info; |
1495 | struct ethtool_ringparam orig; | 1579 | struct ethtool_ringparam orig; |
1496 | u32 new_tx, new_rx; | 1580 | u32 new_tx, new_rx; |
1497 | bool was_opened; | ||
1498 | int ret = 0; | 1581 | int ret = 0; |
1499 | 1582 | ||
1500 | if (!nvdev || nvdev->destroy) | 1583 | if (!nvdev || nvdev->destroy) |
@@ -1519,34 +1602,18 @@ static int netvsc_set_ringparam(struct net_device *ndev, | |||
1519 | device_info.recv_sections = new_rx; | 1602 | device_info.recv_sections = new_rx; |
1520 | device_info.recv_section_size = nvdev->recv_section_size; | 1603 | device_info.recv_section_size = nvdev->recv_section_size; |
1521 | 1604 | ||
1522 | netif_device_detach(ndev); | 1605 | ret = netvsc_detach(ndev, nvdev); |
1523 | was_opened = rndis_filter_opened(nvdev); | 1606 | if (ret) |
1524 | if (was_opened) | 1607 | return ret; |
1525 | rndis_filter_close(nvdev); | ||
1526 | |||
1527 | rndis_filter_device_remove(hdev, nvdev); | ||
1528 | |||
1529 | nvdev = rndis_filter_device_add(hdev, &device_info); | ||
1530 | if (IS_ERR(nvdev)) { | ||
1531 | ret = PTR_ERR(nvdev); | ||
1532 | 1608 | ||
1609 | ret = netvsc_attach(ndev, &device_info); | ||
1610 | if (ret) { | ||
1533 | device_info.send_sections = orig.tx_pending; | 1611 | device_info.send_sections = orig.tx_pending; |
1534 | device_info.recv_sections = orig.rx_pending; | 1612 | device_info.recv_sections = orig.rx_pending; |
1535 | nvdev = rndis_filter_device_add(hdev, &device_info); | ||
1536 | if (IS_ERR(nvdev)) { | ||
1537 | netdev_err(ndev, "restoring ringparam failed: %ld\n", | ||
1538 | PTR_ERR(nvdev)); | ||
1539 | return ret; | ||
1540 | } | ||
1541 | } | ||
1542 | 1613 | ||
1543 | if (was_opened) | 1614 | if (netvsc_attach(ndev, &device_info)) |
1544 | rndis_filter_open(nvdev); | 1615 | netdev_err(ndev, "restoring ringparam failed"); |
1545 | netif_device_attach(ndev); | 1616 | } |
1546 | |||
1547 | /* We may have missed link change notifications */ | ||
1548 | ndevctx->last_reconfig = 0; | ||
1549 | schedule_delayed_work(&ndevctx->dwork, 0); | ||
1550 | 1617 | ||
1551 | return ret; | 1618 | return ret; |
1552 | } | 1619 | } |
@@ -1576,7 +1643,8 @@ static const struct net_device_ops device_ops = { | |||
1576 | .ndo_open = netvsc_open, | 1643 | .ndo_open = netvsc_open, |
1577 | .ndo_stop = netvsc_close, | 1644 | .ndo_stop = netvsc_close, |
1578 | .ndo_start_xmit = netvsc_start_xmit, | 1645 | .ndo_start_xmit = netvsc_start_xmit, |
1579 | .ndo_set_rx_mode = netvsc_set_multicast_list, | 1646 | .ndo_change_rx_flags = netvsc_change_rx_flags, |
1647 | .ndo_set_rx_mode = netvsc_set_rx_mode, | ||
1580 | .ndo_change_mtu = netvsc_change_mtu, | 1648 | .ndo_change_mtu = netvsc_change_mtu, |
1581 | .ndo_validate_addr = eth_validate_addr, | 1649 | .ndo_validate_addr = eth_validate_addr, |
1582 | .ndo_set_mac_address = netvsc_set_mac_addr, | 1650 | .ndo_set_mac_address = netvsc_set_mac_addr, |
@@ -1807,6 +1875,15 @@ static void __netvsc_vf_setup(struct net_device *ndev, | |||
1807 | netdev_warn(vf_netdev, | 1875 | netdev_warn(vf_netdev, |
1808 | "unable to change mtu to %u\n", ndev->mtu); | 1876 | "unable to change mtu to %u\n", ndev->mtu); |
1809 | 1877 | ||
1878 | /* set multicast etc flags on VF */ | ||
1879 | dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); | ||
1880 | |||
1881 | /* sync address list from ndev to VF */ | ||
1882 | netif_addr_lock_bh(ndev); | ||
1883 | dev_uc_sync(vf_netdev, ndev); | ||
1884 | dev_mc_sync(vf_netdev, ndev); | ||
1885 | netif_addr_unlock_bh(ndev); | ||
1886 | |||
1810 | if (netif_running(ndev)) { | 1887 | if (netif_running(ndev)) { |
1811 | ret = dev_open(vf_netdev); | 1888 | ret = dev_open(vf_netdev); |
1812 | if (ret) | 1889 | if (ret) |
@@ -2021,8 +2098,8 @@ no_net: | |||
2021 | static int netvsc_remove(struct hv_device *dev) | 2098 | static int netvsc_remove(struct hv_device *dev) |
2022 | { | 2099 | { |
2023 | struct net_device_context *ndev_ctx; | 2100 | struct net_device_context *ndev_ctx; |
2024 | struct net_device *vf_netdev; | 2101 | struct net_device *vf_netdev, *net; |
2025 | struct net_device *net; | 2102 | struct netvsc_device *nvdev; |
2026 | 2103 | ||
2027 | net = hv_get_drvdata(dev); | 2104 | net = hv_get_drvdata(dev); |
2028 | if (net == NULL) { | 2105 | if (net == NULL) { |
@@ -2032,10 +2109,14 @@ static int netvsc_remove(struct hv_device *dev) | |||
2032 | 2109 | ||
2033 | ndev_ctx = netdev_priv(net); | 2110 | ndev_ctx = netdev_priv(net); |
2034 | 2111 | ||
2035 | netif_device_detach(net); | ||
2036 | |||
2037 | cancel_delayed_work_sync(&ndev_ctx->dwork); | 2112 | cancel_delayed_work_sync(&ndev_ctx->dwork); |
2038 | 2113 | ||
2114 | rcu_read_lock(); | ||
2115 | nvdev = rcu_dereference(ndev_ctx->nvdev); | ||
2116 | |||
2117 | if (nvdev) | ||
2118 | cancel_work_sync(&nvdev->subchan_work); | ||
2119 | |||
2039 | /* | 2120 | /* |
2040 | * Call to the vsc driver to let it know that the device is being | 2121 | * Call to the vsc driver to let it know that the device is being |
2041 | * removed. Also blocks mtu and channel changes. | 2122 | * removed. Also blocks mtu and channel changes. |
@@ -2045,11 +2126,13 @@ static int netvsc_remove(struct hv_device *dev) | |||
2045 | if (vf_netdev) | 2126 | if (vf_netdev) |
2046 | netvsc_unregister_vf(vf_netdev); | 2127 | netvsc_unregister_vf(vf_netdev); |
2047 | 2128 | ||
2129 | if (nvdev) | ||
2130 | rndis_filter_device_remove(dev, nvdev); | ||
2131 | |||
2048 | unregister_netdevice(net); | 2132 | unregister_netdevice(net); |
2049 | 2133 | ||
2050 | rndis_filter_device_remove(dev, | ||
2051 | rtnl_dereference(ndev_ctx->nvdev)); | ||
2052 | rtnl_unlock(); | 2134 | rtnl_unlock(); |
2135 | rcu_read_unlock(); | ||
2053 | 2136 | ||
2054 | hv_set_drvdata(dev, NULL); | 2137 | hv_set_drvdata(dev, NULL); |
2055 | 2138 | ||
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index c3ca191fea7f..a6ec41c399d6 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -264,13 +264,23 @@ static void rndis_set_link_state(struct rndis_device *rdev, | |||
264 | } | 264 | } |
265 | } | 265 | } |
266 | 266 | ||
267 | static void rndis_filter_receive_response(struct rndis_device *dev, | 267 | static void rndis_filter_receive_response(struct net_device *ndev, |
268 | struct rndis_message *resp) | 268 | struct netvsc_device *nvdev, |
269 | const struct rndis_message *resp) | ||
269 | { | 270 | { |
271 | struct rndis_device *dev = nvdev->extension; | ||
270 | struct rndis_request *request = NULL; | 272 | struct rndis_request *request = NULL; |
271 | bool found = false; | 273 | bool found = false; |
272 | unsigned long flags; | 274 | unsigned long flags; |
273 | struct net_device *ndev = dev->ndev; | 275 | |
276 | /* This should never happen, it means control message | ||
277 | * response received after device removed. | ||
278 | */ | ||
279 | if (dev->state == RNDIS_DEV_UNINITIALIZED) { | ||
280 | netdev_err(ndev, | ||
281 | "got rndis message uninitialized\n"); | ||
282 | return; | ||
283 | } | ||
274 | 284 | ||
275 | spin_lock_irqsave(&dev->request_lock, flags); | 285 | spin_lock_irqsave(&dev->request_lock, flags); |
276 | list_for_each_entry(request, &dev->req_list, list_ent) { | 286 | list_for_each_entry(request, &dev->req_list, list_ent) { |
@@ -352,7 +362,6 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type) | |||
352 | 362 | ||
353 | static int rndis_filter_receive_data(struct net_device *ndev, | 363 | static int rndis_filter_receive_data(struct net_device *ndev, |
354 | struct netvsc_device *nvdev, | 364 | struct netvsc_device *nvdev, |
355 | struct rndis_device *dev, | ||
356 | struct rndis_message *msg, | 365 | struct rndis_message *msg, |
357 | struct vmbus_channel *channel, | 366 | struct vmbus_channel *channel, |
358 | void *data, u32 data_buflen) | 367 | void *data, u32 data_buflen) |
@@ -372,7 +381,7 @@ static int rndis_filter_receive_data(struct net_device *ndev, | |||
372 | * should be the data packet size plus the trailer padding size | 381 | * should be the data packet size plus the trailer padding size |
373 | */ | 382 | */ |
374 | if (unlikely(data_buflen < rndis_pkt->data_len)) { | 383 | if (unlikely(data_buflen < rndis_pkt->data_len)) { |
375 | netdev_err(dev->ndev, "rndis message buffer " | 384 | netdev_err(ndev, "rndis message buffer " |
376 | "overflow detected (got %u, min %u)" | 385 | "overflow detected (got %u, min %u)" |
377 | "...dropping this message!\n", | 386 | "...dropping this message!\n", |
378 | data_buflen, rndis_pkt->data_len); | 387 | data_buflen, rndis_pkt->data_len); |
@@ -400,35 +409,20 @@ int rndis_filter_receive(struct net_device *ndev, | |||
400 | void *data, u32 buflen) | 409 | void *data, u32 buflen) |
401 | { | 410 | { |
402 | struct net_device_context *net_device_ctx = netdev_priv(ndev); | 411 | struct net_device_context *net_device_ctx = netdev_priv(ndev); |
403 | struct rndis_device *rndis_dev = net_dev->extension; | ||
404 | struct rndis_message *rndis_msg = data; | 412 | struct rndis_message *rndis_msg = data; |
405 | 413 | ||
406 | /* Make sure the rndis device state is initialized */ | ||
407 | if (unlikely(!rndis_dev)) { | ||
408 | netif_dbg(net_device_ctx, rx_err, ndev, | ||
409 | "got rndis message but no rndis device!\n"); | ||
410 | return NVSP_STAT_FAIL; | ||
411 | } | ||
412 | |||
413 | if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) { | ||
414 | netif_dbg(net_device_ctx, rx_err, ndev, | ||
415 | "got rndis message uninitialized\n"); | ||
416 | return NVSP_STAT_FAIL; | ||
417 | } | ||
418 | |||
419 | if (netif_msg_rx_status(net_device_ctx)) | 414 | if (netif_msg_rx_status(net_device_ctx)) |
420 | dump_rndis_message(ndev, rndis_msg); | 415 | dump_rndis_message(ndev, rndis_msg); |
421 | 416 | ||
422 | switch (rndis_msg->ndis_msg_type) { | 417 | switch (rndis_msg->ndis_msg_type) { |
423 | case RNDIS_MSG_PACKET: | 418 | case RNDIS_MSG_PACKET: |
424 | return rndis_filter_receive_data(ndev, net_dev, | 419 | return rndis_filter_receive_data(ndev, net_dev, rndis_msg, |
425 | rndis_dev, rndis_msg, | ||
426 | channel, data, buflen); | 420 | channel, data, buflen); |
427 | case RNDIS_MSG_INIT_C: | 421 | case RNDIS_MSG_INIT_C: |
428 | case RNDIS_MSG_QUERY_C: | 422 | case RNDIS_MSG_QUERY_C: |
429 | case RNDIS_MSG_SET_C: | 423 | case RNDIS_MSG_SET_C: |
430 | /* completion msgs */ | 424 | /* completion msgs */ |
431 | rndis_filter_receive_response(rndis_dev, rndis_msg); | 425 | rndis_filter_receive_response(ndev, net_dev, rndis_msg); |
432 | break; | 426 | break; |
433 | 427 | ||
434 | case RNDIS_MSG_INDICATE: | 428 | case RNDIS_MSG_INDICATE: |
@@ -825,13 +819,15 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev, | |||
825 | struct rndis_set_request *set; | 819 | struct rndis_set_request *set; |
826 | int ret; | 820 | int ret; |
827 | 821 | ||
822 | if (dev->filter == new_filter) | ||
823 | return 0; | ||
824 | |||
828 | request = get_rndis_request(dev, RNDIS_MSG_SET, | 825 | request = get_rndis_request(dev, RNDIS_MSG_SET, |
829 | RNDIS_MESSAGE_SIZE(struct rndis_set_request) + | 826 | RNDIS_MESSAGE_SIZE(struct rndis_set_request) + |
830 | sizeof(u32)); | 827 | sizeof(u32)); |
831 | if (!request) | 828 | if (!request) |
832 | return -ENOMEM; | 829 | return -ENOMEM; |
833 | 830 | ||
834 | |||
835 | /* Setup the rndis set */ | 831 | /* Setup the rndis set */ |
836 | set = &request->request_msg.msg.set_req; | 832 | set = &request->request_msg.msg.set_req; |
837 | set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER; | 833 | set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER; |
@@ -842,8 +838,10 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev, | |||
842 | &new_filter, sizeof(u32)); | 838 | &new_filter, sizeof(u32)); |
843 | 839 | ||
844 | ret = rndis_filter_send_request(dev, request); | 840 | ret = rndis_filter_send_request(dev, request); |
845 | if (ret == 0) | 841 | if (ret == 0) { |
846 | wait_for_completion(&request->wait_event); | 842 | wait_for_completion(&request->wait_event); |
843 | dev->filter = new_filter; | ||
844 | } | ||
847 | 845 | ||
848 | put_rndis_request(dev, request); | 846 | put_rndis_request(dev, request); |
849 | 847 | ||
@@ -854,15 +852,19 @@ static void rndis_set_multicast(struct work_struct *w) | |||
854 | { | 852 | { |
855 | struct rndis_device *rdev | 853 | struct rndis_device *rdev |
856 | = container_of(w, struct rndis_device, mcast_work); | 854 | = container_of(w, struct rndis_device, mcast_work); |
855 | u32 filter = NDIS_PACKET_TYPE_DIRECTED; | ||
856 | unsigned int flags = rdev->ndev->flags; | ||
857 | 857 | ||
858 | if (rdev->ndev->flags & IFF_PROMISC) | 858 | if (flags & IFF_PROMISC) { |
859 | rndis_filter_set_packet_filter(rdev, | 859 | filter = NDIS_PACKET_TYPE_PROMISCUOUS; |
860 | NDIS_PACKET_TYPE_PROMISCUOUS); | 860 | } else { |
861 | else | 861 | if (flags & IFF_ALLMULTI) |
862 | rndis_filter_set_packet_filter(rdev, | 862 | filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; |
863 | NDIS_PACKET_TYPE_BROADCAST | | 863 | if (flags & IFF_BROADCAST) |
864 | NDIS_PACKET_TYPE_ALL_MULTICAST | | 864 | filter |= NDIS_PACKET_TYPE_BROADCAST; |
865 | NDIS_PACKET_TYPE_DIRECTED); | 865 | } |
866 | |||
867 | rndis_filter_set_packet_filter(rdev, filter); | ||
866 | } | 868 | } |
867 | 869 | ||
868 | void rndis_filter_update(struct netvsc_device *nvdev) | 870 | void rndis_filter_update(struct netvsc_device *nvdev) |
@@ -1116,6 +1118,7 @@ void rndis_set_subchannel(struct work_struct *w) | |||
1116 | for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) | 1118 | for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) |
1117 | ndev_ctx->tx_table[i] = i % nvdev->num_chn; | 1119 | ndev_ctx->tx_table[i] = i % nvdev->num_chn; |
1118 | 1120 | ||
1121 | netif_device_attach(ndev); | ||
1119 | rtnl_unlock(); | 1122 | rtnl_unlock(); |
1120 | return; | 1123 | return; |
1121 | 1124 | ||
@@ -1126,6 +1129,8 @@ failed: | |||
1126 | 1129 | ||
1127 | nvdev->max_chn = 1; | 1130 | nvdev->max_chn = 1; |
1128 | nvdev->num_chn = 1; | 1131 | nvdev->num_chn = 1; |
1132 | |||
1133 | netif_device_attach(ndev); | ||
1129 | unlock: | 1134 | unlock: |
1130 | rtnl_unlock(); | 1135 | rtnl_unlock(); |
1131 | } | 1136 | } |
@@ -1328,6 +1333,10 @@ out: | |||
1328 | net_device->num_chn = 1; | 1333 | net_device->num_chn = 1; |
1329 | } | 1334 | } |
1330 | 1335 | ||
1336 | /* No sub channels, device is ready */ | ||
1337 | if (net_device->num_chn == 1) | ||
1338 | netif_device_attach(net); | ||
1339 | |||
1331 | return net_device; | 1340 | return net_device; |
1332 | 1341 | ||
1333 | err_dev_remv: | 1342 | err_dev_remv: |
@@ -1346,7 +1355,6 @@ void rndis_filter_device_remove(struct hv_device *dev, | |||
1346 | net_dev->extension = NULL; | 1355 | net_dev->extension = NULL; |
1347 | 1356 | ||
1348 | netvsc_device_remove(dev); | 1357 | netvsc_device_remove(dev); |
1349 | kfree(rndis_dev); | ||
1350 | } | 1358 | } |
1351 | 1359 | ||
1352 | int rndis_filter_open(struct netvsc_device *nvdev) | 1360 | int rndis_filter_open(struct netvsc_device *nvdev) |
@@ -1364,10 +1372,3 @@ int rndis_filter_close(struct netvsc_device *nvdev) | |||
1364 | 1372 | ||
1365 | return rndis_filter_close_device(nvdev->extension); | 1373 | return rndis_filter_close_device(nvdev->extension); |
1366 | } | 1374 | } |
1367 | |||
1368 | bool rndis_filter_opened(const struct netvsc_device *nvdev) | ||
1369 | { | ||
1370 | const struct rndis_device *dev = nvdev->extension; | ||
1371 | |||
1372 | return dev->state == RNDIS_DEV_DATAINITIALIZED; | ||
1373 | } | ||
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 7de88b33d5b9..9cbb0c8a896a 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
3277 | 3277 | ||
3278 | err = netdev_upper_dev_link(real_dev, dev, extack); | 3278 | err = netdev_upper_dev_link(real_dev, dev, extack); |
3279 | if (err < 0) | 3279 | if (err < 0) |
3280 | goto unregister; | 3280 | goto put_dev; |
3281 | 3281 | ||
3282 | /* need to be already registered so that ->init has run and | 3282 | /* need to be already registered so that ->init has run and |
3283 | * the MAC addr is set | 3283 | * the MAC addr is set |
@@ -3316,7 +3316,8 @@ del_dev: | |||
3316 | macsec_del_dev(macsec); | 3316 | macsec_del_dev(macsec); |
3317 | unlink: | 3317 | unlink: |
3318 | netdev_upper_dev_unlink(real_dev, dev); | 3318 | netdev_upper_dev_unlink(real_dev, dev); |
3319 | unregister: | 3319 | put_dev: |
3320 | dev_put(real_dev); | ||
3320 | unregister_netdevice(dev); | 3321 | unregister_netdevice(dev); |
3321 | return err; | 3322 | return err; |
3322 | } | 3323 | } |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a0f2be81d52e..725f4b4afc6d 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -1036,7 +1036,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, | |||
1036 | lowerdev_features &= (features | ~NETIF_F_LRO); | 1036 | lowerdev_features &= (features | ~NETIF_F_LRO); |
1037 | features = netdev_increment_features(lowerdev_features, features, mask); | 1037 | features = netdev_increment_features(lowerdev_features, features, mask); |
1038 | features |= ALWAYS_ON_FEATURES; | 1038 | features |= ALWAYS_ON_FEATURES; |
1039 | features &= ~NETIF_F_NETNS_LOCAL; | 1039 | features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES); |
1040 | 1040 | ||
1041 | return features; | 1041 | return features; |
1042 | } | 1042 | } |
@@ -1451,7 +1451,7 @@ destroy_macvlan_port: | |||
1451 | /* the macvlan port may be freed by macvlan_uninit when fail to register. | 1451 | /* the macvlan port may be freed by macvlan_uninit when fail to register. |
1452 | * so we destroy the macvlan port only when it's valid. | 1452 | * so we destroy the macvlan port only when it's valid. |
1453 | */ | 1453 | */ |
1454 | if (create && macvlan_port_get_rtnl(dev)) | 1454 | if (create && macvlan_port_get_rtnl(lowerdev)) |
1455 | macvlan_port_destroy(port->dev); | 1455 | macvlan_port_destroy(port->dev); |
1456 | return err; | 1456 | return err; |
1457 | } | 1457 | } |
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index 171010eb4d9c..5ad130c3da43 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c | |||
@@ -341,8 +341,8 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data) | |||
341 | unsigned int i; | 341 | unsigned int i; |
342 | 342 | ||
343 | for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++) | 343 | for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++) |
344 | memcpy(data + i * ETH_GSTRING_LEN, | 344 | strlcpy(data + i * ETH_GSTRING_LEN, |
345 | bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN); | 345 | bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN); |
346 | } | 346 | } |
347 | EXPORT_SYMBOL_GPL(bcm_phy_get_strings); | 347 | EXPORT_SYMBOL_GPL(bcm_phy_get_strings); |
348 | 348 | ||
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 22d9bc9c33a4..0e0978d8a0eb 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -1452,8 +1452,8 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data) | |||
1452 | int i; | 1452 | int i; |
1453 | 1453 | ||
1454 | for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) { | 1454 | for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) { |
1455 | memcpy(data + i * ETH_GSTRING_LEN, | 1455 | strlcpy(data + i * ETH_GSTRING_LEN, |
1456 | marvell_hw_stats[i].string, ETH_GSTRING_LEN); | 1456 | marvell_hw_stats[i].string, ETH_GSTRING_LEN); |
1457 | } | 1457 | } |
1458 | } | 1458 | } |
1459 | 1459 | ||
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 0f45310300f6..f41b224a9cdb 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -635,25 +635,6 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev) | |||
635 | return 0; | 635 | return 0; |
636 | } | 636 | } |
637 | 637 | ||
638 | /* This routine returns -1 as an indication to the caller that the | ||
639 | * Micrel ksz9021 10/100/1000 PHY does not support standard IEEE | ||
640 | * MMD extended PHY registers. | ||
641 | */ | ||
642 | static int | ||
643 | ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum) | ||
644 | { | ||
645 | return -1; | ||
646 | } | ||
647 | |||
648 | /* This routine does nothing since the Micrel ksz9021 does not support | ||
649 | * standard IEEE MMD extended PHY registers. | ||
650 | */ | ||
651 | static int | ||
652 | ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum, u16 val) | ||
653 | { | ||
654 | return -1; | ||
655 | } | ||
656 | |||
657 | static int kszphy_get_sset_count(struct phy_device *phydev) | 638 | static int kszphy_get_sset_count(struct phy_device *phydev) |
658 | { | 639 | { |
659 | return ARRAY_SIZE(kszphy_hw_stats); | 640 | return ARRAY_SIZE(kszphy_hw_stats); |
@@ -664,8 +645,8 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data) | |||
664 | int i; | 645 | int i; |
665 | 646 | ||
666 | for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) { | 647 | for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) { |
667 | memcpy(data + i * ETH_GSTRING_LEN, | 648 | strlcpy(data + i * ETH_GSTRING_LEN, |
668 | kszphy_hw_stats[i].string, ETH_GSTRING_LEN); | 649 | kszphy_hw_stats[i].string, ETH_GSTRING_LEN); |
669 | } | 650 | } |
670 | } | 651 | } |
671 | 652 | ||
@@ -946,8 +927,8 @@ static struct phy_driver ksphy_driver[] = { | |||
946 | .get_stats = kszphy_get_stats, | 927 | .get_stats = kszphy_get_stats, |
947 | .suspend = genphy_suspend, | 928 | .suspend = genphy_suspend, |
948 | .resume = genphy_resume, | 929 | .resume = genphy_resume, |
949 | .read_mmd = ksz9021_rd_mmd_phyreg, | 930 | .read_mmd = genphy_read_mmd_unsupported, |
950 | .write_mmd = ksz9021_wr_mmd_phyreg, | 931 | .write_mmd = genphy_write_mmd_unsupported, |
951 | }, { | 932 | }, { |
952 | .phy_id = PHY_ID_KSZ9031, | 933 | .phy_id = PHY_ID_KSZ9031, |
953 | .phy_id_mask = MICREL_PHY_ID_MASK, | 934 | .phy_id_mask = MICREL_PHY_ID_MASK, |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index e3e29c2b028b..9aabfa1a455a 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -618,6 +618,77 @@ static void phy_error(struct phy_device *phydev) | |||
618 | } | 618 | } |
619 | 619 | ||
620 | /** | 620 | /** |
621 | * phy_disable_interrupts - Disable the PHY interrupts from the PHY side | ||
622 | * @phydev: target phy_device struct | ||
623 | */ | ||
624 | static int phy_disable_interrupts(struct phy_device *phydev) | ||
625 | { | ||
626 | int err; | ||
627 | |||
628 | /* Disable PHY interrupts */ | ||
629 | err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); | ||
630 | if (err) | ||
631 | goto phy_err; | ||
632 | |||
633 | /* Clear the interrupt */ | ||
634 | err = phy_clear_interrupt(phydev); | ||
635 | if (err) | ||
636 | goto phy_err; | ||
637 | |||
638 | return 0; | ||
639 | |||
640 | phy_err: | ||
641 | phy_error(phydev); | ||
642 | |||
643 | return err; | ||
644 | } | ||
645 | |||
646 | /** | ||
647 | * phy_change - Called by the phy_interrupt to handle PHY changes | ||
648 | * @phydev: phy_device struct that interrupted | ||
649 | */ | ||
650 | static irqreturn_t phy_change(struct phy_device *phydev) | ||
651 | { | ||
652 | if (phy_interrupt_is_valid(phydev)) { | ||
653 | if (phydev->drv->did_interrupt && | ||
654 | !phydev->drv->did_interrupt(phydev)) | ||
655 | return IRQ_NONE; | ||
656 | |||
657 | if (phydev->state == PHY_HALTED) | ||
658 | if (phy_disable_interrupts(phydev)) | ||
659 | goto phy_err; | ||
660 | } | ||
661 | |||
662 | mutex_lock(&phydev->lock); | ||
663 | if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) | ||
664 | phydev->state = PHY_CHANGELINK; | ||
665 | mutex_unlock(&phydev->lock); | ||
666 | |||
667 | /* reschedule state queue work to run as soon as possible */ | ||
668 | phy_trigger_machine(phydev, true); | ||
669 | |||
670 | if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev)) | ||
671 | goto phy_err; | ||
672 | return IRQ_HANDLED; | ||
673 | |||
674 | phy_err: | ||
675 | phy_error(phydev); | ||
676 | return IRQ_NONE; | ||
677 | } | ||
678 | |||
679 | /** | ||
680 | * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes | ||
681 | * @work: work_struct that describes the work to be done | ||
682 | */ | ||
683 | void phy_change_work(struct work_struct *work) | ||
684 | { | ||
685 | struct phy_device *phydev = | ||
686 | container_of(work, struct phy_device, phy_queue); | ||
687 | |||
688 | phy_change(phydev); | ||
689 | } | ||
690 | |||
691 | /** | ||
621 | * phy_interrupt - PHY interrupt handler | 692 | * phy_interrupt - PHY interrupt handler |
622 | * @irq: interrupt line | 693 | * @irq: interrupt line |
623 | * @phy_dat: phy_device pointer | 694 | * @phy_dat: phy_device pointer |
@@ -632,9 +703,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) | |||
632 | if (PHY_HALTED == phydev->state) | 703 | if (PHY_HALTED == phydev->state) |
633 | return IRQ_NONE; /* It can't be ours. */ | 704 | return IRQ_NONE; /* It can't be ours. */ |
634 | 705 | ||
635 | phy_change(phydev); | 706 | return phy_change(phydev); |
636 | |||
637 | return IRQ_HANDLED; | ||
638 | } | 707 | } |
639 | 708 | ||
640 | /** | 709 | /** |
@@ -652,32 +721,6 @@ static int phy_enable_interrupts(struct phy_device *phydev) | |||
652 | } | 721 | } |
653 | 722 | ||
654 | /** | 723 | /** |
655 | * phy_disable_interrupts - Disable the PHY interrupts from the PHY side | ||
656 | * @phydev: target phy_device struct | ||
657 | */ | ||
658 | static int phy_disable_interrupts(struct phy_device *phydev) | ||
659 | { | ||
660 | int err; | ||
661 | |||
662 | /* Disable PHY interrupts */ | ||
663 | err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); | ||
664 | if (err) | ||
665 | goto phy_err; | ||
666 | |||
667 | /* Clear the interrupt */ | ||
668 | err = phy_clear_interrupt(phydev); | ||
669 | if (err) | ||
670 | goto phy_err; | ||
671 | |||
672 | return 0; | ||
673 | |||
674 | phy_err: | ||
675 | phy_error(phydev); | ||
676 | |||
677 | return err; | ||
678 | } | ||
679 | |||
680 | /** | ||
681 | * phy_start_interrupts - request and enable interrupts for a PHY device | 724 | * phy_start_interrupts - request and enable interrupts for a PHY device |
682 | * @phydev: target phy_device struct | 725 | * @phydev: target phy_device struct |
683 | * | 726 | * |
@@ -720,50 +763,6 @@ int phy_stop_interrupts(struct phy_device *phydev) | |||
720 | EXPORT_SYMBOL(phy_stop_interrupts); | 763 | EXPORT_SYMBOL(phy_stop_interrupts); |
721 | 764 | ||
722 | /** | 765 | /** |
723 | * phy_change - Called by the phy_interrupt to handle PHY changes | ||
724 | * @phydev: phy_device struct that interrupted | ||
725 | */ | ||
726 | void phy_change(struct phy_device *phydev) | ||
727 | { | ||
728 | if (phy_interrupt_is_valid(phydev)) { | ||
729 | if (phydev->drv->did_interrupt && | ||
730 | !phydev->drv->did_interrupt(phydev)) | ||
731 | return; | ||
732 | |||
733 | if (phydev->state == PHY_HALTED) | ||
734 | if (phy_disable_interrupts(phydev)) | ||
735 | goto phy_err; | ||
736 | } | ||
737 | |||
738 | mutex_lock(&phydev->lock); | ||
739 | if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) | ||
740 | phydev->state = PHY_CHANGELINK; | ||
741 | mutex_unlock(&phydev->lock); | ||
742 | |||
743 | /* reschedule state queue work to run as soon as possible */ | ||
744 | phy_trigger_machine(phydev, true); | ||
745 | |||
746 | if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev)) | ||
747 | goto phy_err; | ||
748 | return; | ||
749 | |||
750 | phy_err: | ||
751 | phy_error(phydev); | ||
752 | } | ||
753 | |||
754 | /** | ||
755 | * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes | ||
756 | * @work: work_struct that describes the work to be done | ||
757 | */ | ||
758 | void phy_change_work(struct work_struct *work) | ||
759 | { | ||
760 | struct phy_device *phydev = | ||
761 | container_of(work, struct phy_device, phy_queue); | ||
762 | |||
763 | phy_change(phydev); | ||
764 | } | ||
765 | |||
766 | /** | ||
767 | * phy_stop - Bring down the PHY link, and stop checking the status | 766 | * phy_stop - Bring down the PHY link, and stop checking the status |
768 | * @phydev: target phy_device struct | 767 | * @phydev: target phy_device struct |
769 | */ | 768 | */ |
@@ -819,7 +818,7 @@ void phy_start(struct phy_device *phydev) | |||
819 | break; | 818 | break; |
820 | case PHY_HALTED: | 819 | case PHY_HALTED: |
821 | /* if phy was suspended, bring the physical link up again */ | 820 | /* if phy was suspended, bring the physical link up again */ |
822 | phy_resume(phydev); | 821 | __phy_resume(phydev); |
823 | 822 | ||
824 | /* make sure interrupts are re-enabled for the PHY */ | 823 | /* make sure interrupts are re-enabled for the PHY */ |
825 | if (phy_interrupt_is_valid(phydev)) { | 824 | if (phy_interrupt_is_valid(phydev)) { |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b13eed21c87d..74664a6c0cdc 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -135,9 +135,7 @@ static int mdio_bus_phy_resume(struct device *dev) | |||
135 | if (!mdio_bus_phy_may_suspend(phydev)) | 135 | if (!mdio_bus_phy_may_suspend(phydev)) |
136 | goto no_resume; | 136 | goto no_resume; |
137 | 137 | ||
138 | mutex_lock(&phydev->lock); | ||
139 | ret = phy_resume(phydev); | 138 | ret = phy_resume(phydev); |
140 | mutex_unlock(&phydev->lock); | ||
141 | if (ret < 0) | 139 | if (ret < 0) |
142 | return ret; | 140 | return ret; |
143 | 141 | ||
@@ -1014,10 +1012,17 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | |||
1014 | err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj, | 1012 | err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj, |
1015 | "attached_dev"); | 1013 | "attached_dev"); |
1016 | if (!err) { | 1014 | if (!err) { |
1017 | err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj, | 1015 | err = sysfs_create_link_nowarn(&dev->dev.kobj, |
1018 | "phydev"); | 1016 | &phydev->mdio.dev.kobj, |
1019 | if (err) | 1017 | "phydev"); |
1020 | goto error; | 1018 | if (err) { |
1019 | dev_err(&dev->dev, "could not add device link to %s err %d\n", | ||
1020 | kobject_name(&phydev->mdio.dev.kobj), | ||
1021 | err); | ||
1022 | /* non-fatal - some net drivers can use one netdevice | ||
1023 | * with more then one phy | ||
1024 | */ | ||
1025 | } | ||
1021 | 1026 | ||
1022 | phydev->sysfs_links = true; | 1027 | phydev->sysfs_links = true; |
1023 | } | 1028 | } |
@@ -1041,9 +1046,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | |||
1041 | if (err) | 1046 | if (err) |
1042 | goto error; | 1047 | goto error; |
1043 | 1048 | ||
1044 | mutex_lock(&phydev->lock); | ||
1045 | phy_resume(phydev); | 1049 | phy_resume(phydev); |
1046 | mutex_unlock(&phydev->lock); | ||
1047 | phy_led_triggers_register(phydev); | 1050 | phy_led_triggers_register(phydev); |
1048 | 1051 | ||
1049 | return err; | 1052 | return err; |
@@ -1172,7 +1175,7 @@ int phy_suspend(struct phy_device *phydev) | |||
1172 | } | 1175 | } |
1173 | EXPORT_SYMBOL(phy_suspend); | 1176 | EXPORT_SYMBOL(phy_suspend); |
1174 | 1177 | ||
1175 | int phy_resume(struct phy_device *phydev) | 1178 | int __phy_resume(struct phy_device *phydev) |
1176 | { | 1179 | { |
1177 | struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); | 1180 | struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); |
1178 | int ret = 0; | 1181 | int ret = 0; |
@@ -1189,6 +1192,18 @@ int phy_resume(struct phy_device *phydev) | |||
1189 | 1192 | ||
1190 | return ret; | 1193 | return ret; |
1191 | } | 1194 | } |
1195 | EXPORT_SYMBOL(__phy_resume); | ||
1196 | |||
1197 | int phy_resume(struct phy_device *phydev) | ||
1198 | { | ||
1199 | int ret; | ||
1200 | |||
1201 | mutex_lock(&phydev->lock); | ||
1202 | ret = __phy_resume(phydev); | ||
1203 | mutex_unlock(&phydev->lock); | ||
1204 | |||
1205 | return ret; | ||
1206 | } | ||
1192 | EXPORT_SYMBOL(phy_resume); | 1207 | EXPORT_SYMBOL(phy_resume); |
1193 | 1208 | ||
1194 | int phy_loopback(struct phy_device *phydev, bool enable) | 1209 | int phy_loopback(struct phy_device *phydev, bool enable) |
@@ -1382,7 +1397,7 @@ int genphy_setup_forced(struct phy_device *phydev) | |||
1382 | ctl |= BMCR_FULLDPLX; | 1397 | ctl |= BMCR_FULLDPLX; |
1383 | 1398 | ||
1384 | return phy_modify(phydev, MII_BMCR, | 1399 | return phy_modify(phydev, MII_BMCR, |
1385 | BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl); | 1400 | ~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl); |
1386 | } | 1401 | } |
1387 | EXPORT_SYMBOL(genphy_setup_forced); | 1402 | EXPORT_SYMBOL(genphy_setup_forced); |
1388 | 1403 | ||
@@ -1658,6 +1673,23 @@ int genphy_config_init(struct phy_device *phydev) | |||
1658 | } | 1673 | } |
1659 | EXPORT_SYMBOL(genphy_config_init); | 1674 | EXPORT_SYMBOL(genphy_config_init); |
1660 | 1675 | ||
1676 | /* This is used for the phy device which doesn't support the MMD extended | ||
1677 | * register access, but it does have side effect when we are trying to access | ||
1678 | * the MMD register via indirect method. | ||
1679 | */ | ||
1680 | int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, u16 regnum) | ||
1681 | { | ||
1682 | return -EOPNOTSUPP; | ||
1683 | } | ||
1684 | EXPORT_SYMBOL(genphy_read_mmd_unsupported); | ||
1685 | |||
1686 | int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, | ||
1687 | u16 regnum, u16 val) | ||
1688 | { | ||
1689 | return -EOPNOTSUPP; | ||
1690 | } | ||
1691 | EXPORT_SYMBOL(genphy_write_mmd_unsupported); | ||
1692 | |||
1661 | int genphy_suspend(struct phy_device *phydev) | 1693 | int genphy_suspend(struct phy_device *phydev) |
1662 | { | 1694 | { |
1663 | return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN); | 1695 | return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN); |
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index ee3ca4a2f12b..9f48ecf9c627 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c | |||
@@ -172,6 +172,8 @@ static struct phy_driver realtek_drvs[] = { | |||
172 | .flags = PHY_HAS_INTERRUPT, | 172 | .flags = PHY_HAS_INTERRUPT, |
173 | .ack_interrupt = &rtl821x_ack_interrupt, | 173 | .ack_interrupt = &rtl821x_ack_interrupt, |
174 | .config_intr = &rtl8211b_config_intr, | 174 | .config_intr = &rtl8211b_config_intr, |
175 | .read_mmd = &genphy_read_mmd_unsupported, | ||
176 | .write_mmd = &genphy_write_mmd_unsupported, | ||
175 | }, { | 177 | }, { |
176 | .phy_id = 0x001cc914, | 178 | .phy_id = 0x001cc914, |
177 | .name = "RTL8211DN Gigabit Ethernet", | 179 | .name = "RTL8211DN Gigabit Ethernet", |
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 255a5def56e9..da1937832c99 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
@@ -257,7 +257,7 @@ struct ppp_net { | |||
257 | /* Prototypes. */ | 257 | /* Prototypes. */ |
258 | static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, | 258 | static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, |
259 | struct file *file, unsigned int cmd, unsigned long arg); | 259 | struct file *file, unsigned int cmd, unsigned long arg); |
260 | static void ppp_xmit_process(struct ppp *ppp); | 260 | static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb); |
261 | static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); | 261 | static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); |
262 | static void ppp_push(struct ppp *ppp); | 262 | static void ppp_push(struct ppp *ppp); |
263 | static void ppp_channel_push(struct channel *pch); | 263 | static void ppp_channel_push(struct channel *pch); |
@@ -513,13 +513,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, | |||
513 | goto out; | 513 | goto out; |
514 | } | 514 | } |
515 | 515 | ||
516 | skb_queue_tail(&pf->xq, skb); | ||
517 | |||
518 | switch (pf->kind) { | 516 | switch (pf->kind) { |
519 | case INTERFACE: | 517 | case INTERFACE: |
520 | ppp_xmit_process(PF_TO_PPP(pf)); | 518 | ppp_xmit_process(PF_TO_PPP(pf), skb); |
521 | break; | 519 | break; |
522 | case CHANNEL: | 520 | case CHANNEL: |
521 | skb_queue_tail(&pf->xq, skb); | ||
523 | ppp_channel_push(PF_TO_CHANNEL(pf)); | 522 | ppp_channel_push(PF_TO_CHANNEL(pf)); |
524 | break; | 523 | break; |
525 | } | 524 | } |
@@ -1267,8 +1266,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1267 | put_unaligned_be16(proto, pp); | 1266 | put_unaligned_be16(proto, pp); |
1268 | 1267 | ||
1269 | skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev))); | 1268 | skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev))); |
1270 | skb_queue_tail(&ppp->file.xq, skb); | 1269 | ppp_xmit_process(ppp, skb); |
1271 | ppp_xmit_process(ppp); | 1270 | |
1272 | return NETDEV_TX_OK; | 1271 | return NETDEV_TX_OK; |
1273 | 1272 | ||
1274 | outf: | 1273 | outf: |
@@ -1420,13 +1419,14 @@ static void ppp_setup(struct net_device *dev) | |||
1420 | */ | 1419 | */ |
1421 | 1420 | ||
1422 | /* Called to do any work queued up on the transmit side that can now be done */ | 1421 | /* Called to do any work queued up on the transmit side that can now be done */ |
1423 | static void __ppp_xmit_process(struct ppp *ppp) | 1422 | static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) |
1424 | { | 1423 | { |
1425 | struct sk_buff *skb; | ||
1426 | |||
1427 | ppp_xmit_lock(ppp); | 1424 | ppp_xmit_lock(ppp); |
1428 | if (!ppp->closing) { | 1425 | if (!ppp->closing) { |
1429 | ppp_push(ppp); | 1426 | ppp_push(ppp); |
1427 | |||
1428 | if (skb) | ||
1429 | skb_queue_tail(&ppp->file.xq, skb); | ||
1430 | while (!ppp->xmit_pending && | 1430 | while (!ppp->xmit_pending && |
1431 | (skb = skb_dequeue(&ppp->file.xq))) | 1431 | (skb = skb_dequeue(&ppp->file.xq))) |
1432 | ppp_send_frame(ppp, skb); | 1432 | ppp_send_frame(ppp, skb); |
@@ -1440,7 +1440,7 @@ static void __ppp_xmit_process(struct ppp *ppp) | |||
1440 | ppp_xmit_unlock(ppp); | 1440 | ppp_xmit_unlock(ppp); |
1441 | } | 1441 | } |
1442 | 1442 | ||
1443 | static void ppp_xmit_process(struct ppp *ppp) | 1443 | static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb) |
1444 | { | 1444 | { |
1445 | local_bh_disable(); | 1445 | local_bh_disable(); |
1446 | 1446 | ||
@@ -1448,7 +1448,7 @@ static void ppp_xmit_process(struct ppp *ppp) | |||
1448 | goto err; | 1448 | goto err; |
1449 | 1449 | ||
1450 | (*this_cpu_ptr(ppp->xmit_recursion))++; | 1450 | (*this_cpu_ptr(ppp->xmit_recursion))++; |
1451 | __ppp_xmit_process(ppp); | 1451 | __ppp_xmit_process(ppp, skb); |
1452 | (*this_cpu_ptr(ppp->xmit_recursion))--; | 1452 | (*this_cpu_ptr(ppp->xmit_recursion))--; |
1453 | 1453 | ||
1454 | local_bh_enable(); | 1454 | local_bh_enable(); |
@@ -1458,6 +1458,8 @@ static void ppp_xmit_process(struct ppp *ppp) | |||
1458 | err: | 1458 | err: |
1459 | local_bh_enable(); | 1459 | local_bh_enable(); |
1460 | 1460 | ||
1461 | kfree_skb(skb); | ||
1462 | |||
1461 | if (net_ratelimit()) | 1463 | if (net_ratelimit()) |
1462 | netdev_err(ppp->dev, "recursion detected\n"); | 1464 | netdev_err(ppp->dev, "recursion detected\n"); |
1463 | } | 1465 | } |
@@ -1942,7 +1944,7 @@ static void __ppp_channel_push(struct channel *pch) | |||
1942 | if (skb_queue_empty(&pch->file.xq)) { | 1944 | if (skb_queue_empty(&pch->file.xq)) { |
1943 | ppp = pch->ppp; | 1945 | ppp = pch->ppp; |
1944 | if (ppp) | 1946 | if (ppp) |
1945 | __ppp_xmit_process(ppp); | 1947 | __ppp_xmit_process(ppp, NULL); |
1946 | } | 1948 | } |
1947 | } | 1949 | } |
1948 | 1950 | ||
@@ -3161,6 +3163,15 @@ ppp_connect_channel(struct channel *pch, int unit) | |||
3161 | goto outl; | 3163 | goto outl; |
3162 | 3164 | ||
3163 | ppp_lock(ppp); | 3165 | ppp_lock(ppp); |
3166 | spin_lock_bh(&pch->downl); | ||
3167 | if (!pch->chan) { | ||
3168 | /* Don't connect unregistered channels */ | ||
3169 | spin_unlock_bh(&pch->downl); | ||
3170 | ppp_unlock(ppp); | ||
3171 | ret = -ENOTCONN; | ||
3172 | goto outl; | ||
3173 | } | ||
3174 | spin_unlock_bh(&pch->downl); | ||
3164 | if (pch->file.hdrlen > ppp->file.hdrlen) | 3175 | if (pch->file.hdrlen > ppp->file.hdrlen) |
3165 | ppp->file.hdrlen = pch->file.hdrlen; | 3176 | ppp->file.hdrlen = pch->file.hdrlen; |
3166 | hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ | 3177 | hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a468439969df..56c701b73c12 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -2395,7 +2395,7 @@ send_done: | |||
2395 | if (!nlh) { | 2395 | if (!nlh) { |
2396 | err = __send_and_alloc_skb(&skb, team, portid, send_func); | 2396 | err = __send_and_alloc_skb(&skb, team, portid, send_func); |
2397 | if (err) | 2397 | if (err) |
2398 | goto errout; | 2398 | return err; |
2399 | goto send_done; | 2399 | goto send_done; |
2400 | } | 2400 | } |
2401 | 2401 | ||
@@ -2681,7 +2681,7 @@ send_done: | |||
2681 | if (!nlh) { | 2681 | if (!nlh) { |
2682 | err = __send_and_alloc_skb(&skb, team, portid, send_func); | 2682 | err = __send_and_alloc_skb(&skb, team, portid, send_func); |
2683 | if (err) | 2683 | if (err) |
2684 | goto errout; | 2684 | return err; |
2685 | goto send_done; | 2685 | goto send_done; |
2686 | } | 2686 | } |
2687 | 2687 | ||
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index ca5e375de27c..e0d6760f3219 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c | |||
@@ -166,6 +166,8 @@ struct tbnet_ring { | |||
166 | * @connected_work: Worker that finalizes the ThunderboltIP connection | 166 | * @connected_work: Worker that finalizes the ThunderboltIP connection |
167 | * setup and enables DMA paths for high speed data | 167 | * setup and enables DMA paths for high speed data |
168 | * transfers | 168 | * transfers |
169 | * @disconnect_work: Worker that handles tearing down the ThunderboltIP | ||
170 | * connection | ||
169 | * @rx_hdr: Copy of the currently processed Rx frame. Used when a | 171 | * @rx_hdr: Copy of the currently processed Rx frame. Used when a |
170 | * network packet consists of multiple Thunderbolt frames. | 172 | * network packet consists of multiple Thunderbolt frames. |
171 | * In host byte order. | 173 | * In host byte order. |
@@ -190,6 +192,7 @@ struct tbnet { | |||
190 | int login_retries; | 192 | int login_retries; |
191 | struct delayed_work login_work; | 193 | struct delayed_work login_work; |
192 | struct work_struct connected_work; | 194 | struct work_struct connected_work; |
195 | struct work_struct disconnect_work; | ||
193 | struct thunderbolt_ip_frame_header rx_hdr; | 196 | struct thunderbolt_ip_frame_header rx_hdr; |
194 | struct tbnet_ring rx_ring; | 197 | struct tbnet_ring rx_ring; |
195 | atomic_t frame_id; | 198 | atomic_t frame_id; |
@@ -445,7 +448,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data) | |||
445 | case TBIP_LOGOUT: | 448 | case TBIP_LOGOUT: |
446 | ret = tbnet_logout_response(net, route, sequence, command_id); | 449 | ret = tbnet_logout_response(net, route, sequence, command_id); |
447 | if (!ret) | 450 | if (!ret) |
448 | tbnet_tear_down(net, false); | 451 | queue_work(system_long_wq, &net->disconnect_work); |
449 | break; | 452 | break; |
450 | 453 | ||
451 | default: | 454 | default: |
@@ -659,6 +662,13 @@ static void tbnet_login_work(struct work_struct *work) | |||
659 | } | 662 | } |
660 | } | 663 | } |
661 | 664 | ||
665 | static void tbnet_disconnect_work(struct work_struct *work) | ||
666 | { | ||
667 | struct tbnet *net = container_of(work, typeof(*net), disconnect_work); | ||
668 | |||
669 | tbnet_tear_down(net, false); | ||
670 | } | ||
671 | |||
662 | static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, | 672 | static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, |
663 | const struct thunderbolt_ip_frame_header *hdr) | 673 | const struct thunderbolt_ip_frame_header *hdr) |
664 | { | 674 | { |
@@ -881,6 +891,7 @@ static int tbnet_stop(struct net_device *dev) | |||
881 | 891 | ||
882 | napi_disable(&net->napi); | 892 | napi_disable(&net->napi); |
883 | 893 | ||
894 | cancel_work_sync(&net->disconnect_work); | ||
884 | tbnet_tear_down(net, true); | 895 | tbnet_tear_down(net, true); |
885 | 896 | ||
886 | tb_ring_free(net->rx_ring.ring); | 897 | tb_ring_free(net->rx_ring.ring); |
@@ -1195,6 +1206,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id) | |||
1195 | net = netdev_priv(dev); | 1206 | net = netdev_priv(dev); |
1196 | INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); | 1207 | INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); |
1197 | INIT_WORK(&net->connected_work, tbnet_connected_work); | 1208 | INIT_WORK(&net->connected_work, tbnet_connected_work); |
1209 | INIT_WORK(&net->disconnect_work, tbnet_disconnect_work); | ||
1198 | mutex_init(&net->connection_lock); | 1210 | mutex_init(&net->connection_lock); |
1199 | atomic_set(&net->command_id, 0); | 1211 | atomic_set(&net->command_id, 0); |
1200 | atomic_set(&net->frame_id, 0); | 1212 | atomic_set(&net->frame_id, 0); |
@@ -1270,10 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev) | |||
1270 | stop_login(net); | 1282 | stop_login(net); |
1271 | if (netif_running(net->dev)) { | 1283 | if (netif_running(net->dev)) { |
1272 | netif_device_detach(net->dev); | 1284 | netif_device_detach(net->dev); |
1273 | tb_ring_stop(net->rx_ring.ring); | 1285 | tbnet_tear_down(net, true); |
1274 | tb_ring_stop(net->tx_ring.ring); | ||
1275 | tbnet_free_buffers(&net->rx_ring); | ||
1276 | tbnet_free_buffers(&net->tx_ring); | ||
1277 | } | 1286 | } |
1278 | 1287 | ||
1279 | return 0; | 1288 | return 0; |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 81e6cc951e7f..28cfa642e39a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -181,7 +181,6 @@ struct tun_file { | |||
181 | struct tun_struct *detached; | 181 | struct tun_struct *detached; |
182 | struct ptr_ring tx_ring; | 182 | struct ptr_ring tx_ring; |
183 | struct xdp_rxq_info xdp_rxq; | 183 | struct xdp_rxq_info xdp_rxq; |
184 | int xdp_pending_pkts; | ||
185 | }; | 184 | }; |
186 | 185 | ||
187 | struct tun_flow_entry { | 186 | struct tun_flow_entry { |
@@ -656,7 +655,7 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile) | |||
656 | return tun; | 655 | return tun; |
657 | } | 656 | } |
658 | 657 | ||
659 | static void tun_ptr_free(void *ptr) | 658 | void tun_ptr_free(void *ptr) |
660 | { | 659 | { |
661 | if (!ptr) | 660 | if (!ptr) |
662 | return; | 661 | return; |
@@ -668,6 +667,7 @@ static void tun_ptr_free(void *ptr) | |||
668 | __skb_array_destroy_skb(ptr); | 667 | __skb_array_destroy_skb(ptr); |
669 | } | 668 | } |
670 | } | 669 | } |
670 | EXPORT_SYMBOL_GPL(tun_ptr_free); | ||
671 | 671 | ||
672 | static void tun_queue_purge(struct tun_file *tfile) | 672 | static void tun_queue_purge(struct tun_file *tfile) |
673 | { | 673 | { |
@@ -1489,27 +1489,23 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, | |||
1489 | skb->truesize += skb->data_len; | 1489 | skb->truesize += skb->data_len; |
1490 | 1490 | ||
1491 | for (i = 1; i < it->nr_segs; i++) { | 1491 | for (i = 1; i < it->nr_segs; i++) { |
1492 | struct page_frag *pfrag = ¤t->task_frag; | ||
1492 | size_t fragsz = it->iov[i].iov_len; | 1493 | size_t fragsz = it->iov[i].iov_len; |
1493 | unsigned long offset; | ||
1494 | struct page *page; | ||
1495 | void *data; | ||
1496 | 1494 | ||
1497 | if (fragsz == 0 || fragsz > PAGE_SIZE) { | 1495 | if (fragsz == 0 || fragsz > PAGE_SIZE) { |
1498 | err = -EINVAL; | 1496 | err = -EINVAL; |
1499 | goto free; | 1497 | goto free; |
1500 | } | 1498 | } |
1501 | 1499 | ||
1502 | local_bh_disable(); | 1500 | if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) { |
1503 | data = napi_alloc_frag(fragsz); | ||
1504 | local_bh_enable(); | ||
1505 | if (!data) { | ||
1506 | err = -ENOMEM; | 1501 | err = -ENOMEM; |
1507 | goto free; | 1502 | goto free; |
1508 | } | 1503 | } |
1509 | 1504 | ||
1510 | page = virt_to_head_page(data); | 1505 | skb_fill_page_desc(skb, i - 1, pfrag->page, |
1511 | offset = data - page_address(page); | 1506 | pfrag->offset, fragsz); |
1512 | skb_fill_page_desc(skb, i - 1, page, offset, fragsz); | 1507 | page_ref_inc(pfrag->page); |
1508 | pfrag->offset += fragsz; | ||
1513 | } | 1509 | } |
1514 | 1510 | ||
1515 | return skb; | 1511 | return skb; |
@@ -1647,6 +1643,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1647 | else | 1643 | else |
1648 | *skb_xdp = 0; | 1644 | *skb_xdp = 0; |
1649 | 1645 | ||
1646 | preempt_disable(); | ||
1650 | rcu_read_lock(); | 1647 | rcu_read_lock(); |
1651 | xdp_prog = rcu_dereference(tun->xdp_prog); | 1648 | xdp_prog = rcu_dereference(tun->xdp_prog); |
1652 | if (xdp_prog && !*skb_xdp) { | 1649 | if (xdp_prog && !*skb_xdp) { |
@@ -1666,11 +1663,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1666 | case XDP_REDIRECT: | 1663 | case XDP_REDIRECT: |
1667 | get_page(alloc_frag->page); | 1664 | get_page(alloc_frag->page); |
1668 | alloc_frag->offset += buflen; | 1665 | alloc_frag->offset += buflen; |
1669 | ++tfile->xdp_pending_pkts; | ||
1670 | err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); | 1666 | err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); |
1667 | xdp_do_flush_map(); | ||
1671 | if (err) | 1668 | if (err) |
1672 | goto err_redirect; | 1669 | goto err_redirect; |
1673 | rcu_read_unlock(); | 1670 | rcu_read_unlock(); |
1671 | preempt_enable(); | ||
1674 | return NULL; | 1672 | return NULL; |
1675 | case XDP_TX: | 1673 | case XDP_TX: |
1676 | xdp_xmit = true; | 1674 | xdp_xmit = true; |
@@ -1692,6 +1690,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1692 | skb = build_skb(buf, buflen); | 1690 | skb = build_skb(buf, buflen); |
1693 | if (!skb) { | 1691 | if (!skb) { |
1694 | rcu_read_unlock(); | 1692 | rcu_read_unlock(); |
1693 | preempt_enable(); | ||
1695 | return ERR_PTR(-ENOMEM); | 1694 | return ERR_PTR(-ENOMEM); |
1696 | } | 1695 | } |
1697 | 1696 | ||
@@ -1704,10 +1703,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1704 | skb->dev = tun->dev; | 1703 | skb->dev = tun->dev; |
1705 | generic_xdp_tx(skb, xdp_prog); | 1704 | generic_xdp_tx(skb, xdp_prog); |
1706 | rcu_read_unlock(); | 1705 | rcu_read_unlock(); |
1706 | preempt_enable(); | ||
1707 | return NULL; | 1707 | return NULL; |
1708 | } | 1708 | } |
1709 | 1709 | ||
1710 | rcu_read_unlock(); | 1710 | rcu_read_unlock(); |
1711 | preempt_enable(); | ||
1711 | 1712 | ||
1712 | return skb; | 1713 | return skb; |
1713 | 1714 | ||
@@ -1715,6 +1716,7 @@ err_redirect: | |||
1715 | put_page(alloc_frag->page); | 1716 | put_page(alloc_frag->page); |
1716 | err_xdp: | 1717 | err_xdp: |
1717 | rcu_read_unlock(); | 1718 | rcu_read_unlock(); |
1719 | preempt_enable(); | ||
1718 | this_cpu_inc(tun->pcpu_stats->rx_dropped); | 1720 | this_cpu_inc(tun->pcpu_stats->rx_dropped); |
1719 | return NULL; | 1721 | return NULL; |
1720 | } | 1722 | } |
@@ -1988,11 +1990,6 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
1988 | result = tun_get_user(tun, tfile, NULL, from, | 1990 | result = tun_get_user(tun, tfile, NULL, from, |
1989 | file->f_flags & O_NONBLOCK, false); | 1991 | file->f_flags & O_NONBLOCK, false); |
1990 | 1992 | ||
1991 | if (tfile->xdp_pending_pkts) { | ||
1992 | tfile->xdp_pending_pkts = 0; | ||
1993 | xdp_do_flush_map(); | ||
1994 | } | ||
1995 | |||
1996 | tun_put(tun); | 1993 | tun_put(tun); |
1997 | return result; | 1994 | return result; |
1998 | } | 1995 | } |
@@ -2329,13 +2326,6 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) | |||
2329 | ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, | 2326 | ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, |
2330 | m->msg_flags & MSG_DONTWAIT, | 2327 | m->msg_flags & MSG_DONTWAIT, |
2331 | m->msg_flags & MSG_MORE); | 2328 | m->msg_flags & MSG_MORE); |
2332 | |||
2333 | if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT || | ||
2334 | !(m->msg_flags & MSG_MORE)) { | ||
2335 | tfile->xdp_pending_pkts = 0; | ||
2336 | xdp_do_flush_map(); | ||
2337 | } | ||
2338 | |||
2339 | tun_put(tun); | 2329 | tun_put(tun); |
2340 | return ret; | 2330 | return ret; |
2341 | } | 2331 | } |
@@ -3167,7 +3157,6 @@ static int tun_chr_open(struct inode *inode, struct file * file) | |||
3167 | sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); | 3157 | sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); |
3168 | 3158 | ||
3169 | memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); | 3159 | memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); |
3170 | tfile->xdp_pending_pkts = 0; | ||
3171 | 3160 | ||
3172 | return 0; | 3161 | return 0; |
3173 | } | 3162 | } |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 05dca3e5c93d..fff4b13eece2 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -896,6 +896,12 @@ static const struct usb_device_id products[] = { | |||
896 | USB_CDC_PROTO_NONE), | 896 | USB_CDC_PROTO_NONE), |
897 | .driver_info = (unsigned long)&wwan_info, | 897 | .driver_info = (unsigned long)&wwan_info, |
898 | }, { | 898 | }, { |
899 | /* Cinterion PLS8 modem by GEMALTO */ | ||
900 | USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0061, USB_CLASS_COMM, | ||
901 | USB_CDC_SUBCLASS_ETHERNET, | ||
902 | USB_CDC_PROTO_NONE), | ||
903 | .driver_info = (unsigned long)&wwan_info, | ||
904 | }, { | ||
899 | USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, | 905 | USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, |
900 | USB_CDC_PROTO_NONE), | 906 | USB_CDC_PROTO_NONE), |
901 | .driver_info = (unsigned long) &cdc_info, | 907 | .driver_info = (unsigned long) &cdc_info, |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 958b2e8b90f6..86f7196f9d91 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -1794,7 +1794,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) | |||
1794 | 1794 | ||
1795 | tx_data += len; | 1795 | tx_data += len; |
1796 | agg->skb_len += len; | 1796 | agg->skb_len += len; |
1797 | agg->skb_num++; | 1797 | agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1; |
1798 | 1798 | ||
1799 | dev_kfree_skb_any(skb); | 1799 | dev_kfree_skb_any(skb); |
1800 | 1800 | ||
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index d0a113743195..7a6a1fe79309 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c | |||
@@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev, | |||
954 | /* it's racing here! */ | 954 | /* it's racing here! */ |
955 | 955 | ||
956 | ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); | 956 | ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); |
957 | if (ret < 0) | 957 | if (ret < 0) { |
958 | netdev_warn(dev->net, "Error writing RFE_CTL\n"); | 958 | netdev_warn(dev->net, "Error writing RFE_CTL\n"); |
959 | 959 | return ret; | |
960 | return ret; | 960 | } |
961 | return 0; | ||
961 | } | 962 | } |
962 | 963 | ||
963 | static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) | 964 | static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm) |
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 8a22ff67b026..d9eea8cfe6cb 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -315,6 +315,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev) | |||
315 | void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) | 315 | void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) |
316 | { | 316 | { |
317 | struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); | 317 | struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); |
318 | unsigned long flags; | ||
318 | int status; | 319 | int status; |
319 | 320 | ||
320 | if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { | 321 | if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { |
@@ -326,10 +327,10 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) | |||
326 | if (skb->protocol == 0) | 327 | if (skb->protocol == 0) |
327 | skb->protocol = eth_type_trans (skb, dev->net); | 328 | skb->protocol = eth_type_trans (skb, dev->net); |
328 | 329 | ||
329 | u64_stats_update_begin(&stats64->syncp); | 330 | flags = u64_stats_update_begin_irqsave(&stats64->syncp); |
330 | stats64->rx_packets++; | 331 | stats64->rx_packets++; |
331 | stats64->rx_bytes += skb->len; | 332 | stats64->rx_bytes += skb->len; |
332 | u64_stats_update_end(&stats64->syncp); | 333 | u64_stats_update_end_irqrestore(&stats64->syncp, flags); |
333 | 334 | ||
334 | netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", | 335 | netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", |
335 | skb->len + sizeof (struct ethhdr), skb->protocol); | 336 | skb->len + sizeof (struct ethhdr), skb->protocol); |
@@ -1248,11 +1249,12 @@ static void tx_complete (struct urb *urb) | |||
1248 | 1249 | ||
1249 | if (urb->status == 0) { | 1250 | if (urb->status == 0) { |
1250 | struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); | 1251 | struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); |
1252 | unsigned long flags; | ||
1251 | 1253 | ||
1252 | u64_stats_update_begin(&stats64->syncp); | 1254 | flags = u64_stats_update_begin_irqsave(&stats64->syncp); |
1253 | stats64->tx_packets += entry->packets; | 1255 | stats64->tx_packets += entry->packets; |
1254 | stats64->tx_bytes += entry->length; | 1256 | stats64->tx_bytes += entry->length; |
1255 | u64_stats_update_end(&stats64->syncp); | 1257 | u64_stats_update_end_irqrestore(&stats64->syncp, flags); |
1256 | } else { | 1258 | } else { |
1257 | dev->net->stats.tx_errors++; | 1259 | dev->net->stats.tx_errors++; |
1258 | 1260 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 626c27352ae2..23374603e4d9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -443,12 +443,8 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi, | |||
443 | sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); | 443 | sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); |
444 | 444 | ||
445 | err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC); | 445 | err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC); |
446 | if (unlikely(err)) { | 446 | if (unlikely(err)) |
447 | struct page *page = virt_to_head_page(xdp->data); | 447 | return false; /* Caller handle free/refcnt */ |
448 | |||
449 | put_page(page); | ||
450 | return false; | ||
451 | } | ||
452 | 448 | ||
453 | return true; | 449 | return true; |
454 | } | 450 | } |
@@ -456,8 +452,18 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi, | |||
456 | static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) | 452 | static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) |
457 | { | 453 | { |
458 | struct virtnet_info *vi = netdev_priv(dev); | 454 | struct virtnet_info *vi = netdev_priv(dev); |
459 | bool sent = __virtnet_xdp_xmit(vi, xdp); | 455 | struct receive_queue *rq = vi->rq; |
456 | struct bpf_prog *xdp_prog; | ||
457 | bool sent; | ||
458 | |||
459 | /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this | ||
460 | * indicate XDP resources have been successfully allocated. | ||
461 | */ | ||
462 | xdp_prog = rcu_dereference(rq->xdp_prog); | ||
463 | if (!xdp_prog) | ||
464 | return -ENXIO; | ||
460 | 465 | ||
466 | sent = __virtnet_xdp_xmit(vi, xdp); | ||
461 | if (!sent) | 467 | if (!sent) |
462 | return -ENOSPC; | 468 | return -ENOSPC; |
463 | return 0; | 469 | return 0; |
@@ -498,6 +504,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, | |||
498 | page_off += *len; | 504 | page_off += *len; |
499 | 505 | ||
500 | while (--*num_buf) { | 506 | while (--*num_buf) { |
507 | int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
501 | unsigned int buflen; | 508 | unsigned int buflen; |
502 | void *buf; | 509 | void *buf; |
503 | int off; | 510 | int off; |
@@ -512,7 +519,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, | |||
512 | /* guard against a misconfigured or uncooperative backend that | 519 | /* guard against a misconfigured or uncooperative backend that |
513 | * is sending packet larger than the MTU. | 520 | * is sending packet larger than the MTU. |
514 | */ | 521 | */ |
515 | if ((page_off + buflen) > PAGE_SIZE) { | 522 | if ((page_off + buflen + tailroom) > PAGE_SIZE) { |
516 | put_page(p); | 523 | put_page(p); |
517 | goto err_buf; | 524 | goto err_buf; |
518 | } | 525 | } |
@@ -546,8 +553,11 @@ static struct sk_buff *receive_small(struct net_device *dev, | |||
546 | unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + | 553 | unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + |
547 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | 554 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
548 | struct page *page = virt_to_head_page(buf); | 555 | struct page *page = virt_to_head_page(buf); |
549 | unsigned int delta = 0, err; | 556 | unsigned int delta = 0; |
550 | struct page *xdp_page; | 557 | struct page *xdp_page; |
558 | bool sent; | ||
559 | int err; | ||
560 | |||
551 | len -= vi->hdr_len; | 561 | len -= vi->hdr_len; |
552 | 562 | ||
553 | rcu_read_lock(); | 563 | rcu_read_lock(); |
@@ -558,7 +568,7 @@ static struct sk_buff *receive_small(struct net_device *dev, | |||
558 | void *orig_data; | 568 | void *orig_data; |
559 | u32 act; | 569 | u32 act; |
560 | 570 | ||
561 | if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) | 571 | if (unlikely(hdr->hdr.gso_type)) |
562 | goto err_xdp; | 572 | goto err_xdp; |
563 | 573 | ||
564 | if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { | 574 | if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { |
@@ -596,16 +606,19 @@ static struct sk_buff *receive_small(struct net_device *dev, | |||
596 | delta = orig_data - xdp.data; | 606 | delta = orig_data - xdp.data; |
597 | break; | 607 | break; |
598 | case XDP_TX: | 608 | case XDP_TX: |
599 | if (unlikely(!__virtnet_xdp_xmit(vi, &xdp))) | 609 | sent = __virtnet_xdp_xmit(vi, &xdp); |
610 | if (unlikely(!sent)) { | ||
600 | trace_xdp_exception(vi->dev, xdp_prog, act); | 611 | trace_xdp_exception(vi->dev, xdp_prog, act); |
601 | else | 612 | goto err_xdp; |
602 | *xdp_xmit = true; | 613 | } |
614 | *xdp_xmit = true; | ||
603 | rcu_read_unlock(); | 615 | rcu_read_unlock(); |
604 | goto xdp_xmit; | 616 | goto xdp_xmit; |
605 | case XDP_REDIRECT: | 617 | case XDP_REDIRECT: |
606 | err = xdp_do_redirect(dev, &xdp, xdp_prog); | 618 | err = xdp_do_redirect(dev, &xdp, xdp_prog); |
607 | if (!err) | 619 | if (err) |
608 | *xdp_xmit = true; | 620 | goto err_xdp; |
621 | *xdp_xmit = true; | ||
609 | rcu_read_unlock(); | 622 | rcu_read_unlock(); |
610 | goto xdp_xmit; | 623 | goto xdp_xmit; |
611 | default: | 624 | default: |
@@ -677,6 +690,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
677 | struct bpf_prog *xdp_prog; | 690 | struct bpf_prog *xdp_prog; |
678 | unsigned int truesize; | 691 | unsigned int truesize; |
679 | unsigned int headroom = mergeable_ctx_to_headroom(ctx); | 692 | unsigned int headroom = mergeable_ctx_to_headroom(ctx); |
693 | bool sent; | ||
680 | int err; | 694 | int err; |
681 | 695 | ||
682 | head_skb = NULL; | 696 | head_skb = NULL; |
@@ -689,7 +703,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
689 | void *data; | 703 | void *data; |
690 | u32 act; | 704 | u32 act; |
691 | 705 | ||
692 | /* This happens when rx buffer size is underestimated */ | 706 | /* This happens when rx buffer size is underestimated |
707 | * or headroom is not enough because of the buffer | ||
708 | * was refilled before XDP is set. This should only | ||
709 | * happen for the first several packets, so we don't | ||
710 | * care much about its performance. | ||
711 | */ | ||
693 | if (unlikely(num_buf > 1 || | 712 | if (unlikely(num_buf > 1 || |
694 | headroom < virtnet_get_headroom(vi))) { | 713 | headroom < virtnet_get_headroom(vi))) { |
695 | /* linearize data for XDP */ | 714 | /* linearize data for XDP */ |
@@ -724,9 +743,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
724 | 743 | ||
725 | act = bpf_prog_run_xdp(xdp_prog, &xdp); | 744 | act = bpf_prog_run_xdp(xdp_prog, &xdp); |
726 | 745 | ||
727 | if (act != XDP_PASS) | ||
728 | ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len); | ||
729 | |||
730 | switch (act) { | 746 | switch (act) { |
731 | case XDP_PASS: | 747 | case XDP_PASS: |
732 | /* recalculate offset to account for any header | 748 | /* recalculate offset to account for any header |
@@ -746,18 +762,28 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, | |||
746 | } | 762 | } |
747 | break; | 763 | break; |
748 | case XDP_TX: | 764 | case XDP_TX: |
749 | if (unlikely(!__virtnet_xdp_xmit(vi, &xdp))) | 765 | sent = __virtnet_xdp_xmit(vi, &xdp); |
766 | if (unlikely(!sent)) { | ||
750 | trace_xdp_exception(vi->dev, xdp_prog, act); | 767 | trace_xdp_exception(vi->dev, xdp_prog, act); |
751 | else | 768 | if (unlikely(xdp_page != page)) |
752 | *xdp_xmit = true; | 769 | put_page(xdp_page); |
770 | goto err_xdp; | ||
771 | } | ||
772 | *xdp_xmit = true; | ||
753 | if (unlikely(xdp_page != page)) | 773 | if (unlikely(xdp_page != page)) |
754 | goto err_xdp; | 774 | goto err_xdp; |
755 | rcu_read_unlock(); | 775 | rcu_read_unlock(); |
756 | goto xdp_xmit; | 776 | goto xdp_xmit; |
757 | case XDP_REDIRECT: | 777 | case XDP_REDIRECT: |
758 | err = xdp_do_redirect(dev, &xdp, xdp_prog); | 778 | err = xdp_do_redirect(dev, &xdp, xdp_prog); |
759 | if (!err) | 779 | if (err) { |
760 | *xdp_xmit = true; | 780 | if (unlikely(xdp_page != page)) |
781 | put_page(xdp_page); | ||
782 | goto err_xdp; | ||
783 | } | ||
784 | *xdp_xmit = true; | ||
785 | if (unlikely(xdp_page != page)) | ||
786 | goto err_xdp; | ||
761 | rcu_read_unlock(); | 787 | rcu_read_unlock(); |
762 | goto xdp_xmit; | 788 | goto xdp_xmit; |
763 | default: | 789 | default: |
@@ -1003,13 +1029,18 @@ static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, | |||
1003 | } | 1029 | } |
1004 | 1030 | ||
1005 | static unsigned int get_mergeable_buf_len(struct receive_queue *rq, | 1031 | static unsigned int get_mergeable_buf_len(struct receive_queue *rq, |
1006 | struct ewma_pkt_len *avg_pkt_len) | 1032 | struct ewma_pkt_len *avg_pkt_len, |
1033 | unsigned int room) | ||
1007 | { | 1034 | { |
1008 | const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); | 1035 | const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); |
1009 | unsigned int len; | 1036 | unsigned int len; |
1010 | 1037 | ||
1011 | len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), | 1038 | if (room) |
1039 | return PAGE_SIZE - room; | ||
1040 | |||
1041 | len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), | ||
1012 | rq->min_buf_len, PAGE_SIZE - hdr_len); | 1042 | rq->min_buf_len, PAGE_SIZE - hdr_len); |
1043 | |||
1013 | return ALIGN(len, L1_CACHE_BYTES); | 1044 | return ALIGN(len, L1_CACHE_BYTES); |
1014 | } | 1045 | } |
1015 | 1046 | ||
@@ -1018,21 +1049,27 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, | |||
1018 | { | 1049 | { |
1019 | struct page_frag *alloc_frag = &rq->alloc_frag; | 1050 | struct page_frag *alloc_frag = &rq->alloc_frag; |
1020 | unsigned int headroom = virtnet_get_headroom(vi); | 1051 | unsigned int headroom = virtnet_get_headroom(vi); |
1052 | unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; | ||
1053 | unsigned int room = SKB_DATA_ALIGN(headroom + tailroom); | ||
1021 | char *buf; | 1054 | char *buf; |
1022 | void *ctx; | 1055 | void *ctx; |
1023 | int err; | 1056 | int err; |
1024 | unsigned int len, hole; | 1057 | unsigned int len, hole; |
1025 | 1058 | ||
1026 | len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len); | 1059 | /* Extra tailroom is needed to satisfy XDP's assumption. This |
1027 | if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp))) | 1060 | * means rx frags coalescing won't work, but consider we've |
1061 | * disabled GSO for XDP, it won't be a big issue. | ||
1062 | */ | ||
1063 | len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room); | ||
1064 | if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp))) | ||
1028 | return -ENOMEM; | 1065 | return -ENOMEM; |
1029 | 1066 | ||
1030 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; | 1067 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; |
1031 | buf += headroom; /* advance address leaving hole at front of pkt */ | 1068 | buf += headroom; /* advance address leaving hole at front of pkt */ |
1032 | get_page(alloc_frag->page); | 1069 | get_page(alloc_frag->page); |
1033 | alloc_frag->offset += len + headroom; | 1070 | alloc_frag->offset += len + room; |
1034 | hole = alloc_frag->size - alloc_frag->offset; | 1071 | hole = alloc_frag->size - alloc_frag->offset; |
1035 | if (hole < len + headroom) { | 1072 | if (hole < len + room) { |
1036 | /* To avoid internal fragmentation, if there is very likely not | 1073 | /* To avoid internal fragmentation, if there is very likely not |
1037 | * enough space for another buffer, add the remaining space to | 1074 | * enough space for another buffer, add the remaining space to |
1038 | * the current buffer. | 1075 | * the current buffer. |
@@ -2175,8 +2212,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |||
2175 | } | 2212 | } |
2176 | 2213 | ||
2177 | /* Make sure NAPI is not using any XDP TX queues for RX. */ | 2214 | /* Make sure NAPI is not using any XDP TX queues for RX. */ |
2178 | for (i = 0; i < vi->max_queue_pairs; i++) | 2215 | if (netif_running(dev)) |
2179 | napi_disable(&vi->rq[i].napi); | 2216 | for (i = 0; i < vi->max_queue_pairs; i++) |
2217 | napi_disable(&vi->rq[i].napi); | ||
2180 | 2218 | ||
2181 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); | 2219 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); |
2182 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); | 2220 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); |
@@ -2195,7 +2233,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |||
2195 | } | 2233 | } |
2196 | if (old_prog) | 2234 | if (old_prog) |
2197 | bpf_prog_put(old_prog); | 2235 | bpf_prog_put(old_prog); |
2198 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | 2236 | if (netif_running(dev)) |
2237 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | ||
2199 | } | 2238 | } |
2200 | 2239 | ||
2201 | return 0; | 2240 | return 0; |
@@ -2566,12 +2605,15 @@ static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, | |||
2566 | { | 2605 | { |
2567 | struct virtnet_info *vi = netdev_priv(queue->dev); | 2606 | struct virtnet_info *vi = netdev_priv(queue->dev); |
2568 | unsigned int queue_index = get_netdev_rx_queue_index(queue); | 2607 | unsigned int queue_index = get_netdev_rx_queue_index(queue); |
2608 | unsigned int headroom = virtnet_get_headroom(vi); | ||
2609 | unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0; | ||
2569 | struct ewma_pkt_len *avg; | 2610 | struct ewma_pkt_len *avg; |
2570 | 2611 | ||
2571 | BUG_ON(queue_index >= vi->max_queue_pairs); | 2612 | BUG_ON(queue_index >= vi->max_queue_pairs); |
2572 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; | 2613 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; |
2573 | return sprintf(buf, "%u\n", | 2614 | return sprintf(buf, "%u\n", |
2574 | get_mergeable_buf_len(&vi->rq[queue_index], avg)); | 2615 | get_mergeable_buf_len(&vi->rq[queue_index], avg, |
2616 | SKB_DATA_ALIGN(headroom + tailroom))); | ||
2575 | } | 2617 | } |
2576 | 2618 | ||
2577 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = | 2619 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 8b39c160743d..e04937f44f33 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -977,6 +977,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | |||
977 | { | 977 | { |
978 | int ret; | 978 | int ret; |
979 | u32 count; | 979 | u32 count; |
980 | int num_pkts; | ||
981 | int tx_num_deferred; | ||
980 | unsigned long flags; | 982 | unsigned long flags; |
981 | struct vmxnet3_tx_ctx ctx; | 983 | struct vmxnet3_tx_ctx ctx; |
982 | union Vmxnet3_GenericDesc *gdesc; | 984 | union Vmxnet3_GenericDesc *gdesc; |
@@ -1075,12 +1077,12 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | |||
1075 | #else | 1077 | #else |
1076 | gdesc = ctx.sop_txd; | 1078 | gdesc = ctx.sop_txd; |
1077 | #endif | 1079 | #endif |
1080 | tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred); | ||
1078 | if (ctx.mss) { | 1081 | if (ctx.mss) { |
1079 | gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; | 1082 | gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; |
1080 | gdesc->txd.om = VMXNET3_OM_TSO; | 1083 | gdesc->txd.om = VMXNET3_OM_TSO; |
1081 | gdesc->txd.msscof = ctx.mss; | 1084 | gdesc->txd.msscof = ctx.mss; |
1082 | le32_add_cpu(&tq->shared->txNumDeferred, (skb->len - | 1085 | num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss; |
1083 | gdesc->txd.hlen + ctx.mss - 1) / ctx.mss); | ||
1084 | } else { | 1086 | } else { |
1085 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1087 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1086 | gdesc->txd.hlen = ctx.eth_ip_hdr_size; | 1088 | gdesc->txd.hlen = ctx.eth_ip_hdr_size; |
@@ -1091,8 +1093,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | |||
1091 | gdesc->txd.om = 0; | 1093 | gdesc->txd.om = 0; |
1092 | gdesc->txd.msscof = 0; | 1094 | gdesc->txd.msscof = 0; |
1093 | } | 1095 | } |
1094 | le32_add_cpu(&tq->shared->txNumDeferred, 1); | 1096 | num_pkts = 1; |
1095 | } | 1097 | } |
1098 | le32_add_cpu(&tq->shared->txNumDeferred, num_pkts); | ||
1099 | tx_num_deferred += num_pkts; | ||
1096 | 1100 | ||
1097 | if (skb_vlan_tag_present(skb)) { | 1101 | if (skb_vlan_tag_present(skb)) { |
1098 | gdesc->txd.ti = 1; | 1102 | gdesc->txd.ti = 1; |
@@ -1118,8 +1122,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | |||
1118 | 1122 | ||
1119 | spin_unlock_irqrestore(&tq->tx_lock, flags); | 1123 | spin_unlock_irqrestore(&tq->tx_lock, flags); |
1120 | 1124 | ||
1121 | if (le32_to_cpu(tq->shared->txNumDeferred) >= | 1125 | if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) { |
1122 | le32_to_cpu(tq->shared->txThreshold)) { | ||
1123 | tq->shared->txNumDeferred = 0; | 1126 | tq->shared->txNumDeferred = 0; |
1124 | VMXNET3_WRITE_BAR0_REG(adapter, | 1127 | VMXNET3_WRITE_BAR0_REG(adapter, |
1125 | VMXNET3_REG_TXPROD + tq->qid * 8, | 1128 | VMXNET3_REG_TXPROD + tq->qid * 8, |
@@ -1470,7 +1473,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | |||
1470 | vmxnet3_rx_csum(adapter, skb, | 1473 | vmxnet3_rx_csum(adapter, skb, |
1471 | (union Vmxnet3_GenericDesc *)rcd); | 1474 | (union Vmxnet3_GenericDesc *)rcd); |
1472 | skb->protocol = eth_type_trans(skb, adapter->netdev); | 1475 | skb->protocol = eth_type_trans(skb, adapter->netdev); |
1473 | if (!rcd->tcp || !adapter->lro) | 1476 | if (!rcd->tcp || |
1477 | !(adapter->netdev->features & NETIF_F_LRO)) | ||
1474 | goto not_lro; | 1478 | goto not_lro; |
1475 | 1479 | ||
1476 | if (segCnt != 0 && mss != 0) { | 1480 | if (segCnt != 0 && mss != 0) { |
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 5ba222920e80..59ec34052a65 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
@@ -69,10 +69,10 @@ | |||
69 | /* | 69 | /* |
70 | * Version numbers | 70 | * Version numbers |
71 | */ | 71 | */ |
72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.11.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k" |
73 | 73 | ||
74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ |
75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040b00 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040d00 |
76 | 76 | ||
77 | #if defined(CONFIG_PCI_MSI) | 77 | #if defined(CONFIG_PCI_MSI) |
78 | /* RSS only makes sense if MSI-X is supported. */ | 78 | /* RSS only makes sense if MSI-X is supported. */ |
@@ -342,9 +342,6 @@ struct vmxnet3_adapter { | |||
342 | u8 __iomem *hw_addr1; /* for BAR 1 */ | 342 | u8 __iomem *hw_addr1; /* for BAR 1 */ |
343 | u8 version; | 343 | u8 version; |
344 | 344 | ||
345 | bool rxcsum; | ||
346 | bool lro; | ||
347 | |||
348 | #ifdef VMXNET3_RSS | 345 | #ifdef VMXNET3_RSS |
349 | struct UPT1_RSSConf *rss_conf; | 346 | struct UPT1_RSSConf *rss_conf; |
350 | bool rss; | 347 | bool rss; |
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index afeca6bcdade..ab8b3cbbb205 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c | |||
@@ -574,7 +574,10 @@ static void ppp_timer(struct timer_list *t) | |||
574 | ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, | 574 | ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, |
575 | 0, NULL); | 575 | 0, NULL); |
576 | proto->restart_counter--; | 576 | proto->restart_counter--; |
577 | } else | 577 | } else if (netif_carrier_ok(proto->dev)) |
578 | ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, | ||
579 | 0, NULL); | ||
580 | else | ||
578 | ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, | 581 | ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, |
579 | 0, NULL); | 582 | 0, NULL); |
580 | break; | 583 | break; |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index e89e5ef2c2a4..f246e9ed4a81 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c | |||
@@ -729,6 +729,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, | |||
729 | ieee80211_hw_set(hw, SPECTRUM_MGMT); | 729 | ieee80211_hw_set(hw, SPECTRUM_MGMT); |
730 | ieee80211_hw_set(hw, SIGNAL_DBM); | 730 | ieee80211_hw_set(hw, SIGNAL_DBM); |
731 | ieee80211_hw_set(hw, AMPDU_AGGREGATION); | 731 | ieee80211_hw_set(hw, AMPDU_AGGREGATION); |
732 | ieee80211_hw_set(hw, DOESNT_SUPPORT_QOS_NDP); | ||
732 | 733 | ||
733 | if (ath9k_ps_enable) | 734 | if (ath9k_ps_enable) |
734 | ieee80211_hw_set(hw, SUPPORTS_PS); | 735 | ieee80211_hw_set(hw, SUPPORTS_PS); |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index df8a1ecb9924..232dcbb83311 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | |||
@@ -181,6 +181,7 @@ enum brcmf_netif_stop_reason { | |||
181 | * @netif_stop_lock: spinlock for update netif_stop from multiple sources. | 181 | * @netif_stop_lock: spinlock for update netif_stop from multiple sources. |
182 | * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. | 182 | * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. |
183 | * @pend_8021x_wait: used for signalling change in count. | 183 | * @pend_8021x_wait: used for signalling change in count. |
184 | * @fwil_fwerr: flag indicating fwil layer should return firmware error codes. | ||
184 | */ | 185 | */ |
185 | struct brcmf_if { | 186 | struct brcmf_if { |
186 | struct brcmf_pub *drvr; | 187 | struct brcmf_pub *drvr; |
@@ -198,6 +199,7 @@ struct brcmf_if { | |||
198 | wait_queue_head_t pend_8021x_wait; | 199 | wait_queue_head_t pend_8021x_wait; |
199 | struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES]; | 200 | struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES]; |
200 | u8 ipv6addr_idx; | 201 | u8 ipv6addr_idx; |
202 | bool fwil_fwerr; | ||
201 | }; | 203 | }; |
202 | 204 | ||
203 | int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp); | 205 | int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp); |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 47de35a33853..bede7b7fd996 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | |||
@@ -104,6 +104,9 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp, | |||
104 | u32 data; | 104 | u32 data; |
105 | int err; | 105 | int err; |
106 | 106 | ||
107 | /* we need to know firmware error */ | ||
108 | ifp->fwil_fwerr = true; | ||
109 | |||
107 | err = brcmf_fil_iovar_int_get(ifp, name, &data); | 110 | err = brcmf_fil_iovar_int_get(ifp, name, &data); |
108 | if (err == 0) { | 111 | if (err == 0) { |
109 | brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); | 112 | brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); |
@@ -112,6 +115,8 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp, | |||
112 | brcmf_dbg(TRACE, "%s feature check failed: %d\n", | 115 | brcmf_dbg(TRACE, "%s feature check failed: %d\n", |
113 | brcmf_feat_names[id], err); | 116 | brcmf_feat_names[id], err); |
114 | } | 117 | } |
118 | |||
119 | ifp->fwil_fwerr = false; | ||
115 | } | 120 | } |
116 | 121 | ||
117 | static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, | 122 | static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, |
@@ -120,6 +125,9 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, | |||
120 | { | 125 | { |
121 | int err; | 126 | int err; |
122 | 127 | ||
128 | /* we need to know firmware error */ | ||
129 | ifp->fwil_fwerr = true; | ||
130 | |||
123 | err = brcmf_fil_iovar_data_set(ifp, name, data, len); | 131 | err = brcmf_fil_iovar_data_set(ifp, name, data, len); |
124 | if (err != -BRCMF_FW_UNSUPPORTED) { | 132 | if (err != -BRCMF_FW_UNSUPPORTED) { |
125 | brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); | 133 | brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); |
@@ -128,6 +136,8 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, | |||
128 | brcmf_dbg(TRACE, "%s feature check failed: %d\n", | 136 | brcmf_dbg(TRACE, "%s feature check failed: %d\n", |
129 | brcmf_feat_names[id], err); | 137 | brcmf_feat_names[id], err); |
130 | } | 138 | } |
139 | |||
140 | ifp->fwil_fwerr = false; | ||
131 | } | 141 | } |
132 | 142 | ||
133 | #define MAX_CAPS_BUFFER_SIZE 512 | 143 | #define MAX_CAPS_BUFFER_SIZE 512 |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c index f2cfdd3b2bf1..fc5751116d99 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c | |||
@@ -131,6 +131,9 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set) | |||
131 | brcmf_fil_get_errstr((u32)(-fwerr)), fwerr); | 131 | brcmf_fil_get_errstr((u32)(-fwerr)), fwerr); |
132 | err = -EBADE; | 132 | err = -EBADE; |
133 | } | 133 | } |
134 | if (ifp->fwil_fwerr) | ||
135 | return fwerr; | ||
136 | |||
134 | return err; | 137 | return err; |
135 | } | 138 | } |
136 | 139 | ||
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 2ee54133efa1..82064e909784 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | |||
@@ -462,25 +462,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac) | |||
462 | * @dev_addr: optional device address. | 462 | * @dev_addr: optional device address. |
463 | * | 463 | * |
464 | * P2P needs mac addresses for P2P device and interface. If no device | 464 | * P2P needs mac addresses for P2P device and interface. If no device |
465 | * address it specified, these are derived from the primary net device, ie. | 465 | * address it specified, these are derived from a random ethernet |
466 | * the permanent ethernet address of the device. | 466 | * address. |
467 | */ | 467 | */ |
468 | static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr) | 468 | static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr) |
469 | { | 469 | { |
470 | struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; | 470 | bool random_addr = false; |
471 | bool local_admin = false; | ||
472 | 471 | ||
473 | if (!dev_addr || is_zero_ether_addr(dev_addr)) { | 472 | if (!dev_addr || is_zero_ether_addr(dev_addr)) |
474 | dev_addr = pri_ifp->mac_addr; | 473 | random_addr = true; |
475 | local_admin = true; | ||
476 | } | ||
477 | 474 | ||
478 | /* Generate the P2P Device Address. This consists of the device's | 475 | /* Generate the P2P Device Address obtaining a random ethernet |
479 | * primary MAC address with the locally administered bit set. | 476 | * address with the locally administered bit set. |
480 | */ | 477 | */ |
481 | memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); | 478 | if (random_addr) |
482 | if (local_admin) | 479 | eth_random_addr(p2p->dev_addr); |
483 | p2p->dev_addr[0] |= 0x02; | 480 | else |
481 | memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); | ||
484 | 482 | ||
485 | /* Generate the P2P Interface Address. If the discovery and connection | 483 | /* Generate the P2P Interface Address. If the discovery and connection |
486 | * BSSCFGs need to simultaneously co-exist, then this address must be | 484 | * BSSCFGs need to simultaneously co-exist, then this address must be |
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index c5f2ddf9b0fe..e5a2fc738ac3 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig | |||
@@ -91,7 +91,6 @@ config IWLWIFI_BCAST_FILTERING | |||
91 | config IWLWIFI_PCIE_RTPM | 91 | config IWLWIFI_PCIE_RTPM |
92 | bool "Enable runtime power management mode for PCIe devices" | 92 | bool "Enable runtime power management mode for PCIe devices" |
93 | depends on IWLMVM && PM && EXPERT | 93 | depends on IWLMVM && PM && EXPERT |
94 | default false | ||
95 | help | 94 | help |
96 | Say Y here to enable runtime power management for PCIe | 95 | Say Y here to enable runtime power management for PCIe |
97 | devices. If enabled, the device will go into low power mode | 96 | devices. If enabled, the device will go into low power mode |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h index 3721a3ed358b..f824bebceb06 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h | |||
@@ -211,7 +211,7 @@ enum { | |||
211 | * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end | 211 | * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end |
212 | * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. | 212 | * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. |
213 | * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. | 213 | * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. |
214 | * @T2_V2_START_IMMEDIATELY: start time event immediately | 214 | * @TE_V2_START_IMMEDIATELY: start time event immediately |
215 | * @TE_V2_DEP_OTHER: depends on another time event | 215 | * @TE_V2_DEP_OTHER: depends on another time event |
216 | * @TE_V2_DEP_TSF: depends on a specific time | 216 | * @TE_V2_DEP_TSF: depends on a specific time |
217 | * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC | 217 | * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC |
@@ -230,7 +230,7 @@ enum iwl_time_event_policy { | |||
230 | TE_V2_NOTIF_HOST_FRAG_END = BIT(5), | 230 | TE_V2_NOTIF_HOST_FRAG_END = BIT(5), |
231 | TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), | 231 | TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), |
232 | TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), | 232 | TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), |
233 | T2_V2_START_IMMEDIATELY = BIT(11), | 233 | TE_V2_START_IMMEDIATELY = BIT(11), |
234 | 234 | ||
235 | /* placement characteristics */ | 235 | /* placement characteristics */ |
236 | TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), | 236 | TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 67aefc8fc9ac..7bd704a3e640 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. |
9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
10 | * Copyright(c) 2015 - 2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2015 - 2017 Intel Deutschland GmbH |
11 | * Copyright(c) 2018 Intel Corporation | ||
11 | * | 12 | * |
12 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of version 2 of the GNU General Public License as | 14 | * it under the terms of version 2 of the GNU General Public License as |
@@ -33,6 +34,7 @@ | |||
33 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. | 34 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. |
34 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 35 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
35 | * Copyright(c) 2015 - 2017 Intel Deutschland GmbH | 36 | * Copyright(c) 2015 - 2017 Intel Deutschland GmbH |
37 | * Copyright(c) 2018 Intel Corporation | ||
36 | * All rights reserved. | 38 | * All rights reserved. |
37 | * | 39 | * |
38 | * Redistribution and use in source and binary forms, with or without | 40 | * Redistribution and use in source and binary forms, with or without |
@@ -942,7 +944,6 @@ dump_trans_data: | |||
942 | 944 | ||
943 | out: | 945 | out: |
944 | iwl_fw_free_dump_desc(fwrt); | 946 | iwl_fw_free_dump_desc(fwrt); |
945 | fwrt->dump.trig = NULL; | ||
946 | clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); | 947 | clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); |
947 | IWL_DEBUG_INFO(fwrt, "WRT dump done\n"); | 948 | IWL_DEBUG_INFO(fwrt, "WRT dump done\n"); |
948 | } | 949 | } |
@@ -1112,6 +1113,14 @@ void iwl_fw_error_dump_wk(struct work_struct *work) | |||
1112 | fwrt->ops->dump_start(fwrt->ops_ctx)) | 1113 | fwrt->ops->dump_start(fwrt->ops_ctx)) |
1113 | return; | 1114 | return; |
1114 | 1115 | ||
1116 | if (fwrt->ops && fwrt->ops->fw_running && | ||
1117 | !fwrt->ops->fw_running(fwrt->ops_ctx)) { | ||
1118 | IWL_ERR(fwrt, "Firmware not running - cannot dump error\n"); | ||
1119 | iwl_fw_free_dump_desc(fwrt); | ||
1120 | clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); | ||
1121 | goto out; | ||
1122 | } | ||
1123 | |||
1115 | if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { | 1124 | if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { |
1116 | /* stop recording */ | 1125 | /* stop recording */ |
1117 | iwl_fw_dbg_stop_recording(fwrt); | 1126 | iwl_fw_dbg_stop_recording(fwrt); |
@@ -1145,7 +1154,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work) | |||
1145 | iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl); | 1154 | iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl); |
1146 | } | 1155 | } |
1147 | } | 1156 | } |
1148 | 1157 | out: | |
1149 | if (fwrt->ops && fwrt->ops->dump_end) | 1158 | if (fwrt->ops && fwrt->ops->dump_end) |
1150 | fwrt->ops->dump_end(fwrt->ops_ctx); | 1159 | fwrt->ops->dump_end(fwrt->ops_ctx); |
1151 | } | 1160 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 223fb77a3aa9..72259bff9922 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. |
9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
10 | * Copyright(c) 2015 - 2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2015 - 2017 Intel Deutschland GmbH |
11 | * Copyright(c) 2018 Intel Corporation | ||
11 | * | 12 | * |
12 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of version 2 of the GNU General Public License as | 14 | * it under the terms of version 2 of the GNU General Public License as |
@@ -33,6 +34,7 @@ | |||
33 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. | 34 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. |
34 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 35 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
35 | * Copyright(c) 2015 - 2017 Intel Deutschland GmbH | 36 | * Copyright(c) 2015 - 2017 Intel Deutschland GmbH |
37 | * Copyright(c) 2018 Intel Corporation | ||
36 | * All rights reserved. | 38 | * All rights reserved. |
37 | * | 39 | * |
38 | * Redistribution and use in source and binary forms, with or without | 40 | * Redistribution and use in source and binary forms, with or without |
@@ -91,6 +93,7 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) | |||
91 | if (fwrt->dump.desc != &iwl_dump_desc_assert) | 93 | if (fwrt->dump.desc != &iwl_dump_desc_assert) |
92 | kfree(fwrt->dump.desc); | 94 | kfree(fwrt->dump.desc); |
93 | fwrt->dump.desc = NULL; | 95 | fwrt->dump.desc = NULL; |
96 | fwrt->dump.trig = NULL; | ||
94 | } | 97 | } |
95 | 98 | ||
96 | void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); | 99 | void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h index e57ff92a68ae..3da468d2cc92 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h | |||
@@ -75,6 +75,20 @@ static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) | |||
75 | cancel_delayed_work_sync(&fwrt->timestamp.wk); | 75 | cancel_delayed_work_sync(&fwrt->timestamp.wk); |
76 | } | 76 | } |
77 | 77 | ||
78 | static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) | ||
79 | { | ||
80 | cancel_delayed_work_sync(&fwrt->timestamp.wk); | ||
81 | } | ||
82 | |||
83 | static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) | ||
84 | { | ||
85 | if (!fwrt->timestamp.delay) | ||
86 | return; | ||
87 | |||
88 | schedule_delayed_work(&fwrt->timestamp.wk, | ||
89 | round_jiffies_relative(fwrt->timestamp.delay)); | ||
90 | } | ||
91 | |||
78 | #else | 92 | #else |
79 | static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, | 93 | static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, |
80 | struct dentry *dbgfs_dir) | 94 | struct dentry *dbgfs_dir) |
@@ -84,4 +98,8 @@ static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, | |||
84 | 98 | ||
85 | static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} | 99 | static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} |
86 | 100 | ||
101 | static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {} | ||
102 | |||
103 | static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} | ||
104 | |||
87 | #endif /* CONFIG_IWLWIFI_DEBUGFS */ | 105 | #endif /* CONFIG_IWLWIFI_DEBUGFS */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index c39fe84bb4c4..2efac307909e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c | |||
@@ -77,8 +77,14 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, | |||
77 | } | 77 | } |
78 | IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); | 78 | IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); |
79 | 79 | ||
80 | void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt) | 80 | void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt) |
81 | { | 81 | { |
82 | iwl_fw_cancel_timestamp(fwrt); | 82 | iwl_fw_suspend_timestamp(fwrt); |
83 | } | 83 | } |
84 | IWL_EXPORT_SYMBOL(iwl_fw_runtime_exit); | 84 | IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend); |
85 | |||
86 | void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt) | ||
87 | { | ||
88 | iwl_fw_resume_timestamp(fwrt); | ||
89 | } | ||
90 | IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume); | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index e25c049f980f..3fb940ebd74a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h | |||
@@ -6,6 +6,7 @@ | |||
6 | * GPL LICENSE SUMMARY | 6 | * GPL LICENSE SUMMARY |
7 | * | 7 | * |
8 | * Copyright(c) 2017 Intel Deutschland GmbH | 8 | * Copyright(c) 2017 Intel Deutschland GmbH |
9 | * Copyright(c) 2018 Intel Corporation | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of version 2 of the GNU General Public License as | 12 | * it under the terms of version 2 of the GNU General Public License as |
@@ -26,6 +27,7 @@ | |||
26 | * BSD LICENSE | 27 | * BSD LICENSE |
27 | * | 28 | * |
28 | * Copyright(c) 2017 Intel Deutschland GmbH | 29 | * Copyright(c) 2017 Intel Deutschland GmbH |
30 | * Copyright(c) 2018 Intel Corporation | ||
29 | * All rights reserved. | 31 | * All rights reserved. |
30 | * | 32 | * |
31 | * Redistribution and use in source and binary forms, with or without | 33 | * Redistribution and use in source and binary forms, with or without |
@@ -68,6 +70,7 @@ | |||
68 | struct iwl_fw_runtime_ops { | 70 | struct iwl_fw_runtime_ops { |
69 | int (*dump_start)(void *ctx); | 71 | int (*dump_start)(void *ctx); |
70 | void (*dump_end)(void *ctx); | 72 | void (*dump_end)(void *ctx); |
73 | bool (*fw_running)(void *ctx); | ||
71 | }; | 74 | }; |
72 | 75 | ||
73 | #define MAX_NUM_LMAC 2 | 76 | #define MAX_NUM_LMAC 2 |
@@ -150,6 +153,10 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, | |||
150 | 153 | ||
151 | void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt); | 154 | void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt); |
152 | 155 | ||
156 | void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); | ||
157 | |||
158 | void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt); | ||
159 | |||
153 | static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, | 160 | static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, |
154 | enum iwl_ucode_type cur_fw_img) | 161 | enum iwl_ucode_type cur_fw_img) |
155 | { | 162 | { |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 0e6cf39285f4..2efe9b099556 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c | |||
@@ -1098,6 +1098,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) | |||
1098 | /* make sure the d0i3 exit work is not pending */ | 1098 | /* make sure the d0i3 exit work is not pending */ |
1099 | flush_work(&mvm->d0i3_exit_work); | 1099 | flush_work(&mvm->d0i3_exit_work); |
1100 | 1100 | ||
1101 | iwl_fw_runtime_suspend(&mvm->fwrt); | ||
1102 | |||
1101 | ret = iwl_trans_suspend(trans); | 1103 | ret = iwl_trans_suspend(trans); |
1102 | if (ret) | 1104 | if (ret) |
1103 | return ret; | 1105 | return ret; |
@@ -2012,6 +2014,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw) | |||
2012 | 2014 | ||
2013 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; | 2015 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; |
2014 | 2016 | ||
2017 | iwl_fw_runtime_resume(&mvm->fwrt); | ||
2018 | |||
2015 | return ret; | 2019 | return ret; |
2016 | } | 2020 | } |
2017 | 2021 | ||
@@ -2038,6 +2042,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) | |||
2038 | 2042 | ||
2039 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; | 2043 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; |
2040 | 2044 | ||
2045 | iwl_fw_runtime_suspend(&mvm->fwrt); | ||
2046 | |||
2041 | /* start pseudo D3 */ | 2047 | /* start pseudo D3 */ |
2042 | rtnl_lock(); | 2048 | rtnl_lock(); |
2043 | err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); | 2049 | err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); |
@@ -2098,6 +2104,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) | |||
2098 | __iwl_mvm_resume(mvm, true); | 2104 | __iwl_mvm_resume(mvm, true); |
2099 | rtnl_unlock(); | 2105 | rtnl_unlock(); |
2100 | 2106 | ||
2107 | iwl_fw_runtime_resume(&mvm->fwrt); | ||
2108 | |||
2101 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; | 2109 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; |
2102 | 2110 | ||
2103 | iwl_abort_notification_waits(&mvm->notif_wait); | 2111 | iwl_abort_notification_waits(&mvm->notif_wait); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index a7892c1254a2..9c436d8d001d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
11 | * Copyright(c) 2018 Intel Corporation | ||
11 | * | 12 | * |
12 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of version 2 of the GNU General Public License as | 14 | * it under the terms of version 2 of the GNU General Public License as |
@@ -35,6 +36,7 @@ | |||
35 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 36 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
36 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 37 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
37 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 38 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
39 | * Copyright(c) 2018 Intel Corporation | ||
38 | * All rights reserved. | 40 | * All rights reserved. |
39 | * | 41 | * |
40 | * Redistribution and use in source and binary forms, with or without | 42 | * Redistribution and use in source and binary forms, with or without |
@@ -1281,9 +1283,6 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, | |||
1281 | { | 1283 | { |
1282 | int ret; | 1284 | int ret; |
1283 | 1285 | ||
1284 | if (!iwl_mvm_firmware_running(mvm)) | ||
1285 | return -EIO; | ||
1286 | |||
1287 | ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); | 1286 | ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); |
1288 | if (ret) | 1287 | if (ret) |
1289 | return ret; | 1288 | return ret; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 2f22e14e00fe..8ba16fc24e3a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | |||
@@ -438,7 +438,8 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
438 | } | 438 | } |
439 | 439 | ||
440 | /* Allocate the CAB queue for softAP and GO interfaces */ | 440 | /* Allocate the CAB queue for softAP and GO interfaces */ |
441 | if (vif->type == NL80211_IFTYPE_AP) { | 441 | if (vif->type == NL80211_IFTYPE_AP || |
442 | vif->type == NL80211_IFTYPE_ADHOC) { | ||
442 | /* | 443 | /* |
443 | * For TVQM this will be overwritten later with the FW assigned | 444 | * For TVQM this will be overwritten later with the FW assigned |
444 | * queue value (when queue is enabled). | 445 | * queue value (when queue is enabled). |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 8aed40a8bc38..ebf511150f4d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
11 | * Copyright(c) 2018 Intel Corporation | ||
11 | * | 12 | * |
12 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of version 2 of the GNU General Public License as | 14 | * it under the terms of version 2 of the GNU General Public License as |
@@ -2106,15 +2107,40 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, | |||
2106 | if (ret) | 2107 | if (ret) |
2107 | goto out_remove; | 2108 | goto out_remove; |
2108 | 2109 | ||
2109 | ret = iwl_mvm_add_mcast_sta(mvm, vif); | 2110 | /* |
2110 | if (ret) | 2111 | * This is not very nice, but the simplest: |
2111 | goto out_unbind; | 2112 | * For older FWs adding the mcast sta before the bcast station may |
2112 | 2113 | * cause assert 0x2b00. | |
2113 | /* Send the bcast station. At this stage the TBTT and DTIM time events | 2114 | * This is fixed in later FW so make the order of removal depend on |
2114 | * are added and applied to the scheduler */ | 2115 | * the TLV |
2115 | ret = iwl_mvm_send_add_bcast_sta(mvm, vif); | 2116 | */ |
2116 | if (ret) | 2117 | if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { |
2117 | goto out_rm_mcast; | 2118 | ret = iwl_mvm_add_mcast_sta(mvm, vif); |
2119 | if (ret) | ||
2120 | goto out_unbind; | ||
2121 | /* | ||
2122 | * Send the bcast station. At this stage the TBTT and DTIM time | ||
2123 | * events are added and applied to the scheduler | ||
2124 | */ | ||
2125 | ret = iwl_mvm_send_add_bcast_sta(mvm, vif); | ||
2126 | if (ret) { | ||
2127 | iwl_mvm_rm_mcast_sta(mvm, vif); | ||
2128 | goto out_unbind; | ||
2129 | } | ||
2130 | } else { | ||
2131 | /* | ||
2132 | * Send the bcast station. At this stage the TBTT and DTIM time | ||
2133 | * events are added and applied to the scheduler | ||
2134 | */ | ||
2135 | iwl_mvm_send_add_bcast_sta(mvm, vif); | ||
2136 | if (ret) | ||
2137 | goto out_unbind; | ||
2138 | iwl_mvm_add_mcast_sta(mvm, vif); | ||
2139 | if (ret) { | ||
2140 | iwl_mvm_send_rm_bcast_sta(mvm, vif); | ||
2141 | goto out_unbind; | ||
2142 | } | ||
2143 | } | ||
2118 | 2144 | ||
2119 | /* must be set before quota calculations */ | 2145 | /* must be set before quota calculations */ |
2120 | mvmvif->ap_ibss_active = true; | 2146 | mvmvif->ap_ibss_active = true; |
@@ -2144,7 +2170,6 @@ out_quota_failed: | |||
2144 | iwl_mvm_power_update_mac(mvm); | 2170 | iwl_mvm_power_update_mac(mvm); |
2145 | mvmvif->ap_ibss_active = false; | 2171 | mvmvif->ap_ibss_active = false; |
2146 | iwl_mvm_send_rm_bcast_sta(mvm, vif); | 2172 | iwl_mvm_send_rm_bcast_sta(mvm, vif); |
2147 | out_rm_mcast: | ||
2148 | iwl_mvm_rm_mcast_sta(mvm, vif); | 2173 | iwl_mvm_rm_mcast_sta(mvm, vif); |
2149 | out_unbind: | 2174 | out_unbind: |
2150 | iwl_mvm_binding_remove_vif(mvm, vif); | 2175 | iwl_mvm_binding_remove_vif(mvm, vif); |
@@ -2682,6 +2707,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, | |||
2682 | 2707 | ||
2683 | /* enable beacon filtering */ | 2708 | /* enable beacon filtering */ |
2684 | WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); | 2709 | WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); |
2710 | |||
2711 | iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, | ||
2712 | false); | ||
2713 | |||
2685 | ret = 0; | 2714 | ret = 0; |
2686 | } else if (old_state == IEEE80211_STA_AUTHORIZED && | 2715 | } else if (old_state == IEEE80211_STA_AUTHORIZED && |
2687 | new_state == IEEE80211_STA_ASSOC) { | 2716 | new_state == IEEE80211_STA_ASSOC) { |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 2d28e0804218..89ff02d7c876 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | |||
@@ -90,6 +90,7 @@ | |||
90 | #include "fw/runtime.h" | 90 | #include "fw/runtime.h" |
91 | #include "fw/dbg.h" | 91 | #include "fw/dbg.h" |
92 | #include "fw/acpi.h" | 92 | #include "fw/acpi.h" |
93 | #include "fw/debugfs.h" | ||
93 | 94 | ||
94 | #define IWL_MVM_MAX_ADDRESSES 5 | 95 | #define IWL_MVM_MAX_ADDRESSES 5 |
95 | /* RSSI offset for WkP */ | 96 | /* RSSI offset for WkP */ |
@@ -1783,6 +1784,7 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) | |||
1783 | 1784 | ||
1784 | static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) | 1785 | static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) |
1785 | { | 1786 | { |
1787 | iwl_fw_cancel_timestamp(&mvm->fwrt); | ||
1786 | iwl_free_fw_paging(&mvm->fwrt); | 1788 | iwl_free_fw_paging(&mvm->fwrt); |
1787 | clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); | 1789 | clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); |
1788 | iwl_fw_dump_conf_clear(&mvm->fwrt); | 1790 | iwl_fw_dump_conf_clear(&mvm->fwrt); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 5d525a0023dc..ab7fb5aad984 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
11 | * Copyright(c) 2018 Intel Corporation | ||
11 | * | 12 | * |
12 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of version 2 of the GNU General Public License as | 14 | * it under the terms of version 2 of the GNU General Public License as |
@@ -35,6 +36,7 @@ | |||
35 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 36 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
36 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 37 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
37 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | 38 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
39 | * Copyright(c) 2018 Intel Corporation | ||
38 | * All rights reserved. | 40 | * All rights reserved. |
39 | * | 41 | * |
40 | * Redistribution and use in source and binary forms, with or without | 42 | * Redistribution and use in source and binary forms, with or without |
@@ -552,9 +554,15 @@ static void iwl_mvm_fwrt_dump_end(void *ctx) | |||
552 | iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); | 554 | iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); |
553 | } | 555 | } |
554 | 556 | ||
557 | static bool iwl_mvm_fwrt_fw_running(void *ctx) | ||
558 | { | ||
559 | return iwl_mvm_firmware_running(ctx); | ||
560 | } | ||
561 | |||
555 | static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { | 562 | static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { |
556 | .dump_start = iwl_mvm_fwrt_dump_start, | 563 | .dump_start = iwl_mvm_fwrt_dump_start, |
557 | .dump_end = iwl_mvm_fwrt_dump_end, | 564 | .dump_end = iwl_mvm_fwrt_dump_end, |
565 | .fw_running = iwl_mvm_fwrt_fw_running, | ||
558 | }; | 566 | }; |
559 | 567 | ||
560 | static struct iwl_op_mode * | 568 | static struct iwl_op_mode * |
@@ -802,7 +810,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, | |||
802 | iwl_mvm_leds_exit(mvm); | 810 | iwl_mvm_leds_exit(mvm); |
803 | iwl_mvm_thermal_exit(mvm); | 811 | iwl_mvm_thermal_exit(mvm); |
804 | out_free: | 812 | out_free: |
805 | iwl_fw_runtime_exit(&mvm->fwrt); | ||
806 | iwl_fw_flush_dump(&mvm->fwrt); | 813 | iwl_fw_flush_dump(&mvm->fwrt); |
807 | 814 | ||
808 | if (iwlmvm_mod_params.init_dbg) | 815 | if (iwlmvm_mod_params.init_dbg) |
@@ -843,7 +850,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) | |||
843 | #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) | 850 | #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) |
844 | kfree(mvm->d3_resume_sram); | 851 | kfree(mvm->d3_resume_sram); |
845 | #endif | 852 | #endif |
846 | iwl_fw_runtime_exit(&mvm->fwrt); | ||
847 | iwl_trans_op_mode_leave(mvm->trans); | 853 | iwl_trans_op_mode_leave(mvm->trans); |
848 | 854 | ||
849 | iwl_phy_db_free(mvm->phy_db); | 855 | iwl_phy_db_free(mvm->phy_db); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 60abb0084ee5..47f4c7a1d80d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c | |||
@@ -2684,7 +2684,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, | |||
2684 | struct ieee80211_sta *sta, | 2684 | struct ieee80211_sta *sta, |
2685 | struct iwl_lq_sta *lq_sta, | 2685 | struct iwl_lq_sta *lq_sta, |
2686 | enum nl80211_band band, | 2686 | enum nl80211_band band, |
2687 | struct rs_rate *rate) | 2687 | struct rs_rate *rate, |
2688 | bool init) | ||
2688 | { | 2689 | { |
2689 | int i, nentries; | 2690 | int i, nentries; |
2690 | unsigned long active_rate; | 2691 | unsigned long active_rate; |
@@ -2738,14 +2739,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, | |||
2738 | */ | 2739 | */ |
2739 | if (sta->vht_cap.vht_supported && | 2740 | if (sta->vht_cap.vht_supported && |
2740 | best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { | 2741 | best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { |
2741 | switch (sta->bandwidth) { | 2742 | /* |
2742 | case IEEE80211_STA_RX_BW_160: | 2743 | * In AP mode, when a new station associates, rs is initialized |
2743 | case IEEE80211_STA_RX_BW_80: | 2744 | * immediately upon association completion, before the phy |
2744 | case IEEE80211_STA_RX_BW_40: | 2745 | * context is updated with the association parameters, so the |
2746 | * sta bandwidth might be wider than the phy context allows. | ||
2747 | * To avoid this issue, always initialize rs with 20mhz | ||
2748 | * bandwidth rate, and after authorization, when the phy context | ||
2749 | * is already up-to-date, re-init rs with the correct bw. | ||
2750 | */ | ||
2751 | u32 bw = init ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta); | ||
2752 | |||
2753 | switch (bw) { | ||
2754 | case RATE_MCS_CHAN_WIDTH_40: | ||
2755 | case RATE_MCS_CHAN_WIDTH_80: | ||
2756 | case RATE_MCS_CHAN_WIDTH_160: | ||
2745 | initial_rates = rs_optimal_rates_vht; | 2757 | initial_rates = rs_optimal_rates_vht; |
2746 | nentries = ARRAY_SIZE(rs_optimal_rates_vht); | 2758 | nentries = ARRAY_SIZE(rs_optimal_rates_vht); |
2747 | break; | 2759 | break; |
2748 | case IEEE80211_STA_RX_BW_20: | 2760 | case RATE_MCS_CHAN_WIDTH_20: |
2749 | initial_rates = rs_optimal_rates_vht_20mhz; | 2761 | initial_rates = rs_optimal_rates_vht_20mhz; |
2750 | nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); | 2762 | nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); |
2751 | break; | 2763 | break; |
@@ -2756,7 +2768,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, | |||
2756 | 2768 | ||
2757 | active_rate = lq_sta->active_siso_rate; | 2769 | active_rate = lq_sta->active_siso_rate; |
2758 | rate->type = LQ_VHT_SISO; | 2770 | rate->type = LQ_VHT_SISO; |
2759 | rate->bw = rs_bw_from_sta_bw(sta); | 2771 | rate->bw = bw; |
2760 | } else if (sta->ht_cap.ht_supported && | 2772 | } else if (sta->ht_cap.ht_supported && |
2761 | best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { | 2773 | best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { |
2762 | initial_rates = rs_optimal_rates_ht; | 2774 | initial_rates = rs_optimal_rates_ht; |
@@ -2839,7 +2851,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, | |||
2839 | tbl = &(lq_sta->lq_info[active_tbl]); | 2851 | tbl = &(lq_sta->lq_info[active_tbl]); |
2840 | rate = &tbl->rate; | 2852 | rate = &tbl->rate; |
2841 | 2853 | ||
2842 | rs_get_initial_rate(mvm, sta, lq_sta, band, rate); | 2854 | rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init); |
2843 | rs_init_optimal_rate(mvm, sta, lq_sta); | 2855 | rs_init_optimal_rate(mvm, sta, lq_sta); |
2844 | 2856 | ||
2845 | WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B, | 2857 | WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index a3f7c1bf3cc8..580de5851fc7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | |||
@@ -71,6 +71,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
71 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | 71 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
72 | struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); | 72 | struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); |
73 | struct iwl_mvm_key_pn *ptk_pn; | 73 | struct iwl_mvm_key_pn *ptk_pn; |
74 | int res; | ||
74 | u8 tid, keyidx; | 75 | u8 tid, keyidx; |
75 | u8 pn[IEEE80211_CCMP_PN_LEN]; | 76 | u8 pn[IEEE80211_CCMP_PN_LEN]; |
76 | u8 *extiv; | 77 | u8 *extiv; |
@@ -127,12 +128,13 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
127 | pn[4] = extiv[1]; | 128 | pn[4] = extiv[1]; |
128 | pn[5] = extiv[0]; | 129 | pn[5] = extiv[0]; |
129 | 130 | ||
130 | if (memcmp(pn, ptk_pn->q[queue].pn[tid], | 131 | res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN); |
131 | IEEE80211_CCMP_PN_LEN) <= 0) | 132 | if (res < 0) |
133 | return -1; | ||
134 | if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN)) | ||
132 | return -1; | 135 | return -1; |
133 | 136 | ||
134 | if (!(stats->flag & RX_FLAG_AMSDU_MORE)) | 137 | memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); |
135 | memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); | ||
136 | stats->flag |= RX_FLAG_PN_VALIDATED; | 138 | stats->flag |= RX_FLAG_PN_VALIDATED; |
137 | 139 | ||
138 | return 0; | 140 | return 0; |
@@ -314,28 +316,21 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, | |||
314 | } | 316 | } |
315 | 317 | ||
316 | /* | 318 | /* |
317 | * returns true if a packet outside BA session is a duplicate and | 319 | * returns true if a packet is a duplicate and should be dropped. |
318 | * should be dropped | 320 | * Updates AMSDU PN tracking info |
319 | */ | 321 | */ |
320 | static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, | 322 | static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, |
321 | struct ieee80211_rx_status *rx_status, | 323 | struct ieee80211_rx_status *rx_status, |
322 | struct ieee80211_hdr *hdr, | 324 | struct ieee80211_hdr *hdr, |
323 | struct iwl_rx_mpdu_desc *desc) | 325 | struct iwl_rx_mpdu_desc *desc) |
324 | { | 326 | { |
325 | struct iwl_mvm_sta *mvm_sta; | 327 | struct iwl_mvm_sta *mvm_sta; |
326 | struct iwl_mvm_rxq_dup_data *dup_data; | 328 | struct iwl_mvm_rxq_dup_data *dup_data; |
327 | u8 baid, tid, sub_frame_idx; | 329 | u8 tid, sub_frame_idx; |
328 | 330 | ||
329 | if (WARN_ON(IS_ERR_OR_NULL(sta))) | 331 | if (WARN_ON(IS_ERR_OR_NULL(sta))) |
330 | return false; | 332 | return false; |
331 | 333 | ||
332 | baid = (le32_to_cpu(desc->reorder_data) & | ||
333 | IWL_RX_MPDU_REORDER_BAID_MASK) >> | ||
334 | IWL_RX_MPDU_REORDER_BAID_SHIFT; | ||
335 | |||
336 | if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) | ||
337 | return false; | ||
338 | |||
339 | mvm_sta = iwl_mvm_sta_from_mac80211(sta); | 334 | mvm_sta = iwl_mvm_sta_from_mac80211(sta); |
340 | dup_data = &mvm_sta->dup_data[queue]; | 335 | dup_data = &mvm_sta->dup_data[queue]; |
341 | 336 | ||
@@ -365,6 +360,12 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, | |||
365 | dup_data->last_sub_frame[tid] >= sub_frame_idx)) | 360 | dup_data->last_sub_frame[tid] >= sub_frame_idx)) |
366 | return true; | 361 | return true; |
367 | 362 | ||
363 | /* Allow same PN as the first subframe for following sub frames */ | ||
364 | if (dup_data->last_seq[tid] == hdr->seq_ctrl && | ||
365 | sub_frame_idx > dup_data->last_sub_frame[tid] && | ||
366 | desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) | ||
367 | rx_status->flag |= RX_FLAG_ALLOW_SAME_PN; | ||
368 | |||
368 | dup_data->last_seq[tid] = hdr->seq_ctrl; | 369 | dup_data->last_seq[tid] = hdr->seq_ctrl; |
369 | dup_data->last_sub_frame[tid] = sub_frame_idx; | 370 | dup_data->last_sub_frame[tid] = sub_frame_idx; |
370 | 371 | ||
@@ -971,7 +972,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, | |||
971 | if (ieee80211_is_data(hdr->frame_control)) | 972 | if (ieee80211_is_data(hdr->frame_control)) |
972 | iwl_mvm_rx_csum(sta, skb, desc); | 973 | iwl_mvm_rx_csum(sta, skb, desc); |
973 | 974 | ||
974 | if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { | 975 | if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) { |
975 | kfree_skb(skb); | 976 | kfree_skb(skb); |
976 | goto out; | 977 | goto out; |
977 | } | 978 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 6b2674e02606..630e23cb0ffb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c | |||
@@ -2039,7 +2039,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
2039 | struct iwl_trans_txq_scd_cfg cfg = { | 2039 | struct iwl_trans_txq_scd_cfg cfg = { |
2040 | .fifo = IWL_MVM_TX_FIFO_MCAST, | 2040 | .fifo = IWL_MVM_TX_FIFO_MCAST, |
2041 | .sta_id = msta->sta_id, | 2041 | .sta_id = msta->sta_id, |
2042 | .tid = IWL_MAX_TID_COUNT, | 2042 | .tid = 0, |
2043 | .aggregate = false, | 2043 | .aggregate = false, |
2044 | .frame_limit = IWL_FRAME_LIMIT, | 2044 | .frame_limit = IWL_FRAME_LIMIT, |
2045 | }; | 2045 | }; |
@@ -2053,6 +2053,17 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
2053 | return -ENOTSUPP; | 2053 | return -ENOTSUPP; |
2054 | 2054 | ||
2055 | /* | 2055 | /* |
2056 | * In IBSS, ieee80211_check_queues() sets the cab_queue to be | ||
2057 | * invalid, so make sure we use the queue we want. | ||
2058 | * Note that this is done here as we want to avoid making DQA | ||
2059 | * changes in mac80211 layer. | ||
2060 | */ | ||
2061 | if (vif->type == NL80211_IFTYPE_ADHOC) { | ||
2062 | vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; | ||
2063 | mvmvif->cab_queue = vif->cab_queue; | ||
2064 | } | ||
2065 | |||
2066 | /* | ||
2056 | * While in previous FWs we had to exclude cab queue from TFD queue | 2067 | * While in previous FWs we had to exclude cab queue from TFD queue |
2057 | * mask, now it is needed as any other queue. | 2068 | * mask, now it is needed as any other queue. |
2058 | */ | 2069 | */ |
@@ -2079,24 +2090,13 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
2079 | if (iwl_mvm_has_new_tx_api(mvm)) { | 2090 | if (iwl_mvm_has_new_tx_api(mvm)) { |
2080 | int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, | 2091 | int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, |
2081 | msta->sta_id, | 2092 | msta->sta_id, |
2082 | IWL_MAX_TID_COUNT, | 2093 | 0, |
2083 | timeout); | 2094 | timeout); |
2084 | mvmvif->cab_queue = queue; | 2095 | mvmvif->cab_queue = queue; |
2085 | } else if (!fw_has_api(&mvm->fw->ucode_capa, | 2096 | } else if (!fw_has_api(&mvm->fw->ucode_capa, |
2086 | IWL_UCODE_TLV_API_STA_TYPE)) { | 2097 | IWL_UCODE_TLV_API_STA_TYPE)) |
2087 | /* | ||
2088 | * In IBSS, ieee80211_check_queues() sets the cab_queue to be | ||
2089 | * invalid, so make sure we use the queue we want. | ||
2090 | * Note that this is done here as we want to avoid making DQA | ||
2091 | * changes in mac80211 layer. | ||
2092 | */ | ||
2093 | if (vif->type == NL80211_IFTYPE_ADHOC) { | ||
2094 | vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; | ||
2095 | mvmvif->cab_queue = vif->cab_queue; | ||
2096 | } | ||
2097 | iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, | 2098 | iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, |
2098 | &cfg, timeout); | 2099 | &cfg, timeout); |
2099 | } | ||
2100 | 2100 | ||
2101 | return 0; | 2101 | return 0; |
2102 | } | 2102 | } |
@@ -2115,7 +2115,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
2115 | iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); | 2115 | iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); |
2116 | 2116 | ||
2117 | iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, | 2117 | iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, |
2118 | IWL_MAX_TID_COUNT, 0); | 2118 | 0, 0); |
2119 | 2119 | ||
2120 | ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); | 2120 | ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); |
2121 | if (ret) | 2121 | if (ret) |
@@ -3170,8 +3170,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, | |||
3170 | int ret, size; | 3170 | int ret, size; |
3171 | u32 status; | 3171 | u32 status; |
3172 | 3172 | ||
3173 | /* This is a valid situation for GTK removal */ | ||
3173 | if (sta_id == IWL_MVM_INVALID_STA) | 3174 | if (sta_id == IWL_MVM_INVALID_STA) |
3174 | return -EINVAL; | 3175 | return 0; |
3175 | 3176 | ||
3176 | key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & | 3177 | key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & |
3177 | STA_KEY_FLG_KEYID_MSK); | 3178 | STA_KEY_FLG_KEYID_MSK); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 200ab50ec86b..acb217e666db 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | |||
@@ -616,7 +616,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, | |||
616 | time_cmd.repeat = 1; | 616 | time_cmd.repeat = 1; |
617 | time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | | 617 | time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | |
618 | TE_V2_NOTIF_HOST_EVENT_END | | 618 | TE_V2_NOTIF_HOST_EVENT_END | |
619 | T2_V2_START_IMMEDIATELY); | 619 | TE_V2_START_IMMEDIATELY); |
620 | 620 | ||
621 | if (!wait_for_notif) { | 621 | if (!wait_for_notif) { |
622 | iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); | 622 | iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); |
@@ -803,7 +803,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | |||
803 | time_cmd.repeat = 1; | 803 | time_cmd.repeat = 1; |
804 | time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | | 804 | time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | |
805 | TE_V2_NOTIF_HOST_EVENT_END | | 805 | TE_V2_NOTIF_HOST_EVENT_END | |
806 | T2_V2_START_IMMEDIATELY); | 806 | TE_V2_START_IMMEDIATELY); |
807 | 807 | ||
808 | return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); | 808 | return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); |
809 | } | 809 | } |
@@ -913,6 +913,8 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, | |||
913 | time_cmd.interval = cpu_to_le32(1); | 913 | time_cmd.interval = cpu_to_le32(1); |
914 | time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | | 914 | time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | |
915 | TE_V2_ABSENCE); | 915 | TE_V2_ABSENCE); |
916 | if (!apply_time) | ||
917 | time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY); | ||
916 | 918 | ||
917 | return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); | 919 | return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); |
918 | } | 920 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index dda77b327c98..af6dfceab6b8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
@@ -419,11 +419,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, | |||
419 | { | 419 | { |
420 | struct ieee80211_key_conf *keyconf = info->control.hw_key; | 420 | struct ieee80211_key_conf *keyconf = info->control.hw_key; |
421 | u8 *crypto_hdr = skb_frag->data + hdrlen; | 421 | u8 *crypto_hdr = skb_frag->data + hdrlen; |
422 | enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM; | ||
422 | u64 pn; | 423 | u64 pn; |
423 | 424 | ||
424 | switch (keyconf->cipher) { | 425 | switch (keyconf->cipher) { |
425 | case WLAN_CIPHER_SUITE_CCMP: | 426 | case WLAN_CIPHER_SUITE_CCMP: |
426 | case WLAN_CIPHER_SUITE_CCMP_256: | ||
427 | iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); | 427 | iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); |
428 | iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); | 428 | iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); |
429 | break; | 429 | break; |
@@ -447,13 +447,16 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, | |||
447 | break; | 447 | break; |
448 | case WLAN_CIPHER_SUITE_GCMP: | 448 | case WLAN_CIPHER_SUITE_GCMP: |
449 | case WLAN_CIPHER_SUITE_GCMP_256: | 449 | case WLAN_CIPHER_SUITE_GCMP_256: |
450 | type = TX_CMD_SEC_GCMP; | ||
451 | /* Fall through */ | ||
452 | case WLAN_CIPHER_SUITE_CCMP_256: | ||
450 | /* TODO: Taking the key from the table might introduce a race | 453 | /* TODO: Taking the key from the table might introduce a race |
451 | * when PTK rekeying is done, having an old packets with a PN | 454 | * when PTK rekeying is done, having an old packets with a PN |
452 | * based on the old key but the message encrypted with a new | 455 | * based on the old key but the message encrypted with a new |
453 | * one. | 456 | * one. |
454 | * Need to handle this. | 457 | * Need to handle this. |
455 | */ | 458 | */ |
456 | tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE; | 459 | tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE; |
457 | tx_cmd->key[0] = keyconf->hw_key_idx; | 460 | tx_cmd->key[0] = keyconf->hw_key_idx; |
458 | iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); | 461 | iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); |
459 | break; | 462 | break; |
@@ -645,7 +648,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) | |||
645 | if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || | 648 | if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || |
646 | info.control.vif->type == NL80211_IFTYPE_AP || | 649 | info.control.vif->type == NL80211_IFTYPE_AP || |
647 | info.control.vif->type == NL80211_IFTYPE_ADHOC) { | 650 | info.control.vif->type == NL80211_IFTYPE_ADHOC) { |
648 | sta_id = mvmvif->bcast_sta.sta_id; | 651 | if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE) |
652 | sta_id = mvmvif->bcast_sta.sta_id; | ||
653 | else | ||
654 | sta_id = mvmvif->mcast_sta.sta_id; | ||
655 | |||
649 | queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, | 656 | queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, |
650 | hdr->frame_control); | 657 | hdr->frame_control); |
651 | if (queue < 0) | 658 | if (queue < 0) |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 6d0a907d5ba5..fabae0f60683 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | |||
@@ -147,7 +147,7 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans, | |||
147 | /* Sanity check on number of chunks */ | 147 | /* Sanity check on number of chunks */ |
148 | num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd); | 148 | num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd); |
149 | 149 | ||
150 | if (num_tbs >= trans_pcie->max_tbs) { | 150 | if (num_tbs > trans_pcie->max_tbs) { |
151 | IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); | 151 | IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); |
152 | return; | 152 | return; |
153 | } | 153 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 3f85713c41dc..1a566287993d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c | |||
@@ -378,7 +378,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, | |||
378 | /* Sanity check on number of chunks */ | 378 | /* Sanity check on number of chunks */ |
379 | num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); | 379 | num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); |
380 | 380 | ||
381 | if (num_tbs >= trans_pcie->max_tbs) { | 381 | if (num_tbs > trans_pcie->max_tbs) { |
382 | IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); | 382 | IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); |
383 | /* @todo issue fatal error, it is quite serious situation */ | 383 | /* @todo issue fatal error, it is quite serious situation */ |
384 | return; | 384 | return; |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 1cf22e62e3dd..35b21f8152bb 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -2727,6 +2727,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, | |||
2727 | mutex_init(&data->mutex); | 2727 | mutex_init(&data->mutex); |
2728 | 2728 | ||
2729 | data->netgroup = hwsim_net_get_netgroup(net); | 2729 | data->netgroup = hwsim_net_get_netgroup(net); |
2730 | data->wmediumd = hwsim_net_get_wmediumd(net); | ||
2730 | 2731 | ||
2731 | /* Enable frame retransmissions for lossy channels */ | 2732 | /* Enable frame retransmissions for lossy channels */ |
2732 | hw->max_rates = 4; | 2733 | hw->max_rates = 4; |
@@ -3516,7 +3517,7 @@ static int __init init_mac80211_hwsim(void) | |||
3516 | 3517 | ||
3517 | spin_lock_init(&hwsim_radio_lock); | 3518 | spin_lock_init(&hwsim_radio_lock); |
3518 | 3519 | ||
3519 | hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0); | 3520 | hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0); |
3520 | if (!hwsim_wq) | 3521 | if (!hwsim_wq) |
3521 | return -ENOMEM; | 3522 | return -ENOMEM; |
3522 | rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); | 3523 | rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); |
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index f9ccd13c79f9..e7bbbc95cdb1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | |||
@@ -1125,7 +1125,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw) | |||
1125 | 1125 | ||
1126 | /* Configuration Space offset 0x70f BIT7 is used to control L0S */ | 1126 | /* Configuration Space offset 0x70f BIT7 is used to control L0S */ |
1127 | tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); | 1127 | tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); |
1128 | _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7)); | 1128 | _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) | |
1129 | ASPM_L1_LATENCY << 3); | ||
1129 | 1130 | ||
1130 | /* Configuration Space offset 0x719 Bit3 is for L1 | 1131 | /* Configuration Space offset 0x719 Bit3 is for L1 |
1131 | * BIT4 is for clock request | 1132 | * BIT4 is for clock request |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8328d395e332..3127bc8633ca 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -2005,7 +2005,10 @@ static void netback_changed(struct xenbus_device *dev, | |||
2005 | case XenbusStateInitialised: | 2005 | case XenbusStateInitialised: |
2006 | case XenbusStateReconfiguring: | 2006 | case XenbusStateReconfiguring: |
2007 | case XenbusStateReconfigured: | 2007 | case XenbusStateReconfigured: |
2008 | break; | ||
2009 | |||
2008 | case XenbusStateUnknown: | 2010 | case XenbusStateUnknown: |
2011 | wake_up_all(&module_unload_q); | ||
2009 | break; | 2012 | break; |
2010 | 2013 | ||
2011 | case XenbusStateInitWait: | 2014 | case XenbusStateInitWait: |
@@ -2136,7 +2139,9 @@ static int xennet_remove(struct xenbus_device *dev) | |||
2136 | xenbus_switch_state(dev, XenbusStateClosing); | 2139 | xenbus_switch_state(dev, XenbusStateClosing); |
2137 | wait_event(module_unload_q, | 2140 | wait_event(module_unload_q, |
2138 | xenbus_read_driver_state(dev->otherend) == | 2141 | xenbus_read_driver_state(dev->otherend) == |
2139 | XenbusStateClosing); | 2142 | XenbusStateClosing || |
2143 | xenbus_read_driver_state(dev->otherend) == | ||
2144 | XenbusStateUnknown); | ||
2140 | 2145 | ||
2141 | xenbus_switch_state(dev, XenbusStateClosed); | 2146 | xenbus_switch_state(dev, XenbusStateClosed); |
2142 | wait_event(module_unload_q, | 2147 | wait_event(module_unload_q, |
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 345acca576b3..1bd7b3734751 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c | |||
@@ -278,8 +278,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) | |||
278 | disk->queue = q; | 278 | disk->queue = q; |
279 | disk->flags = GENHD_FL_EXT_DEVT; | 279 | disk->flags = GENHD_FL_EXT_DEVT; |
280 | nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name); | 280 | nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name); |
281 | set_capacity(disk, 0); | ||
282 | device_add_disk(dev, disk); | ||
283 | 281 | ||
284 | if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk)) | 282 | if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk)) |
285 | return -ENOMEM; | 283 | return -ENOMEM; |
@@ -292,6 +290,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) | |||
292 | } | 290 | } |
293 | 291 | ||
294 | set_capacity(disk, available_disk_size >> SECTOR_SHIFT); | 292 | set_capacity(disk, available_disk_size >> SECTOR_SHIFT); |
293 | device_add_disk(dev, disk); | ||
295 | revalidate_disk(disk); | 294 | revalidate_disk(disk); |
296 | return 0; | 295 | return 0; |
297 | } | 296 | } |
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 2ef544f10ec8..4b95ac513de2 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c | |||
@@ -1545,8 +1545,6 @@ static int btt_blk_init(struct btt *btt) | |||
1545 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue); | 1545 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue); |
1546 | btt->btt_queue->queuedata = btt; | 1546 | btt->btt_queue->queuedata = btt; |
1547 | 1547 | ||
1548 | set_capacity(btt->btt_disk, 0); | ||
1549 | device_add_disk(&btt->nd_btt->dev, btt->btt_disk); | ||
1550 | if (btt_meta_size(btt)) { | 1548 | if (btt_meta_size(btt)) { |
1551 | int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt)); | 1549 | int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt)); |
1552 | 1550 | ||
@@ -1558,6 +1556,7 @@ static int btt_blk_init(struct btt *btt) | |||
1558 | } | 1556 | } |
1559 | } | 1557 | } |
1560 | set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); | 1558 | set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); |
1559 | device_add_disk(&btt->nd_btt->dev, btt->btt_disk); | ||
1561 | btt->nd_btt->size = btt->nlba * (u64)btt->sector_size; | 1560 | btt->nd_btt->size = btt->nlba * (u64)btt->sector_size; |
1562 | revalidate_disk(btt->btt_disk); | 1561 | revalidate_disk(btt->btt_disk); |
1563 | 1562 | ||
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index f5c4e8c6e29d..2f4d18752c97 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c | |||
@@ -304,7 +304,7 @@ static const struct attribute_group *nd_pfn_attribute_groups[] = { | |||
304 | struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn, | 304 | struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn, |
305 | struct nd_namespace_common *ndns) | 305 | struct nd_namespace_common *ndns) |
306 | { | 306 | { |
307 | struct device *dev = &nd_pfn->dev; | 307 | struct device *dev; |
308 | 308 | ||
309 | if (!nd_pfn) | 309 | if (!nd_pfn) |
310 | return NULL; | 310 | return NULL; |
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 10041ac4032c..06f8dcc52ca6 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
@@ -335,8 +335,7 @@ static int pmem_attach_disk(struct device *dev, | |||
335 | dev_warn(dev, "unable to guarantee persistence of writes\n"); | 335 | dev_warn(dev, "unable to guarantee persistence of writes\n"); |
336 | fua = 0; | 336 | fua = 0; |
337 | } | 337 | } |
338 | wbc = nvdimm_has_cache(nd_region) && | 338 | wbc = nvdimm_has_cache(nd_region); |
339 | !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags); | ||
340 | 339 | ||
341 | if (!devm_request_mem_region(dev, res->start, resource_size(res), | 340 | if (!devm_request_mem_region(dev, res->start, resource_size(res), |
342 | dev_name(&ndns->dev))) { | 341 | dev_name(&ndns->dev))) { |
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index e6d01911e092..1593e1806b16 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c | |||
@@ -532,11 +532,13 @@ static ssize_t persistence_domain_show(struct device *dev, | |||
532 | struct device_attribute *attr, char *buf) | 532 | struct device_attribute *attr, char *buf) |
533 | { | 533 | { |
534 | struct nd_region *nd_region = to_nd_region(dev); | 534 | struct nd_region *nd_region = to_nd_region(dev); |
535 | unsigned long flags = nd_region->flags; | ||
536 | 535 | ||
537 | return sprintf(buf, "%s%s\n", | 536 | if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags)) |
538 | flags & BIT(ND_REGION_PERSIST_CACHE) ? "cpu_cache " : "", | 537 | return sprintf(buf, "cpu_cache\n"); |
539 | flags & BIT(ND_REGION_PERSIST_MEMCTRL) ? "memory_controller " : ""); | 538 | else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags)) |
539 | return sprintf(buf, "memory_controller\n"); | ||
540 | else | ||
541 | return sprintf(buf, "\n"); | ||
540 | } | 542 | } |
541 | static DEVICE_ATTR_RO(persistence_domain); | 543 | static DEVICE_ATTR_RO(persistence_domain); |
542 | 544 | ||
@@ -593,6 +595,13 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) | |||
593 | return 0; | 595 | return 0; |
594 | } | 596 | } |
595 | 597 | ||
598 | if (a == &dev_attr_persistence_domain.attr) { | ||
599 | if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE) | ||
600 | | BIT(ND_REGION_PERSIST_MEMCTRL))) == 0) | ||
601 | return 0; | ||
602 | return a->mode; | ||
603 | } | ||
604 | |||
596 | if (a != &dev_attr_set_cookie.attr | 605 | if (a != &dev_attr_set_cookie.attr |
597 | && a != &dev_attr_available_size.attr) | 606 | && a != &dev_attr_available_size.attr) |
598 | return a->mode; | 607 | return a->mode; |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f431c32774f3..7aeca5db7916 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -120,8 +120,12 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) | |||
120 | int ret; | 120 | int ret; |
121 | 121 | ||
122 | ret = nvme_reset_ctrl(ctrl); | 122 | ret = nvme_reset_ctrl(ctrl); |
123 | if (!ret) | 123 | if (!ret) { |
124 | flush_work(&ctrl->reset_work); | 124 | flush_work(&ctrl->reset_work); |
125 | if (ctrl->state != NVME_CTRL_LIVE) | ||
126 | ret = -ENETRESET; | ||
127 | } | ||
128 | |||
125 | return ret; | 129 | return ret; |
126 | } | 130 | } |
127 | EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync); | 131 | EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync); |
@@ -265,7 +269,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, | |||
265 | switch (new_state) { | 269 | switch (new_state) { |
266 | case NVME_CTRL_ADMIN_ONLY: | 270 | case NVME_CTRL_ADMIN_ONLY: |
267 | switch (old_state) { | 271 | switch (old_state) { |
268 | case NVME_CTRL_RECONNECTING: | 272 | case NVME_CTRL_CONNECTING: |
269 | changed = true; | 273 | changed = true; |
270 | /* FALLTHRU */ | 274 | /* FALLTHRU */ |
271 | default: | 275 | default: |
@@ -276,7 +280,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, | |||
276 | switch (old_state) { | 280 | switch (old_state) { |
277 | case NVME_CTRL_NEW: | 281 | case NVME_CTRL_NEW: |
278 | case NVME_CTRL_RESETTING: | 282 | case NVME_CTRL_RESETTING: |
279 | case NVME_CTRL_RECONNECTING: | 283 | case NVME_CTRL_CONNECTING: |
280 | changed = true; | 284 | changed = true; |
281 | /* FALLTHRU */ | 285 | /* FALLTHRU */ |
282 | default: | 286 | default: |
@@ -294,9 +298,9 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, | |||
294 | break; | 298 | break; |
295 | } | 299 | } |
296 | break; | 300 | break; |
297 | case NVME_CTRL_RECONNECTING: | 301 | case NVME_CTRL_CONNECTING: |
298 | switch (old_state) { | 302 | switch (old_state) { |
299 | case NVME_CTRL_LIVE: | 303 | case NVME_CTRL_NEW: |
300 | case NVME_CTRL_RESETTING: | 304 | case NVME_CTRL_RESETTING: |
301 | changed = true; | 305 | changed = true; |
302 | /* FALLTHRU */ | 306 | /* FALLTHRU */ |
@@ -309,7 +313,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, | |||
309 | case NVME_CTRL_LIVE: | 313 | case NVME_CTRL_LIVE: |
310 | case NVME_CTRL_ADMIN_ONLY: | 314 | case NVME_CTRL_ADMIN_ONLY: |
311 | case NVME_CTRL_RESETTING: | 315 | case NVME_CTRL_RESETTING: |
312 | case NVME_CTRL_RECONNECTING: | 316 | case NVME_CTRL_CONNECTING: |
313 | changed = true; | 317 | changed = true; |
314 | /* FALLTHRU */ | 318 | /* FALLTHRU */ |
315 | default: | 319 | default: |
@@ -518,9 +522,11 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, | |||
518 | u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector); | 522 | u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector); |
519 | u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; | 523 | u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; |
520 | 524 | ||
521 | range[n].cattr = cpu_to_le32(0); | 525 | if (n < segments) { |
522 | range[n].nlb = cpu_to_le32(nlb); | 526 | range[n].cattr = cpu_to_le32(0); |
523 | range[n].slba = cpu_to_le64(slba); | 527 | range[n].nlb = cpu_to_le32(nlb); |
528 | range[n].slba = cpu_to_le64(slba); | ||
529 | } | ||
524 | n++; | 530 | n++; |
525 | } | 531 | } |
526 | 532 | ||
@@ -794,13 +800,9 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) | |||
794 | 800 | ||
795 | static int nvme_keep_alive(struct nvme_ctrl *ctrl) | 801 | static int nvme_keep_alive(struct nvme_ctrl *ctrl) |
796 | { | 802 | { |
797 | struct nvme_command c; | ||
798 | struct request *rq; | 803 | struct request *rq; |
799 | 804 | ||
800 | memset(&c, 0, sizeof(c)); | 805 | rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED, |
801 | c.common.opcode = nvme_admin_keep_alive; | ||
802 | |||
803 | rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED, | ||
804 | NVME_QID_ANY); | 806 | NVME_QID_ANY); |
805 | if (IS_ERR(rq)) | 807 | if (IS_ERR(rq)) |
806 | return PTR_ERR(rq); | 808 | return PTR_ERR(rq); |
@@ -832,6 +834,8 @@ void nvme_start_keep_alive(struct nvme_ctrl *ctrl) | |||
832 | return; | 834 | return; |
833 | 835 | ||
834 | INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); | 836 | INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); |
837 | memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); | ||
838 | ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; | ||
835 | schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); | 839 | schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); |
836 | } | 840 | } |
837 | EXPORT_SYMBOL_GPL(nvme_start_keep_alive); | 841 | EXPORT_SYMBOL_GPL(nvme_start_keep_alive); |
@@ -1117,14 +1121,19 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, | |||
1117 | 1121 | ||
1118 | static void nvme_update_formats(struct nvme_ctrl *ctrl) | 1122 | static void nvme_update_formats(struct nvme_ctrl *ctrl) |
1119 | { | 1123 | { |
1120 | struct nvme_ns *ns; | 1124 | struct nvme_ns *ns, *next; |
1125 | LIST_HEAD(rm_list); | ||
1121 | 1126 | ||
1122 | mutex_lock(&ctrl->namespaces_mutex); | 1127 | mutex_lock(&ctrl->namespaces_mutex); |
1123 | list_for_each_entry(ns, &ctrl->namespaces, list) { | 1128 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
1124 | if (ns->disk && nvme_revalidate_disk(ns->disk)) | 1129 | if (ns->disk && nvme_revalidate_disk(ns->disk)) { |
1125 | nvme_ns_remove(ns); | 1130 | list_move_tail(&ns->list, &rm_list); |
1131 | } | ||
1126 | } | 1132 | } |
1127 | mutex_unlock(&ctrl->namespaces_mutex); | 1133 | mutex_unlock(&ctrl->namespaces_mutex); |
1134 | |||
1135 | list_for_each_entry_safe(ns, next, &rm_list, list) | ||
1136 | nvme_ns_remove(ns); | ||
1128 | } | 1137 | } |
1129 | 1138 | ||
1130 | static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) | 1139 | static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) |
@@ -2687,7 +2696,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev, | |||
2687 | [NVME_CTRL_LIVE] = "live", | 2696 | [NVME_CTRL_LIVE] = "live", |
2688 | [NVME_CTRL_ADMIN_ONLY] = "only-admin", | 2697 | [NVME_CTRL_ADMIN_ONLY] = "only-admin", |
2689 | [NVME_CTRL_RESETTING] = "resetting", | 2698 | [NVME_CTRL_RESETTING] = "resetting", |
2690 | [NVME_CTRL_RECONNECTING]= "reconnecting", | 2699 | [NVME_CTRL_CONNECTING] = "connecting", |
2691 | [NVME_CTRL_DELETING] = "deleting", | 2700 | [NVME_CTRL_DELETING] = "deleting", |
2692 | [NVME_CTRL_DEAD] = "dead", | 2701 | [NVME_CTRL_DEAD] = "dead", |
2693 | }; | 2702 | }; |
@@ -2835,7 +2844,7 @@ out: | |||
2835 | } | 2844 | } |
2836 | 2845 | ||
2837 | static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, | 2846 | static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, |
2838 | struct nvme_id_ns *id, bool *new) | 2847 | struct nvme_id_ns *id) |
2839 | { | 2848 | { |
2840 | struct nvme_ctrl *ctrl = ns->ctrl; | 2849 | struct nvme_ctrl *ctrl = ns->ctrl; |
2841 | bool is_shared = id->nmic & (1 << 0); | 2850 | bool is_shared = id->nmic & (1 << 0); |
@@ -2851,8 +2860,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, | |||
2851 | ret = PTR_ERR(head); | 2860 | ret = PTR_ERR(head); |
2852 | goto out_unlock; | 2861 | goto out_unlock; |
2853 | } | 2862 | } |
2854 | |||
2855 | *new = true; | ||
2856 | } else { | 2863 | } else { |
2857 | struct nvme_ns_ids ids; | 2864 | struct nvme_ns_ids ids; |
2858 | 2865 | ||
@@ -2864,8 +2871,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, | |||
2864 | ret = -EINVAL; | 2871 | ret = -EINVAL; |
2865 | goto out_unlock; | 2872 | goto out_unlock; |
2866 | } | 2873 | } |
2867 | |||
2868 | *new = false; | ||
2869 | } | 2874 | } |
2870 | 2875 | ||
2871 | list_add_tail(&ns->siblings, &head->list); | 2876 | list_add_tail(&ns->siblings, &head->list); |
@@ -2936,7 +2941,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
2936 | struct nvme_id_ns *id; | 2941 | struct nvme_id_ns *id; |
2937 | char disk_name[DISK_NAME_LEN]; | 2942 | char disk_name[DISK_NAME_LEN]; |
2938 | int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT; | 2943 | int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT; |
2939 | bool new = true; | ||
2940 | 2944 | ||
2941 | ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); | 2945 | ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); |
2942 | if (!ns) | 2946 | if (!ns) |
@@ -2962,7 +2966,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
2962 | if (id->ncap == 0) | 2966 | if (id->ncap == 0) |
2963 | goto out_free_id; | 2967 | goto out_free_id; |
2964 | 2968 | ||
2965 | if (nvme_init_ns_head(ns, nsid, id, &new)) | 2969 | if (nvme_init_ns_head(ns, nsid, id)) |
2966 | goto out_free_id; | 2970 | goto out_free_id; |
2967 | nvme_setup_streams_ns(ctrl, ns); | 2971 | nvme_setup_streams_ns(ctrl, ns); |
2968 | 2972 | ||
@@ -3028,9 +3032,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) | |||
3028 | pr_warn("%s: failed to register lightnvm sysfs group for identification\n", | 3032 | pr_warn("%s: failed to register lightnvm sysfs group for identification\n", |
3029 | ns->disk->disk_name); | 3033 | ns->disk->disk_name); |
3030 | 3034 | ||
3031 | if (new) | 3035 | nvme_mpath_add_disk(ns->head); |
3032 | nvme_mpath_add_disk(ns->head); | ||
3033 | nvme_mpath_add_disk_links(ns); | ||
3034 | return; | 3036 | return; |
3035 | out_unlink_ns: | 3037 | out_unlink_ns: |
3036 | mutex_lock(&ctrl->subsys->lock); | 3038 | mutex_lock(&ctrl->subsys->lock); |
@@ -3050,7 +3052,6 @@ static void nvme_ns_remove(struct nvme_ns *ns) | |||
3050 | return; | 3052 | return; |
3051 | 3053 | ||
3052 | if (ns->disk && ns->disk->flags & GENHD_FL_UP) { | 3054 | if (ns->disk && ns->disk->flags & GENHD_FL_UP) { |
3053 | nvme_mpath_remove_disk_links(ns); | ||
3054 | sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, | 3055 | sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, |
3055 | &nvme_ns_id_attr_group); | 3056 | &nvme_ns_id_attr_group); |
3056 | if (ns->ndev) | 3057 | if (ns->ndev) |
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 5dd4ceefed8f..8f0f34d06d46 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c | |||
@@ -493,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect); | |||
493 | */ | 493 | */ |
494 | int nvmf_register_transport(struct nvmf_transport_ops *ops) | 494 | int nvmf_register_transport(struct nvmf_transport_ops *ops) |
495 | { | 495 | { |
496 | if (!ops->create_ctrl || !ops->module) | 496 | if (!ops->create_ctrl) |
497 | return -EINVAL; | 497 | return -EINVAL; |
498 | 498 | ||
499 | down_write(&nvmf_transports_rwsem); | 499 | down_write(&nvmf_transports_rwsem); |
@@ -650,6 +650,11 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, | |||
650 | ret = -EINVAL; | 650 | ret = -EINVAL; |
651 | goto out; | 651 | goto out; |
652 | } | 652 | } |
653 | if (opts->discovery_nqn) { | ||
654 | pr_debug("Ignoring nr_io_queues value for discovery controller\n"); | ||
655 | break; | ||
656 | } | ||
657 | |||
653 | opts->nr_io_queues = min_t(unsigned int, | 658 | opts->nr_io_queues = min_t(unsigned int, |
654 | num_online_cpus(), token); | 659 | num_online_cpus(), token); |
655 | break; | 660 | break; |
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index 25b19f722f5b..a3145d90c1d2 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h | |||
@@ -171,13 +171,14 @@ static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl, | |||
171 | cmd->common.opcode != nvme_fabrics_command || | 171 | cmd->common.opcode != nvme_fabrics_command || |
172 | cmd->fabrics.fctype != nvme_fabrics_type_connect) { | 172 | cmd->fabrics.fctype != nvme_fabrics_type_connect) { |
173 | /* | 173 | /* |
174 | * Reconnecting state means transport disruption, which can take | 174 | * Connecting state means transport disruption or initial |
175 | * a long time and even might fail permanently, fail fast to | 175 | * establishment, which can take a long time and even might |
176 | * give upper layers a chance to failover. | 176 | * fail permanently, fail fast to give upper layers a chance |
177 | * to failover. | ||
177 | * Deleting state means that the ctrl will never accept commands | 178 | * Deleting state means that the ctrl will never accept commands |
178 | * again, fail it permanently. | 179 | * again, fail it permanently. |
179 | */ | 180 | */ |
180 | if (ctrl->state == NVME_CTRL_RECONNECTING || | 181 | if (ctrl->state == NVME_CTRL_CONNECTING || |
181 | ctrl->state == NVME_CTRL_DELETING) { | 182 | ctrl->state == NVME_CTRL_DELETING) { |
182 | nvme_req(rq)->status = NVME_SC_ABORT_REQ; | 183 | nvme_req(rq)->status = NVME_SC_ABORT_REQ; |
183 | return BLK_STS_IOERR; | 184 | return BLK_STS_IOERR; |
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index b856d7c919d2..1dc1387b7134 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
@@ -55,9 +55,7 @@ struct nvme_fc_queue { | |||
55 | 55 | ||
56 | enum nvme_fcop_flags { | 56 | enum nvme_fcop_flags { |
57 | FCOP_FLAGS_TERMIO = (1 << 0), | 57 | FCOP_FLAGS_TERMIO = (1 << 0), |
58 | FCOP_FLAGS_RELEASED = (1 << 1), | 58 | FCOP_FLAGS_AEN = (1 << 1), |
59 | FCOP_FLAGS_COMPLETE = (1 << 2), | ||
60 | FCOP_FLAGS_AEN = (1 << 3), | ||
61 | }; | 59 | }; |
62 | 60 | ||
63 | struct nvmefc_ls_req_op { | 61 | struct nvmefc_ls_req_op { |
@@ -532,7 +530,7 @@ nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl) | |||
532 | { | 530 | { |
533 | switch (ctrl->ctrl.state) { | 531 | switch (ctrl->ctrl.state) { |
534 | case NVME_CTRL_NEW: | 532 | case NVME_CTRL_NEW: |
535 | case NVME_CTRL_RECONNECTING: | 533 | case NVME_CTRL_CONNECTING: |
536 | /* | 534 | /* |
537 | * As all reconnects were suppressed, schedule a | 535 | * As all reconnects were suppressed, schedule a |
538 | * connect. | 536 | * connect. |
@@ -777,7 +775,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) | |||
777 | } | 775 | } |
778 | break; | 776 | break; |
779 | 777 | ||
780 | case NVME_CTRL_RECONNECTING: | 778 | case NVME_CTRL_CONNECTING: |
781 | /* | 779 | /* |
782 | * The association has already been terminated and the | 780 | * The association has already been terminated and the |
783 | * controller is attempting reconnects. No need to do anything | 781 | * controller is attempting reconnects. No need to do anything |
@@ -1208,7 +1206,7 @@ nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, | |||
1208 | sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); | 1206 | sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); |
1209 | 1207 | ||
1210 | assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); | 1208 | assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); |
1211 | assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize); | 1209 | assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); |
1212 | /* Linux supports only Dynamic controllers */ | 1210 | /* Linux supports only Dynamic controllers */ |
1213 | assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); | 1211 | assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); |
1214 | uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); | 1212 | uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); |
@@ -1323,7 +1321,7 @@ nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, | |||
1323 | sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); | 1321 | sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); |
1324 | conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); | 1322 | conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); |
1325 | conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); | 1323 | conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); |
1326 | conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize); | 1324 | conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); |
1327 | 1325 | ||
1328 | lsop->queue = queue; | 1326 | lsop->queue = queue; |
1329 | lsreq->rqstaddr = conn_rqst; | 1327 | lsreq->rqstaddr = conn_rqst; |
@@ -1470,7 +1468,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) | |||
1470 | 1468 | ||
1471 | /* *********************** NVME Ctrl Routines **************************** */ | 1469 | /* *********************** NVME Ctrl Routines **************************** */ |
1472 | 1470 | ||
1473 | static void __nvme_fc_final_op_cleanup(struct request *rq); | ||
1474 | static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); | 1471 | static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); |
1475 | 1472 | ||
1476 | static int | 1473 | static int |
@@ -1512,13 +1509,19 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq, | |||
1512 | static int | 1509 | static int |
1513 | __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) | 1510 | __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) |
1514 | { | 1511 | { |
1515 | int state; | 1512 | unsigned long flags; |
1513 | int opstate; | ||
1514 | |||
1515 | spin_lock_irqsave(&ctrl->lock, flags); | ||
1516 | opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); | ||
1517 | if (opstate != FCPOP_STATE_ACTIVE) | ||
1518 | atomic_set(&op->state, opstate); | ||
1519 | else if (ctrl->flags & FCCTRL_TERMIO) | ||
1520 | ctrl->iocnt++; | ||
1521 | spin_unlock_irqrestore(&ctrl->lock, flags); | ||
1516 | 1522 | ||
1517 | state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); | 1523 | if (opstate != FCPOP_STATE_ACTIVE) |
1518 | if (state != FCPOP_STATE_ACTIVE) { | ||
1519 | atomic_set(&op->state, state); | ||
1520 | return -ECANCELED; | 1524 | return -ECANCELED; |
1521 | } | ||
1522 | 1525 | ||
1523 | ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, | 1526 | ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, |
1524 | &ctrl->rport->remoteport, | 1527 | &ctrl->rport->remoteport, |
@@ -1532,60 +1535,26 @@ static void | |||
1532 | nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) | 1535 | nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) |
1533 | { | 1536 | { |
1534 | struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; | 1537 | struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; |
1535 | unsigned long flags; | 1538 | int i; |
1536 | int i, ret; | ||
1537 | |||
1538 | for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { | ||
1539 | if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE) | ||
1540 | continue; | ||
1541 | |||
1542 | spin_lock_irqsave(&ctrl->lock, flags); | ||
1543 | if (ctrl->flags & FCCTRL_TERMIO) { | ||
1544 | ctrl->iocnt++; | ||
1545 | aen_op->flags |= FCOP_FLAGS_TERMIO; | ||
1546 | } | ||
1547 | spin_unlock_irqrestore(&ctrl->lock, flags); | ||
1548 | |||
1549 | ret = __nvme_fc_abort_op(ctrl, aen_op); | ||
1550 | if (ret) { | ||
1551 | /* | ||
1552 | * if __nvme_fc_abort_op failed the io wasn't | ||
1553 | * active. Thus this call path is running in | ||
1554 | * parallel to the io complete. Treat as non-error. | ||
1555 | */ | ||
1556 | 1539 | ||
1557 | /* back out the flags/counters */ | 1540 | for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) |
1558 | spin_lock_irqsave(&ctrl->lock, flags); | 1541 | __nvme_fc_abort_op(ctrl, aen_op); |
1559 | if (ctrl->flags & FCCTRL_TERMIO) | ||
1560 | ctrl->iocnt--; | ||
1561 | aen_op->flags &= ~FCOP_FLAGS_TERMIO; | ||
1562 | spin_unlock_irqrestore(&ctrl->lock, flags); | ||
1563 | return; | ||
1564 | } | ||
1565 | } | ||
1566 | } | 1542 | } |
1567 | 1543 | ||
1568 | static inline int | 1544 | static inline void |
1569 | __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, | 1545 | __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, |
1570 | struct nvme_fc_fcp_op *op) | 1546 | struct nvme_fc_fcp_op *op, int opstate) |
1571 | { | 1547 | { |
1572 | unsigned long flags; | 1548 | unsigned long flags; |
1573 | bool complete_rq = false; | ||
1574 | 1549 | ||
1575 | spin_lock_irqsave(&ctrl->lock, flags); | 1550 | if (opstate == FCPOP_STATE_ABORTED) { |
1576 | if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) { | 1551 | spin_lock_irqsave(&ctrl->lock, flags); |
1577 | if (ctrl->flags & FCCTRL_TERMIO) { | 1552 | if (ctrl->flags & FCCTRL_TERMIO) { |
1578 | if (!--ctrl->iocnt) | 1553 | if (!--ctrl->iocnt) |
1579 | wake_up(&ctrl->ioabort_wait); | 1554 | wake_up(&ctrl->ioabort_wait); |
1580 | } | 1555 | } |
1556 | spin_unlock_irqrestore(&ctrl->lock, flags); | ||
1581 | } | 1557 | } |
1582 | if (op->flags & FCOP_FLAGS_RELEASED) | ||
1583 | complete_rq = true; | ||
1584 | else | ||
1585 | op->flags |= FCOP_FLAGS_COMPLETE; | ||
1586 | spin_unlock_irqrestore(&ctrl->lock, flags); | ||
1587 | |||
1588 | return complete_rq; | ||
1589 | } | 1558 | } |
1590 | 1559 | ||
1591 | static void | 1560 | static void |
@@ -1601,6 +1570,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) | |||
1601 | __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); | 1570 | __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); |
1602 | union nvme_result result; | 1571 | union nvme_result result; |
1603 | bool terminate_assoc = true; | 1572 | bool terminate_assoc = true; |
1573 | int opstate; | ||
1604 | 1574 | ||
1605 | /* | 1575 | /* |
1606 | * WARNING: | 1576 | * WARNING: |
@@ -1639,11 +1609,12 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) | |||
1639 | * association to be terminated. | 1609 | * association to be terminated. |
1640 | */ | 1610 | */ |
1641 | 1611 | ||
1612 | opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); | ||
1613 | |||
1642 | fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, | 1614 | fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, |
1643 | sizeof(op->rsp_iu), DMA_FROM_DEVICE); | 1615 | sizeof(op->rsp_iu), DMA_FROM_DEVICE); |
1644 | 1616 | ||
1645 | if (atomic_read(&op->state) == FCPOP_STATE_ABORTED || | 1617 | if (opstate == FCPOP_STATE_ABORTED) |
1646 | op->flags & FCOP_FLAGS_TERMIO) | ||
1647 | status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); | 1618 | status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); |
1648 | else if (freq->status) | 1619 | else if (freq->status) |
1649 | status = cpu_to_le16(NVME_SC_INTERNAL << 1); | 1620 | status = cpu_to_le16(NVME_SC_INTERNAL << 1); |
@@ -1708,7 +1679,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) | |||
1708 | done: | 1679 | done: |
1709 | if (op->flags & FCOP_FLAGS_AEN) { | 1680 | if (op->flags & FCOP_FLAGS_AEN) { |
1710 | nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); | 1681 | nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); |
1711 | __nvme_fc_fcpop_chk_teardowns(ctrl, op); | 1682 | __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); |
1712 | atomic_set(&op->state, FCPOP_STATE_IDLE); | 1683 | atomic_set(&op->state, FCPOP_STATE_IDLE); |
1713 | op->flags = FCOP_FLAGS_AEN; /* clear other flags */ | 1684 | op->flags = FCOP_FLAGS_AEN; /* clear other flags */ |
1714 | nvme_fc_ctrl_put(ctrl); | 1685 | nvme_fc_ctrl_put(ctrl); |
@@ -1722,13 +1693,11 @@ done: | |||
1722 | if (status && | 1693 | if (status && |
1723 | (blk_queue_dying(rq->q) || | 1694 | (blk_queue_dying(rq->q) || |
1724 | ctrl->ctrl.state == NVME_CTRL_NEW || | 1695 | ctrl->ctrl.state == NVME_CTRL_NEW || |
1725 | ctrl->ctrl.state == NVME_CTRL_RECONNECTING)) | 1696 | ctrl->ctrl.state == NVME_CTRL_CONNECTING)) |
1726 | status |= cpu_to_le16(NVME_SC_DNR << 1); | 1697 | status |= cpu_to_le16(NVME_SC_DNR << 1); |
1727 | 1698 | ||
1728 | if (__nvme_fc_fcpop_chk_teardowns(ctrl, op)) | 1699 | __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); |
1729 | __nvme_fc_final_op_cleanup(rq); | 1700 | nvme_end_request(rq, status, result); |
1730 | else | ||
1731 | nvme_end_request(rq, status, result); | ||
1732 | 1701 | ||
1733 | check_error: | 1702 | check_error: |
1734 | if (terminate_assoc) | 1703 | if (terminate_assoc) |
@@ -2415,46 +2384,16 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg) | |||
2415 | } | 2384 | } |
2416 | 2385 | ||
2417 | static void | 2386 | static void |
2418 | __nvme_fc_final_op_cleanup(struct request *rq) | 2387 | nvme_fc_complete_rq(struct request *rq) |
2419 | { | 2388 | { |
2420 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | 2389 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); |
2421 | struct nvme_fc_ctrl *ctrl = op->ctrl; | 2390 | struct nvme_fc_ctrl *ctrl = op->ctrl; |
2422 | 2391 | ||
2423 | atomic_set(&op->state, FCPOP_STATE_IDLE); | 2392 | atomic_set(&op->state, FCPOP_STATE_IDLE); |
2424 | op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED | | ||
2425 | FCOP_FLAGS_COMPLETE); | ||
2426 | 2393 | ||
2427 | nvme_fc_unmap_data(ctrl, rq, op); | 2394 | nvme_fc_unmap_data(ctrl, rq, op); |
2428 | nvme_complete_rq(rq); | 2395 | nvme_complete_rq(rq); |
2429 | nvme_fc_ctrl_put(ctrl); | 2396 | nvme_fc_ctrl_put(ctrl); |
2430 | |||
2431 | } | ||
2432 | |||
2433 | static void | ||
2434 | nvme_fc_complete_rq(struct request *rq) | ||
2435 | { | ||
2436 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); | ||
2437 | struct nvme_fc_ctrl *ctrl = op->ctrl; | ||
2438 | unsigned long flags; | ||
2439 | bool completed = false; | ||
2440 | |||
2441 | /* | ||
2442 | * the core layer, on controller resets after calling | ||
2443 | * nvme_shutdown_ctrl(), calls complete_rq without our | ||
2444 | * calling blk_mq_complete_request(), thus there may still | ||
2445 | * be live i/o outstanding with the LLDD. Means transport has | ||
2446 | * to track complete calls vs fcpio_done calls to know what | ||
2447 | * path to take on completes and dones. | ||
2448 | */ | ||
2449 | spin_lock_irqsave(&ctrl->lock, flags); | ||
2450 | if (op->flags & FCOP_FLAGS_COMPLETE) | ||
2451 | completed = true; | ||
2452 | else | ||
2453 | op->flags |= FCOP_FLAGS_RELEASED; | ||
2454 | spin_unlock_irqrestore(&ctrl->lock, flags); | ||
2455 | |||
2456 | if (completed) | ||
2457 | __nvme_fc_final_op_cleanup(rq); | ||
2458 | } | 2397 | } |
2459 | 2398 | ||
2460 | /* | 2399 | /* |
@@ -2476,35 +2415,11 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved) | |||
2476 | struct nvme_ctrl *nctrl = data; | 2415 | struct nvme_ctrl *nctrl = data; |
2477 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); | 2416 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); |
2478 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); | 2417 | struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); |
2479 | unsigned long flags; | ||
2480 | int status; | ||
2481 | 2418 | ||
2482 | if (!blk_mq_request_started(req)) | 2419 | if (!blk_mq_request_started(req)) |
2483 | return; | 2420 | return; |
2484 | 2421 | ||
2485 | spin_lock_irqsave(&ctrl->lock, flags); | 2422 | __nvme_fc_abort_op(ctrl, op); |
2486 | if (ctrl->flags & FCCTRL_TERMIO) { | ||
2487 | ctrl->iocnt++; | ||
2488 | op->flags |= FCOP_FLAGS_TERMIO; | ||
2489 | } | ||
2490 | spin_unlock_irqrestore(&ctrl->lock, flags); | ||
2491 | |||
2492 | status = __nvme_fc_abort_op(ctrl, op); | ||
2493 | if (status) { | ||
2494 | /* | ||
2495 | * if __nvme_fc_abort_op failed the io wasn't | ||
2496 | * active. Thus this call path is running in | ||
2497 | * parallel to the io complete. Treat as non-error. | ||
2498 | */ | ||
2499 | |||
2500 | /* back out the flags/counters */ | ||
2501 | spin_lock_irqsave(&ctrl->lock, flags); | ||
2502 | if (ctrl->flags & FCCTRL_TERMIO) | ||
2503 | ctrl->iocnt--; | ||
2504 | op->flags &= ~FCOP_FLAGS_TERMIO; | ||
2505 | spin_unlock_irqrestore(&ctrl->lock, flags); | ||
2506 | return; | ||
2507 | } | ||
2508 | } | 2423 | } |
2509 | 2424 | ||
2510 | 2425 | ||
@@ -2566,11 +2481,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) | |||
2566 | goto out_free_tag_set; | 2481 | goto out_free_tag_set; |
2567 | } | 2482 | } |
2568 | 2483 | ||
2569 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | 2484 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
2570 | if (ret) | 2485 | if (ret) |
2571 | goto out_cleanup_blk_queue; | 2486 | goto out_cleanup_blk_queue; |
2572 | 2487 | ||
2573 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | 2488 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
2574 | if (ret) | 2489 | if (ret) |
2575 | goto out_delete_hw_queues; | 2490 | goto out_delete_hw_queues; |
2576 | 2491 | ||
@@ -2617,11 +2532,11 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl) | |||
2617 | if (ret) | 2532 | if (ret) |
2618 | goto out_free_io_queues; | 2533 | goto out_free_io_queues; |
2619 | 2534 | ||
2620 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | 2535 | ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
2621 | if (ret) | 2536 | if (ret) |
2622 | goto out_free_io_queues; | 2537 | goto out_free_io_queues; |
2623 | 2538 | ||
2624 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size); | 2539 | ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); |
2625 | if (ret) | 2540 | if (ret) |
2626 | goto out_delete_hw_queues; | 2541 | goto out_delete_hw_queues; |
2627 | 2542 | ||
@@ -2717,13 +2632,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |||
2717 | nvme_fc_init_queue(ctrl, 0); | 2632 | nvme_fc_init_queue(ctrl, 0); |
2718 | 2633 | ||
2719 | ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, | 2634 | ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, |
2720 | NVME_AQ_BLK_MQ_DEPTH); | 2635 | NVME_AQ_DEPTH); |
2721 | if (ret) | 2636 | if (ret) |
2722 | goto out_free_queue; | 2637 | goto out_free_queue; |
2723 | 2638 | ||
2724 | ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], | 2639 | ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], |
2725 | NVME_AQ_BLK_MQ_DEPTH, | 2640 | NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4)); |
2726 | (NVME_AQ_BLK_MQ_DEPTH / 4)); | ||
2727 | if (ret) | 2641 | if (ret) |
2728 | goto out_delete_hw_queue; | 2642 | goto out_delete_hw_queue; |
2729 | 2643 | ||
@@ -2751,7 +2665,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |||
2751 | } | 2665 | } |
2752 | 2666 | ||
2753 | ctrl->ctrl.sqsize = | 2667 | ctrl->ctrl.sqsize = |
2754 | min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize); | 2668 | min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize); |
2755 | 2669 | ||
2756 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); | 2670 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); |
2757 | if (ret) | 2671 | if (ret) |
@@ -2784,6 +2698,14 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) | |||
2784 | opts->queue_size = ctrl->ctrl.maxcmd; | 2698 | opts->queue_size = ctrl->ctrl.maxcmd; |
2785 | } | 2699 | } |
2786 | 2700 | ||
2701 | if (opts->queue_size > ctrl->ctrl.sqsize + 1) { | ||
2702 | /* warn if sqsize is lower than queue_size */ | ||
2703 | dev_warn(ctrl->ctrl.device, | ||
2704 | "queue_size %zu > ctrl sqsize %u, clamping down\n", | ||
2705 | opts->queue_size, ctrl->ctrl.sqsize + 1); | ||
2706 | opts->queue_size = ctrl->ctrl.sqsize + 1; | ||
2707 | } | ||
2708 | |||
2787 | ret = nvme_fc_init_aen_ops(ctrl); | 2709 | ret = nvme_fc_init_aen_ops(ctrl); |
2788 | if (ret) | 2710 | if (ret) |
2789 | goto out_term_aen_ops; | 2711 | goto out_term_aen_ops; |
@@ -2943,7 +2865,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) | |||
2943 | unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; | 2865 | unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; |
2944 | bool recon = true; | 2866 | bool recon = true; |
2945 | 2867 | ||
2946 | if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) | 2868 | if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) |
2947 | return; | 2869 | return; |
2948 | 2870 | ||
2949 | if (portptr->port_state == FC_OBJSTATE_ONLINE) | 2871 | if (portptr->port_state == FC_OBJSTATE_ONLINE) |
@@ -2991,10 +2913,10 @@ nvme_fc_reset_ctrl_work(struct work_struct *work) | |||
2991 | /* will block will waiting for io to terminate */ | 2913 | /* will block will waiting for io to terminate */ |
2992 | nvme_fc_delete_association(ctrl); | 2914 | nvme_fc_delete_association(ctrl); |
2993 | 2915 | ||
2994 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { | 2916 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { |
2995 | dev_err(ctrl->ctrl.device, | 2917 | dev_err(ctrl->ctrl.device, |
2996 | "NVME-FC{%d}: error_recovery: Couldn't change state " | 2918 | "NVME-FC{%d}: error_recovery: Couldn't change state " |
2997 | "to RECONNECTING\n", ctrl->cnum); | 2919 | "to CONNECTING\n", ctrl->cnum); |
2998 | return; | 2920 | return; |
2999 | } | 2921 | } |
3000 | 2922 | ||
@@ -3195,7 +3117,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
3195 | * transport errors (frame drop, LS failure) inherently must kill | 3117 | * transport errors (frame drop, LS failure) inherently must kill |
3196 | * the association. The transport is coded so that any command used | 3118 | * the association. The transport is coded so that any command used |
3197 | * to create the association (prior to a LIVE state transition | 3119 | * to create the association (prior to a LIVE state transition |
3198 | * while NEW or RECONNECTING) will fail if it completes in error or | 3120 | * while NEW or CONNECTING) will fail if it completes in error or |
3199 | * times out. | 3121 | * times out. |
3200 | * | 3122 | * |
3201 | * As such: as the connect request was mostly likely due to a | 3123 | * As such: as the connect request was mostly likely due to a |
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 3b211d9e58b8..060f69e03427 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c | |||
@@ -198,30 +198,16 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head) | |||
198 | { | 198 | { |
199 | if (!head->disk) | 199 | if (!head->disk) |
200 | return; | 200 | return; |
201 | device_add_disk(&head->subsys->dev, head->disk); | ||
202 | if (sysfs_create_group(&disk_to_dev(head->disk)->kobj, | ||
203 | &nvme_ns_id_attr_group)) | ||
204 | pr_warn("%s: failed to create sysfs group for identification\n", | ||
205 | head->disk->disk_name); | ||
206 | } | ||
207 | |||
208 | void nvme_mpath_add_disk_links(struct nvme_ns *ns) | ||
209 | { | ||
210 | struct kobject *slave_disk_kobj, *holder_disk_kobj; | ||
211 | |||
212 | if (!ns->head->disk) | ||
213 | return; | ||
214 | |||
215 | slave_disk_kobj = &disk_to_dev(ns->disk)->kobj; | ||
216 | if (sysfs_create_link(ns->head->disk->slave_dir, slave_disk_kobj, | ||
217 | kobject_name(slave_disk_kobj))) | ||
218 | return; | ||
219 | 201 | ||
220 | holder_disk_kobj = &disk_to_dev(ns->head->disk)->kobj; | 202 | mutex_lock(&head->subsys->lock); |
221 | if (sysfs_create_link(ns->disk->part0.holder_dir, holder_disk_kobj, | 203 | if (!(head->disk->flags & GENHD_FL_UP)) { |
222 | kobject_name(holder_disk_kobj))) | 204 | device_add_disk(&head->subsys->dev, head->disk); |
223 | sysfs_remove_link(ns->head->disk->slave_dir, | 205 | if (sysfs_create_group(&disk_to_dev(head->disk)->kobj, |
224 | kobject_name(slave_disk_kobj)); | 206 | &nvme_ns_id_attr_group)) |
207 | pr_warn("%s: failed to create sysfs group for identification\n", | ||
208 | head->disk->disk_name); | ||
209 | } | ||
210 | mutex_unlock(&head->subsys->lock); | ||
225 | } | 211 | } |
226 | 212 | ||
227 | void nvme_mpath_remove_disk(struct nvme_ns_head *head) | 213 | void nvme_mpath_remove_disk(struct nvme_ns_head *head) |
@@ -238,14 +224,3 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) | |||
238 | blk_cleanup_queue(head->disk->queue); | 224 | blk_cleanup_queue(head->disk->queue); |
239 | put_disk(head->disk); | 225 | put_disk(head->disk); |
240 | } | 226 | } |
241 | |||
242 | void nvme_mpath_remove_disk_links(struct nvme_ns *ns) | ||
243 | { | ||
244 | if (!ns->head->disk) | ||
245 | return; | ||
246 | |||
247 | sysfs_remove_link(ns->disk->part0.holder_dir, | ||
248 | kobject_name(&disk_to_dev(ns->head->disk)->kobj)); | ||
249 | sysfs_remove_link(ns->head->disk->slave_dir, | ||
250 | kobject_name(&disk_to_dev(ns->disk)->kobj)); | ||
251 | } | ||
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 8e4550fa08f8..d733b14ede9d 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
@@ -123,7 +123,7 @@ enum nvme_ctrl_state { | |||
123 | NVME_CTRL_LIVE, | 123 | NVME_CTRL_LIVE, |
124 | NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */ | 124 | NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */ |
125 | NVME_CTRL_RESETTING, | 125 | NVME_CTRL_RESETTING, |
126 | NVME_CTRL_RECONNECTING, | 126 | NVME_CTRL_CONNECTING, |
127 | NVME_CTRL_DELETING, | 127 | NVME_CTRL_DELETING, |
128 | NVME_CTRL_DEAD, | 128 | NVME_CTRL_DEAD, |
129 | }; | 129 | }; |
@@ -183,6 +183,7 @@ struct nvme_ctrl { | |||
183 | struct work_struct scan_work; | 183 | struct work_struct scan_work; |
184 | struct work_struct async_event_work; | 184 | struct work_struct async_event_work; |
185 | struct delayed_work ka_work; | 185 | struct delayed_work ka_work; |
186 | struct nvme_command ka_cmd; | ||
186 | struct work_struct fw_act_work; | 187 | struct work_struct fw_act_work; |
187 | 188 | ||
188 | /* Power saving configuration */ | 189 | /* Power saving configuration */ |
@@ -409,9 +410,7 @@ bool nvme_req_needs_failover(struct request *req, blk_status_t error); | |||
409 | void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); | 410 | void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); |
410 | int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); | 411 | int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); |
411 | void nvme_mpath_add_disk(struct nvme_ns_head *head); | 412 | void nvme_mpath_add_disk(struct nvme_ns_head *head); |
412 | void nvme_mpath_add_disk_links(struct nvme_ns *ns); | ||
413 | void nvme_mpath_remove_disk(struct nvme_ns_head *head); | 413 | void nvme_mpath_remove_disk(struct nvme_ns_head *head); |
414 | void nvme_mpath_remove_disk_links(struct nvme_ns *ns); | ||
415 | 414 | ||
416 | static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) | 415 | static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) |
417 | { | 416 | { |
@@ -453,12 +452,6 @@ static inline void nvme_mpath_add_disk(struct nvme_ns_head *head) | |||
453 | static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) | 452 | static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) |
454 | { | 453 | { |
455 | } | 454 | } |
456 | static inline void nvme_mpath_add_disk_links(struct nvme_ns *ns) | ||
457 | { | ||
458 | } | ||
459 | static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns) | ||
460 | { | ||
461 | } | ||
462 | static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) | 455 | static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) |
463 | { | 456 | { |
464 | } | 457 | } |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 6fe7af00a1f4..b6f43b738f03 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -1141,7 +1141,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) | |||
1141 | /* If there is a reset/reinit ongoing, we shouldn't reset again. */ | 1141 | /* If there is a reset/reinit ongoing, we shouldn't reset again. */ |
1142 | switch (dev->ctrl.state) { | 1142 | switch (dev->ctrl.state) { |
1143 | case NVME_CTRL_RESETTING: | 1143 | case NVME_CTRL_RESETTING: |
1144 | case NVME_CTRL_RECONNECTING: | 1144 | case NVME_CTRL_CONNECTING: |
1145 | return false; | 1145 | return false; |
1146 | default: | 1146 | default: |
1147 | break; | 1147 | break; |
@@ -1153,12 +1153,6 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) | |||
1153 | if (!(csts & NVME_CSTS_CFS) && !nssro) | 1153 | if (!(csts & NVME_CSTS_CFS) && !nssro) |
1154 | return false; | 1154 | return false; |
1155 | 1155 | ||
1156 | /* If PCI error recovery process is happening, we cannot reset or | ||
1157 | * the recovery mechanism will surely fail. | ||
1158 | */ | ||
1159 | if (pci_channel_offline(to_pci_dev(dev->dev))) | ||
1160 | return false; | ||
1161 | |||
1162 | return true; | 1156 | return true; |
1163 | } | 1157 | } |
1164 | 1158 | ||
@@ -1189,6 +1183,13 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) | |||
1189 | struct nvme_command cmd; | 1183 | struct nvme_command cmd; |
1190 | u32 csts = readl(dev->bar + NVME_REG_CSTS); | 1184 | u32 csts = readl(dev->bar + NVME_REG_CSTS); |
1191 | 1185 | ||
1186 | /* If PCI error recovery process is happening, we cannot reset or | ||
1187 | * the recovery mechanism will surely fail. | ||
1188 | */ | ||
1189 | mb(); | ||
1190 | if (pci_channel_offline(to_pci_dev(dev->dev))) | ||
1191 | return BLK_EH_RESET_TIMER; | ||
1192 | |||
1192 | /* | 1193 | /* |
1193 | * Reset immediately if the controller is failed | 1194 | * Reset immediately if the controller is failed |
1194 | */ | 1195 | */ |
@@ -1215,13 +1216,17 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) | |||
1215 | * cancellation error. All outstanding requests are completed on | 1216 | * cancellation error. All outstanding requests are completed on |
1216 | * shutdown, so we return BLK_EH_HANDLED. | 1217 | * shutdown, so we return BLK_EH_HANDLED. |
1217 | */ | 1218 | */ |
1218 | if (dev->ctrl.state == NVME_CTRL_RESETTING) { | 1219 | switch (dev->ctrl.state) { |
1220 | case NVME_CTRL_CONNECTING: | ||
1221 | case NVME_CTRL_RESETTING: | ||
1219 | dev_warn(dev->ctrl.device, | 1222 | dev_warn(dev->ctrl.device, |
1220 | "I/O %d QID %d timeout, disable controller\n", | 1223 | "I/O %d QID %d timeout, disable controller\n", |
1221 | req->tag, nvmeq->qid); | 1224 | req->tag, nvmeq->qid); |
1222 | nvme_dev_disable(dev, false); | 1225 | nvme_dev_disable(dev, false); |
1223 | nvme_req(req)->flags |= NVME_REQ_CANCELLED; | 1226 | nvme_req(req)->flags |= NVME_REQ_CANCELLED; |
1224 | return BLK_EH_HANDLED; | 1227 | return BLK_EH_HANDLED; |
1228 | default: | ||
1229 | break; | ||
1225 | } | 1230 | } |
1226 | 1231 | ||
1227 | /* | 1232 | /* |
@@ -1364,18 +1369,14 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, | |||
1364 | static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, | 1369 | static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, |
1365 | int qid, int depth) | 1370 | int qid, int depth) |
1366 | { | 1371 | { |
1367 | if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { | 1372 | /* CMB SQEs will be mapped before creation */ |
1368 | unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), | 1373 | if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) |
1369 | dev->ctrl.page_size); | 1374 | return 0; |
1370 | nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset; | ||
1371 | nvmeq->sq_cmds_io = dev->cmb + offset; | ||
1372 | } else { | ||
1373 | nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), | ||
1374 | &nvmeq->sq_dma_addr, GFP_KERNEL); | ||
1375 | if (!nvmeq->sq_cmds) | ||
1376 | return -ENOMEM; | ||
1377 | } | ||
1378 | 1375 | ||
1376 | nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), | ||
1377 | &nvmeq->sq_dma_addr, GFP_KERNEL); | ||
1378 | if (!nvmeq->sq_cmds) | ||
1379 | return -ENOMEM; | ||
1379 | return 0; | 1380 | return 0; |
1380 | } | 1381 | } |
1381 | 1382 | ||
@@ -1449,10 +1450,17 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) | |||
1449 | struct nvme_dev *dev = nvmeq->dev; | 1450 | struct nvme_dev *dev = nvmeq->dev; |
1450 | int result; | 1451 | int result; |
1451 | 1452 | ||
1453 | if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { | ||
1454 | unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth), | ||
1455 | dev->ctrl.page_size); | ||
1456 | nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset; | ||
1457 | nvmeq->sq_cmds_io = dev->cmb + offset; | ||
1458 | } | ||
1459 | |||
1452 | nvmeq->cq_vector = qid - 1; | 1460 | nvmeq->cq_vector = qid - 1; |
1453 | result = adapter_alloc_cq(dev, qid, nvmeq); | 1461 | result = adapter_alloc_cq(dev, qid, nvmeq); |
1454 | if (result < 0) | 1462 | if (result < 0) |
1455 | return result; | 1463 | goto release_vector; |
1456 | 1464 | ||
1457 | result = adapter_alloc_sq(dev, qid, nvmeq); | 1465 | result = adapter_alloc_sq(dev, qid, nvmeq); |
1458 | if (result < 0) | 1466 | if (result < 0) |
@@ -1466,9 +1474,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) | |||
1466 | return result; | 1474 | return result; |
1467 | 1475 | ||
1468 | release_sq: | 1476 | release_sq: |
1477 | dev->online_queues--; | ||
1469 | adapter_delete_sq(dev, qid); | 1478 | adapter_delete_sq(dev, qid); |
1470 | release_cq: | 1479 | release_cq: |
1471 | adapter_delete_cq(dev, qid); | 1480 | adapter_delete_cq(dev, qid); |
1481 | release_vector: | ||
1482 | nvmeq->cq_vector = -1; | ||
1472 | return result; | 1483 | return result; |
1473 | } | 1484 | } |
1474 | 1485 | ||
@@ -1903,7 +1914,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) | |||
1903 | int result, nr_io_queues; | 1914 | int result, nr_io_queues; |
1904 | unsigned long size; | 1915 | unsigned long size; |
1905 | 1916 | ||
1906 | nr_io_queues = num_present_cpus(); | 1917 | nr_io_queues = num_possible_cpus(); |
1907 | result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); | 1918 | result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); |
1908 | if (result < 0) | 1919 | if (result < 0) |
1909 | return result; | 1920 | return result; |
@@ -2288,12 +2299,12 @@ static void nvme_reset_work(struct work_struct *work) | |||
2288 | nvme_dev_disable(dev, false); | 2299 | nvme_dev_disable(dev, false); |
2289 | 2300 | ||
2290 | /* | 2301 | /* |
2291 | * Introduce RECONNECTING state from nvme-fc/rdma transports to mark the | 2302 | * Introduce CONNECTING state from nvme-fc/rdma transports to mark the |
2292 | * initializing procedure here. | 2303 | * initializing procedure here. |
2293 | */ | 2304 | */ |
2294 | if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) { | 2305 | if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { |
2295 | dev_warn(dev->ctrl.device, | 2306 | dev_warn(dev->ctrl.device, |
2296 | "failed to mark controller RECONNECTING\n"); | 2307 | "failed to mark controller CONNECTING\n"); |
2297 | goto out; | 2308 | goto out; |
2298 | } | 2309 | } |
2299 | 2310 | ||
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 2bc059f7d73c..4d84a73ee12d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
@@ -887,7 +887,7 @@ free_ctrl: | |||
887 | static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) | 887 | static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) |
888 | { | 888 | { |
889 | /* If we are resetting/deleting then do nothing */ | 889 | /* If we are resetting/deleting then do nothing */ |
890 | if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) { | 890 | if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { |
891 | WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || | 891 | WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || |
892 | ctrl->ctrl.state == NVME_CTRL_LIVE); | 892 | ctrl->ctrl.state == NVME_CTRL_LIVE); |
893 | return; | 893 | return; |
@@ -973,7 +973,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) | |||
973 | blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); | 973 | blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); |
974 | nvme_start_queues(&ctrl->ctrl); | 974 | nvme_start_queues(&ctrl->ctrl); |
975 | 975 | ||
976 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { | 976 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { |
977 | /* state change failure should never happen */ | 977 | /* state change failure should never happen */ |
978 | WARN_ON_ONCE(1); | 978 | WARN_ON_ONCE(1); |
979 | return; | 979 | return; |
@@ -1051,7 +1051,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, | |||
1051 | struct nvme_rdma_device *dev = queue->device; | 1051 | struct nvme_rdma_device *dev = queue->device; |
1052 | struct ib_device *ibdev = dev->dev; | 1052 | struct ib_device *ibdev = dev->dev; |
1053 | 1053 | ||
1054 | if (!blk_rq_bytes(rq)) | 1054 | if (!blk_rq_payload_bytes(rq)) |
1055 | return; | 1055 | return; |
1056 | 1056 | ||
1057 | if (req->mr) { | 1057 | if (req->mr) { |
@@ -1166,7 +1166,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, | |||
1166 | 1166 | ||
1167 | c->common.flags |= NVME_CMD_SGL_METABUF; | 1167 | c->common.flags |= NVME_CMD_SGL_METABUF; |
1168 | 1168 | ||
1169 | if (!blk_rq_bytes(rq)) | 1169 | if (!blk_rq_payload_bytes(rq)) |
1170 | return nvme_rdma_set_sg_null(c); | 1170 | return nvme_rdma_set_sg_null(c); |
1171 | 1171 | ||
1172 | req->sg_table.sgl = req->first_sgl; | 1172 | req->sg_table.sgl = req->first_sgl; |
@@ -1756,7 +1756,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) | |||
1756 | nvme_stop_ctrl(&ctrl->ctrl); | 1756 | nvme_stop_ctrl(&ctrl->ctrl); |
1757 | nvme_rdma_shutdown_ctrl(ctrl, false); | 1757 | nvme_rdma_shutdown_ctrl(ctrl, false); |
1758 | 1758 | ||
1759 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) { | 1759 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { |
1760 | /* state change failure should never happen */ | 1760 | /* state change failure should never happen */ |
1761 | WARN_ON_ONCE(1); | 1761 | WARN_ON_ONCE(1); |
1762 | return; | 1762 | return; |
@@ -1784,11 +1784,8 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) | |||
1784 | return; | 1784 | return; |
1785 | 1785 | ||
1786 | out_fail: | 1786 | out_fail: |
1787 | dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); | 1787 | ++ctrl->ctrl.nr_reconnects; |
1788 | nvme_remove_namespaces(&ctrl->ctrl); | 1788 | nvme_rdma_reconnect_or_remove(ctrl); |
1789 | nvme_rdma_shutdown_ctrl(ctrl, true); | ||
1790 | nvme_uninit_ctrl(&ctrl->ctrl); | ||
1791 | nvme_put_ctrl(&ctrl->ctrl); | ||
1792 | } | 1789 | } |
1793 | 1790 | ||
1794 | static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { | 1791 | static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { |
@@ -1942,6 +1939,9 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, | |||
1942 | if (!ctrl->queues) | 1939 | if (!ctrl->queues) |
1943 | goto out_uninit_ctrl; | 1940 | goto out_uninit_ctrl; |
1944 | 1941 | ||
1942 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); | ||
1943 | WARN_ON_ONCE(!changed); | ||
1944 | |||
1945 | ret = nvme_rdma_configure_admin_queue(ctrl, true); | 1945 | ret = nvme_rdma_configure_admin_queue(ctrl, true); |
1946 | if (ret) | 1946 | if (ret) |
1947 | goto out_kfree_queues; | 1947 | goto out_kfree_queues; |
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 0bd737117a80..a78029e4e5f4 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -520,9 +520,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, | |||
520 | goto fail; | 520 | goto fail; |
521 | } | 521 | } |
522 | 522 | ||
523 | /* either variant of SGLs is fine, as we don't support metadata */ | 523 | /* |
524 | if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF && | 524 | * For fabrics, PSDT field shall describe metadata pointer (MPTR) that |
525 | (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) { | 525 | * contains an address of a single contiguous physical buffer that is |
526 | * byte aligned. | ||
527 | */ | ||
528 | if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) { | ||
526 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; | 529 | status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; |
527 | goto fail; | 530 | goto fail; |
528 | } | 531 | } |
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c index 0a4372a016f2..28bbdff4a88b 100644 --- a/drivers/nvme/target/io-cmd.c +++ b/drivers/nvme/target/io-cmd.c | |||
@@ -105,10 +105,13 @@ static void nvmet_execute_flush(struct nvmet_req *req) | |||
105 | static u16 nvmet_discard_range(struct nvmet_ns *ns, | 105 | static u16 nvmet_discard_range(struct nvmet_ns *ns, |
106 | struct nvme_dsm_range *range, struct bio **bio) | 106 | struct nvme_dsm_range *range, struct bio **bio) |
107 | { | 107 | { |
108 | if (__blkdev_issue_discard(ns->bdev, | 108 | int ret; |
109 | |||
110 | ret = __blkdev_issue_discard(ns->bdev, | ||
109 | le64_to_cpu(range->slba) << (ns->blksize_shift - 9), | 111 | le64_to_cpu(range->slba) << (ns->blksize_shift - 9), |
110 | le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), | 112 | le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), |
111 | GFP_KERNEL, 0, bio)) | 113 | GFP_KERNEL, 0, bio); |
114 | if (ret && ret != -EOPNOTSUPP) | ||
112 | return NVME_SC_INTERNAL | NVME_SC_DNR; | 115 | return NVME_SC_INTERNAL | NVME_SC_DNR; |
113 | return 0; | 116 | return 0; |
114 | } | 117 | } |
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 7991ec3a17db..861d1509b22b 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c | |||
@@ -184,7 +184,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
184 | return BLK_STS_OK; | 184 | return BLK_STS_OK; |
185 | } | 185 | } |
186 | 186 | ||
187 | if (blk_rq_bytes(req)) { | 187 | if (blk_rq_payload_bytes(req)) { |
188 | iod->sg_table.sgl = iod->first_sgl; | 188 | iod->sg_table.sgl = iod->first_sgl; |
189 | if (sg_alloc_table_chained(&iod->sg_table, | 189 | if (sg_alloc_table_chained(&iod->sg_table, |
190 | blk_rq_nr_phys_segments(req), | 190 | blk_rq_nr_phys_segments(req), |
@@ -193,7 +193,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
193 | 193 | ||
194 | iod->req.sg = iod->sg_table.sgl; | 194 | iod->req.sg = iod->sg_table.sgl; |
195 | iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); | 195 | iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); |
196 | iod->req.transfer_len = blk_rq_bytes(req); | 196 | iod->req.transfer_len = blk_rq_payload_bytes(req); |
197 | } | 197 | } |
198 | 198 | ||
199 | blk_mq_start_request(req); | 199 | blk_mq_start_request(req); |
diff --git a/drivers/of/property.c b/drivers/of/property.c index 36ed84e26d9c..f46828e3b082 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c | |||
@@ -977,11 +977,11 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, | |||
977 | return 0; | 977 | return 0; |
978 | } | 978 | } |
979 | 979 | ||
980 | static void * | 980 | static const void * |
981 | of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, | 981 | of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, |
982 | const struct device *dev) | 982 | const struct device *dev) |
983 | { | 983 | { |
984 | return (void *)of_device_get_match_data(dev); | 984 | return of_device_get_match_data(dev); |
985 | } | 985 | } |
986 | 986 | ||
987 | const struct fwnode_operations of_fwnode_ops = { | 987 | const struct fwnode_operations of_fwnode_ops = { |
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c index 2d87bc1adf38..0c0910709435 100644 --- a/drivers/opp/cpu.c +++ b/drivers/opp/cpu.c | |||
@@ -55,7 +55,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, | |||
55 | if (max_opps <= 0) | 55 | if (max_opps <= 0) |
56 | return max_opps ? max_opps : -ENODATA; | 56 | return max_opps ? max_opps : -ENODATA; |
57 | 57 | ||
58 | freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC); | 58 | freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL); |
59 | if (!freq_table) | 59 | if (!freq_table) |
60 | return -ENOMEM; | 60 | return -ENOMEM; |
61 | 61 | ||
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c index 8de2d5c69b1d..dc9303abda42 100644 --- a/drivers/pci/dwc/pcie-designware-host.c +++ b/drivers/pci/dwc/pcie-designware-host.c | |||
@@ -613,7 +613,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp) | |||
613 | /* setup bus numbers */ | 613 | /* setup bus numbers */ |
614 | val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); | 614 | val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); |
615 | val &= 0xff000000; | 615 | val &= 0xff000000; |
616 | val |= 0x00010100; | 616 | val |= 0x00ff0100; |
617 | dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); | 617 | dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); |
618 | 618 | ||
619 | /* setup command register */ | 619 | /* setup command register */ |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ec582d37c189..c3ba14f6444e 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -3420,22 +3420,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE, | |||
3420 | 3420 | ||
3421 | static void quirk_chelsio_extend_vpd(struct pci_dev *dev) | 3421 | static void quirk_chelsio_extend_vpd(struct pci_dev *dev) |
3422 | { | 3422 | { |
3423 | pci_set_vpd_size(dev, 8192); | 3423 | int chip = (dev->device & 0xf000) >> 12; |
3424 | } | 3424 | int func = (dev->device & 0x0f00) >> 8; |
3425 | 3425 | int prod = (dev->device & 0x00ff) >> 0; | |
3426 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd); | 3426 | |
3427 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd); | 3427 | /* |
3428 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd); | 3428 | * If this is a T3-based adapter, there's a 1KB VPD area at offset |
3429 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd); | 3429 | * 0xc00 which contains the preferred VPD values. If this is a T4 or |
3430 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd); | 3430 | * later based adapter, the special VPD is at offset 0x400 for the |
3431 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd); | 3431 | * Physical Functions (the SR-IOV Virtual Functions have no VPD |
3432 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd); | 3432 | * Capabilities). The PCI VPD Access core routines will normally |
3433 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd); | 3433 | * compute the size of the VPD by parsing the VPD Data Structure at |
3434 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd); | 3434 | * offset 0x000. This will result in silent failures when attempting |
3435 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd); | 3435 | * to accesses these other VPD areas which are beyond those computed |
3436 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd); | 3436 | * limits. |
3437 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd); | 3437 | */ |
3438 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd); | 3438 | if (chip == 0x0 && prod >= 0x20) |
3439 | pci_set_vpd_size(dev, 8192); | ||
3440 | else if (chip >= 0x4 && func < 0x8) | ||
3441 | pci_set_vpd_size(dev, 2048); | ||
3442 | } | ||
3443 | |||
3444 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, | ||
3445 | quirk_chelsio_extend_vpd); | ||
3439 | 3446 | ||
3440 | #ifdef CONFIG_ACPI | 3447 | #ifdef CONFIG_ACPI |
3441 | /* | 3448 | /* |
@@ -3902,6 +3909,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, | |||
3902 | quirk_dma_func1_alias); | 3909 | quirk_dma_func1_alias); |
3903 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, | 3910 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, |
3904 | quirk_dma_func1_alias); | 3911 | quirk_dma_func1_alias); |
3912 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645, | ||
3913 | quirk_dma_func1_alias); | ||
3905 | /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */ | 3914 | /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */ |
3906 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON, | 3915 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON, |
3907 | PCI_DEVICE_ID_JMICRON_JMB388_ESD, | 3916 | PCI_DEVICE_ID_JMICRON_JMB388_ESD, |
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 369d48d6c6f1..365447240d95 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -401,6 +401,10 @@ void pci_release_resource(struct pci_dev *dev, int resno) | |||
401 | struct resource *res = dev->resource + resno; | 401 | struct resource *res = dev->resource + resno; |
402 | 402 | ||
403 | pci_info(dev, "BAR %d: releasing %pR\n", resno, res); | 403 | pci_info(dev, "BAR %d: releasing %pR\n", resno, res); |
404 | |||
405 | if (!res->parent) | ||
406 | return; | ||
407 | |||
404 | release_resource(res); | 408 | release_resource(res); |
405 | res->end = resource_size(res) - 1; | 409 | res->end = resource_size(res) - 1; |
406 | res->start = 0; | 410 | res->start = 0; |
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 7bc5eee96b31..f63db346c219 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/export.h> | 17 | #include <linux/export.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/perf/arm_pmu.h> | 19 | #include <linux/perf/arm_pmu.h> |
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
22 | #include <linux/sched/clock.h> | 21 | #include <linux/sched/clock.h> |
23 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
@@ -26,6 +25,9 @@ | |||
26 | 25 | ||
27 | #include <asm/irq_regs.h> | 26 | #include <asm/irq_regs.h> |
28 | 27 | ||
28 | static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu); | ||
29 | static DEFINE_PER_CPU(int, cpu_irq); | ||
30 | |||
29 | static int | 31 | static int |
30 | armpmu_map_cache_event(const unsigned (*cache_map) | 32 | armpmu_map_cache_event(const unsigned (*cache_map) |
31 | [PERF_COUNT_HW_CACHE_MAX] | 33 | [PERF_COUNT_HW_CACHE_MAX] |
@@ -320,17 +322,9 @@ validate_group(struct perf_event *event) | |||
320 | return 0; | 322 | return 0; |
321 | } | 323 | } |
322 | 324 | ||
323 | static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu) | ||
324 | { | ||
325 | struct platform_device *pdev = armpmu->plat_device; | ||
326 | |||
327 | return pdev ? dev_get_platdata(&pdev->dev) : NULL; | ||
328 | } | ||
329 | |||
330 | static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) | 325 | static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) |
331 | { | 326 | { |
332 | struct arm_pmu *armpmu; | 327 | struct arm_pmu *armpmu; |
333 | struct arm_pmu_platdata *plat; | ||
334 | int ret; | 328 | int ret; |
335 | u64 start_clock, finish_clock; | 329 | u64 start_clock, finish_clock; |
336 | 330 | ||
@@ -341,14 +335,11 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) | |||
341 | * dereference. | 335 | * dereference. |
342 | */ | 336 | */ |
343 | armpmu = *(void **)dev; | 337 | armpmu = *(void **)dev; |
344 | 338 | if (WARN_ON_ONCE(!armpmu)) | |
345 | plat = armpmu_get_platdata(armpmu); | 339 | return IRQ_NONE; |
346 | 340 | ||
347 | start_clock = sched_clock(); | 341 | start_clock = sched_clock(); |
348 | if (plat && plat->handle_irq) | 342 | ret = armpmu->handle_irq(irq, armpmu); |
349 | ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq); | ||
350 | else | ||
351 | ret = armpmu->handle_irq(irq, armpmu); | ||
352 | finish_clock = sched_clock(); | 343 | finish_clock = sched_clock(); |
353 | 344 | ||
354 | perf_sample_event_took(finish_clock - start_clock); | 345 | perf_sample_event_took(finish_clock - start_clock); |
@@ -531,54 +522,41 @@ int perf_num_counters(void) | |||
531 | } | 522 | } |
532 | EXPORT_SYMBOL_GPL(perf_num_counters); | 523 | EXPORT_SYMBOL_GPL(perf_num_counters); |
533 | 524 | ||
534 | void armpmu_free_irq(struct arm_pmu *armpmu, int cpu) | 525 | static int armpmu_count_irq_users(const int irq) |
535 | { | 526 | { |
536 | struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; | 527 | int cpu, count = 0; |
537 | int irq = per_cpu(hw_events->irq, cpu); | ||
538 | 528 | ||
539 | if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs)) | 529 | for_each_possible_cpu(cpu) { |
540 | return; | 530 | if (per_cpu(cpu_irq, cpu) == irq) |
541 | 531 | count++; | |
542 | if (irq_is_percpu_devid(irq)) { | ||
543 | free_percpu_irq(irq, &hw_events->percpu_pmu); | ||
544 | cpumask_clear(&armpmu->active_irqs); | ||
545 | return; | ||
546 | } | 532 | } |
547 | 533 | ||
548 | free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); | 534 | return count; |
549 | } | 535 | } |
550 | 536 | ||
551 | void armpmu_free_irqs(struct arm_pmu *armpmu) | 537 | void armpmu_free_irq(int irq, int cpu) |
552 | { | 538 | { |
553 | int cpu; | 539 | if (per_cpu(cpu_irq, cpu) == 0) |
540 | return; | ||
541 | if (WARN_ON(irq != per_cpu(cpu_irq, cpu))) | ||
542 | return; | ||
543 | |||
544 | if (!irq_is_percpu_devid(irq)) | ||
545 | free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu)); | ||
546 | else if (armpmu_count_irq_users(irq) == 1) | ||
547 | free_percpu_irq(irq, &cpu_armpmu); | ||
554 | 548 | ||
555 | for_each_cpu(cpu, &armpmu->supported_cpus) | 549 | per_cpu(cpu_irq, cpu) = 0; |
556 | armpmu_free_irq(armpmu, cpu); | ||
557 | } | 550 | } |
558 | 551 | ||
559 | int armpmu_request_irq(struct arm_pmu *armpmu, int cpu) | 552 | int armpmu_request_irq(int irq, int cpu) |
560 | { | 553 | { |
561 | int err = 0; | 554 | int err = 0; |
562 | struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; | ||
563 | const irq_handler_t handler = armpmu_dispatch_irq; | 555 | const irq_handler_t handler = armpmu_dispatch_irq; |
564 | int irq = per_cpu(hw_events->irq, cpu); | ||
565 | if (!irq) | 556 | if (!irq) |
566 | return 0; | 557 | return 0; |
567 | 558 | ||
568 | if (irq_is_percpu_devid(irq) && cpumask_empty(&armpmu->active_irqs)) { | 559 | if (!irq_is_percpu_devid(irq)) { |
569 | err = request_percpu_irq(irq, handler, "arm-pmu", | ||
570 | &hw_events->percpu_pmu); | ||
571 | } else if (irq_is_percpu_devid(irq)) { | ||
572 | int other_cpu = cpumask_first(&armpmu->active_irqs); | ||
573 | int other_irq = per_cpu(hw_events->irq, other_cpu); | ||
574 | |||
575 | if (irq != other_irq) { | ||
576 | pr_warn("mismatched PPIs detected.\n"); | ||
577 | err = -EINVAL; | ||
578 | goto err_out; | ||
579 | } | ||
580 | } else { | ||
581 | struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu); | ||
582 | unsigned long irq_flags; | 560 | unsigned long irq_flags; |
583 | 561 | ||
584 | err = irq_force_affinity(irq, cpumask_of(cpu)); | 562 | err = irq_force_affinity(irq, cpumask_of(cpu)); |
@@ -589,22 +567,22 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu) | |||
589 | goto err_out; | 567 | goto err_out; |
590 | } | 568 | } |
591 | 569 | ||
592 | if (platdata && platdata->irq_flags) { | 570 | irq_flags = IRQF_PERCPU | |
593 | irq_flags = platdata->irq_flags; | 571 | IRQF_NOBALANCING | |
594 | } else { | 572 | IRQF_NO_THREAD; |
595 | irq_flags = IRQF_PERCPU | | ||
596 | IRQF_NOBALANCING | | ||
597 | IRQF_NO_THREAD; | ||
598 | } | ||
599 | 573 | ||
574 | irq_set_status_flags(irq, IRQ_NOAUTOEN); | ||
600 | err = request_irq(irq, handler, irq_flags, "arm-pmu", | 575 | err = request_irq(irq, handler, irq_flags, "arm-pmu", |
601 | per_cpu_ptr(&hw_events->percpu_pmu, cpu)); | 576 | per_cpu_ptr(&cpu_armpmu, cpu)); |
577 | } else if (armpmu_count_irq_users(irq) == 0) { | ||
578 | err = request_percpu_irq(irq, handler, "arm-pmu", | ||
579 | &cpu_armpmu); | ||
602 | } | 580 | } |
603 | 581 | ||
604 | if (err) | 582 | if (err) |
605 | goto err_out; | 583 | goto err_out; |
606 | 584 | ||
607 | cpumask_set_cpu(cpu, &armpmu->active_irqs); | 585 | per_cpu(cpu_irq, cpu) = irq; |
608 | return 0; | 586 | return 0; |
609 | 587 | ||
610 | err_out: | 588 | err_out: |
@@ -612,19 +590,6 @@ err_out: | |||
612 | return err; | 590 | return err; |
613 | } | 591 | } |
614 | 592 | ||
615 | int armpmu_request_irqs(struct arm_pmu *armpmu) | ||
616 | { | ||
617 | int cpu, err; | ||
618 | |||
619 | for_each_cpu(cpu, &armpmu->supported_cpus) { | ||
620 | err = armpmu_request_irq(armpmu, cpu); | ||
621 | if (err) | ||
622 | break; | ||
623 | } | ||
624 | |||
625 | return err; | ||
626 | } | ||
627 | |||
628 | static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu) | 593 | static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu) |
629 | { | 594 | { |
630 | struct pmu_hw_events __percpu *hw_events = pmu->hw_events; | 595 | struct pmu_hw_events __percpu *hw_events = pmu->hw_events; |
@@ -647,12 +612,14 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node) | |||
647 | if (pmu->reset) | 612 | if (pmu->reset) |
648 | pmu->reset(pmu); | 613 | pmu->reset(pmu); |
649 | 614 | ||
615 | per_cpu(cpu_armpmu, cpu) = pmu; | ||
616 | |||
650 | irq = armpmu_get_cpu_irq(pmu, cpu); | 617 | irq = armpmu_get_cpu_irq(pmu, cpu); |
651 | if (irq) { | 618 | if (irq) { |
652 | if (irq_is_percpu_devid(irq)) { | 619 | if (irq_is_percpu_devid(irq)) |
653 | enable_percpu_irq(irq, IRQ_TYPE_NONE); | 620 | enable_percpu_irq(irq, IRQ_TYPE_NONE); |
654 | return 0; | 621 | else |
655 | } | 622 | enable_irq(irq); |
656 | } | 623 | } |
657 | 624 | ||
658 | return 0; | 625 | return 0; |
@@ -667,8 +634,14 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node) | |||
667 | return 0; | 634 | return 0; |
668 | 635 | ||
669 | irq = armpmu_get_cpu_irq(pmu, cpu); | 636 | irq = armpmu_get_cpu_irq(pmu, cpu); |
670 | if (irq && irq_is_percpu_devid(irq)) | 637 | if (irq) { |
671 | disable_percpu_irq(irq); | 638 | if (irq_is_percpu_devid(irq)) |
639 | disable_percpu_irq(irq); | ||
640 | else | ||
641 | disable_irq_nosync(irq); | ||
642 | } | ||
643 | |||
644 | per_cpu(cpu_armpmu, cpu) = NULL; | ||
672 | 645 | ||
673 | return 0; | 646 | return 0; |
674 | } | 647 | } |
@@ -800,18 +773,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) | |||
800 | &cpu_pmu->node); | 773 | &cpu_pmu->node); |
801 | } | 774 | } |
802 | 775 | ||
803 | struct arm_pmu *armpmu_alloc(void) | 776 | static struct arm_pmu *__armpmu_alloc(gfp_t flags) |
804 | { | 777 | { |
805 | struct arm_pmu *pmu; | 778 | struct arm_pmu *pmu; |
806 | int cpu; | 779 | int cpu; |
807 | 780 | ||
808 | pmu = kzalloc(sizeof(*pmu), GFP_KERNEL); | 781 | pmu = kzalloc(sizeof(*pmu), flags); |
809 | if (!pmu) { | 782 | if (!pmu) { |
810 | pr_info("failed to allocate PMU device!\n"); | 783 | pr_info("failed to allocate PMU device!\n"); |
811 | goto out; | 784 | goto out; |
812 | } | 785 | } |
813 | 786 | ||
814 | pmu->hw_events = alloc_percpu(struct pmu_hw_events); | 787 | pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags); |
815 | if (!pmu->hw_events) { | 788 | if (!pmu->hw_events) { |
816 | pr_info("failed to allocate per-cpu PMU data.\n"); | 789 | pr_info("failed to allocate per-cpu PMU data.\n"); |
817 | goto out_free_pmu; | 790 | goto out_free_pmu; |
@@ -857,6 +830,17 @@ out: | |||
857 | return NULL; | 830 | return NULL; |
858 | } | 831 | } |
859 | 832 | ||
833 | struct arm_pmu *armpmu_alloc(void) | ||
834 | { | ||
835 | return __armpmu_alloc(GFP_KERNEL); | ||
836 | } | ||
837 | |||
838 | struct arm_pmu *armpmu_alloc_atomic(void) | ||
839 | { | ||
840 | return __armpmu_alloc(GFP_ATOMIC); | ||
841 | } | ||
842 | |||
843 | |||
860 | void armpmu_free(struct arm_pmu *pmu) | 844 | void armpmu_free(struct arm_pmu *pmu) |
861 | { | 845 | { |
862 | free_percpu(pmu->hw_events); | 846 | free_percpu(pmu->hw_events); |
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index 705f1a390e31..0f197516d708 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/acpi.h> | 11 | #include <linux/acpi.h> |
12 | #include <linux/cpumask.h> | 12 | #include <linux/cpumask.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/irq.h> | ||
15 | #include <linux/irqdesc.h> | ||
14 | #include <linux/percpu.h> | 16 | #include <linux/percpu.h> |
15 | #include <linux/perf/arm_pmu.h> | 17 | #include <linux/perf/arm_pmu.h> |
16 | 18 | ||
@@ -87,7 +89,13 @@ static int arm_pmu_acpi_parse_irqs(void) | |||
87 | pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu); | 89 | pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu); |
88 | } | 90 | } |
89 | 91 | ||
92 | /* | ||
93 | * Log and request the IRQ so the core arm_pmu code can manage | ||
94 | * it. We'll have to sanity-check IRQs later when we associate | ||
95 | * them with their PMUs. | ||
96 | */ | ||
90 | per_cpu(pmu_irqs, cpu) = irq; | 97 | per_cpu(pmu_irqs, cpu) = irq; |
98 | armpmu_request_irq(irq, cpu); | ||
91 | } | 99 | } |
92 | 100 | ||
93 | return 0; | 101 | return 0; |
@@ -127,7 +135,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void) | |||
127 | return pmu; | 135 | return pmu; |
128 | } | 136 | } |
129 | 137 | ||
130 | pmu = armpmu_alloc(); | 138 | pmu = armpmu_alloc_atomic(); |
131 | if (!pmu) { | 139 | if (!pmu) { |
132 | pr_warn("Unable to allocate PMU for CPU%d\n", | 140 | pr_warn("Unable to allocate PMU for CPU%d\n", |
133 | smp_processor_id()); | 141 | smp_processor_id()); |
@@ -140,6 +148,35 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void) | |||
140 | } | 148 | } |
141 | 149 | ||
142 | /* | 150 | /* |
151 | * Check whether the new IRQ is compatible with those already associated with | ||
152 | * the PMU (e.g. we don't have mismatched PPIs). | ||
153 | */ | ||
154 | static bool pmu_irq_matches(struct arm_pmu *pmu, int irq) | ||
155 | { | ||
156 | struct pmu_hw_events __percpu *hw_events = pmu->hw_events; | ||
157 | int cpu; | ||
158 | |||
159 | if (!irq) | ||
160 | return true; | ||
161 | |||
162 | for_each_cpu(cpu, &pmu->supported_cpus) { | ||
163 | int other_irq = per_cpu(hw_events->irq, cpu); | ||
164 | if (!other_irq) | ||
165 | continue; | ||
166 | |||
167 | if (irq == other_irq) | ||
168 | continue; | ||
169 | if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq)) | ||
170 | continue; | ||
171 | |||
172 | pr_warn("mismatched PPIs detected\n"); | ||
173 | return false; | ||
174 | } | ||
175 | |||
176 | return true; | ||
177 | } | ||
178 | |||
179 | /* | ||
143 | * This must run before the common arm_pmu hotplug logic, so that we can | 180 | * This must run before the common arm_pmu hotplug logic, so that we can |
144 | * associate a CPU and its interrupt before the common code tries to manage the | 181 | * associate a CPU and its interrupt before the common code tries to manage the |
145 | * affinity and so on. | 182 | * affinity and so on. |
@@ -164,19 +201,14 @@ static int arm_pmu_acpi_cpu_starting(unsigned int cpu) | |||
164 | if (!pmu) | 201 | if (!pmu) |
165 | return -ENOMEM; | 202 | return -ENOMEM; |
166 | 203 | ||
167 | cpumask_set_cpu(cpu, &pmu->supported_cpus); | ||
168 | |||
169 | per_cpu(probed_pmus, cpu) = pmu; | 204 | per_cpu(probed_pmus, cpu) = pmu; |
170 | 205 | ||
171 | /* | 206 | if (pmu_irq_matches(pmu, irq)) { |
172 | * Log and request the IRQ so the core arm_pmu code can manage it. In | 207 | hw_events = pmu->hw_events; |
173 | * some situations (e.g. mismatched PPIs), we may fail to request the | 208 | per_cpu(hw_events->irq, cpu) = irq; |
174 | * IRQ. However, it may be too late for us to do anything about it. | 209 | } |
175 | * The common ARM PMU code will log a warning in this case. | 210 | |
176 | */ | 211 | cpumask_set_cpu(cpu, &pmu->supported_cpus); |
177 | hw_events = pmu->hw_events; | ||
178 | per_cpu(hw_events->irq, cpu) = irq; | ||
179 | armpmu_request_irq(pmu, cpu); | ||
180 | 212 | ||
181 | /* | 213 | /* |
182 | * Ideally, we'd probe the PMU here when we find the first matching | 214 | * Ideally, we'd probe the PMU here when we find the first matching |
@@ -247,11 +279,6 @@ static int arm_pmu_acpi_init(void) | |||
247 | if (acpi_disabled) | 279 | if (acpi_disabled) |
248 | return 0; | 280 | return 0; |
249 | 281 | ||
250 | /* | ||
251 | * We can't request IRQs yet, since we don't know the cookie value | ||
252 | * until we know which CPUs share the same logical PMU. We'll handle | ||
253 | * that in arm_pmu_acpi_cpu_starting(). | ||
254 | */ | ||
255 | ret = arm_pmu_acpi_parse_irqs(); | 282 | ret = arm_pmu_acpi_parse_irqs(); |
256 | if (ret) | 283 | if (ret) |
257 | return ret; | 284 | return ret; |
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c index 46501cc79fd7..7729eda5909d 100644 --- a/drivers/perf/arm_pmu_platform.c +++ b/drivers/perf/arm_pmu_platform.c | |||
@@ -127,13 +127,6 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) | |||
127 | pdev->dev.of_node); | 127 | pdev->dev.of_node); |
128 | } | 128 | } |
129 | 129 | ||
130 | /* | ||
131 | * Some platforms have all PMU IRQs OR'd into a single IRQ, with a | ||
132 | * special platdata function that attempts to demux them. | ||
133 | */ | ||
134 | if (dev_get_platdata(&pdev->dev)) | ||
135 | cpumask_setall(&pmu->supported_cpus); | ||
136 | |||
137 | for (i = 0; i < num_irqs; i++) { | 130 | for (i = 0; i < num_irqs; i++) { |
138 | int cpu, irq; | 131 | int cpu, irq; |
139 | 132 | ||
@@ -164,6 +157,36 @@ static int pmu_parse_irqs(struct arm_pmu *pmu) | |||
164 | return 0; | 157 | return 0; |
165 | } | 158 | } |
166 | 159 | ||
160 | static int armpmu_request_irqs(struct arm_pmu *armpmu) | ||
161 | { | ||
162 | struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; | ||
163 | int cpu, err; | ||
164 | |||
165 | for_each_cpu(cpu, &armpmu->supported_cpus) { | ||
166 | int irq = per_cpu(hw_events->irq, cpu); | ||
167 | if (!irq) | ||
168 | continue; | ||
169 | |||
170 | err = armpmu_request_irq(irq, cpu); | ||
171 | if (err) | ||
172 | break; | ||
173 | } | ||
174 | |||
175 | return err; | ||
176 | } | ||
177 | |||
178 | static void armpmu_free_irqs(struct arm_pmu *armpmu) | ||
179 | { | ||
180 | int cpu; | ||
181 | struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; | ||
182 | |||
183 | for_each_cpu(cpu, &armpmu->supported_cpus) { | ||
184 | int irq = per_cpu(hw_events->irq, cpu); | ||
185 | |||
186 | armpmu_free_irq(irq, cpu); | ||
187 | } | ||
188 | } | ||
189 | |||
167 | int arm_pmu_device_probe(struct platform_device *pdev, | 190 | int arm_pmu_device_probe(struct platform_device *pdev, |
168 | const struct of_device_id *of_table, | 191 | const struct of_device_id *of_table, |
169 | const struct pmu_probe_info *probe_table) | 192 | const struct pmu_probe_info *probe_table) |
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c index c5ff4525edef..c5493ea51282 100644 --- a/drivers/phy/qualcomm/phy-qcom-ufs.c +++ b/drivers/phy/qualcomm/phy-qcom-ufs.c | |||
@@ -675,3 +675,8 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy) | |||
675 | return 0; | 675 | return 0; |
676 | } | 676 | } |
677 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off); | 677 | EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off); |
678 | |||
679 | MODULE_AUTHOR("Yaniv Gardi <ygardi@codeaurora.org>"); | ||
680 | MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>"); | ||
681 | MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY"); | ||
682 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c index 1fda9d6c7ea3..4b91ff74779b 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-axg.c +++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c | |||
@@ -716,7 +716,7 @@ static const char * const uart_b_groups[] = { | |||
716 | "uart_tx_b_x", "uart_rx_b_x", "uart_cts_b_x", "uart_rts_b_x", | 716 | "uart_tx_b_x", "uart_rx_b_x", "uart_cts_b_x", "uart_rts_b_x", |
717 | }; | 717 | }; |
718 | 718 | ||
719 | static const char * const uart_ao_b_gpioz_groups[] = { | 719 | static const char * const uart_ao_b_z_groups[] = { |
720 | "uart_ao_tx_b_z", "uart_ao_rx_b_z", | 720 | "uart_ao_tx_b_z", "uart_ao_rx_b_z", |
721 | "uart_ao_cts_b_z", "uart_ao_rts_b_z", | 721 | "uart_ao_cts_b_z", "uart_ao_rts_b_z", |
722 | }; | 722 | }; |
@@ -855,7 +855,7 @@ static struct meson_pmx_func meson_axg_periphs_functions[] = { | |||
855 | FUNCTION(nand), | 855 | FUNCTION(nand), |
856 | FUNCTION(uart_a), | 856 | FUNCTION(uart_a), |
857 | FUNCTION(uart_b), | 857 | FUNCTION(uart_b), |
858 | FUNCTION(uart_ao_b_gpioz), | 858 | FUNCTION(uart_ao_b_z), |
859 | FUNCTION(i2c0), | 859 | FUNCTION(i2c0), |
860 | FUNCTION(i2c1), | 860 | FUNCTION(i2c1), |
861 | FUNCTION(i2c2), | 861 | FUNCTION(i2c2), |
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c index c32399faff57..90c274490181 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c | |||
@@ -124,7 +124,7 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = { | |||
124 | EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c), | 124 | EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c), |
125 | }; | 125 | }; |
126 | 126 | ||
127 | const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { | 127 | static const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { |
128 | { | 128 | { |
129 | /* pin-controller instance 0 data */ | 129 | /* pin-controller instance 0 data */ |
130 | .pin_banks = s5pv210_pin_bank, | 130 | .pin_banks = s5pv210_pin_bank, |
@@ -137,6 +137,11 @@ const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { | |||
137 | }, | 137 | }, |
138 | }; | 138 | }; |
139 | 139 | ||
140 | const struct samsung_pinctrl_of_match_data s5pv210_of_data __initconst = { | ||
141 | .ctrl = s5pv210_pin_ctrl, | ||
142 | .num_ctrl = ARRAY_SIZE(s5pv210_pin_ctrl), | ||
143 | }; | ||
144 | |||
140 | /* Pad retention control code for accessing PMU regmap */ | 145 | /* Pad retention control code for accessing PMU regmap */ |
141 | static atomic_t exynos_shared_retention_refcnt; | 146 | static atomic_t exynos_shared_retention_refcnt; |
142 | 147 | ||
@@ -199,7 +204,7 @@ static const struct samsung_retention_data exynos3250_retention_data __initconst | |||
199 | * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes | 204 | * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes |
200 | * two gpio/pin-mux/pinconfig controllers. | 205 | * two gpio/pin-mux/pinconfig controllers. |
201 | */ | 206 | */ |
202 | const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { | 207 | static const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { |
203 | { | 208 | { |
204 | /* pin-controller instance 0 data */ | 209 | /* pin-controller instance 0 data */ |
205 | .pin_banks = exynos3250_pin_banks0, | 210 | .pin_banks = exynos3250_pin_banks0, |
@@ -220,6 +225,11 @@ const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { | |||
220 | }, | 225 | }, |
221 | }; | 226 | }; |
222 | 227 | ||
228 | const struct samsung_pinctrl_of_match_data exynos3250_of_data __initconst = { | ||
229 | .ctrl = exynos3250_pin_ctrl, | ||
230 | .num_ctrl = ARRAY_SIZE(exynos3250_pin_ctrl), | ||
231 | }; | ||
232 | |||
223 | /* pin banks of exynos4210 pin-controller 0 */ | 233 | /* pin banks of exynos4210 pin-controller 0 */ |
224 | static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = { | 234 | static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = { |
225 | EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), | 235 | EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), |
@@ -303,7 +313,7 @@ static const struct samsung_retention_data exynos4_audio_retention_data __initco | |||
303 | * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes | 313 | * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes |
304 | * three gpio/pin-mux/pinconfig controllers. | 314 | * three gpio/pin-mux/pinconfig controllers. |
305 | */ | 315 | */ |
306 | const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { | 316 | static const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { |
307 | { | 317 | { |
308 | /* pin-controller instance 0 data */ | 318 | /* pin-controller instance 0 data */ |
309 | .pin_banks = exynos4210_pin_banks0, | 319 | .pin_banks = exynos4210_pin_banks0, |
@@ -329,6 +339,11 @@ const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { | |||
329 | }, | 339 | }, |
330 | }; | 340 | }; |
331 | 341 | ||
342 | const struct samsung_pinctrl_of_match_data exynos4210_of_data __initconst = { | ||
343 | .ctrl = exynos4210_pin_ctrl, | ||
344 | .num_ctrl = ARRAY_SIZE(exynos4210_pin_ctrl), | ||
345 | }; | ||
346 | |||
332 | /* pin banks of exynos4x12 pin-controller 0 */ | 347 | /* pin banks of exynos4x12 pin-controller 0 */ |
333 | static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = { | 348 | static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = { |
334 | EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), | 349 | EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), |
@@ -391,7 +406,7 @@ static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst = | |||
391 | * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes | 406 | * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes |
392 | * four gpio/pin-mux/pinconfig controllers. | 407 | * four gpio/pin-mux/pinconfig controllers. |
393 | */ | 408 | */ |
394 | const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { | 409 | static const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { |
395 | { | 410 | { |
396 | /* pin-controller instance 0 data */ | 411 | /* pin-controller instance 0 data */ |
397 | .pin_banks = exynos4x12_pin_banks0, | 412 | .pin_banks = exynos4x12_pin_banks0, |
@@ -427,6 +442,11 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { | |||
427 | }, | 442 | }, |
428 | }; | 443 | }; |
429 | 444 | ||
445 | const struct samsung_pinctrl_of_match_data exynos4x12_of_data __initconst = { | ||
446 | .ctrl = exynos4x12_pin_ctrl, | ||
447 | .num_ctrl = ARRAY_SIZE(exynos4x12_pin_ctrl), | ||
448 | }; | ||
449 | |||
430 | /* pin banks of exynos5250 pin-controller 0 */ | 450 | /* pin banks of exynos5250 pin-controller 0 */ |
431 | static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = { | 451 | static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = { |
432 | EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), | 452 | EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), |
@@ -487,7 +507,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst = | |||
487 | * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes | 507 | * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes |
488 | * four gpio/pin-mux/pinconfig controllers. | 508 | * four gpio/pin-mux/pinconfig controllers. |
489 | */ | 509 | */ |
490 | const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { | 510 | static const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { |
491 | { | 511 | { |
492 | /* pin-controller instance 0 data */ | 512 | /* pin-controller instance 0 data */ |
493 | .pin_banks = exynos5250_pin_banks0, | 513 | .pin_banks = exynos5250_pin_banks0, |
@@ -523,6 +543,11 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { | |||
523 | }, | 543 | }, |
524 | }; | 544 | }; |
525 | 545 | ||
546 | const struct samsung_pinctrl_of_match_data exynos5250_of_data __initconst = { | ||
547 | .ctrl = exynos5250_pin_ctrl, | ||
548 | .num_ctrl = ARRAY_SIZE(exynos5250_pin_ctrl), | ||
549 | }; | ||
550 | |||
526 | /* pin banks of exynos5260 pin-controller 0 */ | 551 | /* pin banks of exynos5260 pin-controller 0 */ |
527 | static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = { | 552 | static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = { |
528 | EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00), | 553 | EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00), |
@@ -567,7 +592,7 @@ static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst = | |||
567 | * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes | 592 | * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes |
568 | * three gpio/pin-mux/pinconfig controllers. | 593 | * three gpio/pin-mux/pinconfig controllers. |
569 | */ | 594 | */ |
570 | const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { | 595 | static const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { |
571 | { | 596 | { |
572 | /* pin-controller instance 0 data */ | 597 | /* pin-controller instance 0 data */ |
573 | .pin_banks = exynos5260_pin_banks0, | 598 | .pin_banks = exynos5260_pin_banks0, |
@@ -587,6 +612,11 @@ const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { | |||
587 | }, | 612 | }, |
588 | }; | 613 | }; |
589 | 614 | ||
615 | const struct samsung_pinctrl_of_match_data exynos5260_of_data __initconst = { | ||
616 | .ctrl = exynos5260_pin_ctrl, | ||
617 | .num_ctrl = ARRAY_SIZE(exynos5260_pin_ctrl), | ||
618 | }; | ||
619 | |||
590 | /* pin banks of exynos5410 pin-controller 0 */ | 620 | /* pin banks of exynos5410 pin-controller 0 */ |
591 | static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = { | 621 | static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = { |
592 | EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), | 622 | EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), |
@@ -657,7 +687,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks3[] __initconst = | |||
657 | * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes | 687 | * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes |
658 | * four gpio/pin-mux/pinconfig controllers. | 688 | * four gpio/pin-mux/pinconfig controllers. |
659 | */ | 689 | */ |
660 | const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { | 690 | static const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { |
661 | { | 691 | { |
662 | /* pin-controller instance 0 data */ | 692 | /* pin-controller instance 0 data */ |
663 | .pin_banks = exynos5410_pin_banks0, | 693 | .pin_banks = exynos5410_pin_banks0, |
@@ -690,6 +720,11 @@ const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { | |||
690 | }, | 720 | }, |
691 | }; | 721 | }; |
692 | 722 | ||
723 | const struct samsung_pinctrl_of_match_data exynos5410_of_data __initconst = { | ||
724 | .ctrl = exynos5410_pin_ctrl, | ||
725 | .num_ctrl = ARRAY_SIZE(exynos5410_pin_ctrl), | ||
726 | }; | ||
727 | |||
693 | /* pin banks of exynos5420 pin-controller 0 */ | 728 | /* pin banks of exynos5420 pin-controller 0 */ |
694 | static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = { | 729 | static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = { |
695 | EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00), | 730 | EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00), |
@@ -774,7 +809,7 @@ static const struct samsung_retention_data exynos5420_retention_data __initconst | |||
774 | * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes | 809 | * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes |
775 | * four gpio/pin-mux/pinconfig controllers. | 810 | * four gpio/pin-mux/pinconfig controllers. |
776 | */ | 811 | */ |
777 | const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { | 812 | static const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { |
778 | { | 813 | { |
779 | /* pin-controller instance 0 data */ | 814 | /* pin-controller instance 0 data */ |
780 | .pin_banks = exynos5420_pin_banks0, | 815 | .pin_banks = exynos5420_pin_banks0, |
@@ -808,3 +843,8 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { | |||
808 | .retention_data = &exynos4_audio_retention_data, | 843 | .retention_data = &exynos4_audio_retention_data, |
809 | }, | 844 | }, |
810 | }; | 845 | }; |
846 | |||
847 | const struct samsung_pinctrl_of_match_data exynos5420_of_data __initconst = { | ||
848 | .ctrl = exynos5420_pin_ctrl, | ||
849 | .num_ctrl = ARRAY_SIZE(exynos5420_pin_ctrl), | ||
850 | }; | ||
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c index fc8f7833bec0..71c9d1d9f345 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c | |||
@@ -175,7 +175,7 @@ static const struct samsung_retention_data exynos5433_fsys_retention_data __init | |||
175 | * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes | 175 | * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes |
176 | * ten gpio/pin-mux/pinconfig controllers. | 176 | * ten gpio/pin-mux/pinconfig controllers. |
177 | */ | 177 | */ |
178 | const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { | 178 | static const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { |
179 | { | 179 | { |
180 | /* pin-controller instance 0 data */ | 180 | /* pin-controller instance 0 data */ |
181 | .pin_banks = exynos5433_pin_banks0, | 181 | .pin_banks = exynos5433_pin_banks0, |
@@ -260,6 +260,11 @@ const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { | |||
260 | }, | 260 | }, |
261 | }; | 261 | }; |
262 | 262 | ||
263 | const struct samsung_pinctrl_of_match_data exynos5433_of_data __initconst = { | ||
264 | .ctrl = exynos5433_pin_ctrl, | ||
265 | .num_ctrl = ARRAY_SIZE(exynos5433_pin_ctrl), | ||
266 | }; | ||
267 | |||
263 | /* pin banks of exynos7 pin-controller - ALIVE */ | 268 | /* pin banks of exynos7 pin-controller - ALIVE */ |
264 | static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = { | 269 | static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = { |
265 | EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), | 270 | EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), |
@@ -339,7 +344,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks9[] __initconst = { | |||
339 | EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), | 344 | EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), |
340 | }; | 345 | }; |
341 | 346 | ||
342 | const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { | 347 | static const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { |
343 | { | 348 | { |
344 | /* pin-controller instance 0 Alive data */ | 349 | /* pin-controller instance 0 Alive data */ |
345 | .pin_banks = exynos7_pin_banks0, | 350 | .pin_banks = exynos7_pin_banks0, |
@@ -392,3 +397,8 @@ const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { | |||
392 | .eint_gpio_init = exynos_eint_gpio_init, | 397 | .eint_gpio_init = exynos_eint_gpio_init, |
393 | }, | 398 | }, |
394 | }; | 399 | }; |
400 | |||
401 | const struct samsung_pinctrl_of_match_data exynos7_of_data __initconst = { | ||
402 | .ctrl = exynos7_pin_ctrl, | ||
403 | .num_ctrl = ARRAY_SIZE(exynos7_pin_ctrl), | ||
404 | }; | ||
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index 10187cb0e9b9..7e824e4d20f4 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c | |||
@@ -565,7 +565,7 @@ static const struct samsung_pin_bank_data s3c2412_pin_banks[] __initconst = { | |||
565 | PIN_BANK_2BIT(13, 0x080, "gpj"), | 565 | PIN_BANK_2BIT(13, 0x080, "gpj"), |
566 | }; | 566 | }; |
567 | 567 | ||
568 | const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { | 568 | static const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { |
569 | { | 569 | { |
570 | .pin_banks = s3c2412_pin_banks, | 570 | .pin_banks = s3c2412_pin_banks, |
571 | .nr_banks = ARRAY_SIZE(s3c2412_pin_banks), | 571 | .nr_banks = ARRAY_SIZE(s3c2412_pin_banks), |
@@ -573,6 +573,11 @@ const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { | |||
573 | }, | 573 | }, |
574 | }; | 574 | }; |
575 | 575 | ||
576 | const struct samsung_pinctrl_of_match_data s3c2412_of_data __initconst = { | ||
577 | .ctrl = s3c2412_pin_ctrl, | ||
578 | .num_ctrl = ARRAY_SIZE(s3c2412_pin_ctrl), | ||
579 | }; | ||
580 | |||
576 | static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = { | 581 | static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = { |
577 | PIN_BANK_A(27, 0x000, "gpa"), | 582 | PIN_BANK_A(27, 0x000, "gpa"), |
578 | PIN_BANK_2BIT(11, 0x010, "gpb"), | 583 | PIN_BANK_2BIT(11, 0x010, "gpb"), |
@@ -587,7 +592,7 @@ static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = { | |||
587 | PIN_BANK_2BIT(2, 0x100, "gpm"), | 592 | PIN_BANK_2BIT(2, 0x100, "gpm"), |
588 | }; | 593 | }; |
589 | 594 | ||
590 | const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { | 595 | static const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { |
591 | { | 596 | { |
592 | .pin_banks = s3c2416_pin_banks, | 597 | .pin_banks = s3c2416_pin_banks, |
593 | .nr_banks = ARRAY_SIZE(s3c2416_pin_banks), | 598 | .nr_banks = ARRAY_SIZE(s3c2416_pin_banks), |
@@ -595,6 +600,11 @@ const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { | |||
595 | }, | 600 | }, |
596 | }; | 601 | }; |
597 | 602 | ||
603 | const struct samsung_pinctrl_of_match_data s3c2416_of_data __initconst = { | ||
604 | .ctrl = s3c2416_pin_ctrl, | ||
605 | .num_ctrl = ARRAY_SIZE(s3c2416_pin_ctrl), | ||
606 | }; | ||
607 | |||
598 | static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = { | 608 | static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = { |
599 | PIN_BANK_A(25, 0x000, "gpa"), | 609 | PIN_BANK_A(25, 0x000, "gpa"), |
600 | PIN_BANK_2BIT(11, 0x010, "gpb"), | 610 | PIN_BANK_2BIT(11, 0x010, "gpb"), |
@@ -607,7 +617,7 @@ static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = { | |||
607 | PIN_BANK_2BIT(13, 0x0d0, "gpj"), | 617 | PIN_BANK_2BIT(13, 0x0d0, "gpj"), |
608 | }; | 618 | }; |
609 | 619 | ||
610 | const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { | 620 | static const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { |
611 | { | 621 | { |
612 | .pin_banks = s3c2440_pin_banks, | 622 | .pin_banks = s3c2440_pin_banks, |
613 | .nr_banks = ARRAY_SIZE(s3c2440_pin_banks), | 623 | .nr_banks = ARRAY_SIZE(s3c2440_pin_banks), |
@@ -615,6 +625,11 @@ const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { | |||
615 | }, | 625 | }, |
616 | }; | 626 | }; |
617 | 627 | ||
628 | const struct samsung_pinctrl_of_match_data s3c2440_of_data __initconst = { | ||
629 | .ctrl = s3c2440_pin_ctrl, | ||
630 | .num_ctrl = ARRAY_SIZE(s3c2440_pin_ctrl), | ||
631 | }; | ||
632 | |||
618 | static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = { | 633 | static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = { |
619 | PIN_BANK_A(28, 0x000, "gpa"), | 634 | PIN_BANK_A(28, 0x000, "gpa"), |
620 | PIN_BANK_2BIT(11, 0x010, "gpb"), | 635 | PIN_BANK_2BIT(11, 0x010, "gpb"), |
@@ -630,10 +645,15 @@ static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = { | |||
630 | PIN_BANK_2BIT(2, 0x100, "gpm"), | 645 | PIN_BANK_2BIT(2, 0x100, "gpm"), |
631 | }; | 646 | }; |
632 | 647 | ||
633 | const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = { | 648 | static const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = { |
634 | { | 649 | { |
635 | .pin_banks = s3c2450_pin_banks, | 650 | .pin_banks = s3c2450_pin_banks, |
636 | .nr_banks = ARRAY_SIZE(s3c2450_pin_banks), | 651 | .nr_banks = ARRAY_SIZE(s3c2450_pin_banks), |
637 | .eint_wkup_init = s3c24xx_eint_init, | 652 | .eint_wkup_init = s3c24xx_eint_init, |
638 | }, | 653 | }, |
639 | }; | 654 | }; |
655 | |||
656 | const struct samsung_pinctrl_of_match_data s3c2450_of_data __initconst = { | ||
657 | .ctrl = s3c2450_pin_ctrl, | ||
658 | .num_ctrl = ARRAY_SIZE(s3c2450_pin_ctrl), | ||
659 | }; | ||
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index 679628ac4b31..288e6567ceb1 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c | |||
@@ -789,7 +789,7 @@ static const struct samsung_pin_bank_data s3c64xx_pin_banks0[] __initconst = { | |||
789 | * Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes | 789 | * Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes |
790 | * one gpio/pin-mux/pinconfig controller. | 790 | * one gpio/pin-mux/pinconfig controller. |
791 | */ | 791 | */ |
792 | const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { | 792 | static const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { |
793 | { | 793 | { |
794 | /* pin-controller instance 1 data */ | 794 | /* pin-controller instance 1 data */ |
795 | .pin_banks = s3c64xx_pin_banks0, | 795 | .pin_banks = s3c64xx_pin_banks0, |
@@ -798,3 +798,8 @@ const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { | |||
798 | .eint_wkup_init = s3c64xx_eint_eint0_init, | 798 | .eint_wkup_init = s3c64xx_eint_eint0_init, |
799 | }, | 799 | }, |
800 | }; | 800 | }; |
801 | |||
802 | const struct samsung_pinctrl_of_match_data s3c64xx_of_data __initconst = { | ||
803 | .ctrl = s3c64xx_pin_ctrl, | ||
804 | .num_ctrl = ARRAY_SIZE(s3c64xx_pin_ctrl), | ||
805 | }; | ||
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index da58e4554137..336e88d7bdb9 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c | |||
@@ -942,12 +942,33 @@ static int samsung_gpiolib_register(struct platform_device *pdev, | |||
942 | return 0; | 942 | return 0; |
943 | } | 943 | } |
944 | 944 | ||
945 | static const struct samsung_pin_ctrl * | ||
946 | samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev) | ||
947 | { | ||
948 | struct device_node *node = pdev->dev.of_node; | ||
949 | const struct samsung_pinctrl_of_match_data *of_data; | ||
950 | int id; | ||
951 | |||
952 | id = of_alias_get_id(node, "pinctrl"); | ||
953 | if (id < 0) { | ||
954 | dev_err(&pdev->dev, "failed to get alias id\n"); | ||
955 | return NULL; | ||
956 | } | ||
957 | |||
958 | of_data = of_device_get_match_data(&pdev->dev); | ||
959 | if (id >= of_data->num_ctrl) { | ||
960 | dev_err(&pdev->dev, "invalid alias id %d\n", id); | ||
961 | return NULL; | ||
962 | } | ||
963 | |||
964 | return &(of_data->ctrl[id]); | ||
965 | } | ||
966 | |||
945 | /* retrieve the soc specific data */ | 967 | /* retrieve the soc specific data */ |
946 | static const struct samsung_pin_ctrl * | 968 | static const struct samsung_pin_ctrl * |
947 | samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, | 969 | samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, |
948 | struct platform_device *pdev) | 970 | struct platform_device *pdev) |
949 | { | 971 | { |
950 | int id; | ||
951 | struct device_node *node = pdev->dev.of_node; | 972 | struct device_node *node = pdev->dev.of_node; |
952 | struct device_node *np; | 973 | struct device_node *np; |
953 | const struct samsung_pin_bank_data *bdata; | 974 | const struct samsung_pin_bank_data *bdata; |
@@ -957,13 +978,9 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, | |||
957 | void __iomem *virt_base[SAMSUNG_PINCTRL_NUM_RESOURCES]; | 978 | void __iomem *virt_base[SAMSUNG_PINCTRL_NUM_RESOURCES]; |
958 | unsigned int i; | 979 | unsigned int i; |
959 | 980 | ||
960 | id = of_alias_get_id(node, "pinctrl"); | 981 | ctrl = samsung_pinctrl_get_soc_data_for_of_alias(pdev); |
961 | if (id < 0) { | 982 | if (!ctrl) |
962 | dev_err(&pdev->dev, "failed to get alias id\n"); | ||
963 | return ERR_PTR(-ENOENT); | 983 | return ERR_PTR(-ENOENT); |
964 | } | ||
965 | ctrl = of_device_get_match_data(&pdev->dev); | ||
966 | ctrl += id; | ||
967 | 984 | ||
968 | d->suspend = ctrl->suspend; | 985 | d->suspend = ctrl->suspend; |
969 | d->resume = ctrl->resume; | 986 | d->resume = ctrl->resume; |
@@ -1188,41 +1205,41 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev) | |||
1188 | static const struct of_device_id samsung_pinctrl_dt_match[] = { | 1205 | static const struct of_device_id samsung_pinctrl_dt_match[] = { |
1189 | #ifdef CONFIG_PINCTRL_EXYNOS_ARM | 1206 | #ifdef CONFIG_PINCTRL_EXYNOS_ARM |
1190 | { .compatible = "samsung,exynos3250-pinctrl", | 1207 | { .compatible = "samsung,exynos3250-pinctrl", |
1191 | .data = exynos3250_pin_ctrl }, | 1208 | .data = &exynos3250_of_data }, |
1192 | { .compatible = "samsung,exynos4210-pinctrl", | 1209 | { .compatible = "samsung,exynos4210-pinctrl", |
1193 | .data = exynos4210_pin_ctrl }, | 1210 | .data = &exynos4210_of_data }, |
1194 | { .compatible = "samsung,exynos4x12-pinctrl", | 1211 | { .compatible = "samsung,exynos4x12-pinctrl", |
1195 | .data = exynos4x12_pin_ctrl }, | 1212 | .data = &exynos4x12_of_data }, |
1196 | { .compatible = "samsung,exynos5250-pinctrl", | 1213 | { .compatible = "samsung,exynos5250-pinctrl", |
1197 | .data = exynos5250_pin_ctrl }, | 1214 | .data = &exynos5250_of_data }, |
1198 | { .compatible = "samsung,exynos5260-pinctrl", | 1215 | { .compatible = "samsung,exynos5260-pinctrl", |
1199 | .data = exynos5260_pin_ctrl }, | 1216 | .data = &exynos5260_of_data }, |
1200 | { .compatible = "samsung,exynos5410-pinctrl", | 1217 | { .compatible = "samsung,exynos5410-pinctrl", |
1201 | .data = exynos5410_pin_ctrl }, | 1218 | .data = &exynos5410_of_data }, |
1202 | { .compatible = "samsung,exynos5420-pinctrl", | 1219 | { .compatible = "samsung,exynos5420-pinctrl", |
1203 | .data = exynos5420_pin_ctrl }, | 1220 | .data = &exynos5420_of_data }, |
1204 | { .compatible = "samsung,s5pv210-pinctrl", | 1221 | { .compatible = "samsung,s5pv210-pinctrl", |
1205 | .data = s5pv210_pin_ctrl }, | 1222 | .data = &s5pv210_of_data }, |
1206 | #endif | 1223 | #endif |
1207 | #ifdef CONFIG_PINCTRL_EXYNOS_ARM64 | 1224 | #ifdef CONFIG_PINCTRL_EXYNOS_ARM64 |
1208 | { .compatible = "samsung,exynos5433-pinctrl", | 1225 | { .compatible = "samsung,exynos5433-pinctrl", |
1209 | .data = exynos5433_pin_ctrl }, | 1226 | .data = &exynos5433_of_data }, |
1210 | { .compatible = "samsung,exynos7-pinctrl", | 1227 | { .compatible = "samsung,exynos7-pinctrl", |
1211 | .data = exynos7_pin_ctrl }, | 1228 | .data = &exynos7_of_data }, |
1212 | #endif | 1229 | #endif |
1213 | #ifdef CONFIG_PINCTRL_S3C64XX | 1230 | #ifdef CONFIG_PINCTRL_S3C64XX |
1214 | { .compatible = "samsung,s3c64xx-pinctrl", | 1231 | { .compatible = "samsung,s3c64xx-pinctrl", |
1215 | .data = s3c64xx_pin_ctrl }, | 1232 | .data = &s3c64xx_of_data }, |
1216 | #endif | 1233 | #endif |
1217 | #ifdef CONFIG_PINCTRL_S3C24XX | 1234 | #ifdef CONFIG_PINCTRL_S3C24XX |
1218 | { .compatible = "samsung,s3c2412-pinctrl", | 1235 | { .compatible = "samsung,s3c2412-pinctrl", |
1219 | .data = s3c2412_pin_ctrl }, | 1236 | .data = &s3c2412_of_data }, |
1220 | { .compatible = "samsung,s3c2416-pinctrl", | 1237 | { .compatible = "samsung,s3c2416-pinctrl", |
1221 | .data = s3c2416_pin_ctrl }, | 1238 | .data = &s3c2416_of_data }, |
1222 | { .compatible = "samsung,s3c2440-pinctrl", | 1239 | { .compatible = "samsung,s3c2440-pinctrl", |
1223 | .data = s3c2440_pin_ctrl }, | 1240 | .data = &s3c2440_of_data }, |
1224 | { .compatible = "samsung,s3c2450-pinctrl", | 1241 | { .compatible = "samsung,s3c2450-pinctrl", |
1225 | .data = s3c2450_pin_ctrl }, | 1242 | .data = &s3c2450_of_data }, |
1226 | #endif | 1243 | #endif |
1227 | {}, | 1244 | {}, |
1228 | }; | 1245 | }; |
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h index e204f609823b..f0cda9424dfe 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.h +++ b/drivers/pinctrl/samsung/pinctrl-samsung.h | |||
@@ -282,6 +282,16 @@ struct samsung_pinctrl_drv_data { | |||
282 | }; | 282 | }; |
283 | 283 | ||
284 | /** | 284 | /** |
285 | * struct samsung_pinctrl_of_match_data: OF match device specific configuration data. | ||
286 | * @ctrl: array of pin controller data. | ||
287 | * @num_ctrl: size of array @ctrl. | ||
288 | */ | ||
289 | struct samsung_pinctrl_of_match_data { | ||
290 | const struct samsung_pin_ctrl *ctrl; | ||
291 | unsigned int num_ctrl; | ||
292 | }; | ||
293 | |||
294 | /** | ||
285 | * struct samsung_pin_group: represent group of pins of a pinmux function. | 295 | * struct samsung_pin_group: represent group of pins of a pinmux function. |
286 | * @name: name of the pin group, used to lookup the group. | 296 | * @name: name of the pin group, used to lookup the group. |
287 | * @pins: the pins included in this group. | 297 | * @pins: the pins included in this group. |
@@ -309,20 +319,20 @@ struct samsung_pmx_func { | |||
309 | }; | 319 | }; |
310 | 320 | ||
311 | /* list of all exported SoC specific data */ | 321 | /* list of all exported SoC specific data */ |
312 | extern const struct samsung_pin_ctrl exynos3250_pin_ctrl[]; | 322 | extern const struct samsung_pinctrl_of_match_data exynos3250_of_data; |
313 | extern const struct samsung_pin_ctrl exynos4210_pin_ctrl[]; | 323 | extern const struct samsung_pinctrl_of_match_data exynos4210_of_data; |
314 | extern const struct samsung_pin_ctrl exynos4x12_pin_ctrl[]; | 324 | extern const struct samsung_pinctrl_of_match_data exynos4x12_of_data; |
315 | extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[]; | 325 | extern const struct samsung_pinctrl_of_match_data exynos5250_of_data; |
316 | extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[]; | 326 | extern const struct samsung_pinctrl_of_match_data exynos5260_of_data; |
317 | extern const struct samsung_pin_ctrl exynos5410_pin_ctrl[]; | 327 | extern const struct samsung_pinctrl_of_match_data exynos5410_of_data; |
318 | extern const struct samsung_pin_ctrl exynos5420_pin_ctrl[]; | 328 | extern const struct samsung_pinctrl_of_match_data exynos5420_of_data; |
319 | extern const struct samsung_pin_ctrl exynos5433_pin_ctrl[]; | 329 | extern const struct samsung_pinctrl_of_match_data exynos5433_of_data; |
320 | extern const struct samsung_pin_ctrl exynos7_pin_ctrl[]; | 330 | extern const struct samsung_pinctrl_of_match_data exynos7_of_data; |
321 | extern const struct samsung_pin_ctrl s3c64xx_pin_ctrl[]; | 331 | extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data; |
322 | extern const struct samsung_pin_ctrl s3c2412_pin_ctrl[]; | 332 | extern const struct samsung_pinctrl_of_match_data s3c2412_of_data; |
323 | extern const struct samsung_pin_ctrl s3c2416_pin_ctrl[]; | 333 | extern const struct samsung_pinctrl_of_match_data s3c2416_of_data; |
324 | extern const struct samsung_pin_ctrl s3c2440_pin_ctrl[]; | 334 | extern const struct samsung_pinctrl_of_match_data s3c2440_of_data; |
325 | extern const struct samsung_pin_ctrl s3c2450_pin_ctrl[]; | 335 | extern const struct samsung_pinctrl_of_match_data s3c2450_of_data; |
326 | extern const struct samsung_pin_ctrl s5pv210_pin_ctrl[]; | 336 | extern const struct samsung_pinctrl_of_match_data s5pv210_of_data; |
327 | 337 | ||
328 | #endif /* __PINCTRL_SAMSUNG_H */ | 338 | #endif /* __PINCTRL_SAMSUNG_H */ |
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c index 18aeee592fdc..35951e7b89d2 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c | |||
@@ -1538,7 +1538,6 @@ static const struct sh_pfc_pin pinmux_pins[] = { | |||
1538 | SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, CFG_FLAGS), | 1538 | SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, CFG_FLAGS), |
1539 | SH_PFC_PIN_NAMED_CFG('B', 19, AVB_RXC, CFG_FLAGS), | 1539 | SH_PFC_PIN_NAMED_CFG('B', 19, AVB_RXC, CFG_FLAGS), |
1540 | SH_PFC_PIN_NAMED_CFG('C', 1, PRESETOUT#, CFG_FLAGS), | 1540 | SH_PFC_PIN_NAMED_CFG('C', 1, PRESETOUT#, CFG_FLAGS), |
1541 | SH_PFC_PIN_NAMED_CFG('F', 1, CLKOUT, CFG_FLAGS), | ||
1542 | SH_PFC_PIN_NAMED_CFG('H', 37, MLB_REF, CFG_FLAGS), | 1541 | SH_PFC_PIN_NAMED_CFG('H', 37, MLB_REF, CFG_FLAGS), |
1543 | SH_PFC_PIN_NAMED_CFG('V', 3, QSPI1_SPCLK, CFG_FLAGS), | 1542 | SH_PFC_PIN_NAMED_CFG('V', 3, QSPI1_SPCLK, CFG_FLAGS), |
1544 | SH_PFC_PIN_NAMED_CFG('V', 5, QSPI1_SSL, CFG_FLAGS), | 1543 | SH_PFC_PIN_NAMED_CFG('V', 5, QSPI1_SSL, CFG_FLAGS), |
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index 6dec6ab13300..d8599736a41a 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c | |||
@@ -423,7 +423,7 @@ static int chromeos_laptop_probe(struct platform_device *pdev) | |||
423 | return ret; | 423 | return ret; |
424 | } | 424 | } |
425 | 425 | ||
426 | static const struct chromeos_laptop samsung_series_5_550 = { | 426 | static struct chromeos_laptop samsung_series_5_550 = { |
427 | .i2c_peripherals = { | 427 | .i2c_peripherals = { |
428 | /* Touchpad. */ | 428 | /* Touchpad. */ |
429 | { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, | 429 | { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, |
@@ -432,14 +432,14 @@ static const struct chromeos_laptop samsung_series_5_550 = { | |||
432 | }, | 432 | }, |
433 | }; | 433 | }; |
434 | 434 | ||
435 | static const struct chromeos_laptop samsung_series_5 = { | 435 | static struct chromeos_laptop samsung_series_5 = { |
436 | .i2c_peripherals = { | 436 | .i2c_peripherals = { |
437 | /* Light Sensor. */ | 437 | /* Light Sensor. */ |
438 | { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS }, | 438 | { .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS }, |
439 | }, | 439 | }, |
440 | }; | 440 | }; |
441 | 441 | ||
442 | static const struct chromeos_laptop chromebook_pixel = { | 442 | static struct chromeos_laptop chromebook_pixel = { |
443 | .i2c_peripherals = { | 443 | .i2c_peripherals = { |
444 | /* Touch Screen. */ | 444 | /* Touch Screen. */ |
445 | { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL }, | 445 | { .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL }, |
@@ -450,14 +450,14 @@ static const struct chromeos_laptop chromebook_pixel = { | |||
450 | }, | 450 | }, |
451 | }; | 451 | }; |
452 | 452 | ||
453 | static const struct chromeos_laptop hp_chromebook_14 = { | 453 | static struct chromeos_laptop hp_chromebook_14 = { |
454 | .i2c_peripherals = { | 454 | .i2c_peripherals = { |
455 | /* Touchpad. */ | 455 | /* Touchpad. */ |
456 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, | 456 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, |
457 | }, | 457 | }, |
458 | }; | 458 | }; |
459 | 459 | ||
460 | static const struct chromeos_laptop dell_chromebook_11 = { | 460 | static struct chromeos_laptop dell_chromebook_11 = { |
461 | .i2c_peripherals = { | 461 | .i2c_peripherals = { |
462 | /* Touchpad. */ | 462 | /* Touchpad. */ |
463 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, | 463 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, |
@@ -466,28 +466,28 @@ static const struct chromeos_laptop dell_chromebook_11 = { | |||
466 | }, | 466 | }, |
467 | }; | 467 | }; |
468 | 468 | ||
469 | static const struct chromeos_laptop toshiba_cb35 = { | 469 | static struct chromeos_laptop toshiba_cb35 = { |
470 | .i2c_peripherals = { | 470 | .i2c_peripherals = { |
471 | /* Touchpad. */ | 471 | /* Touchpad. */ |
472 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, | 472 | { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 }, |
473 | }, | 473 | }, |
474 | }; | 474 | }; |
475 | 475 | ||
476 | static const struct chromeos_laptop acer_c7_chromebook = { | 476 | static struct chromeos_laptop acer_c7_chromebook = { |
477 | .i2c_peripherals = { | 477 | .i2c_peripherals = { |
478 | /* Touchpad. */ | 478 | /* Touchpad. */ |
479 | { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, | 479 | { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, |
480 | }, | 480 | }, |
481 | }; | 481 | }; |
482 | 482 | ||
483 | static const struct chromeos_laptop acer_ac700 = { | 483 | static struct chromeos_laptop acer_ac700 = { |
484 | .i2c_peripherals = { | 484 | .i2c_peripherals = { |
485 | /* Light Sensor. */ | 485 | /* Light Sensor. */ |
486 | { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, | 486 | { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, |
487 | }, | 487 | }, |
488 | }; | 488 | }; |
489 | 489 | ||
490 | static const struct chromeos_laptop acer_c720 = { | 490 | static struct chromeos_laptop acer_c720 = { |
491 | .i2c_peripherals = { | 491 | .i2c_peripherals = { |
492 | /* Touchscreen. */ | 492 | /* Touchscreen. */ |
493 | { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, | 493 | { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 }, |
@@ -500,14 +500,14 @@ static const struct chromeos_laptop acer_c720 = { | |||
500 | }, | 500 | }, |
501 | }; | 501 | }; |
502 | 502 | ||
503 | static const struct chromeos_laptop hp_pavilion_14_chromebook = { | 503 | static struct chromeos_laptop hp_pavilion_14_chromebook = { |
504 | .i2c_peripherals = { | 504 | .i2c_peripherals = { |
505 | /* Touchpad. */ | 505 | /* Touchpad. */ |
506 | { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, | 506 | { .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS }, |
507 | }, | 507 | }, |
508 | }; | 508 | }; |
509 | 509 | ||
510 | static const struct chromeos_laptop cr48 = { | 510 | static struct chromeos_laptop cr48 = { |
511 | .i2c_peripherals = { | 511 | .i2c_peripherals = { |
512 | /* Light Sensor. */ | 512 | /* Light Sensor. */ |
513 | { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, | 513 | { .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS }, |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 9a8f96465cdc..51ebc5a6053f 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -105,31 +105,45 @@ config ASUS_LAPTOP | |||
105 | 105 | ||
106 | If you have an ACPI-compatible ASUS laptop, say Y or M here. | 106 | If you have an ACPI-compatible ASUS laptop, say Y or M here. |
107 | 107 | ||
108 | # | ||
109 | # The DELL_SMBIOS driver depends on ACPI_WMI and/or DCDBAS if those | ||
110 | # backends are selected. The "depends" line prevents a configuration | ||
111 | # where DELL_SMBIOS=y while either of those dependencies =m. | ||
112 | # | ||
108 | config DELL_SMBIOS | 113 | config DELL_SMBIOS |
109 | tristate | 114 | tristate "Dell SMBIOS driver" |
115 | depends on DCDBAS || DCDBAS=n | ||
116 | depends on ACPI_WMI || ACPI_WMI=n | ||
117 | ---help--- | ||
118 | This provides support for the Dell SMBIOS calling interface. | ||
119 | If you have a Dell computer you should enable this option. | ||
120 | |||
121 | Be sure to select at least one backend for it to work properly. | ||
110 | 122 | ||
111 | config DELL_SMBIOS_WMI | 123 | config DELL_SMBIOS_WMI |
112 | tristate "Dell SMBIOS calling interface (WMI implementation)" | 124 | bool "Dell SMBIOS driver WMI backend" |
125 | default y | ||
113 | depends on ACPI_WMI | 126 | depends on ACPI_WMI |
114 | select DELL_WMI_DESCRIPTOR | 127 | select DELL_WMI_DESCRIPTOR |
115 | select DELL_SMBIOS | 128 | depends on DELL_SMBIOS |
116 | ---help--- | 129 | ---help--- |
117 | This provides an implementation for the Dell SMBIOS calling interface | 130 | This provides an implementation for the Dell SMBIOS calling interface |
118 | communicated over ACPI-WMI. | 131 | communicated over ACPI-WMI. |
119 | 132 | ||
120 | If you have a Dell computer from >2007 you should say Y or M here. | 133 | If you have a Dell computer from >2007 you should say Y here. |
121 | If you aren't sure and this module doesn't work for your computer | 134 | If you aren't sure and this module doesn't work for your computer |
122 | it just won't load. | 135 | it just won't load. |
123 | 136 | ||
124 | config DELL_SMBIOS_SMM | 137 | config DELL_SMBIOS_SMM |
125 | tristate "Dell SMBIOS calling interface (SMM implementation)" | 138 | bool "Dell SMBIOS driver SMM backend" |
139 | default y | ||
126 | depends on DCDBAS | 140 | depends on DCDBAS |
127 | select DELL_SMBIOS | 141 | depends on DELL_SMBIOS |
128 | ---help--- | 142 | ---help--- |
129 | This provides an implementation for the Dell SMBIOS calling interface | 143 | This provides an implementation for the Dell SMBIOS calling interface |
130 | communicated over SMI/SMM. | 144 | communicated over SMI/SMM. |
131 | 145 | ||
132 | If you have a Dell computer from <=2017 you should say Y or M here. | 146 | If you have a Dell computer from <=2017 you should say Y here. |
133 | If you aren't sure and this module doesn't work for your computer | 147 | If you aren't sure and this module doesn't work for your computer |
134 | it just won't load. | 148 | it just won't load. |
135 | 149 | ||
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index c388608ad2a3..2ba6cb795338 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile | |||
@@ -13,8 +13,9 @@ obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o | |||
13 | obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o | 13 | obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o |
14 | obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o | 14 | obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o |
15 | obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o | 15 | obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o |
16 | obj-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o | 16 | dell-smbios-objs := dell-smbios-base.o |
17 | obj-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o | 17 | dell-smbios-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o |
18 | dell-smbios-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o | ||
18 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o | 19 | obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o |
19 | obj-$(CONFIG_DELL_WMI) += dell-wmi.o | 20 | obj-$(CONFIG_DELL_WMI) += dell-wmi.o |
20 | obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o | 21 | obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o |
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 2a68f59d2228..c52c6723374b 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c | |||
@@ -127,24 +127,6 @@ static const struct dmi_system_id dell_device_table[] __initconst = { | |||
127 | }, | 127 | }, |
128 | }, | 128 | }, |
129 | { | 129 | { |
130 | .matches = { | ||
131 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
132 | DMI_MATCH(DMI_CHASSIS_TYPE, "30"), /*Tablet*/ | ||
133 | }, | ||
134 | }, | ||
135 | { | ||
136 | .matches = { | ||
137 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
138 | DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /*Convertible*/ | ||
139 | }, | ||
140 | }, | ||
141 | { | ||
142 | .matches = { | ||
143 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
144 | DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/ | ||
145 | }, | ||
146 | }, | ||
147 | { | ||
148 | .ident = "Dell Computer Corporation", | 130 | .ident = "Dell Computer Corporation", |
149 | .matches = { | 131 | .matches = { |
150 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), | 132 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), |
@@ -1279,7 +1261,7 @@ static int kbd_get_state(struct kbd_state *state) | |||
1279 | struct calling_interface_buffer buffer; | 1261 | struct calling_interface_buffer buffer; |
1280 | int ret; | 1262 | int ret; |
1281 | 1263 | ||
1282 | dell_fill_request(&buffer, 0, 0, 0, 0); | 1264 | dell_fill_request(&buffer, 0x1, 0, 0, 0); |
1283 | ret = dell_send_request(&buffer, | 1265 | ret = dell_send_request(&buffer, |
1284 | CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT); | 1266 | CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT); |
1285 | if (ret) | 1267 | if (ret) |
diff --git a/drivers/platform/x86/dell-smbios.c b/drivers/platform/x86/dell-smbios-base.c index 8541cde4cb7d..2485c80a9fdd 100644 --- a/drivers/platform/x86/dell-smbios.c +++ b/drivers/platform/x86/dell-smbios-base.c | |||
@@ -36,7 +36,7 @@ static DEFINE_MUTEX(smbios_mutex); | |||
36 | struct smbios_device { | 36 | struct smbios_device { |
37 | struct list_head list; | 37 | struct list_head list; |
38 | struct device *device; | 38 | struct device *device; |
39 | int (*call_fn)(struct calling_interface_buffer *); | 39 | int (*call_fn)(struct calling_interface_buffer *arg); |
40 | }; | 40 | }; |
41 | 41 | ||
42 | struct smbios_call { | 42 | struct smbios_call { |
@@ -352,8 +352,10 @@ static void __init parse_da_table(const struct dmi_header *dm) | |||
352 | struct calling_interface_structure *table = | 352 | struct calling_interface_structure *table = |
353 | container_of(dm, struct calling_interface_structure, header); | 353 | container_of(dm, struct calling_interface_structure, header); |
354 | 354 | ||
355 | /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least | 355 | /* |
356 | 6 bytes of entry */ | 356 | * 4 bytes of table header, plus 7 bytes of Dell header |
357 | * plus at least 6 bytes of entry | ||
358 | */ | ||
357 | 359 | ||
358 | if (dm->length < 17) | 360 | if (dm->length < 17) |
359 | return; | 361 | return; |
@@ -554,7 +556,7 @@ static void free_group(struct platform_device *pdev) | |||
554 | static int __init dell_smbios_init(void) | 556 | static int __init dell_smbios_init(void) |
555 | { | 557 | { |
556 | const struct dmi_device *valid; | 558 | const struct dmi_device *valid; |
557 | int ret; | 559 | int ret, wmi, smm; |
558 | 560 | ||
559 | valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL); | 561 | valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL); |
560 | if (!valid) { | 562 | if (!valid) { |
@@ -589,8 +591,24 @@ static int __init dell_smbios_init(void) | |||
589 | if (ret) | 591 | if (ret) |
590 | goto fail_create_group; | 592 | goto fail_create_group; |
591 | 593 | ||
594 | /* register backends */ | ||
595 | wmi = init_dell_smbios_wmi(); | ||
596 | if (wmi) | ||
597 | pr_debug("Failed to initialize WMI backend: %d\n", wmi); | ||
598 | smm = init_dell_smbios_smm(); | ||
599 | if (smm) | ||
600 | pr_debug("Failed to initialize SMM backend: %d\n", smm); | ||
601 | if (wmi && smm) { | ||
602 | pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n", | ||
603 | wmi, smm); | ||
604 | goto fail_sysfs; | ||
605 | } | ||
606 | |||
592 | return 0; | 607 | return 0; |
593 | 608 | ||
609 | fail_sysfs: | ||
610 | free_group(platform_device); | ||
611 | |||
594 | fail_create_group: | 612 | fail_create_group: |
595 | platform_device_del(platform_device); | 613 | platform_device_del(platform_device); |
596 | 614 | ||
@@ -607,6 +625,8 @@ fail_platform_driver: | |||
607 | 625 | ||
608 | static void __exit dell_smbios_exit(void) | 626 | static void __exit dell_smbios_exit(void) |
609 | { | 627 | { |
628 | exit_dell_smbios_wmi(); | ||
629 | exit_dell_smbios_smm(); | ||
610 | mutex_lock(&smbios_mutex); | 630 | mutex_lock(&smbios_mutex); |
611 | if (platform_device) { | 631 | if (platform_device) { |
612 | free_group(platform_device); | 632 | free_group(platform_device); |
@@ -617,11 +637,12 @@ static void __exit dell_smbios_exit(void) | |||
617 | mutex_unlock(&smbios_mutex); | 637 | mutex_unlock(&smbios_mutex); |
618 | } | 638 | } |
619 | 639 | ||
620 | subsys_initcall(dell_smbios_init); | 640 | module_init(dell_smbios_init); |
621 | module_exit(dell_smbios_exit); | 641 | module_exit(dell_smbios_exit); |
622 | 642 | ||
623 | MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); | 643 | MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); |
624 | MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); | 644 | MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); |
625 | MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); | 645 | MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); |
646 | MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>"); | ||
626 | MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS"); | 647 | MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS"); |
627 | MODULE_LICENSE("GPL"); | 648 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c index 89f65c4651a0..e9e9da556318 100644 --- a/drivers/platform/x86/dell-smbios-smm.c +++ b/drivers/platform/x86/dell-smbios-smm.c | |||
@@ -58,7 +58,7 @@ static const struct dmi_system_id dell_device_table[] __initconst = { | |||
58 | }; | 58 | }; |
59 | MODULE_DEVICE_TABLE(dmi, dell_device_table); | 59 | MODULE_DEVICE_TABLE(dmi, dell_device_table); |
60 | 60 | ||
61 | static void __init parse_da_table(const struct dmi_header *dm) | 61 | static void parse_da_table(const struct dmi_header *dm) |
62 | { | 62 | { |
63 | struct calling_interface_structure *table = | 63 | struct calling_interface_structure *table = |
64 | container_of(dm, struct calling_interface_structure, header); | 64 | container_of(dm, struct calling_interface_structure, header); |
@@ -73,7 +73,7 @@ static void __init parse_da_table(const struct dmi_header *dm) | |||
73 | da_command_code = table->cmdIOCode; | 73 | da_command_code = table->cmdIOCode; |
74 | } | 74 | } |
75 | 75 | ||
76 | static void __init find_cmd_address(const struct dmi_header *dm, void *dummy) | 76 | static void find_cmd_address(const struct dmi_header *dm, void *dummy) |
77 | { | 77 | { |
78 | switch (dm->type) { | 78 | switch (dm->type) { |
79 | case 0xda: /* Calling interface */ | 79 | case 0xda: /* Calling interface */ |
@@ -128,7 +128,7 @@ static bool test_wsmt_enabled(void) | |||
128 | return false; | 128 | return false; |
129 | } | 129 | } |
130 | 130 | ||
131 | static int __init dell_smbios_smm_init(void) | 131 | int init_dell_smbios_smm(void) |
132 | { | 132 | { |
133 | int ret; | 133 | int ret; |
134 | /* | 134 | /* |
@@ -176,7 +176,7 @@ fail_platform_device_alloc: | |||
176 | return ret; | 176 | return ret; |
177 | } | 177 | } |
178 | 178 | ||
179 | static void __exit dell_smbios_smm_exit(void) | 179 | void exit_dell_smbios_smm(void) |
180 | { | 180 | { |
181 | if (platform_device) { | 181 | if (platform_device) { |
182 | dell_smbios_unregister_device(&platform_device->dev); | 182 | dell_smbios_unregister_device(&platform_device->dev); |
@@ -184,13 +184,3 @@ static void __exit dell_smbios_smm_exit(void) | |||
184 | free_page((unsigned long)buffer); | 184 | free_page((unsigned long)buffer); |
185 | } | 185 | } |
186 | } | 186 | } |
187 | |||
188 | subsys_initcall(dell_smbios_smm_init); | ||
189 | module_exit(dell_smbios_smm_exit); | ||
190 | |||
191 | MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); | ||
192 | MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>"); | ||
193 | MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>"); | ||
194 | MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>"); | ||
195 | MODULE_DESCRIPTION("Dell SMBIOS communications over SMI"); | ||
196 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c index 609557aa5868..fbefedb1c172 100644 --- a/drivers/platform/x86/dell-smbios-wmi.c +++ b/drivers/platform/x86/dell-smbios-wmi.c | |||
@@ -228,7 +228,7 @@ static const struct wmi_device_id dell_smbios_wmi_id_table[] = { | |||
228 | { }, | 228 | { }, |
229 | }; | 229 | }; |
230 | 230 | ||
231 | static void __init parse_b1_table(const struct dmi_header *dm) | 231 | static void parse_b1_table(const struct dmi_header *dm) |
232 | { | 232 | { |
233 | struct misc_bios_flags_structure *flags = | 233 | struct misc_bios_flags_structure *flags = |
234 | container_of(dm, struct misc_bios_flags_structure, header); | 234 | container_of(dm, struct misc_bios_flags_structure, header); |
@@ -242,7 +242,7 @@ static void __init parse_b1_table(const struct dmi_header *dm) | |||
242 | wmi_supported = 1; | 242 | wmi_supported = 1; |
243 | } | 243 | } |
244 | 244 | ||
245 | static void __init find_b1(const struct dmi_header *dm, void *dummy) | 245 | static void find_b1(const struct dmi_header *dm, void *dummy) |
246 | { | 246 | { |
247 | switch (dm->type) { | 247 | switch (dm->type) { |
248 | case 0xb1: /* misc bios flags */ | 248 | case 0xb1: /* misc bios flags */ |
@@ -261,7 +261,7 @@ static struct wmi_driver dell_smbios_wmi_driver = { | |||
261 | .filter_callback = dell_smbios_wmi_filter, | 261 | .filter_callback = dell_smbios_wmi_filter, |
262 | }; | 262 | }; |
263 | 263 | ||
264 | static int __init init_dell_smbios_wmi(void) | 264 | int init_dell_smbios_wmi(void) |
265 | { | 265 | { |
266 | dmi_walk(find_b1, NULL); | 266 | dmi_walk(find_b1, NULL); |
267 | 267 | ||
@@ -271,15 +271,9 @@ static int __init init_dell_smbios_wmi(void) | |||
271 | return wmi_driver_register(&dell_smbios_wmi_driver); | 271 | return wmi_driver_register(&dell_smbios_wmi_driver); |
272 | } | 272 | } |
273 | 273 | ||
274 | static void __exit exit_dell_smbios_wmi(void) | 274 | void exit_dell_smbios_wmi(void) |
275 | { | 275 | { |
276 | wmi_driver_unregister(&dell_smbios_wmi_driver); | 276 | wmi_driver_unregister(&dell_smbios_wmi_driver); |
277 | } | 277 | } |
278 | 278 | ||
279 | module_init(init_dell_smbios_wmi); | ||
280 | module_exit(exit_dell_smbios_wmi); | ||
281 | |||
282 | MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID); | 279 | MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID); |
283 | MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>"); | ||
284 | MODULE_DESCRIPTION("Dell SMBIOS communications over WMI"); | ||
285 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h index 138d478d9adc..d8adaf959740 100644 --- a/drivers/platform/x86/dell-smbios.h +++ b/drivers/platform/x86/dell-smbios.h | |||
@@ -75,4 +75,29 @@ int dell_laptop_register_notifier(struct notifier_block *nb); | |||
75 | int dell_laptop_unregister_notifier(struct notifier_block *nb); | 75 | int dell_laptop_unregister_notifier(struct notifier_block *nb); |
76 | void dell_laptop_call_notifier(unsigned long action, void *data); | 76 | void dell_laptop_call_notifier(unsigned long action, void *data); |
77 | 77 | ||
78 | #endif | 78 | /* for the supported backends */ |
79 | #ifdef CONFIG_DELL_SMBIOS_WMI | ||
80 | int init_dell_smbios_wmi(void); | ||
81 | void exit_dell_smbios_wmi(void); | ||
82 | #else /* CONFIG_DELL_SMBIOS_WMI */ | ||
83 | static inline int init_dell_smbios_wmi(void) | ||
84 | { | ||
85 | return -ENODEV; | ||
86 | } | ||
87 | static inline void exit_dell_smbios_wmi(void) | ||
88 | {} | ||
89 | #endif /* CONFIG_DELL_SMBIOS_WMI */ | ||
90 | |||
91 | #ifdef CONFIG_DELL_SMBIOS_SMM | ||
92 | int init_dell_smbios_smm(void); | ||
93 | void exit_dell_smbios_smm(void); | ||
94 | #else /* CONFIG_DELL_SMBIOS_SMM */ | ||
95 | static inline int init_dell_smbios_smm(void) | ||
96 | { | ||
97 | return -ENODEV; | ||
98 | } | ||
99 | static inline void exit_dell_smbios_smm(void) | ||
100 | {} | ||
101 | #endif /* CONFIG_DELL_SMBIOS_SMM */ | ||
102 | |||
103 | #endif /* _DELL_SMBIOS_H_ */ | ||
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 2c9927430d85..8d102195a392 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c | |||
@@ -714,7 +714,7 @@ static int __init dell_wmi_init(void) | |||
714 | 714 | ||
715 | return wmi_driver_register(&dell_wmi_driver); | 715 | return wmi_driver_register(&dell_wmi_driver); |
716 | } | 716 | } |
717 | module_init(dell_wmi_init); | 717 | late_initcall(dell_wmi_init); |
718 | 718 | ||
719 | static void __exit dell_wmi_exit(void) | 719 | static void __exit dell_wmi_exit(void) |
720 | { | 720 | { |
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 5b6f18b18801..535199c9e6bc 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth."); | |||
113 | /* | 113 | /* |
114 | * ACPI Helpers | 114 | * ACPI Helpers |
115 | */ | 115 | */ |
116 | #define IDEAPAD_EC_TIMEOUT (100) /* in ms */ | 116 | #define IDEAPAD_EC_TIMEOUT (200) /* in ms */ |
117 | 117 | ||
118 | static int read_method_int(acpi_handle handle, const char *method, int *val) | 118 | static int read_method_int(acpi_handle handle, const char *method, int *val) |
119 | { | 119 | { |
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index d1a01311c1a2..5e3df194723e 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c | |||
@@ -376,6 +376,7 @@ static int intel_hid_remove(struct platform_device *device) | |||
376 | { | 376 | { |
377 | acpi_handle handle = ACPI_HANDLE(&device->dev); | 377 | acpi_handle handle = ACPI_HANDLE(&device->dev); |
378 | 378 | ||
379 | device_init_wakeup(&device->dev, false); | ||
379 | acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); | 380 | acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); |
380 | intel_hid_set_enable(&device->dev, false); | 381 | intel_hid_set_enable(&device->dev, false); |
381 | intel_button_array_enable(&device->dev, false); | 382 | intel_button_array_enable(&device->dev, false); |
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index b703d6f5b099..c13780b8dabb 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/acpi.h> | 9 | #include <linux/acpi.h> |
10 | #include <linux/dmi.h> | ||
10 | #include <linux/input.h> | 11 | #include <linux/input.h> |
11 | #include <linux/input/sparse-keymap.h> | 12 | #include <linux/input/sparse-keymap.h> |
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
@@ -97,9 +98,35 @@ out_unknown: | |||
97 | dev_dbg(&device->dev, "unknown event index 0x%x\n", event); | 98 | dev_dbg(&device->dev, "unknown event index 0x%x\n", event); |
98 | } | 99 | } |
99 | 100 | ||
100 | static int intel_vbtn_probe(struct platform_device *device) | 101 | static void detect_tablet_mode(struct platform_device *device) |
101 | { | 102 | { |
103 | const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); | ||
104 | struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); | ||
105 | acpi_handle handle = ACPI_HANDLE(&device->dev); | ||
102 | struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL }; | 106 | struct acpi_buffer vgbs_output = { ACPI_ALLOCATE_BUFFER, NULL }; |
107 | union acpi_object *obj; | ||
108 | acpi_status status; | ||
109 | int m; | ||
110 | |||
111 | if (!(chassis_type && strcmp(chassis_type, "31") == 0)) | ||
112 | goto out; | ||
113 | |||
114 | status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output); | ||
115 | if (ACPI_FAILURE(status)) | ||
116 | goto out; | ||
117 | |||
118 | obj = vgbs_output.pointer; | ||
119 | if (!(obj && obj->type == ACPI_TYPE_INTEGER)) | ||
120 | goto out; | ||
121 | |||
122 | m = !(obj->integer.value & TABLET_MODE_FLAG); | ||
123 | input_report_switch(priv->input_dev, SW_TABLET_MODE, m); | ||
124 | out: | ||
125 | kfree(vgbs_output.pointer); | ||
126 | } | ||
127 | |||
128 | static int intel_vbtn_probe(struct platform_device *device) | ||
129 | { | ||
103 | acpi_handle handle = ACPI_HANDLE(&device->dev); | 130 | acpi_handle handle = ACPI_HANDLE(&device->dev); |
104 | struct intel_vbtn_priv *priv; | 131 | struct intel_vbtn_priv *priv; |
105 | acpi_status status; | 132 | acpi_status status; |
@@ -122,22 +149,7 @@ static int intel_vbtn_probe(struct platform_device *device) | |||
122 | return err; | 149 | return err; |
123 | } | 150 | } |
124 | 151 | ||
125 | /* | 152 | detect_tablet_mode(device); |
126 | * VGBS being present and returning something means we have | ||
127 | * a tablet mode switch. | ||
128 | */ | ||
129 | status = acpi_evaluate_object(handle, "VGBS", NULL, &vgbs_output); | ||
130 | if (ACPI_SUCCESS(status)) { | ||
131 | union acpi_object *obj = vgbs_output.pointer; | ||
132 | |||
133 | if (obj && obj->type == ACPI_TYPE_INTEGER) { | ||
134 | int m = !(obj->integer.value & TABLET_MODE_FLAG); | ||
135 | |||
136 | input_report_switch(priv->input_dev, SW_TABLET_MODE, m); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | kfree(vgbs_output.pointer); | ||
141 | 153 | ||
142 | status = acpi_install_notify_handler(handle, | 154 | status = acpi_install_notify_handler(handle, |
143 | ACPI_DEVICE_NOTIFY, | 155 | ACPI_DEVICE_NOTIFY, |
@@ -154,6 +166,7 @@ static int intel_vbtn_remove(struct platform_device *device) | |||
154 | { | 166 | { |
155 | acpi_handle handle = ACPI_HANDLE(&device->dev); | 167 | acpi_handle handle = ACPI_HANDLE(&device->dev); |
156 | 168 | ||
169 | device_init_wakeup(&device->dev, false); | ||
157 | acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); | 170 | acpi_remove_notify_handler(handle, ACPI_DEVICE_NOTIFY, notify_handler); |
158 | 171 | ||
159 | /* | 172 | /* |
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index daa68acbc900..8796211ef24a 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c | |||
@@ -933,7 +933,7 @@ static int wmi_dev_probe(struct device *dev) | |||
933 | goto probe_failure; | 933 | goto probe_failure; |
934 | } | 934 | } |
935 | 935 | ||
936 | buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL); | 936 | buf = kmalloc(strlen(wdriver->driver.name) + 5, GFP_KERNEL); |
937 | if (!buf) { | 937 | if (!buf) { |
938 | ret = -ENOMEM; | 938 | ret = -ENOMEM; |
939 | goto probe_string_failure; | 939 | goto probe_string_failure; |
@@ -945,7 +945,7 @@ static int wmi_dev_probe(struct device *dev) | |||
945 | wblock->char_dev.mode = 0444; | 945 | wblock->char_dev.mode = 0444; |
946 | ret = misc_register(&wblock->char_dev); | 946 | ret = misc_register(&wblock->char_dev); |
947 | if (ret) { | 947 | if (ret) { |
948 | dev_warn(dev, "failed to register char dev: %d", ret); | 948 | dev_warn(dev, "failed to register char dev: %d\n", ret); |
949 | ret = -ENOMEM; | 949 | ret = -ENOMEM; |
950 | goto probe_misc_failure; | 950 | goto probe_misc_failure; |
951 | } | 951 | } |
@@ -1048,7 +1048,7 @@ static int wmi_create_device(struct device *wmi_bus_dev, | |||
1048 | 1048 | ||
1049 | if (result) { | 1049 | if (result) { |
1050 | dev_warn(wmi_bus_dev, | 1050 | dev_warn(wmi_bus_dev, |
1051 | "%s data block query control method not found", | 1051 | "%s data block query control method not found\n", |
1052 | method); | 1052 | method); |
1053 | return result; | 1053 | return result; |
1054 | } | 1054 | } |
@@ -1198,7 +1198,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device) | |||
1198 | 1198 | ||
1199 | retval = device_add(&wblock->dev.dev); | 1199 | retval = device_add(&wblock->dev.dev); |
1200 | if (retval) { | 1200 | if (retval) { |
1201 | dev_err(wmi_bus_dev, "failed to register %pULL\n", | 1201 | dev_err(wmi_bus_dev, "failed to register %pUL\n", |
1202 | wblock->gblock.guid); | 1202 | wblock->gblock.guid); |
1203 | if (debug_event) | 1203 | if (debug_event) |
1204 | wmi_method_enable(wblock, 0); | 1204 | wmi_method_enable(wblock, 0); |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index dd4708c58480..1fc0c0811da4 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -4310,7 +4310,7 @@ static int _regulator_resume_early(struct device *dev, void *data) | |||
4310 | 4310 | ||
4311 | rstate = regulator_get_suspend_state(rdev, *state); | 4311 | rstate = regulator_get_suspend_state(rdev, *state); |
4312 | if (rstate == NULL) | 4312 | if (rstate == NULL) |
4313 | return -EINVAL; | 4313 | return 0; |
4314 | 4314 | ||
4315 | mutex_lock(&rdev->mutex); | 4315 | mutex_lock(&rdev->mutex); |
4316 | 4316 | ||
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c index 72c8b3e1022b..e0a9c445ed67 100644 --- a/drivers/regulator/stm32-vrefbuf.c +++ b/drivers/regulator/stm32-vrefbuf.c | |||
@@ -51,7 +51,7 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev) | |||
51 | * arbitrary timeout. | 51 | * arbitrary timeout. |
52 | */ | 52 | */ |
53 | ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val, | 53 | ret = readl_poll_timeout(priv->base + STM32_VREFBUF_CSR, val, |
54 | !(val & STM32_VRR), 650, 10000); | 54 | val & STM32_VRR, 650, 10000); |
55 | if (ret) { | 55 | if (ret) { |
56 | dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n"); | 56 | dev_err(&rdev->dev, "stm32 vrefbuf timed out!\n"); |
57 | val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); | 57 | val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index a7c15f0085e2..ecef8e73d40b 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -2581,8 +2581,6 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr) | |||
2581 | case DASD_CQR_QUEUED: | 2581 | case DASD_CQR_QUEUED: |
2582 | /* request was not started - just set to cleared */ | 2582 | /* request was not started - just set to cleared */ |
2583 | cqr->status = DASD_CQR_CLEARED; | 2583 | cqr->status = DASD_CQR_CLEARED; |
2584 | if (cqr->callback_data == DASD_SLEEPON_START_TAG) | ||
2585 | cqr->callback_data = DASD_SLEEPON_END_TAG; | ||
2586 | break; | 2584 | break; |
2587 | case DASD_CQR_IN_IO: | 2585 | case DASD_CQR_IN_IO: |
2588 | /* request in IO - terminate IO and release again */ | 2586 | /* request in IO - terminate IO and release again */ |
@@ -3902,9 +3900,12 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) | |||
3902 | wait_event(dasd_flush_wq, | 3900 | wait_event(dasd_flush_wq, |
3903 | (cqr->status != DASD_CQR_CLEAR_PENDING)); | 3901 | (cqr->status != DASD_CQR_CLEAR_PENDING)); |
3904 | 3902 | ||
3905 | /* mark sleepon requests as ended */ | 3903 | /* |
3906 | if (cqr->callback_data == DASD_SLEEPON_START_TAG) | 3904 | * requeue requests to blocklayer will only work |
3907 | cqr->callback_data = DASD_SLEEPON_END_TAG; | 3905 | * for block device requests |
3906 | */ | ||
3907 | if (_dasd_requeue_request(cqr)) | ||
3908 | continue; | ||
3908 | 3909 | ||
3909 | /* remove requests from device and block queue */ | 3910 | /* remove requests from device and block queue */ |
3910 | list_del_init(&cqr->devlist); | 3911 | list_del_init(&cqr->devlist); |
@@ -3917,13 +3918,6 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) | |||
3917 | cqr = refers; | 3918 | cqr = refers; |
3918 | } | 3919 | } |
3919 | 3920 | ||
3920 | /* | ||
3921 | * requeue requests to blocklayer will only work | ||
3922 | * for block device requests | ||
3923 | */ | ||
3924 | if (_dasd_requeue_request(cqr)) | ||
3925 | continue; | ||
3926 | |||
3927 | if (cqr->block) | 3921 | if (cqr->block) |
3928 | list_del_init(&cqr->blocklist); | 3922 | list_del_init(&cqr->blocklist); |
3929 | cqr->block->base->discipline->free_cp( | 3923 | cqr->block->base->discipline->free_cp( |
@@ -3940,8 +3934,7 @@ static int dasd_generic_requeue_all_requests(struct dasd_device *device) | |||
3940 | list_splice_tail(&requeue_queue, &device->ccw_queue); | 3934 | list_splice_tail(&requeue_queue, &device->ccw_queue); |
3941 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 3935 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
3942 | } | 3936 | } |
3943 | /* wake up generic waitqueue for eventually ended sleepon requests */ | 3937 | dasd_schedule_device_bh(device); |
3944 | wake_up(&generic_waitq); | ||
3945 | return rc; | 3938 | return rc; |
3946 | } | 3939 | } |
3947 | 3940 | ||
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 1319122e9d12..9169af7dbb43 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -795,6 +795,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
795 | 795 | ||
796 | ccw_device_set_timeout(cdev, 0); | 796 | ccw_device_set_timeout(cdev, 0); |
797 | cdev->private->iretry = 255; | 797 | cdev->private->iretry = 255; |
798 | cdev->private->async_kill_io_rc = -ETIMEDOUT; | ||
798 | ret = ccw_device_cancel_halt_clear(cdev); | 799 | ret = ccw_device_cancel_halt_clear(cdev); |
799 | if (ret == -EBUSY) { | 800 | if (ret == -EBUSY) { |
800 | ccw_device_set_timeout(cdev, 3*HZ); | 801 | ccw_device_set_timeout(cdev, 3*HZ); |
@@ -871,7 +872,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
871 | /* OK, i/o is dead now. Call interrupt handler. */ | 872 | /* OK, i/o is dead now. Call interrupt handler. */ |
872 | if (cdev->handler) | 873 | if (cdev->handler) |
873 | cdev->handler(cdev, cdev->private->intparm, | 874 | cdev->handler(cdev, cdev->private->intparm, |
874 | ERR_PTR(-EIO)); | 875 | ERR_PTR(cdev->private->async_kill_io_rc)); |
875 | } | 876 | } |
876 | 877 | ||
877 | static void | 878 | static void |
@@ -888,14 +889,16 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
888 | ccw_device_online_verify(cdev, 0); | 889 | ccw_device_online_verify(cdev, 0); |
889 | if (cdev->handler) | 890 | if (cdev->handler) |
890 | cdev->handler(cdev, cdev->private->intparm, | 891 | cdev->handler(cdev, cdev->private->intparm, |
891 | ERR_PTR(-EIO)); | 892 | ERR_PTR(cdev->private->async_kill_io_rc)); |
892 | } | 893 | } |
893 | 894 | ||
894 | void ccw_device_kill_io(struct ccw_device *cdev) | 895 | void ccw_device_kill_io(struct ccw_device *cdev) |
895 | { | 896 | { |
896 | int ret; | 897 | int ret; |
897 | 898 | ||
899 | ccw_device_set_timeout(cdev, 0); | ||
898 | cdev->private->iretry = 255; | 900 | cdev->private->iretry = 255; |
901 | cdev->private->async_kill_io_rc = -EIO; | ||
899 | ret = ccw_device_cancel_halt_clear(cdev); | 902 | ret = ccw_device_cancel_halt_clear(cdev); |
900 | if (ret == -EBUSY) { | 903 | if (ret == -EBUSY) { |
901 | ccw_device_set_timeout(cdev, 3*HZ); | 904 | ccw_device_set_timeout(cdev, 3*HZ); |
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 1caf6a398760..75ce12a24dc2 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -159,7 +159,7 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) | |||
159 | } | 159 | } |
160 | 160 | ||
161 | /** | 161 | /** |
162 | * ccw_device_start_key() - start a s390 channel program with key | 162 | * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key |
163 | * @cdev: target ccw device | 163 | * @cdev: target ccw device |
164 | * @cpa: logical start address of channel program | 164 | * @cpa: logical start address of channel program |
165 | * @intparm: user specific interruption parameter; will be presented back to | 165 | * @intparm: user specific interruption parameter; will be presented back to |
@@ -170,10 +170,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) | |||
170 | * @key: storage key to be used for the I/O | 170 | * @key: storage key to be used for the I/O |
171 | * @flags: additional flags; defines the action to be performed for I/O | 171 | * @flags: additional flags; defines the action to be performed for I/O |
172 | * processing. | 172 | * processing. |
173 | * @expires: timeout value in jiffies | ||
173 | * | 174 | * |
174 | * Start a S/390 channel program. When the interrupt arrives, the | 175 | * Start a S/390 channel program. When the interrupt arrives, the |
175 | * IRQ handler is called, either immediately, delayed (dev-end missing, | 176 | * IRQ handler is called, either immediately, delayed (dev-end missing, |
176 | * or sense required) or never (no IRQ handler registered). | 177 | * or sense required) or never (no IRQ handler registered). |
178 | * This function notifies the device driver if the channel program has not | ||
179 | * completed during the time specified by @expires. If a timeout occurs, the | ||
180 | * channel program is terminated via xsch, hsch or csch, and the device's | ||
181 | * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). | ||
177 | * Returns: | 182 | * Returns: |
178 | * %0, if the operation was successful; | 183 | * %0, if the operation was successful; |
179 | * -%EBUSY, if the device is busy, or status pending; | 184 | * -%EBUSY, if the device is busy, or status pending; |
@@ -182,9 +187,9 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) | |||
182 | * Context: | 187 | * Context: |
183 | * Interrupts disabled, ccw device lock held | 188 | * Interrupts disabled, ccw device lock held |
184 | */ | 189 | */ |
185 | int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | 190 | int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, |
186 | unsigned long intparm, __u8 lpm, __u8 key, | 191 | unsigned long intparm, __u8 lpm, __u8 key, |
187 | unsigned long flags) | 192 | unsigned long flags, int expires) |
188 | { | 193 | { |
189 | struct subchannel *sch; | 194 | struct subchannel *sch; |
190 | int ret; | 195 | int ret; |
@@ -224,6 +229,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | |||
224 | switch (ret) { | 229 | switch (ret) { |
225 | case 0: | 230 | case 0: |
226 | cdev->private->intparm = intparm; | 231 | cdev->private->intparm = intparm; |
232 | if (expires) | ||
233 | ccw_device_set_timeout(cdev, expires); | ||
227 | break; | 234 | break; |
228 | case -EACCES: | 235 | case -EACCES: |
229 | case -ENODEV: | 236 | case -ENODEV: |
@@ -234,7 +241,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | |||
234 | } | 241 | } |
235 | 242 | ||
236 | /** | 243 | /** |
237 | * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key | 244 | * ccw_device_start_key() - start a s390 channel program with key |
238 | * @cdev: target ccw device | 245 | * @cdev: target ccw device |
239 | * @cpa: logical start address of channel program | 246 | * @cpa: logical start address of channel program |
240 | * @intparm: user specific interruption parameter; will be presented back to | 247 | * @intparm: user specific interruption parameter; will be presented back to |
@@ -245,15 +252,10 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | |||
245 | * @key: storage key to be used for the I/O | 252 | * @key: storage key to be used for the I/O |
246 | * @flags: additional flags; defines the action to be performed for I/O | 253 | * @flags: additional flags; defines the action to be performed for I/O |
247 | * processing. | 254 | * processing. |
248 | * @expires: timeout value in jiffies | ||
249 | * | 255 | * |
250 | * Start a S/390 channel program. When the interrupt arrives, the | 256 | * Start a S/390 channel program. When the interrupt arrives, the |
251 | * IRQ handler is called, either immediately, delayed (dev-end missing, | 257 | * IRQ handler is called, either immediately, delayed (dev-end missing, |
252 | * or sense required) or never (no IRQ handler registered). | 258 | * or sense required) or never (no IRQ handler registered). |
253 | * This function notifies the device driver if the channel program has not | ||
254 | * completed during the time specified by @expires. If a timeout occurs, the | ||
255 | * channel program is terminated via xsch, hsch or csch, and the device's | ||
256 | * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT). | ||
257 | * Returns: | 259 | * Returns: |
258 | * %0, if the operation was successful; | 260 | * %0, if the operation was successful; |
259 | * -%EBUSY, if the device is busy, or status pending; | 261 | * -%EBUSY, if the device is busy, or status pending; |
@@ -262,19 +264,12 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | |||
262 | * Context: | 264 | * Context: |
263 | * Interrupts disabled, ccw device lock held | 265 | * Interrupts disabled, ccw device lock held |
264 | */ | 266 | */ |
265 | int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, | 267 | int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, |
266 | unsigned long intparm, __u8 lpm, __u8 key, | 268 | unsigned long intparm, __u8 lpm, __u8 key, |
267 | unsigned long flags, int expires) | 269 | unsigned long flags) |
268 | { | 270 | { |
269 | int ret; | 271 | return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key, |
270 | 272 | flags, 0); | |
271 | if (!cdev) | ||
272 | return -ENODEV; | ||
273 | ccw_device_set_timeout(cdev, expires); | ||
274 | ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags); | ||
275 | if (ret != 0) | ||
276 | ccw_device_set_timeout(cdev, 0); | ||
277 | return ret; | ||
278 | } | 273 | } |
279 | 274 | ||
280 | /** | 275 | /** |
@@ -489,18 +484,20 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id) | |||
489 | EXPORT_SYMBOL(ccw_device_get_id); | 484 | EXPORT_SYMBOL(ccw_device_get_id); |
490 | 485 | ||
491 | /** | 486 | /** |
492 | * ccw_device_tm_start_key() - perform start function | 487 | * ccw_device_tm_start_timeout_key() - perform start function |
493 | * @cdev: ccw device on which to perform the start function | 488 | * @cdev: ccw device on which to perform the start function |
494 | * @tcw: transport-command word to be started | 489 | * @tcw: transport-command word to be started |
495 | * @intparm: user defined parameter to be passed to the interrupt handler | 490 | * @intparm: user defined parameter to be passed to the interrupt handler |
496 | * @lpm: mask of paths to use | 491 | * @lpm: mask of paths to use |
497 | * @key: storage key to use for storage access | 492 | * @key: storage key to use for storage access |
493 | * @expires: time span in jiffies after which to abort request | ||
498 | * | 494 | * |
499 | * Start the tcw on the given ccw device. Return zero on success, non-zero | 495 | * Start the tcw on the given ccw device. Return zero on success, non-zero |
500 | * otherwise. | 496 | * otherwise. |
501 | */ | 497 | */ |
502 | int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, | 498 | int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, |
503 | unsigned long intparm, u8 lpm, u8 key) | 499 | unsigned long intparm, u8 lpm, u8 key, |
500 | int expires) | ||
504 | { | 501 | { |
505 | struct subchannel *sch; | 502 | struct subchannel *sch; |
506 | int rc; | 503 | int rc; |
@@ -527,37 +524,32 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, | |||
527 | return -EACCES; | 524 | return -EACCES; |
528 | } | 525 | } |
529 | rc = cio_tm_start_key(sch, tcw, lpm, key); | 526 | rc = cio_tm_start_key(sch, tcw, lpm, key); |
530 | if (rc == 0) | 527 | if (rc == 0) { |
531 | cdev->private->intparm = intparm; | 528 | cdev->private->intparm = intparm; |
529 | if (expires) | ||
530 | ccw_device_set_timeout(cdev, expires); | ||
531 | } | ||
532 | return rc; | 532 | return rc; |
533 | } | 533 | } |
534 | EXPORT_SYMBOL(ccw_device_tm_start_key); | 534 | EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); |
535 | 535 | ||
536 | /** | 536 | /** |
537 | * ccw_device_tm_start_timeout_key() - perform start function | 537 | * ccw_device_tm_start_key() - perform start function |
538 | * @cdev: ccw device on which to perform the start function | 538 | * @cdev: ccw device on which to perform the start function |
539 | * @tcw: transport-command word to be started | 539 | * @tcw: transport-command word to be started |
540 | * @intparm: user defined parameter to be passed to the interrupt handler | 540 | * @intparm: user defined parameter to be passed to the interrupt handler |
541 | * @lpm: mask of paths to use | 541 | * @lpm: mask of paths to use |
542 | * @key: storage key to use for storage access | 542 | * @key: storage key to use for storage access |
543 | * @expires: time span in jiffies after which to abort request | ||
544 | * | 543 | * |
545 | * Start the tcw on the given ccw device. Return zero on success, non-zero | 544 | * Start the tcw on the given ccw device. Return zero on success, non-zero |
546 | * otherwise. | 545 | * otherwise. |
547 | */ | 546 | */ |
548 | int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, | 547 | int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, |
549 | unsigned long intparm, u8 lpm, u8 key, | 548 | unsigned long intparm, u8 lpm, u8 key) |
550 | int expires) | ||
551 | { | 549 | { |
552 | int ret; | 550 | return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0); |
553 | |||
554 | ccw_device_set_timeout(cdev, expires); | ||
555 | ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key); | ||
556 | if (ret != 0) | ||
557 | ccw_device_set_timeout(cdev, 0); | ||
558 | return ret; | ||
559 | } | 551 | } |
560 | EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); | 552 | EXPORT_SYMBOL(ccw_device_tm_start_key); |
561 | 553 | ||
562 | /** | 554 | /** |
563 | * ccw_device_tm_start() - perform start function | 555 | * ccw_device_tm_start() - perform start function |
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index af571d8d6925..90e4e3a7841b 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h | |||
@@ -157,6 +157,7 @@ struct ccw_device_private { | |||
157 | unsigned long intparm; /* user interruption parameter */ | 157 | unsigned long intparm; /* user interruption parameter */ |
158 | struct qdio_irq *qdio_data; | 158 | struct qdio_irq *qdio_data; |
159 | struct irb irb; /* device status */ | 159 | struct irb irb; /* device status */ |
160 | int async_kill_io_rc; | ||
160 | struct senseid senseid; /* SenseID info */ | 161 | struct senseid senseid; /* SenseID info */ |
161 | struct pgid pgid[8]; /* path group IDs per chpid*/ | 162 | struct pgid pgid[8]; /* path group IDs per chpid*/ |
162 | struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ | 163 | struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ca72f3311004..3653bea38470 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -527,8 +527,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) | |||
527 | queue == card->qdio.no_in_queues - 1; | 527 | queue == card->qdio.no_in_queues - 1; |
528 | } | 528 | } |
529 | 529 | ||
530 | 530 | static int __qeth_issue_next_read(struct qeth_card *card) | |
531 | static int qeth_issue_next_read(struct qeth_card *card) | ||
532 | { | 531 | { |
533 | int rc; | 532 | int rc; |
534 | struct qeth_cmd_buffer *iob; | 533 | struct qeth_cmd_buffer *iob; |
@@ -559,6 +558,17 @@ static int qeth_issue_next_read(struct qeth_card *card) | |||
559 | return rc; | 558 | return rc; |
560 | } | 559 | } |
561 | 560 | ||
561 | static int qeth_issue_next_read(struct qeth_card *card) | ||
562 | { | ||
563 | int ret; | ||
564 | |||
565 | spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); | ||
566 | ret = __qeth_issue_next_read(card); | ||
567 | spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); | ||
568 | |||
569 | return ret; | ||
570 | } | ||
571 | |||
562 | static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) | 572 | static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) |
563 | { | 573 | { |
564 | struct qeth_reply *reply; | 574 | struct qeth_reply *reply; |
@@ -960,7 +970,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) | |||
960 | spin_lock_irqsave(&card->thread_mask_lock, flags); | 970 | spin_lock_irqsave(&card->thread_mask_lock, flags); |
961 | card->thread_running_mask &= ~thread; | 971 | card->thread_running_mask &= ~thread; |
962 | spin_unlock_irqrestore(&card->thread_mask_lock, flags); | 972 | spin_unlock_irqrestore(&card->thread_mask_lock, flags); |
963 | wake_up(&card->wait_q); | 973 | wake_up_all(&card->wait_q); |
964 | } | 974 | } |
965 | EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); | 975 | EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); |
966 | 976 | ||
@@ -1164,6 +1174,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
1164 | } | 1174 | } |
1165 | rc = qeth_get_problem(cdev, irb); | 1175 | rc = qeth_get_problem(cdev, irb); |
1166 | if (rc) { | 1176 | if (rc) { |
1177 | card->read_or_write_problem = 1; | ||
1167 | qeth_clear_ipacmd_list(card); | 1178 | qeth_clear_ipacmd_list(card); |
1168 | qeth_schedule_recovery(card); | 1179 | qeth_schedule_recovery(card); |
1169 | goto out; | 1180 | goto out; |
@@ -1182,7 +1193,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
1182 | return; | 1193 | return; |
1183 | if (channel == &card->read && | 1194 | if (channel == &card->read && |
1184 | channel->state == CH_STATE_UP) | 1195 | channel->state == CH_STATE_UP) |
1185 | qeth_issue_next_read(card); | 1196 | __qeth_issue_next_read(card); |
1186 | 1197 | ||
1187 | iob = channel->iob; | 1198 | iob = channel->iob; |
1188 | index = channel->buf_no; | 1199 | index = channel->buf_no; |
@@ -2134,24 +2145,25 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
2134 | } | 2145 | } |
2135 | reply->callback = reply_cb; | 2146 | reply->callback = reply_cb; |
2136 | reply->param = reply_param; | 2147 | reply->param = reply_param; |
2137 | if (card->state == CARD_STATE_DOWN) | 2148 | |
2138 | reply->seqno = QETH_IDX_COMMAND_SEQNO; | ||
2139 | else | ||
2140 | reply->seqno = card->seqno.ipa++; | ||
2141 | init_waitqueue_head(&reply->wait_q); | 2149 | init_waitqueue_head(&reply->wait_q); |
2142 | spin_lock_irqsave(&card->lock, flags); | ||
2143 | list_add_tail(&reply->list, &card->cmd_waiter_list); | ||
2144 | spin_unlock_irqrestore(&card->lock, flags); | ||
2145 | 2150 | ||
2146 | while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; | 2151 | while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; |
2147 | qeth_prepare_control_data(card, len, iob); | ||
2148 | 2152 | ||
2149 | if (IS_IPA(iob->data)) { | 2153 | if (IS_IPA(iob->data)) { |
2150 | cmd = __ipa_cmd(iob); | 2154 | cmd = __ipa_cmd(iob); |
2155 | cmd->hdr.seqno = card->seqno.ipa++; | ||
2156 | reply->seqno = cmd->hdr.seqno; | ||
2151 | event_timeout = QETH_IPA_TIMEOUT; | 2157 | event_timeout = QETH_IPA_TIMEOUT; |
2152 | } else { | 2158 | } else { |
2159 | reply->seqno = QETH_IDX_COMMAND_SEQNO; | ||
2153 | event_timeout = QETH_TIMEOUT; | 2160 | event_timeout = QETH_TIMEOUT; |
2154 | } | 2161 | } |
2162 | qeth_prepare_control_data(card, len, iob); | ||
2163 | |||
2164 | spin_lock_irqsave(&card->lock, flags); | ||
2165 | list_add_tail(&reply->list, &card->cmd_waiter_list); | ||
2166 | spin_unlock_irqrestore(&card->lock, flags); | ||
2155 | 2167 | ||
2156 | timeout = jiffies + event_timeout; | 2168 | timeout = jiffies + event_timeout; |
2157 | 2169 | ||
@@ -2933,7 +2945,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card, | |||
2933 | memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); | 2945 | memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); |
2934 | cmd->hdr.command = command; | 2946 | cmd->hdr.command = command; |
2935 | cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; | 2947 | cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; |
2936 | cmd->hdr.seqno = card->seqno.ipa; | 2948 | /* cmd->hdr.seqno is set by qeth_send_control_data() */ |
2937 | cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); | 2949 | cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); |
2938 | cmd->hdr.rel_adapter_no = (__u8) card->info.portno; | 2950 | cmd->hdr.rel_adapter_no = (__u8) card->info.portno; |
2939 | if (card->options.layer2) | 2951 | if (card->options.layer2) |
@@ -3898,10 +3910,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); | |||
3898 | int qeth_get_elements_no(struct qeth_card *card, | 3910 | int qeth_get_elements_no(struct qeth_card *card, |
3899 | struct sk_buff *skb, int extra_elems, int data_offset) | 3911 | struct sk_buff *skb, int extra_elems, int data_offset) |
3900 | { | 3912 | { |
3901 | int elements = qeth_get_elements_for_range( | 3913 | addr_t end = (addr_t)skb->data + skb_headlen(skb); |
3902 | (addr_t)skb->data + data_offset, | 3914 | int elements = qeth_get_elements_for_frags(skb); |
3903 | (addr_t)skb->data + skb_headlen(skb)) + | 3915 | addr_t start = (addr_t)skb->data + data_offset; |
3904 | qeth_get_elements_for_frags(skb); | 3916 | |
3917 | if (start != end) | ||
3918 | elements += qeth_get_elements_for_range(start, end); | ||
3905 | 3919 | ||
3906 | if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { | 3920 | if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { |
3907 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " | 3921 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " |
@@ -5084,8 +5098,6 @@ static void qeth_core_free_card(struct qeth_card *card) | |||
5084 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); | 5098 | QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); |
5085 | qeth_clean_channel(&card->read); | 5099 | qeth_clean_channel(&card->read); |
5086 | qeth_clean_channel(&card->write); | 5100 | qeth_clean_channel(&card->write); |
5087 | if (card->dev) | ||
5088 | free_netdev(card->dev); | ||
5089 | qeth_free_qdio_buffers(card); | 5101 | qeth_free_qdio_buffers(card); |
5090 | unregister_service_level(&card->qeth_service_level); | 5102 | unregister_service_level(&card->qeth_service_level); |
5091 | kfree(card); | 5103 | kfree(card); |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 7f236440483f..5ef4c978ad19 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -915,8 +915,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) | |||
915 | qeth_l2_set_offline(cgdev); | 915 | qeth_l2_set_offline(cgdev); |
916 | 916 | ||
917 | if (card->dev) { | 917 | if (card->dev) { |
918 | netif_napi_del(&card->napi); | ||
919 | unregister_netdev(card->dev); | 918 | unregister_netdev(card->dev); |
919 | free_netdev(card->dev); | ||
920 | card->dev = NULL; | 920 | card->dev = NULL; |
921 | } | 921 | } |
922 | return; | 922 | return; |
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index bdd45f4dcace..498fe9af2cdb 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h | |||
@@ -40,8 +40,40 @@ struct qeth_ipaddr { | |||
40 | unsigned int pfxlen; | 40 | unsigned int pfxlen; |
41 | } a6; | 41 | } a6; |
42 | } u; | 42 | } u; |
43 | |||
44 | }; | 43 | }; |
44 | |||
45 | static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1, | ||
46 | struct qeth_ipaddr *a2) | ||
47 | { | ||
48 | if (a1->proto != a2->proto) | ||
49 | return false; | ||
50 | if (a1->proto == QETH_PROT_IPV6) | ||
51 | return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr); | ||
52 | return a1->u.a4.addr == a2->u.a4.addr; | ||
53 | } | ||
54 | |||
55 | static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1, | ||
56 | struct qeth_ipaddr *a2) | ||
57 | { | ||
58 | /* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(), | ||
59 | * so 'proto' and 'addr' match for sure. | ||
60 | * | ||
61 | * For ucast: | ||
62 | * - 'mac' is always 0. | ||
63 | * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching | ||
64 | * values are required to avoid mixups in takeover eligibility. | ||
65 | * | ||
66 | * For mcast, | ||
67 | * - 'mac' is mapped from the IP, and thus always matches. | ||
68 | * - 'mask'/'pfxlen' is always 0. | ||
69 | */ | ||
70 | if (a1->type != a2->type) | ||
71 | return false; | ||
72 | if (a1->proto == QETH_PROT_IPV6) | ||
73 | return a1->u.a6.pfxlen == a2->u.a6.pfxlen; | ||
74 | return a1->u.a4.mask == a2->u.a4.mask; | ||
75 | } | ||
76 | |||
45 | static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) | 77 | static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) |
46 | { | 78 | { |
47 | u64 ret = 0; | 79 | u64 ret = 0; |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index b0c888e86cd4..b6b12220da71 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -67,6 +67,24 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr, | |||
67 | qeth_l3_ipaddr6_to_string(addr, buf); | 67 | qeth_l3_ipaddr6_to_string(addr, buf); |
68 | } | 68 | } |
69 | 69 | ||
70 | static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, | ||
71 | struct qeth_ipaddr *query) | ||
72 | { | ||
73 | u64 key = qeth_l3_ipaddr_hash(query); | ||
74 | struct qeth_ipaddr *addr; | ||
75 | |||
76 | if (query->is_multicast) { | ||
77 | hash_for_each_possible(card->ip_mc_htable, addr, hnode, key) | ||
78 | if (qeth_l3_addr_match_ip(addr, query)) | ||
79 | return addr; | ||
80 | } else { | ||
81 | hash_for_each_possible(card->ip_htable, addr, hnode, key) | ||
82 | if (qeth_l3_addr_match_ip(addr, query)) | ||
83 | return addr; | ||
84 | } | ||
85 | return NULL; | ||
86 | } | ||
87 | |||
70 | static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) | 88 | static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) |
71 | { | 89 | { |
72 | int i, j; | 90 | int i, j; |
@@ -120,34 +138,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, | |||
120 | return rc; | 138 | return rc; |
121 | } | 139 | } |
122 | 140 | ||
123 | inline int | ||
124 | qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2) | ||
125 | { | ||
126 | return addr1->proto == addr2->proto && | ||
127 | !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) && | ||
128 | ether_addr_equal_64bits(addr1->mac, addr2->mac); | ||
129 | } | ||
130 | |||
131 | static struct qeth_ipaddr * | ||
132 | qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | ||
133 | { | ||
134 | struct qeth_ipaddr *addr; | ||
135 | |||
136 | if (tmp_addr->is_multicast) { | ||
137 | hash_for_each_possible(card->ip_mc_htable, addr, | ||
138 | hnode, qeth_l3_ipaddr_hash(tmp_addr)) | ||
139 | if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) | ||
140 | return addr; | ||
141 | } else { | ||
142 | hash_for_each_possible(card->ip_htable, addr, | ||
143 | hnode, qeth_l3_ipaddr_hash(tmp_addr)) | ||
144 | if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr)) | ||
145 | return addr; | ||
146 | } | ||
147 | |||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | 141 | int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) |
152 | { | 142 | { |
153 | int rc = 0; | 143 | int rc = 0; |
@@ -162,23 +152,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | |||
162 | QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); | 152 | QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); |
163 | } | 153 | } |
164 | 154 | ||
165 | addr = qeth_l3_ip_from_hash(card, tmp_addr); | 155 | addr = qeth_l3_find_addr_by_ip(card, tmp_addr); |
166 | if (!addr) | 156 | if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr)) |
167 | return -ENOENT; | 157 | return -ENOENT; |
168 | 158 | ||
169 | addr->ref_counter--; | 159 | addr->ref_counter--; |
170 | if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL || | 160 | if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) |
171 | addr->type == QETH_IP_TYPE_RXIP)) | ||
172 | return rc; | 161 | return rc; |
173 | if (addr->in_progress) | 162 | if (addr->in_progress) |
174 | return -EINPROGRESS; | 163 | return -EINPROGRESS; |
175 | 164 | ||
176 | if (!qeth_card_hw_is_reachable(card)) { | 165 | if (qeth_card_hw_is_reachable(card)) |
177 | addr->disp_flag = QETH_DISP_ADDR_DELETE; | 166 | rc = qeth_l3_deregister_addr_entry(card, addr); |
178 | return 0; | ||
179 | } | ||
180 | |||
181 | rc = qeth_l3_deregister_addr_entry(card, addr); | ||
182 | 167 | ||
183 | hash_del(&addr->hnode); | 168 | hash_del(&addr->hnode); |
184 | kfree(addr); | 169 | kfree(addr); |
@@ -190,6 +175,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | |||
190 | { | 175 | { |
191 | int rc = 0; | 176 | int rc = 0; |
192 | struct qeth_ipaddr *addr; | 177 | struct qeth_ipaddr *addr; |
178 | char buf[40]; | ||
193 | 179 | ||
194 | QETH_CARD_TEXT(card, 4, "addip"); | 180 | QETH_CARD_TEXT(card, 4, "addip"); |
195 | 181 | ||
@@ -200,8 +186,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | |||
200 | QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); | 186 | QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); |
201 | } | 187 | } |
202 | 188 | ||
203 | addr = qeth_l3_ip_from_hash(card, tmp_addr); | 189 | addr = qeth_l3_find_addr_by_ip(card, tmp_addr); |
204 | if (!addr) { | 190 | if (addr) { |
191 | if (tmp_addr->type != QETH_IP_TYPE_NORMAL) | ||
192 | return -EADDRINUSE; | ||
193 | if (qeth_l3_addr_match_all(addr, tmp_addr)) { | ||
194 | addr->ref_counter++; | ||
195 | return 0; | ||
196 | } | ||
197 | qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u, | ||
198 | buf); | ||
199 | dev_warn(&card->gdev->dev, | ||
200 | "Registering IP address %s failed\n", buf); | ||
201 | return -EADDRINUSE; | ||
202 | } else { | ||
205 | addr = qeth_l3_get_addr_buffer(tmp_addr->proto); | 203 | addr = qeth_l3_get_addr_buffer(tmp_addr->proto); |
206 | if (!addr) | 204 | if (!addr) |
207 | return -ENOMEM; | 205 | return -ENOMEM; |
@@ -241,19 +239,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | |||
241 | (rc == IPA_RC_LAN_OFFLINE)) { | 239 | (rc == IPA_RC_LAN_OFFLINE)) { |
242 | addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; | 240 | addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; |
243 | if (addr->ref_counter < 1) { | 241 | if (addr->ref_counter < 1) { |
244 | qeth_l3_delete_ip(card, addr); | 242 | qeth_l3_deregister_addr_entry(card, addr); |
243 | hash_del(&addr->hnode); | ||
245 | kfree(addr); | 244 | kfree(addr); |
246 | } | 245 | } |
247 | } else { | 246 | } else { |
248 | hash_del(&addr->hnode); | 247 | hash_del(&addr->hnode); |
249 | kfree(addr); | 248 | kfree(addr); |
250 | } | 249 | } |
251 | } else { | ||
252 | if (addr->type == QETH_IP_TYPE_NORMAL || | ||
253 | addr->type == QETH_IP_TYPE_RXIP) | ||
254 | addr->ref_counter++; | ||
255 | } | 250 | } |
256 | |||
257 | return rc; | 251 | return rc; |
258 | } | 252 | } |
259 | 253 | ||
@@ -321,11 +315,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card) | |||
321 | spin_lock_bh(&card->ip_lock); | 315 | spin_lock_bh(&card->ip_lock); |
322 | 316 | ||
323 | hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { | 317 | hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { |
324 | if (addr->disp_flag == QETH_DISP_ADDR_DELETE) { | 318 | if (addr->disp_flag == QETH_DISP_ADDR_ADD) { |
325 | qeth_l3_deregister_addr_entry(card, addr); | ||
326 | hash_del(&addr->hnode); | ||
327 | kfree(addr); | ||
328 | } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) { | ||
329 | if (addr->proto == QETH_PROT_IPV4) { | 319 | if (addr->proto == QETH_PROT_IPV4) { |
330 | addr->in_progress = 1; | 320 | addr->in_progress = 1; |
331 | spin_unlock_bh(&card->ip_lock); | 321 | spin_unlock_bh(&card->ip_lock); |
@@ -643,12 +633,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, | |||
643 | return -ENOMEM; | 633 | return -ENOMEM; |
644 | 634 | ||
645 | spin_lock_bh(&card->ip_lock); | 635 | spin_lock_bh(&card->ip_lock); |
646 | 636 | rc = qeth_l3_add_ip(card, ipaddr); | |
647 | if (qeth_l3_ip_from_hash(card, ipaddr)) | ||
648 | rc = -EEXIST; | ||
649 | else | ||
650 | rc = qeth_l3_add_ip(card, ipaddr); | ||
651 | |||
652 | spin_unlock_bh(&card->ip_lock); | 637 | spin_unlock_bh(&card->ip_lock); |
653 | 638 | ||
654 | kfree(ipaddr); | 639 | kfree(ipaddr); |
@@ -713,12 +698,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, | |||
713 | return -ENOMEM; | 698 | return -ENOMEM; |
714 | 699 | ||
715 | spin_lock_bh(&card->ip_lock); | 700 | spin_lock_bh(&card->ip_lock); |
716 | 701 | rc = qeth_l3_add_ip(card, ipaddr); | |
717 | if (qeth_l3_ip_from_hash(card, ipaddr)) | ||
718 | rc = -EEXIST; | ||
719 | else | ||
720 | rc = qeth_l3_add_ip(card, ipaddr); | ||
721 | |||
722 | spin_unlock_bh(&card->ip_lock); | 702 | spin_unlock_bh(&card->ip_lock); |
723 | 703 | ||
724 | kfree(ipaddr); | 704 | kfree(ipaddr); |
@@ -1239,8 +1219,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) | |||
1239 | tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); | 1219 | tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); |
1240 | tmp->is_multicast = 1; | 1220 | tmp->is_multicast = 1; |
1241 | 1221 | ||
1242 | ipm = qeth_l3_ip_from_hash(card, tmp); | 1222 | ipm = qeth_l3_find_addr_by_ip(card, tmp); |
1243 | if (ipm) { | 1223 | if (ipm) { |
1224 | /* for mcast, by-IP match means full match */ | ||
1244 | ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; | 1225 | ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; |
1245 | } else { | 1226 | } else { |
1246 | ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); | 1227 | ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); |
@@ -1319,8 +1300,9 @@ static void qeth_l3_add_mc6_to_hash(struct qeth_card *card, | |||
1319 | sizeof(struct in6_addr)); | 1300 | sizeof(struct in6_addr)); |
1320 | tmp->is_multicast = 1; | 1301 | tmp->is_multicast = 1; |
1321 | 1302 | ||
1322 | ipm = qeth_l3_ip_from_hash(card, tmp); | 1303 | ipm = qeth_l3_find_addr_by_ip(card, tmp); |
1323 | if (ipm) { | 1304 | if (ipm) { |
1305 | /* for mcast, by-IP match means full match */ | ||
1324 | ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; | 1306 | ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; |
1325 | continue; | 1307 | continue; |
1326 | } | 1308 | } |
@@ -2450,11 +2432,12 @@ static void qeth_tso_fill_header(struct qeth_card *card, | |||
2450 | static int qeth_l3_get_elements_no_tso(struct qeth_card *card, | 2432 | static int qeth_l3_get_elements_no_tso(struct qeth_card *card, |
2451 | struct sk_buff *skb, int extra_elems) | 2433 | struct sk_buff *skb, int extra_elems) |
2452 | { | 2434 | { |
2453 | addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); | 2435 | addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); |
2454 | int elements = qeth_get_elements_for_range( | 2436 | addr_t end = (addr_t)skb->data + skb_headlen(skb); |
2455 | tcpdptr, | 2437 | int elements = qeth_get_elements_for_frags(skb); |
2456 | (addr_t)skb->data + skb_headlen(skb)) + | 2438 | |
2457 | qeth_get_elements_for_frags(skb); | 2439 | if (start != end) |
2440 | elements += qeth_get_elements_for_range(start, end); | ||
2458 | 2441 | ||
2459 | if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { | 2442 | if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { |
2460 | QETH_DBF_MESSAGE(2, | 2443 | QETH_DBF_MESSAGE(2, |
@@ -2882,8 +2865,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) | |||
2882 | qeth_l3_set_offline(cgdev); | 2865 | qeth_l3_set_offline(cgdev); |
2883 | 2866 | ||
2884 | if (card->dev) { | 2867 | if (card->dev) { |
2885 | netif_napi_del(&card->napi); | ||
2886 | unregister_netdev(card->dev); | 2868 | unregister_netdev(card->dev); |
2869 | free_netdev(card->dev); | ||
2887 | card->dev = NULL; | 2870 | card->dev = NULL; |
2888 | } | 2871 | } |
2889 | 2872 | ||
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index ba2e0856d22c..8f5c1d7f751a 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c | |||
@@ -1297,6 +1297,9 @@ static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event) | |||
1297 | vcdev->device_lost = true; | 1297 | vcdev->device_lost = true; |
1298 | rc = NOTIFY_DONE; | 1298 | rc = NOTIFY_DONE; |
1299 | break; | 1299 | break; |
1300 | case CIO_OPER: | ||
1301 | rc = NOTIFY_OK; | ||
1302 | break; | ||
1300 | default: | 1303 | default: |
1301 | rc = NOTIFY_DONE; | 1304 | rc = NOTIFY_DONE; |
1302 | break; | 1305 | break; |
@@ -1309,6 +1312,27 @@ static struct ccw_device_id virtio_ids[] = { | |||
1309 | {}, | 1312 | {}, |
1310 | }; | 1313 | }; |
1311 | 1314 | ||
1315 | #ifdef CONFIG_PM_SLEEP | ||
1316 | static int virtio_ccw_freeze(struct ccw_device *cdev) | ||
1317 | { | ||
1318 | struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); | ||
1319 | |||
1320 | return virtio_device_freeze(&vcdev->vdev); | ||
1321 | } | ||
1322 | |||
1323 | static int virtio_ccw_restore(struct ccw_device *cdev) | ||
1324 | { | ||
1325 | struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); | ||
1326 | int ret; | ||
1327 | |||
1328 | ret = virtio_ccw_set_transport_rev(vcdev); | ||
1329 | if (ret) | ||
1330 | return ret; | ||
1331 | |||
1332 | return virtio_device_restore(&vcdev->vdev); | ||
1333 | } | ||
1334 | #endif | ||
1335 | |||
1312 | static struct ccw_driver virtio_ccw_driver = { | 1336 | static struct ccw_driver virtio_ccw_driver = { |
1313 | .driver = { | 1337 | .driver = { |
1314 | .owner = THIS_MODULE, | 1338 | .owner = THIS_MODULE, |
@@ -1321,6 +1345,11 @@ static struct ccw_driver virtio_ccw_driver = { | |||
1321 | .set_online = virtio_ccw_online, | 1345 | .set_online = virtio_ccw_online, |
1322 | .notify = virtio_ccw_cio_notify, | 1346 | .notify = virtio_ccw_cio_notify, |
1323 | .int_class = IRQIO_VIR, | 1347 | .int_class = IRQIO_VIR, |
1348 | #ifdef CONFIG_PM_SLEEP | ||
1349 | .freeze = virtio_ccw_freeze, | ||
1350 | .thaw = virtio_ccw_restore, | ||
1351 | .restore = virtio_ccw_restore, | ||
1352 | #endif | ||
1324 | }; | 1353 | }; |
1325 | 1354 | ||
1326 | static int __init pure_hex(char **cp, unsigned int *val, int min_digit, | 1355 | static int __init pure_hex(char **cp, unsigned int *val, int min_digit, |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index fcfd28d2884c..de1b3fce936d 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -185,7 +185,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \ | |||
185 | CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m) | 185 | CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m) |
186 | zalon7xx-objs := zalon.o ncr53c8xx.o | 186 | zalon7xx-objs := zalon.o ncr53c8xx.o |
187 | NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o | 187 | NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o |
188 | oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o | ||
189 | 188 | ||
190 | # Files generated that shall be removed upon make clean | 189 | # Files generated that shall be removed upon make clean |
191 | clean-files := 53c700_d.h 53c700_u.h | 190 | clean-files := 53c700_d.h 53c700_u.h |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index b3b931ab77eb..2664ea0df35f 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
@@ -1693,8 +1693,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1693 | * Map in the registers from the adapter. | 1693 | * Map in the registers from the adapter. |
1694 | */ | 1694 | */ |
1695 | aac->base_size = AAC_MIN_FOOTPRINT_SIZE; | 1695 | aac->base_size = AAC_MIN_FOOTPRINT_SIZE; |
1696 | if ((*aac_drivers[index].init)(aac)) | 1696 | if ((*aac_drivers[index].init)(aac)) { |
1697 | error = -ENODEV; | ||
1697 | goto out_unmap; | 1698 | goto out_unmap; |
1699 | } | ||
1698 | 1700 | ||
1699 | if (aac->sync_mode) { | 1701 | if (aac->sync_mode) { |
1700 | if (aac_sync_mode) | 1702 | if (aac_sync_mode) |
diff --git a/drivers/scsi/aic7xxx/aiclib.c b/drivers/scsi/aic7xxx/aiclib.c deleted file mode 100644 index 828ae3d9a510..000000000000 --- a/drivers/scsi/aic7xxx/aiclib.c +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | /* | ||
2 | * Implementation of Utility functions for all SCSI device types. | ||
3 | * | ||
4 | * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. | ||
5 | * Copyright (c) 1997, 1998 Kenneth D. Merry. | ||
6 | * All rights reserved. | ||
7 | * | ||
8 | * Redistribution and use in source and binary forms, with or without | ||
9 | * modification, are permitted provided that the following conditions | ||
10 | * are met: | ||
11 | * 1. Redistributions of source code must retain the above copyright | ||
12 | * notice, this list of conditions, and the following disclaimer, | ||
13 | * without modification, immediately at the beginning of the file. | ||
14 | * 2. The name of the author may not be used to endorse or promote products | ||
15 | * derived from this software without specific prior written permission. | ||
16 | * | ||
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND | ||
18 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
19 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
20 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR | ||
21 | * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
22 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | ||
23 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | ||
24 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | ||
26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | ||
27 | * SUCH DAMAGE. | ||
28 | * | ||
29 | * $FreeBSD: src/sys/cam/scsi/scsi_all.c,v 1.38 2002/09/23 04:56:35 mjacob Exp $ | ||
30 | * $Id$ | ||
31 | */ | ||
32 | |||
33 | #include "aiclib.h" | ||
34 | |||
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 8e2f767147cb..5a645b8b9af1 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c | |||
@@ -1889,6 +1889,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, | |||
1889 | /* we will not receive ABTS response for this IO */ | 1889 | /* we will not receive ABTS response for this IO */ |
1890 | BNX2FC_IO_DBG(io_req, "Timer context finished processing " | 1890 | BNX2FC_IO_DBG(io_req, "Timer context finished processing " |
1891 | "this scsi cmd\n"); | 1891 | "this scsi cmd\n"); |
1892 | return; | ||
1892 | } | 1893 | } |
1893 | 1894 | ||
1894 | /* Cancel the timeout_work, as we received IO completion */ | 1895 | /* Cancel the timeout_work, as we received IO completion */ |
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index be5ee2d37815..7dbbbb81a1e7 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c | |||
@@ -114,7 +114,7 @@ static enum csio_ln_ev fwevt_to_lnevt[] = { | |||
114 | static struct csio_lnode * | 114 | static struct csio_lnode * |
115 | csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid) | 115 | csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid) |
116 | { | 116 | { |
117 | struct csio_lnode *ln = hw->rln; | 117 | struct csio_lnode *ln; |
118 | struct list_head *tmp; | 118 | struct list_head *tmp; |
119 | 119 | ||
120 | /* Match siblings lnode with portid */ | 120 | /* Match siblings lnode with portid */ |
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 022e421c2185..4b44325d1a82 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c | |||
@@ -876,6 +876,11 @@ static void alua_rtpg_work(struct work_struct *work) | |||
876 | 876 | ||
877 | /** | 877 | /** |
878 | * alua_rtpg_queue() - cause RTPG to be submitted asynchronously | 878 | * alua_rtpg_queue() - cause RTPG to be submitted asynchronously |
879 | * @pg: ALUA port group associated with @sdev. | ||
880 | * @sdev: SCSI device for which to submit an RTPG. | ||
881 | * @qdata: Information about the callback to invoke after the RTPG. | ||
882 | * @force: Whether or not to submit an RTPG if a work item that will submit an | ||
883 | * RTPG already has been scheduled. | ||
879 | * | 884 | * |
880 | * Returns true if and only if alua_rtpg_work() will be called asynchronously. | 885 | * Returns true if and only if alua_rtpg_work() will be called asynchronously. |
881 | * That function is responsible for calling @qdata->fn(). | 886 | * That function is responsible for calling @qdata->fn(). |
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 57bf43e34863..dd9464920456 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
@@ -328,8 +328,6 @@ static void scsi_host_dev_release(struct device *dev) | |||
328 | if (shost->work_q) | 328 | if (shost->work_q) |
329 | destroy_workqueue(shost->work_q); | 329 | destroy_workqueue(shost->work_q); |
330 | 330 | ||
331 | destroy_rcu_head(&shost->rcu); | ||
332 | |||
333 | if (shost->shost_state == SHOST_CREATED) { | 331 | if (shost->shost_state == SHOST_CREATED) { |
334 | /* | 332 | /* |
335 | * Free the shost_dev device name here if scsi_host_alloc() | 333 | * Free the shost_dev device name here if scsi_host_alloc() |
@@ -404,7 +402,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) | |||
404 | INIT_LIST_HEAD(&shost->starved_list); | 402 | INIT_LIST_HEAD(&shost->starved_list); |
405 | init_waitqueue_head(&shost->host_wait); | 403 | init_waitqueue_head(&shost->host_wait); |
406 | mutex_init(&shost->scan_mutex); | 404 | mutex_init(&shost->scan_mutex); |
407 | init_rcu_head(&shost->rcu); | ||
408 | 405 | ||
409 | index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL); | 406 | index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL); |
410 | if (index < 0) | 407 | if (index < 0) |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 9a0696f68f37..b81a53c4a9a8 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h | |||
@@ -367,7 +367,7 @@ enum ibmvfc_fcp_rsp_info_codes { | |||
367 | }; | 367 | }; |
368 | 368 | ||
369 | struct ibmvfc_fcp_rsp_info { | 369 | struct ibmvfc_fcp_rsp_info { |
370 | __be16 reserved; | 370 | u8 reserved[3]; |
371 | u8 rsp_code; | 371 | u8 rsp_code; |
372 | u8 reserved2[4]; | 372 | u8 reserved2[4]; |
373 | }__attribute__((packed, aligned (2))); | 373 | }__attribute__((packed, aligned (2))); |
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 6de9681ace82..ceab5e5c41c2 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
@@ -223,6 +223,7 @@ out_done: | |||
223 | static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) | 223 | static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) |
224 | { | 224 | { |
225 | struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); | 225 | struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); |
226 | struct domain_device *dev = cmd_to_domain_dev(cmd); | ||
226 | struct sas_task *task = TO_SAS_TASK(cmd); | 227 | struct sas_task *task = TO_SAS_TASK(cmd); |
227 | 228 | ||
228 | /* At this point, we only get called following an actual abort | 229 | /* At this point, we only get called following an actual abort |
@@ -231,6 +232,14 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) | |||
231 | */ | 232 | */ |
232 | sas_end_task(cmd, task); | 233 | sas_end_task(cmd, task); |
233 | 234 | ||
235 | if (dev_is_sata(dev)) { | ||
236 | /* defer commands to libata so that libata EH can | ||
237 | * handle ata qcs correctly | ||
238 | */ | ||
239 | list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q); | ||
240 | return; | ||
241 | } | ||
242 | |||
234 | /* now finish the command and move it on to the error | 243 | /* now finish the command and move it on to the error |
235 | * handler done list, this also takes it off the | 244 | * handler done list, this also takes it off the |
236 | * error handler pending list. | 245 | * error handler pending list. |
@@ -238,22 +247,6 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) | |||
238 | scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q); | 247 | scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q); |
239 | } | 248 | } |
240 | 249 | ||
241 | static void sas_eh_defer_cmd(struct scsi_cmnd *cmd) | ||
242 | { | ||
243 | struct domain_device *dev = cmd_to_domain_dev(cmd); | ||
244 | struct sas_ha_struct *ha = dev->port->ha; | ||
245 | struct sas_task *task = TO_SAS_TASK(cmd); | ||
246 | |||
247 | if (!dev_is_sata(dev)) { | ||
248 | sas_eh_finish_cmd(cmd); | ||
249 | return; | ||
250 | } | ||
251 | |||
252 | /* report the timeout to libata */ | ||
253 | sas_end_task(cmd, task); | ||
254 | list_move_tail(&cmd->eh_entry, &ha->eh_ata_q); | ||
255 | } | ||
256 | |||
257 | static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) | 250 | static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) |
258 | { | 251 | { |
259 | struct scsi_cmnd *cmd, *n; | 252 | struct scsi_cmnd *cmd, *n; |
@@ -261,7 +254,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd | |||
261 | list_for_each_entry_safe(cmd, n, error_q, eh_entry) { | 254 | list_for_each_entry_safe(cmd, n, error_q, eh_entry) { |
262 | if (cmd->device->sdev_target == my_cmd->device->sdev_target && | 255 | if (cmd->device->sdev_target == my_cmd->device->sdev_target && |
263 | cmd->device->lun == my_cmd->device->lun) | 256 | cmd->device->lun == my_cmd->device->lun) |
264 | sas_eh_defer_cmd(cmd); | 257 | sas_eh_finish_cmd(cmd); |
265 | } | 258 | } |
266 | } | 259 | } |
267 | 260 | ||
@@ -631,12 +624,12 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * | |||
631 | case TASK_IS_DONE: | 624 | case TASK_IS_DONE: |
632 | SAS_DPRINTK("%s: task 0x%p is done\n", __func__, | 625 | SAS_DPRINTK("%s: task 0x%p is done\n", __func__, |
633 | task); | 626 | task); |
634 | sas_eh_defer_cmd(cmd); | 627 | sas_eh_finish_cmd(cmd); |
635 | continue; | 628 | continue; |
636 | case TASK_IS_ABORTED: | 629 | case TASK_IS_ABORTED: |
637 | SAS_DPRINTK("%s: task 0x%p is aborted\n", | 630 | SAS_DPRINTK("%s: task 0x%p is aborted\n", |
638 | __func__, task); | 631 | __func__, task); |
639 | sas_eh_defer_cmd(cmd); | 632 | sas_eh_finish_cmd(cmd); |
640 | continue; | 633 | continue; |
641 | case TASK_IS_AT_LU: | 634 | case TASK_IS_AT_LU: |
642 | SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); | 635 | SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); |
@@ -647,7 +640,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * | |||
647 | "recovered\n", | 640 | "recovered\n", |
648 | SAS_ADDR(task->dev), | 641 | SAS_ADDR(task->dev), |
649 | cmd->device->lun); | 642 | cmd->device->lun); |
650 | sas_eh_defer_cmd(cmd); | 643 | sas_eh_finish_cmd(cmd); |
651 | sas_scsi_clear_queue_lu(work_q, cmd); | 644 | sas_scsi_clear_queue_lu(work_q, cmd); |
652 | goto Again; | 645 | goto Again; |
653 | } | 646 | } |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 073ced07e662..dc8e850fbfd2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -216,36 +216,30 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance, | |||
216 | /** | 216 | /** |
217 | * megasas_fire_cmd_fusion - Sends command to the FW | 217 | * megasas_fire_cmd_fusion - Sends command to the FW |
218 | * @instance: Adapter soft state | 218 | * @instance: Adapter soft state |
219 | * @req_desc: 32bit or 64bit Request descriptor | 219 | * @req_desc: 64bit Request descriptor |
220 | * | 220 | * |
221 | * Perform PCI Write. Ventura supports 32 bit Descriptor. | 221 | * Perform PCI Write. |
222 | * Prior to Ventura (12G) MR controller supports 64 bit Descriptor. | ||
223 | */ | 222 | */ |
224 | 223 | ||
225 | static void | 224 | static void |
226 | megasas_fire_cmd_fusion(struct megasas_instance *instance, | 225 | megasas_fire_cmd_fusion(struct megasas_instance *instance, |
227 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) | 226 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc) |
228 | { | 227 | { |
229 | if (instance->adapter_type == VENTURA_SERIES) | ||
230 | writel(le32_to_cpu(req_desc->u.low), | ||
231 | &instance->reg_set->inbound_single_queue_port); | ||
232 | else { | ||
233 | #if defined(writeq) && defined(CONFIG_64BIT) | 228 | #if defined(writeq) && defined(CONFIG_64BIT) |
234 | u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | | 229 | u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) | |
235 | le32_to_cpu(req_desc->u.low)); | 230 | le32_to_cpu(req_desc->u.low)); |
236 | 231 | ||
237 | writeq(req_data, &instance->reg_set->inbound_low_queue_port); | 232 | writeq(req_data, &instance->reg_set->inbound_low_queue_port); |
238 | #else | 233 | #else |
239 | unsigned long flags; | 234 | unsigned long flags; |
240 | spin_lock_irqsave(&instance->hba_lock, flags); | 235 | spin_lock_irqsave(&instance->hba_lock, flags); |
241 | writel(le32_to_cpu(req_desc->u.low), | 236 | writel(le32_to_cpu(req_desc->u.low), |
242 | &instance->reg_set->inbound_low_queue_port); | 237 | &instance->reg_set->inbound_low_queue_port); |
243 | writel(le32_to_cpu(req_desc->u.high), | 238 | writel(le32_to_cpu(req_desc->u.high), |
244 | &instance->reg_set->inbound_high_queue_port); | 239 | &instance->reg_set->inbound_high_queue_port); |
245 | mmiowb(); | 240 | mmiowb(); |
246 | spin_unlock_irqrestore(&instance->hba_lock, flags); | 241 | spin_unlock_irqrestore(&instance->hba_lock, flags); |
247 | #endif | 242 | #endif |
248 | } | ||
249 | } | 243 | } |
250 | 244 | ||
251 | /** | 245 | /** |
@@ -982,7 +976,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) | |||
982 | const char *sys_info; | 976 | const char *sys_info; |
983 | MFI_CAPABILITIES *drv_ops; | 977 | MFI_CAPABILITIES *drv_ops; |
984 | u32 scratch_pad_2; | 978 | u32 scratch_pad_2; |
985 | unsigned long flags; | ||
986 | ktime_t time; | 979 | ktime_t time; |
987 | bool cur_fw_64bit_dma_capable; | 980 | bool cur_fw_64bit_dma_capable; |
988 | 981 | ||
@@ -1121,14 +1114,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) | |||
1121 | break; | 1114 | break; |
1122 | } | 1115 | } |
1123 | 1116 | ||
1124 | /* For Ventura also IOC INIT required 64 bit Descriptor write. */ | 1117 | megasas_fire_cmd_fusion(instance, &req_desc); |
1125 | spin_lock_irqsave(&instance->hba_lock, flags); | ||
1126 | writel(le32_to_cpu(req_desc.u.low), | ||
1127 | &instance->reg_set->inbound_low_queue_port); | ||
1128 | writel(le32_to_cpu(req_desc.u.high), | ||
1129 | &instance->reg_set->inbound_high_queue_port); | ||
1130 | mmiowb(); | ||
1131 | spin_unlock_irqrestore(&instance->hba_lock, flags); | ||
1132 | 1118 | ||
1133 | wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); | 1119 | wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); |
1134 | 1120 | ||
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 13d6e4ec3022..0aafbfd1b746 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
@@ -2410,8 +2410,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) | |||
2410 | continue; | 2410 | continue; |
2411 | } | 2411 | } |
2412 | 2412 | ||
2413 | for_each_cpu(cpu, mask) | 2413 | for_each_cpu_and(cpu, mask, cpu_online_mask) { |
2414 | if (cpu >= ioc->cpu_msix_table_sz) | ||
2415 | break; | ||
2414 | ioc->cpu_msix_table[cpu] = reply_q->msix_index; | 2416 | ioc->cpu_msix_table[cpu] = reply_q->msix_index; |
2417 | } | ||
2415 | } | 2418 | } |
2416 | return; | 2419 | return; |
2417 | } | 2420 | } |
@@ -6294,14 +6297,14 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) | |||
6294 | } | 6297 | } |
6295 | 6298 | ||
6296 | /** | 6299 | /** |
6297 | * _wait_for_commands_to_complete - reset controller | 6300 | * mpt3sas_wait_for_commands_to_complete - reset controller |
6298 | * @ioc: Pointer to MPT_ADAPTER structure | 6301 | * @ioc: Pointer to MPT_ADAPTER structure |
6299 | * | 6302 | * |
6300 | * This function is waiting 10s for all pending commands to complete | 6303 | * This function is waiting 10s for all pending commands to complete |
6301 | * prior to putting controller in reset. | 6304 | * prior to putting controller in reset. |
6302 | */ | 6305 | */ |
6303 | static void | 6306 | void |
6304 | _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) | 6307 | mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) |
6305 | { | 6308 | { |
6306 | u32 ioc_state; | 6309 | u32 ioc_state; |
6307 | 6310 | ||
@@ -6374,7 +6377,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, | |||
6374 | is_fault = 1; | 6377 | is_fault = 1; |
6375 | } | 6378 | } |
6376 | _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); | 6379 | _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); |
6377 | _wait_for_commands_to_complete(ioc); | 6380 | mpt3sas_wait_for_commands_to_complete(ioc); |
6378 | _base_mask_interrupts(ioc); | 6381 | _base_mask_interrupts(ioc); |
6379 | r = _base_make_ioc_ready(ioc, type); | 6382 | r = _base_make_ioc_ready(ioc, type); |
6380 | if (r) | 6383 | if (r) |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 789bc421424b..99ccf83b8c51 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h | |||
@@ -1433,6 +1433,9 @@ void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, | |||
1433 | 1433 | ||
1434 | int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); | 1434 | int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); |
1435 | 1435 | ||
1436 | void | ||
1437 | mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); | ||
1438 | |||
1436 | 1439 | ||
1437 | /* scsih shared API */ | 1440 | /* scsih shared API */ |
1438 | struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, | 1441 | struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 74fca184dba9..a1cb0236c550 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
@@ -2835,7 +2835,8 @@ scsih_abort(struct scsi_cmnd *scmd) | |||
2835 | _scsih_tm_display_info(ioc, scmd); | 2835 | _scsih_tm_display_info(ioc, scmd); |
2836 | 2836 | ||
2837 | sas_device_priv_data = scmd->device->hostdata; | 2837 | sas_device_priv_data = scmd->device->hostdata; |
2838 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { | 2838 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target || |
2839 | ioc->remove_host) { | ||
2839 | sdev_printk(KERN_INFO, scmd->device, | 2840 | sdev_printk(KERN_INFO, scmd->device, |
2840 | "device been deleted! scmd(%p)\n", scmd); | 2841 | "device been deleted! scmd(%p)\n", scmd); |
2841 | scmd->result = DID_NO_CONNECT << 16; | 2842 | scmd->result = DID_NO_CONNECT << 16; |
@@ -2898,7 +2899,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd) | |||
2898 | _scsih_tm_display_info(ioc, scmd); | 2899 | _scsih_tm_display_info(ioc, scmd); |
2899 | 2900 | ||
2900 | sas_device_priv_data = scmd->device->hostdata; | 2901 | sas_device_priv_data = scmd->device->hostdata; |
2901 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { | 2902 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target || |
2903 | ioc->remove_host) { | ||
2902 | sdev_printk(KERN_INFO, scmd->device, | 2904 | sdev_printk(KERN_INFO, scmd->device, |
2903 | "device been deleted! scmd(%p)\n", scmd); | 2905 | "device been deleted! scmd(%p)\n", scmd); |
2904 | scmd->result = DID_NO_CONNECT << 16; | 2906 | scmd->result = DID_NO_CONNECT << 16; |
@@ -2961,7 +2963,8 @@ scsih_target_reset(struct scsi_cmnd *scmd) | |||
2961 | _scsih_tm_display_info(ioc, scmd); | 2963 | _scsih_tm_display_info(ioc, scmd); |
2962 | 2964 | ||
2963 | sas_device_priv_data = scmd->device->hostdata; | 2965 | sas_device_priv_data = scmd->device->hostdata; |
2964 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { | 2966 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target || |
2967 | ioc->remove_host) { | ||
2965 | starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n", | 2968 | starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n", |
2966 | scmd); | 2969 | scmd); |
2967 | scmd->result = DID_NO_CONNECT << 16; | 2970 | scmd->result = DID_NO_CONNECT << 16; |
@@ -3019,7 +3022,7 @@ scsih_host_reset(struct scsi_cmnd *scmd) | |||
3019 | ioc->name, scmd); | 3022 | ioc->name, scmd); |
3020 | scsi_print_command(scmd); | 3023 | scsi_print_command(scmd); |
3021 | 3024 | ||
3022 | if (ioc->is_driver_loading) { | 3025 | if (ioc->is_driver_loading || ioc->remove_host) { |
3023 | pr_info(MPT3SAS_FMT "Blocking the host reset\n", | 3026 | pr_info(MPT3SAS_FMT "Blocking the host reset\n", |
3024 | ioc->name); | 3027 | ioc->name); |
3025 | r = FAILED; | 3028 | r = FAILED; |
@@ -4453,7 +4456,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) | |||
4453 | st = scsi_cmd_priv(scmd); | 4456 | st = scsi_cmd_priv(scmd); |
4454 | mpt3sas_base_clear_st(ioc, st); | 4457 | mpt3sas_base_clear_st(ioc, st); |
4455 | scsi_dma_unmap(scmd); | 4458 | scsi_dma_unmap(scmd); |
4456 | if (ioc->pci_error_recovery) | 4459 | if (ioc->pci_error_recovery || ioc->remove_host) |
4457 | scmd->result = DID_NO_CONNECT << 16; | 4460 | scmd->result = DID_NO_CONNECT << 16; |
4458 | else | 4461 | else |
4459 | scmd->result = DID_RESET << 16; | 4462 | scmd->result = DID_RESET << 16; |
@@ -9739,6 +9742,10 @@ static void scsih_remove(struct pci_dev *pdev) | |||
9739 | unsigned long flags; | 9742 | unsigned long flags; |
9740 | 9743 | ||
9741 | ioc->remove_host = 1; | 9744 | ioc->remove_host = 1; |
9745 | |||
9746 | mpt3sas_wait_for_commands_to_complete(ioc); | ||
9747 | _scsih_flush_running_cmds(ioc); | ||
9748 | |||
9742 | _scsih_fw_event_cleanup_queue(ioc); | 9749 | _scsih_fw_event_cleanup_queue(ioc); |
9743 | 9750 | ||
9744 | spin_lock_irqsave(&ioc->fw_event_lock, flags); | 9751 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
@@ -9815,6 +9822,10 @@ scsih_shutdown(struct pci_dev *pdev) | |||
9815 | unsigned long flags; | 9822 | unsigned long flags; |
9816 | 9823 | ||
9817 | ioc->remove_host = 1; | 9824 | ioc->remove_host = 1; |
9825 | |||
9826 | mpt3sas_wait_for_commands_to_complete(ioc); | ||
9827 | _scsih_flush_running_cmds(ioc); | ||
9828 | |||
9818 | _scsih_fw_event_cleanup_queue(ioc); | 9829 | _scsih_fw_event_cleanup_queue(ioc); |
9819 | 9830 | ||
9820 | spin_lock_irqsave(&ioc->fw_event_lock, flags); | 9831 | spin_lock_irqsave(&ioc->fw_event_lock, flags); |
@@ -10547,7 +10558,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
10547 | snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), | 10558 | snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), |
10548 | "fw_event_%s%d", ioc->driver_name, ioc->id); | 10559 | "fw_event_%s%d", ioc->driver_name, ioc->id); |
10549 | ioc->firmware_event_thread = alloc_ordered_workqueue( | 10560 | ioc->firmware_event_thread = alloc_ordered_workqueue( |
10550 | ioc->firmware_event_name, WQ_MEM_RECLAIM); | 10561 | ioc->firmware_event_name, 0); |
10551 | if (!ioc->firmware_event_thread) { | 10562 | if (!ioc->firmware_event_thread) { |
10552 | pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", | 10563 | pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", |
10553 | ioc->name, __FILE__, __LINE__, __func__); | 10564 | ioc->name, __FILE__, __LINE__, __func__); |
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 667d7697ba01..d09afe1b567d 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c | |||
@@ -762,6 +762,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, | |||
762 | 762 | ||
763 | iscsi_cid = cqe->conn_id; | 763 | iscsi_cid = cqe->conn_id; |
764 | qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; | 764 | qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; |
765 | if (!qedi_conn) { | ||
766 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, | ||
767 | "icid not found 0x%x\n", cqe->conn_id); | ||
768 | return; | ||
769 | } | ||
765 | 770 | ||
766 | /* Based on this itt get the corresponding qedi_cmd */ | 771 | /* Based on this itt get the corresponding qedi_cmd */ |
767 | spin_lock_bh(&qedi_conn->tmf_work_lock); | 772 | spin_lock_bh(&qedi_conn->tmf_work_lock); |
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 029e2e69b29f..f57a94b4f0d9 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c | |||
@@ -1724,7 +1724,6 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf) | |||
1724 | { | 1724 | { |
1725 | struct qedi_ctx *qedi = data; | 1725 | struct qedi_ctx *qedi = data; |
1726 | struct nvm_iscsi_initiator *initiator; | 1726 | struct nvm_iscsi_initiator *initiator; |
1727 | char *str = buf; | ||
1728 | int rc = 1; | 1727 | int rc = 1; |
1729 | u32 ipv6_en, dhcp_en, ip_len; | 1728 | u32 ipv6_en, dhcp_en, ip_len; |
1730 | struct nvm_iscsi_block *block; | 1729 | struct nvm_iscsi_block *block; |
@@ -1758,32 +1757,32 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf) | |||
1758 | 1757 | ||
1759 | switch (type) { | 1758 | switch (type) { |
1760 | case ISCSI_BOOT_ETH_IP_ADDR: | 1759 | case ISCSI_BOOT_ETH_IP_ADDR: |
1761 | rc = snprintf(str, ip_len, fmt, ip); | 1760 | rc = snprintf(buf, ip_len, fmt, ip); |
1762 | break; | 1761 | break; |
1763 | case ISCSI_BOOT_ETH_SUBNET_MASK: | 1762 | case ISCSI_BOOT_ETH_SUBNET_MASK: |
1764 | rc = snprintf(str, ip_len, fmt, sub); | 1763 | rc = snprintf(buf, ip_len, fmt, sub); |
1765 | break; | 1764 | break; |
1766 | case ISCSI_BOOT_ETH_GATEWAY: | 1765 | case ISCSI_BOOT_ETH_GATEWAY: |
1767 | rc = snprintf(str, ip_len, fmt, gw); | 1766 | rc = snprintf(buf, ip_len, fmt, gw); |
1768 | break; | 1767 | break; |
1769 | case ISCSI_BOOT_ETH_FLAGS: | 1768 | case ISCSI_BOOT_ETH_FLAGS: |
1770 | rc = snprintf(str, 3, "%hhd\n", | 1769 | rc = snprintf(buf, 3, "%hhd\n", |
1771 | SYSFS_FLAG_FW_SEL_BOOT); | 1770 | SYSFS_FLAG_FW_SEL_BOOT); |
1772 | break; | 1771 | break; |
1773 | case ISCSI_BOOT_ETH_INDEX: | 1772 | case ISCSI_BOOT_ETH_INDEX: |
1774 | rc = snprintf(str, 3, "0\n"); | 1773 | rc = snprintf(buf, 3, "0\n"); |
1775 | break; | 1774 | break; |
1776 | case ISCSI_BOOT_ETH_MAC: | 1775 | case ISCSI_BOOT_ETH_MAC: |
1777 | rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN); | 1776 | rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN); |
1778 | break; | 1777 | break; |
1779 | case ISCSI_BOOT_ETH_VLAN: | 1778 | case ISCSI_BOOT_ETH_VLAN: |
1780 | rc = snprintf(str, 12, "%d\n", | 1779 | rc = snprintf(buf, 12, "%d\n", |
1781 | GET_FIELD2(initiator->generic_cont0, | 1780 | GET_FIELD2(initiator->generic_cont0, |
1782 | NVM_ISCSI_CFG_INITIATOR_VLAN)); | 1781 | NVM_ISCSI_CFG_INITIATOR_VLAN)); |
1783 | break; | 1782 | break; |
1784 | case ISCSI_BOOT_ETH_ORIGIN: | 1783 | case ISCSI_BOOT_ETH_ORIGIN: |
1785 | if (dhcp_en) | 1784 | if (dhcp_en) |
1786 | rc = snprintf(str, 3, "3\n"); | 1785 | rc = snprintf(buf, 3, "3\n"); |
1787 | break; | 1786 | break; |
1788 | default: | 1787 | default: |
1789 | rc = 0; | 1788 | rc = 0; |
@@ -1819,7 +1818,6 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf) | |||
1819 | { | 1818 | { |
1820 | struct qedi_ctx *qedi = data; | 1819 | struct qedi_ctx *qedi = data; |
1821 | struct nvm_iscsi_initiator *initiator; | 1820 | struct nvm_iscsi_initiator *initiator; |
1822 | char *str = buf; | ||
1823 | int rc; | 1821 | int rc; |
1824 | struct nvm_iscsi_block *block; | 1822 | struct nvm_iscsi_block *block; |
1825 | 1823 | ||
@@ -1831,8 +1829,8 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf) | |||
1831 | 1829 | ||
1832 | switch (type) { | 1830 | switch (type) { |
1833 | case ISCSI_BOOT_INI_INITIATOR_NAME: | 1831 | case ISCSI_BOOT_INI_INITIATOR_NAME: |
1834 | rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", | 1832 | rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, |
1835 | initiator->initiator_name.byte); | 1833 | initiator->initiator_name.byte); |
1836 | break; | 1834 | break; |
1837 | default: | 1835 | default: |
1838 | rc = 0; | 1836 | rc = 0; |
@@ -1860,7 +1858,6 @@ static ssize_t | |||
1860 | qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, | 1858 | qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, |
1861 | char *buf, enum qedi_nvm_tgts idx) | 1859 | char *buf, enum qedi_nvm_tgts idx) |
1862 | { | 1860 | { |
1863 | char *str = buf; | ||
1864 | int rc = 1; | 1861 | int rc = 1; |
1865 | u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len; | 1862 | u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len; |
1866 | struct nvm_iscsi_block *block; | 1863 | struct nvm_iscsi_block *block; |
@@ -1899,48 +1896,48 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, | |||
1899 | 1896 | ||
1900 | switch (type) { | 1897 | switch (type) { |
1901 | case ISCSI_BOOT_TGT_NAME: | 1898 | case ISCSI_BOOT_TGT_NAME: |
1902 | rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", | 1899 | rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, |
1903 | block->target[idx].target_name.byte); | 1900 | block->target[idx].target_name.byte); |
1904 | break; | 1901 | break; |
1905 | case ISCSI_BOOT_TGT_IP_ADDR: | 1902 | case ISCSI_BOOT_TGT_IP_ADDR: |
1906 | if (ipv6_en) | 1903 | if (ipv6_en) |
1907 | rc = snprintf(str, ip_len, "%pI6\n", | 1904 | rc = snprintf(buf, ip_len, "%pI6\n", |
1908 | block->target[idx].ipv6_addr.byte); | 1905 | block->target[idx].ipv6_addr.byte); |
1909 | else | 1906 | else |
1910 | rc = snprintf(str, ip_len, "%pI4\n", | 1907 | rc = snprintf(buf, ip_len, "%pI4\n", |
1911 | block->target[idx].ipv4_addr.byte); | 1908 | block->target[idx].ipv4_addr.byte); |
1912 | break; | 1909 | break; |
1913 | case ISCSI_BOOT_TGT_PORT: | 1910 | case ISCSI_BOOT_TGT_PORT: |
1914 | rc = snprintf(str, 12, "%d\n", | 1911 | rc = snprintf(buf, 12, "%d\n", |
1915 | GET_FIELD2(block->target[idx].generic_cont0, | 1912 | GET_FIELD2(block->target[idx].generic_cont0, |
1916 | NVM_ISCSI_CFG_TARGET_TCP_PORT)); | 1913 | NVM_ISCSI_CFG_TARGET_TCP_PORT)); |
1917 | break; | 1914 | break; |
1918 | case ISCSI_BOOT_TGT_LUN: | 1915 | case ISCSI_BOOT_TGT_LUN: |
1919 | rc = snprintf(str, 22, "%.*d\n", | 1916 | rc = snprintf(buf, 22, "%.*d\n", |
1920 | block->target[idx].lun.value[1], | 1917 | block->target[idx].lun.value[1], |
1921 | block->target[idx].lun.value[0]); | 1918 | block->target[idx].lun.value[0]); |
1922 | break; | 1919 | break; |
1923 | case ISCSI_BOOT_TGT_CHAP_NAME: | 1920 | case ISCSI_BOOT_TGT_CHAP_NAME: |
1924 | rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", | 1921 | rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, |
1925 | chap_name); | 1922 | chap_name); |
1926 | break; | 1923 | break; |
1927 | case ISCSI_BOOT_TGT_CHAP_SECRET: | 1924 | case ISCSI_BOOT_TGT_CHAP_SECRET: |
1928 | rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", | 1925 | rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, |
1929 | chap_secret); | 1926 | chap_secret); |
1930 | break; | 1927 | break; |
1931 | case ISCSI_BOOT_TGT_REV_CHAP_NAME: | 1928 | case ISCSI_BOOT_TGT_REV_CHAP_NAME: |
1932 | rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", | 1929 | rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, |
1933 | mchap_name); | 1930 | mchap_name); |
1934 | break; | 1931 | break; |
1935 | case ISCSI_BOOT_TGT_REV_CHAP_SECRET: | 1932 | case ISCSI_BOOT_TGT_REV_CHAP_SECRET: |
1936 | rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", | 1933 | rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, |
1937 | mchap_secret); | 1934 | mchap_secret); |
1938 | break; | 1935 | break; |
1939 | case ISCSI_BOOT_TGT_FLAGS: | 1936 | case ISCSI_BOOT_TGT_FLAGS: |
1940 | rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT); | 1937 | rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT); |
1941 | break; | 1938 | break; |
1942 | case ISCSI_BOOT_TGT_NIC_ASSOC: | 1939 | case ISCSI_BOOT_TGT_NIC_ASSOC: |
1943 | rc = snprintf(str, 3, "0\n"); | 1940 | rc = snprintf(buf, 3, "0\n"); |
1944 | break; | 1941 | break; |
1945 | default: | 1942 | default: |
1946 | rc = 0; | 1943 | rc = 0; |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index be7d6824581a..c9689f97c307 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -261,9 +261,9 @@ | |||
261 | struct name_list_extended { | 261 | struct name_list_extended { |
262 | struct get_name_list_extended *l; | 262 | struct get_name_list_extended *l; |
263 | dma_addr_t ldma; | 263 | dma_addr_t ldma; |
264 | struct list_head fcports; /* protect by sess_list */ | 264 | struct list_head fcports; |
265 | spinlock_t fcports_lock; | ||
265 | u32 size; | 266 | u32 size; |
266 | u8 sent; | ||
267 | }; | 267 | }; |
268 | /* | 268 | /* |
269 | * Timeout timer counts in seconds | 269 | * Timeout timer counts in seconds |
@@ -2217,6 +2217,7 @@ typedef struct { | |||
2217 | 2217 | ||
2218 | /* FCP-4 types */ | 2218 | /* FCP-4 types */ |
2219 | #define FC4_TYPE_FCP_SCSI 0x08 | 2219 | #define FC4_TYPE_FCP_SCSI 0x08 |
2220 | #define FC4_TYPE_NVME 0x28 | ||
2220 | #define FC4_TYPE_OTHER 0x0 | 2221 | #define FC4_TYPE_OTHER 0x0 |
2221 | #define FC4_TYPE_UNKNOWN 0xff | 2222 | #define FC4_TYPE_UNKNOWN 0xff |
2222 | 2223 | ||
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 5bf9a59432f6..403fa096f8c8 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c | |||
@@ -3179,6 +3179,7 @@ done_free_sp: | |||
3179 | sp->free(sp); | 3179 | sp->free(sp); |
3180 | fcport->flags &= ~FCF_ASYNC_SENT; | 3180 | fcport->flags &= ~FCF_ASYNC_SENT; |
3181 | done: | 3181 | done: |
3182 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
3182 | return rval; | 3183 | return rval; |
3183 | } | 3184 | } |
3184 | 3185 | ||
@@ -3370,6 +3371,7 @@ done_free_sp: | |||
3370 | sp->free(sp); | 3371 | sp->free(sp); |
3371 | fcport->flags &= ~FCF_ASYNC_SENT; | 3372 | fcport->flags &= ~FCF_ASYNC_SENT; |
3372 | done: | 3373 | done: |
3374 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
3373 | return rval; | 3375 | return rval; |
3374 | } | 3376 | } |
3375 | 3377 | ||
@@ -3971,6 +3973,9 @@ out: | |||
3971 | spin_lock_irqsave(&vha->work_lock, flags); | 3973 | spin_lock_irqsave(&vha->work_lock, flags); |
3972 | vha->scan.scan_flags &= ~SF_SCANNING; | 3974 | vha->scan.scan_flags &= ~SF_SCANNING; |
3973 | spin_unlock_irqrestore(&vha->work_lock, flags); | 3975 | spin_unlock_irqrestore(&vha->work_lock, flags); |
3976 | |||
3977 | if ((fc4type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled) | ||
3978 | qla24xx_async_gpnft(vha, FC4_TYPE_NVME); | ||
3974 | } | 3979 | } |
3975 | 3980 | ||
3976 | static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) | 3981 | static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index aececf664654..8d7fab3cd01d 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -59,8 +59,6 @@ qla2x00_sp_timeout(struct timer_list *t) | |||
59 | req->outstanding_cmds[sp->handle] = NULL; | 59 | req->outstanding_cmds[sp->handle] = NULL; |
60 | iocb = &sp->u.iocb_cmd; | 60 | iocb = &sp->u.iocb_cmd; |
61 | iocb->timeout(sp); | 61 | iocb->timeout(sp); |
62 | if (sp->type != SRB_ELS_DCMD) | ||
63 | sp->free(sp); | ||
64 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); | 62 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); |
65 | } | 63 | } |
66 | 64 | ||
@@ -102,7 +100,6 @@ qla2x00_async_iocb_timeout(void *data) | |||
102 | srb_t *sp = data; | 100 | srb_t *sp = data; |
103 | fc_port_t *fcport = sp->fcport; | 101 | fc_port_t *fcport = sp->fcport; |
104 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 102 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
105 | struct event_arg ea; | ||
106 | 103 | ||
107 | if (fcport) { | 104 | if (fcport) { |
108 | ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, | 105 | ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, |
@@ -117,25 +114,13 @@ qla2x00_async_iocb_timeout(void *data) | |||
117 | 114 | ||
118 | switch (sp->type) { | 115 | switch (sp->type) { |
119 | case SRB_LOGIN_CMD: | 116 | case SRB_LOGIN_CMD: |
120 | if (!fcport) | ||
121 | break; | ||
122 | /* Retry as needed. */ | 117 | /* Retry as needed. */ |
123 | lio->u.logio.data[0] = MBS_COMMAND_ERROR; | 118 | lio->u.logio.data[0] = MBS_COMMAND_ERROR; |
124 | lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? | 119 | lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? |
125 | QLA_LOGIO_LOGIN_RETRIED : 0; | 120 | QLA_LOGIO_LOGIN_RETRIED : 0; |
126 | memset(&ea, 0, sizeof(ea)); | 121 | sp->done(sp, QLA_FUNCTION_TIMEOUT); |
127 | ea.event = FCME_PLOGI_DONE; | ||
128 | ea.fcport = sp->fcport; | ||
129 | ea.data[0] = lio->u.logio.data[0]; | ||
130 | ea.data[1] = lio->u.logio.data[1]; | ||
131 | ea.sp = sp; | ||
132 | qla24xx_handle_plogi_done_event(fcport->vha, &ea); | ||
133 | break; | 122 | break; |
134 | case SRB_LOGOUT_CMD: | 123 | case SRB_LOGOUT_CMD: |
135 | if (!fcport) | ||
136 | break; | ||
137 | qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT); | ||
138 | break; | ||
139 | case SRB_CT_PTHRU_CMD: | 124 | case SRB_CT_PTHRU_CMD: |
140 | case SRB_MB_IOCB: | 125 | case SRB_MB_IOCB: |
141 | case SRB_NACK_PLOGI: | 126 | case SRB_NACK_PLOGI: |
@@ -228,6 +213,7 @@ done_free_sp: | |||
228 | sp->free(sp); | 213 | sp->free(sp); |
229 | fcport->flags &= ~FCF_ASYNC_SENT; | 214 | fcport->flags &= ~FCF_ASYNC_SENT; |
230 | done: | 215 | done: |
216 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
231 | return rval; | 217 | return rval; |
232 | } | 218 | } |
233 | 219 | ||
@@ -235,12 +221,10 @@ static void | |||
235 | qla2x00_async_logout_sp_done(void *ptr, int res) | 221 | qla2x00_async_logout_sp_done(void *ptr, int res) |
236 | { | 222 | { |
237 | srb_t *sp = ptr; | 223 | srb_t *sp = ptr; |
238 | struct srb_iocb *lio = &sp->u.iocb_cmd; | ||
239 | 224 | ||
240 | sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); | 225 | sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
241 | if (!test_bit(UNLOADING, &sp->vha->dpc_flags)) | 226 | sp->fcport->login_gen++; |
242 | qla2x00_post_async_logout_done_work(sp->vha, sp->fcport, | 227 | qlt_logo_completion_handler(sp->fcport, res); |
243 | lio->u.logio.data); | ||
244 | sp->free(sp); | 228 | sp->free(sp); |
245 | } | 229 | } |
246 | 230 | ||
@@ -280,7 +264,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
280 | done_free_sp: | 264 | done_free_sp: |
281 | sp->free(sp); | 265 | sp->free(sp); |
282 | done: | 266 | done: |
283 | fcport->flags &= ~FCF_ASYNC_SENT; | 267 | fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
284 | return rval; | 268 | return rval; |
285 | } | 269 | } |
286 | 270 | ||
@@ -288,6 +272,7 @@ void | |||
288 | qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport, | 272 | qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport, |
289 | uint16_t *data) | 273 | uint16_t *data) |
290 | { | 274 | { |
275 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
291 | /* Don't re-login in target mode */ | 276 | /* Don't re-login in target mode */ |
292 | if (!fcport->tgt_session) | 277 | if (!fcport->tgt_session) |
293 | qla2x00_mark_device_lost(vha, fcport, 1, 0); | 278 | qla2x00_mark_device_lost(vha, fcport, 1, 0); |
@@ -301,6 +286,7 @@ qla2x00_async_prlo_sp_done(void *s, int res) | |||
301 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 286 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
302 | struct scsi_qla_host *vha = sp->vha; | 287 | struct scsi_qla_host *vha = sp->vha; |
303 | 288 | ||
289 | sp->fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
304 | if (!test_bit(UNLOADING, &vha->dpc_flags)) | 290 | if (!test_bit(UNLOADING, &vha->dpc_flags)) |
305 | qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport, | 291 | qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport, |
306 | lio->u.logio.data); | 292 | lio->u.logio.data); |
@@ -339,6 +325,7 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
339 | done_free_sp: | 325 | done_free_sp: |
340 | sp->free(sp); | 326 | sp->free(sp); |
341 | done: | 327 | done: |
328 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
342 | return rval; | 329 | return rval; |
343 | } | 330 | } |
344 | 331 | ||
@@ -392,6 +379,8 @@ qla2x00_async_adisc_sp_done(void *ptr, int res) | |||
392 | "Async done-%s res %x %8phC\n", | 379 | "Async done-%s res %x %8phC\n", |
393 | sp->name, res, sp->fcport->port_name); | 380 | sp->name, res, sp->fcport->port_name); |
394 | 381 | ||
382 | sp->fcport->flags &= ~FCF_ASYNC_SENT; | ||
383 | |||
395 | memset(&ea, 0, sizeof(ea)); | 384 | memset(&ea, 0, sizeof(ea)); |
396 | ea.event = FCME_ADISC_DONE; | 385 | ea.event = FCME_ADISC_DONE; |
397 | ea.rc = res; | 386 | ea.rc = res; |
@@ -442,7 +431,7 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
442 | done_free_sp: | 431 | done_free_sp: |
443 | sp->free(sp); | 432 | sp->free(sp); |
444 | done: | 433 | done: |
445 | fcport->flags &= ~FCF_ASYNC_SENT; | 434 | fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
446 | qla2x00_post_async_adisc_work(vha, fcport, data); | 435 | qla2x00_post_async_adisc_work(vha, fcport, data); |
447 | return rval; | 436 | return rval; |
448 | } | 437 | } |
@@ -660,8 +649,7 @@ qla24xx_async_gnl_sp_done(void *s, int res) | |||
660 | (loop_id & 0x7fff)); | 649 | (loop_id & 0x7fff)); |
661 | } | 650 | } |
662 | 651 | ||
663 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); | 652 | spin_lock_irqsave(&vha->gnl.fcports_lock, flags); |
664 | vha->gnl.sent = 0; | ||
665 | 653 | ||
666 | INIT_LIST_HEAD(&h); | 654 | INIT_LIST_HEAD(&h); |
667 | fcport = tf = NULL; | 655 | fcport = tf = NULL; |
@@ -670,12 +658,16 @@ qla24xx_async_gnl_sp_done(void *s, int res) | |||
670 | 658 | ||
671 | list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { | 659 | list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { |
672 | list_del_init(&fcport->gnl_entry); | 660 | list_del_init(&fcport->gnl_entry); |
661 | spin_lock(&vha->hw->tgt.sess_lock); | ||
673 | fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); | 662 | fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
663 | spin_unlock(&vha->hw->tgt.sess_lock); | ||
674 | ea.fcport = fcport; | 664 | ea.fcport = fcport; |
675 | 665 | ||
676 | qla2x00_fcport_event_handler(vha, &ea); | 666 | qla2x00_fcport_event_handler(vha, &ea); |
677 | } | 667 | } |
668 | spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); | ||
678 | 669 | ||
670 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); | ||
679 | /* create new fcport if fw has knowledge of new sessions */ | 671 | /* create new fcport if fw has knowledge of new sessions */ |
680 | for (i = 0; i < n; i++) { | 672 | for (i = 0; i < n; i++) { |
681 | port_id_t id; | 673 | port_id_t id; |
@@ -727,18 +719,21 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
727 | ql_dbg(ql_dbg_disc, vha, 0x20d9, | 719 | ql_dbg(ql_dbg_disc, vha, 0x20d9, |
728 | "Async-gnlist WWPN %8phC \n", fcport->port_name); | 720 | "Async-gnlist WWPN %8phC \n", fcport->port_name); |
729 | 721 | ||
730 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); | 722 | spin_lock_irqsave(&vha->gnl.fcports_lock, flags); |
723 | if (!list_empty(&fcport->gnl_entry)) { | ||
724 | spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); | ||
725 | rval = QLA_SUCCESS; | ||
726 | goto done; | ||
727 | } | ||
728 | |||
729 | spin_lock(&vha->hw->tgt.sess_lock); | ||
731 | fcport->disc_state = DSC_GNL; | 730 | fcport->disc_state = DSC_GNL; |
732 | fcport->last_rscn_gen = fcport->rscn_gen; | 731 | fcport->last_rscn_gen = fcport->rscn_gen; |
733 | fcport->last_login_gen = fcport->login_gen; | 732 | fcport->last_login_gen = fcport->login_gen; |
733 | spin_unlock(&vha->hw->tgt.sess_lock); | ||
734 | 734 | ||
735 | list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); | 735 | list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); |
736 | if (vha->gnl.sent) { | 736 | spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); |
737 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); | ||
738 | return QLA_SUCCESS; | ||
739 | } | ||
740 | vha->gnl.sent = 1; | ||
741 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); | ||
742 | 737 | ||
743 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); | 738 | sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); |
744 | if (!sp) | 739 | if (!sp) |
@@ -1066,6 +1061,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) | |||
1066 | fc_port_t *fcport = ea->fcport; | 1061 | fc_port_t *fcport = ea->fcport; |
1067 | struct port_database_24xx *pd; | 1062 | struct port_database_24xx *pd; |
1068 | struct srb *sp = ea->sp; | 1063 | struct srb *sp = ea->sp; |
1064 | uint8_t ls; | ||
1069 | 1065 | ||
1070 | pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; | 1066 | pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; |
1071 | 1067 | ||
@@ -1078,7 +1074,12 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) | |||
1078 | if (fcport->disc_state == DSC_DELETE_PEND) | 1074 | if (fcport->disc_state == DSC_DELETE_PEND) |
1079 | return; | 1075 | return; |
1080 | 1076 | ||
1081 | switch (pd->current_login_state) { | 1077 | if (fcport->fc4f_nvme) |
1078 | ls = pd->current_login_state >> 4; | ||
1079 | else | ||
1080 | ls = pd->current_login_state & 0xf; | ||
1081 | |||
1082 | switch (ls) { | ||
1082 | case PDS_PRLI_COMPLETE: | 1083 | case PDS_PRLI_COMPLETE: |
1083 | __qla24xx_parse_gpdb(vha, fcport, pd); | 1084 | __qla24xx_parse_gpdb(vha, fcport, pd); |
1084 | break; | 1085 | break; |
@@ -1168,8 +1169,9 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
1168 | if (fcport->scan_state != QLA_FCPORT_FOUND) | 1169 | if (fcport->scan_state != QLA_FCPORT_FOUND) |
1169 | return 0; | 1170 | return 0; |
1170 | 1171 | ||
1171 | if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || | 1172 | if ((fcport->loop_id != FC_NO_LOOP_ID) && |
1172 | (fcport->fw_login_state == DSC_LS_PRLI_PEND)) | 1173 | ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || |
1174 | (fcport->fw_login_state == DSC_LS_PRLI_PEND))) | ||
1173 | return 0; | 1175 | return 0; |
1174 | 1176 | ||
1175 | if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { | 1177 | if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { |
@@ -1544,6 +1546,7 @@ qla24xx_abort_sp_done(void *ptr, int res) | |||
1544 | srb_t *sp = ptr; | 1546 | srb_t *sp = ptr; |
1545 | struct srb_iocb *abt = &sp->u.iocb_cmd; | 1547 | struct srb_iocb *abt = &sp->u.iocb_cmd; |
1546 | 1548 | ||
1549 | del_timer(&sp->u.iocb_cmd.timer); | ||
1547 | complete(&abt->u.abt.comp); | 1550 | complete(&abt->u.abt.comp); |
1548 | } | 1551 | } |
1549 | 1552 | ||
@@ -1716,7 +1719,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) | |||
1716 | 1719 | ||
1717 | set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); | 1720 | set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); |
1718 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); | 1721 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); |
1719 | ea->fcport->loop_id = FC_NO_LOOP_ID; | ||
1720 | ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; | 1722 | ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; |
1721 | ea->fcport->logout_on_delete = 1; | 1723 | ea->fcport->logout_on_delete = 1; |
1722 | ea->fcport->send_els_logo = 0; | 1724 | ea->fcport->send_els_logo = 0; |
@@ -1808,6 +1810,7 @@ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
1808 | qla2x00_mark_device_lost(vha, fcport, 1, 0); | 1810 | qla2x00_mark_device_lost(vha, fcport, 1, 0); |
1809 | qlt_logo_completion_handler(fcport, data[0]); | 1811 | qlt_logo_completion_handler(fcport, data[0]); |
1810 | fcport->login_gen++; | 1812 | fcport->login_gen++; |
1813 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | ||
1811 | return; | 1814 | return; |
1812 | } | 1815 | } |
1813 | 1816 | ||
@@ -1815,6 +1818,7 @@ void | |||
1815 | qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport, | 1818 | qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport, |
1816 | uint16_t *data) | 1819 | uint16_t *data) |
1817 | { | 1820 | { |
1821 | fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); | ||
1818 | if (data[0] == MBS_COMMAND_COMPLETE) { | 1822 | if (data[0] == MBS_COMMAND_COMPLETE) { |
1819 | qla2x00_update_fcport(vha, fcport); | 1823 | qla2x00_update_fcport(vha, fcport); |
1820 | 1824 | ||
@@ -1822,7 +1826,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
1822 | } | 1826 | } |
1823 | 1827 | ||
1824 | /* Retry login. */ | 1828 | /* Retry login. */ |
1825 | fcport->flags &= ~FCF_ASYNC_SENT; | ||
1826 | if (data[1] & QLA_LOGIO_LOGIN_RETRIED) | 1829 | if (data[1] & QLA_LOGIO_LOGIN_RETRIED) |
1827 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | 1830 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); |
1828 | else | 1831 | else |
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 1b62e943ec49..8d00d559bd26 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
@@ -3275,12 +3275,11 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) | |||
3275 | memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); | 3275 | memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); |
3276 | abt_iocb->entry_type = ABORT_IOCB_TYPE; | 3276 | abt_iocb->entry_type = ABORT_IOCB_TYPE; |
3277 | abt_iocb->entry_count = 1; | 3277 | abt_iocb->entry_count = 1; |
3278 | abt_iocb->handle = | 3278 | abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); |
3279 | cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no, | ||
3280 | aio->u.abt.cmd_hndl)); | ||
3281 | abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 3279 | abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
3282 | abt_iocb->handle_to_abort = | 3280 | abt_iocb->handle_to_abort = |
3283 | cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl)); | 3281 | cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no, |
3282 | aio->u.abt.cmd_hndl)); | ||
3284 | abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; | 3283 | abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; |
3285 | abt_iocb->port_id[1] = sp->fcport->d_id.b.area; | 3284 | abt_iocb->port_id[1] = sp->fcport->d_id.b.area; |
3286 | abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; | 3285 | abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 14109d86c3f6..89f93ebd819d 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -272,7 +272,8 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | |||
272 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; | 272 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
273 | 273 | ||
274 | /* Read all mbox registers? */ | 274 | /* Read all mbox registers? */ |
275 | mboxes = (1 << ha->mbx_count) - 1; | 275 | WARN_ON_ONCE(ha->mbx_count > 32); |
276 | mboxes = (1ULL << ha->mbx_count) - 1; | ||
276 | if (!ha->mcp) | 277 | if (!ha->mcp) |
277 | ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); | 278 | ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); |
278 | else | 279 | else |
@@ -2880,7 +2881,8 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) | |||
2880 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; | 2881 | struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; |
2881 | 2882 | ||
2882 | /* Read all mbox registers? */ | 2883 | /* Read all mbox registers? */ |
2883 | mboxes = (1 << ha->mbx_count) - 1; | 2884 | WARN_ON_ONCE(ha->mbx_count > 32); |
2885 | mboxes = (1ULL << ha->mbx_count) - 1; | ||
2884 | if (!ha->mcp) | 2886 | if (!ha->mcp) |
2885 | ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); | 2887 | ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); |
2886 | else | 2888 | else |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 12ee6e02d146..5c5dcca4d1da 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -454,7 +454,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req, | |||
454 | ha->req_q_map[0] = req; | 454 | ha->req_q_map[0] = req; |
455 | set_bit(0, ha->rsp_qid_map); | 455 | set_bit(0, ha->rsp_qid_map); |
456 | set_bit(0, ha->req_qid_map); | 456 | set_bit(0, ha->req_qid_map); |
457 | return 1; | 457 | return 0; |
458 | 458 | ||
459 | fail_qpair_map: | 459 | fail_qpair_map: |
460 | kfree(ha->base_qpair); | 460 | kfree(ha->base_qpair); |
@@ -471,6 +471,9 @@ fail_req_map: | |||
471 | 471 | ||
472 | static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) | 472 | static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) |
473 | { | 473 | { |
474 | if (!ha->req_q_map) | ||
475 | return; | ||
476 | |||
474 | if (IS_QLAFX00(ha)) { | 477 | if (IS_QLAFX00(ha)) { |
475 | if (req && req->ring_fx00) | 478 | if (req && req->ring_fx00) |
476 | dma_free_coherent(&ha->pdev->dev, | 479 | dma_free_coherent(&ha->pdev->dev, |
@@ -481,14 +484,17 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) | |||
481 | (req->length + 1) * sizeof(request_t), | 484 | (req->length + 1) * sizeof(request_t), |
482 | req->ring, req->dma); | 485 | req->ring, req->dma); |
483 | 486 | ||
484 | if (req) | 487 | if (req) { |
485 | kfree(req->outstanding_cmds); | 488 | kfree(req->outstanding_cmds); |
486 | 489 | kfree(req); | |
487 | kfree(req); | 490 | } |
488 | } | 491 | } |
489 | 492 | ||
490 | static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) | 493 | static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) |
491 | { | 494 | { |
495 | if (!ha->rsp_q_map) | ||
496 | return; | ||
497 | |||
492 | if (IS_QLAFX00(ha)) { | 498 | if (IS_QLAFX00(ha)) { |
493 | if (rsp && rsp->ring) | 499 | if (rsp && rsp->ring) |
494 | dma_free_coherent(&ha->pdev->dev, | 500 | dma_free_coherent(&ha->pdev->dev, |
@@ -499,7 +505,8 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) | |||
499 | (rsp->length + 1) * sizeof(response_t), | 505 | (rsp->length + 1) * sizeof(response_t), |
500 | rsp->ring, rsp->dma); | 506 | rsp->ring, rsp->dma); |
501 | } | 507 | } |
502 | kfree(rsp); | 508 | if (rsp) |
509 | kfree(rsp); | ||
503 | } | 510 | } |
504 | 511 | ||
505 | static void qla2x00_free_queues(struct qla_hw_data *ha) | 512 | static void qla2x00_free_queues(struct qla_hw_data *ha) |
@@ -1723,6 +1730,8 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) | |||
1723 | struct qla_tgt_cmd *cmd; | 1730 | struct qla_tgt_cmd *cmd; |
1724 | uint8_t trace = 0; | 1731 | uint8_t trace = 0; |
1725 | 1732 | ||
1733 | if (!ha->req_q_map) | ||
1734 | return; | ||
1726 | spin_lock_irqsave(qp->qp_lock_ptr, flags); | 1735 | spin_lock_irqsave(qp->qp_lock_ptr, flags); |
1727 | req = qp->req; | 1736 | req = qp->req; |
1728 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { | 1737 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { |
@@ -3095,14 +3104,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
3095 | /* Set up the irqs */ | 3104 | /* Set up the irqs */ |
3096 | ret = qla2x00_request_irqs(ha, rsp); | 3105 | ret = qla2x00_request_irqs(ha, rsp); |
3097 | if (ret) | 3106 | if (ret) |
3098 | goto probe_hw_failed; | 3107 | goto probe_failed; |
3099 | 3108 | ||
3100 | /* Alloc arrays of request and response ring ptrs */ | 3109 | /* Alloc arrays of request and response ring ptrs */ |
3101 | if (!qla2x00_alloc_queues(ha, req, rsp)) { | 3110 | if (qla2x00_alloc_queues(ha, req, rsp)) { |
3102 | ql_log(ql_log_fatal, base_vha, 0x003d, | 3111 | ql_log(ql_log_fatal, base_vha, 0x003d, |
3103 | "Failed to allocate memory for queue pointers..." | 3112 | "Failed to allocate memory for queue pointers..." |
3104 | "aborting.\n"); | 3113 | "aborting.\n"); |
3105 | goto probe_init_failed; | 3114 | goto probe_failed; |
3106 | } | 3115 | } |
3107 | 3116 | ||
3108 | if (ha->mqenable && shost_use_blk_mq(host)) { | 3117 | if (ha->mqenable && shost_use_blk_mq(host)) { |
@@ -3387,15 +3396,6 @@ skip_dpc: | |||
3387 | 3396 | ||
3388 | return 0; | 3397 | return 0; |
3389 | 3398 | ||
3390 | probe_init_failed: | ||
3391 | qla2x00_free_req_que(ha, req); | ||
3392 | ha->req_q_map[0] = NULL; | ||
3393 | clear_bit(0, ha->req_qid_map); | ||
3394 | qla2x00_free_rsp_que(ha, rsp); | ||
3395 | ha->rsp_q_map[0] = NULL; | ||
3396 | clear_bit(0, ha->rsp_qid_map); | ||
3397 | ha->max_req_queues = ha->max_rsp_queues = 0; | ||
3398 | |||
3399 | probe_failed: | 3399 | probe_failed: |
3400 | if (base_vha->timer_active) | 3400 | if (base_vha->timer_active) |
3401 | qla2x00_stop_timer(base_vha); | 3401 | qla2x00_stop_timer(base_vha); |
@@ -3625,6 +3625,8 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
3625 | } | 3625 | } |
3626 | qla2x00_wait_for_hba_ready(base_vha); | 3626 | qla2x00_wait_for_hba_ready(base_vha); |
3627 | 3627 | ||
3628 | qla2x00_wait_for_sess_deletion(base_vha); | ||
3629 | |||
3628 | /* | 3630 | /* |
3629 | * if UNLOAD flag is already set, then continue unload, | 3631 | * if UNLOAD flag is already set, then continue unload, |
3630 | * where it was set first. | 3632 | * where it was set first. |
@@ -4506,11 +4508,17 @@ qla2x00_mem_free(struct qla_hw_data *ha) | |||
4506 | if (ha->init_cb) | 4508 | if (ha->init_cb) |
4507 | dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, | 4509 | dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, |
4508 | ha->init_cb, ha->init_cb_dma); | 4510 | ha->init_cb, ha->init_cb_dma); |
4509 | vfree(ha->optrom_buffer); | 4511 | |
4510 | kfree(ha->nvram); | 4512 | if (ha->optrom_buffer) |
4511 | kfree(ha->npiv_info); | 4513 | vfree(ha->optrom_buffer); |
4512 | kfree(ha->swl); | 4514 | if (ha->nvram) |
4513 | kfree(ha->loop_id_map); | 4515 | kfree(ha->nvram); |
4516 | if (ha->npiv_info) | ||
4517 | kfree(ha->npiv_info); | ||
4518 | if (ha->swl) | ||
4519 | kfree(ha->swl); | ||
4520 | if (ha->loop_id_map) | ||
4521 | kfree(ha->loop_id_map); | ||
4514 | 4522 | ||
4515 | ha->srb_mempool = NULL; | 4523 | ha->srb_mempool = NULL; |
4516 | ha->ctx_mempool = NULL; | 4524 | ha->ctx_mempool = NULL; |
@@ -4526,6 +4534,15 @@ qla2x00_mem_free(struct qla_hw_data *ha) | |||
4526 | ha->ex_init_cb_dma = 0; | 4534 | ha->ex_init_cb_dma = 0; |
4527 | ha->async_pd = NULL; | 4535 | ha->async_pd = NULL; |
4528 | ha->async_pd_dma = 0; | 4536 | ha->async_pd_dma = 0; |
4537 | ha->loop_id_map = NULL; | ||
4538 | ha->npiv_info = NULL; | ||
4539 | ha->optrom_buffer = NULL; | ||
4540 | ha->swl = NULL; | ||
4541 | ha->nvram = NULL; | ||
4542 | ha->mctp_dump = NULL; | ||
4543 | ha->dcbx_tlv = NULL; | ||
4544 | ha->xgmac_data = NULL; | ||
4545 | ha->sfp_data = NULL; | ||
4529 | 4546 | ||
4530 | ha->s_dma_pool = NULL; | 4547 | ha->s_dma_pool = NULL; |
4531 | ha->dl_dma_pool = NULL; | 4548 | ha->dl_dma_pool = NULL; |
@@ -4575,6 +4592,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, | |||
4575 | 4592 | ||
4576 | spin_lock_init(&vha->work_lock); | 4593 | spin_lock_init(&vha->work_lock); |
4577 | spin_lock_init(&vha->cmd_list_lock); | 4594 | spin_lock_init(&vha->cmd_list_lock); |
4595 | spin_lock_init(&vha->gnl.fcports_lock); | ||
4578 | init_waitqueue_head(&vha->fcport_waitQ); | 4596 | init_waitqueue_head(&vha->fcport_waitQ); |
4579 | init_waitqueue_head(&vha->vref_waitq); | 4597 | init_waitqueue_head(&vha->vref_waitq); |
4580 | 4598 | ||
@@ -4804,9 +4822,12 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) | |||
4804 | fcport->d_id = e->u.new_sess.id; | 4822 | fcport->d_id = e->u.new_sess.id; |
4805 | fcport->flags |= FCF_FABRIC_DEVICE; | 4823 | fcport->flags |= FCF_FABRIC_DEVICE; |
4806 | fcport->fw_login_state = DSC_LS_PLOGI_PEND; | 4824 | fcport->fw_login_state = DSC_LS_PLOGI_PEND; |
4807 | if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) | 4825 | if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI) { |
4808 | fcport->fc4_type = FC4_TYPE_FCP_SCSI; | 4826 | fcport->fc4_type = FC4_TYPE_FCP_SCSI; |
4809 | 4827 | } else if (e->u.new_sess.fc4_type == FC4_TYPE_NVME) { | |
4828 | fcport->fc4_type = FC4_TYPE_OTHER; | ||
4829 | fcport->fc4f_nvme = FC4_TYPE_NVME; | ||
4830 | } | ||
4810 | memcpy(fcport->port_name, e->u.new_sess.port_name, | 4831 | memcpy(fcport->port_name, e->u.new_sess.port_name, |
4811 | WWN_SIZE); | 4832 | WWN_SIZE); |
4812 | } else { | 4833 | } else { |
@@ -4875,6 +4896,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) | |||
4875 | } | 4896 | } |
4876 | qlt_plogi_ack_unref(vha, pla); | 4897 | qlt_plogi_ack_unref(vha, pla); |
4877 | } else { | 4898 | } else { |
4899 | fc_port_t *dfcp = NULL; | ||
4900 | |||
4878 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); | 4901 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); |
4879 | tfcp = qla2x00_find_fcport_by_nportid(vha, | 4902 | tfcp = qla2x00_find_fcport_by_nportid(vha, |
4880 | &e->u.new_sess.id, 1); | 4903 | &e->u.new_sess.id, 1); |
@@ -4897,11 +4920,13 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) | |||
4897 | default: | 4920 | default: |
4898 | fcport->login_pause = 1; | 4921 | fcport->login_pause = 1; |
4899 | tfcp->conflict = fcport; | 4922 | tfcp->conflict = fcport; |
4900 | qlt_schedule_sess_for_deletion(tfcp); | 4923 | dfcp = tfcp; |
4901 | break; | 4924 | break; |
4902 | } | 4925 | } |
4903 | } | 4926 | } |
4904 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); | 4927 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); |
4928 | if (dfcp) | ||
4929 | qlt_schedule_sess_for_deletion(tfcp); | ||
4905 | 4930 | ||
4906 | wwn = wwn_to_u64(fcport->node_name); | 4931 | wwn = wwn_to_u64(fcport->node_name); |
4907 | 4932 | ||
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index fc89af8fe256..b49ac85f3de2 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -1224,10 +1224,10 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess) | |||
1224 | } | 1224 | } |
1225 | } | 1225 | } |
1226 | 1226 | ||
1227 | /* ha->tgt.sess_lock supposed to be held on entry */ | ||
1228 | void qlt_schedule_sess_for_deletion(struct fc_port *sess) | 1227 | void qlt_schedule_sess_for_deletion(struct fc_port *sess) |
1229 | { | 1228 | { |
1230 | struct qla_tgt *tgt = sess->tgt; | 1229 | struct qla_tgt *tgt = sess->tgt; |
1230 | struct qla_hw_data *ha = sess->vha->hw; | ||
1231 | unsigned long flags; | 1231 | unsigned long flags; |
1232 | 1232 | ||
1233 | if (sess->disc_state == DSC_DELETE_PEND) | 1233 | if (sess->disc_state == DSC_DELETE_PEND) |
@@ -1244,16 +1244,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) | |||
1244 | return; | 1244 | return; |
1245 | } | 1245 | } |
1246 | 1246 | ||
1247 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); | ||
1247 | if (sess->deleted == QLA_SESS_DELETED) | 1248 | if (sess->deleted == QLA_SESS_DELETED) |
1248 | sess->logout_on_delete = 0; | 1249 | sess->logout_on_delete = 0; |
1249 | 1250 | ||
1250 | spin_lock_irqsave(&sess->vha->work_lock, flags); | ||
1251 | if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { | 1251 | if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { |
1252 | spin_unlock_irqrestore(&sess->vha->work_lock, flags); | 1252 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
1253 | return; | 1253 | return; |
1254 | } | 1254 | } |
1255 | sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; | 1255 | sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; |
1256 | spin_unlock_irqrestore(&sess->vha->work_lock, flags); | 1256 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
1257 | 1257 | ||
1258 | sess->disc_state = DSC_DELETE_PEND; | 1258 | sess->disc_state = DSC_DELETE_PEND; |
1259 | 1259 | ||
@@ -1262,13 +1262,10 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) | |||
1262 | ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, | 1262 | ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, |
1263 | "Scheduling sess %p for deletion\n", sess); | 1263 | "Scheduling sess %p for deletion\n", sess); |
1264 | 1264 | ||
1265 | /* use cancel to push work element through before re-queue */ | ||
1266 | cancel_work_sync(&sess->del_work); | ||
1267 | INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); | 1265 | INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); |
1268 | queue_work(sess->vha->hw->wq, &sess->del_work); | 1266 | WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); |
1269 | } | 1267 | } |
1270 | 1268 | ||
1271 | /* ha->tgt.sess_lock supposed to be held on entry */ | ||
1272 | static void qlt_clear_tgt_db(struct qla_tgt *tgt) | 1269 | static void qlt_clear_tgt_db(struct qla_tgt *tgt) |
1273 | { | 1270 | { |
1274 | struct fc_port *sess; | 1271 | struct fc_port *sess; |
@@ -1451,8 +1448,8 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) | |||
1451 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); | 1448 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); |
1452 | 1449 | ||
1453 | sess->local = 1; | 1450 | sess->local = 1; |
1454 | qlt_schedule_sess_for_deletion(sess); | ||
1455 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); | 1451 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); |
1452 | qlt_schedule_sess_for_deletion(sess); | ||
1456 | } | 1453 | } |
1457 | 1454 | ||
1458 | static inline int test_tgt_sess_count(struct qla_tgt *tgt) | 1455 | static inline int test_tgt_sess_count(struct qla_tgt *tgt) |
@@ -1512,10 +1509,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt) | |||
1512 | * Lock is needed, because we still can get an incoming packet. | 1509 | * Lock is needed, because we still can get an incoming packet. |
1513 | */ | 1510 | */ |
1514 | mutex_lock(&vha->vha_tgt.tgt_mutex); | 1511 | mutex_lock(&vha->vha_tgt.tgt_mutex); |
1515 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); | ||
1516 | tgt->tgt_stop = 1; | 1512 | tgt->tgt_stop = 1; |
1517 | qlt_clear_tgt_db(tgt); | 1513 | qlt_clear_tgt_db(tgt); |
1518 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | ||
1519 | mutex_unlock(&vha->vha_tgt.tgt_mutex); | 1514 | mutex_unlock(&vha->vha_tgt.tgt_mutex); |
1520 | mutex_unlock(&qla_tgt_mutex); | 1515 | mutex_unlock(&qla_tgt_mutex); |
1521 | 1516 | ||
@@ -4871,8 +4866,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, | |||
4871 | sess); | 4866 | sess); |
4872 | qlt_send_term_imm_notif(vha, iocb, 1); | 4867 | qlt_send_term_imm_notif(vha, iocb, 1); |
4873 | res = 0; | 4868 | res = 0; |
4874 | spin_lock_irqsave(&tgt->ha->tgt.sess_lock, | ||
4875 | flags); | ||
4876 | break; | 4869 | break; |
4877 | } | 4870 | } |
4878 | 4871 | ||
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index fc233717355f..817f312023a9 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h | |||
@@ -168,6 +168,8 @@ | |||
168 | #define DEV_DB_NON_PERSISTENT 0 | 168 | #define DEV_DB_NON_PERSISTENT 0 |
169 | #define DEV_DB_PERSISTENT 1 | 169 | #define DEV_DB_PERSISTENT 1 |
170 | 170 | ||
171 | #define QL4_ISP_REG_DISCONNECT 0xffffffffU | ||
172 | |||
171 | #define COPY_ISID(dst_isid, src_isid) { \ | 173 | #define COPY_ISID(dst_isid, src_isid) { \ |
172 | int i, j; \ | 174 | int i, j; \ |
173 | for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \ | 175 | for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \ |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 82e889bbe0ed..fc2c97d9a0d6 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -262,6 +262,24 @@ static struct iscsi_transport qla4xxx_iscsi_transport = { | |||
262 | 262 | ||
263 | static struct scsi_transport_template *qla4xxx_scsi_transport; | 263 | static struct scsi_transport_template *qla4xxx_scsi_transport; |
264 | 264 | ||
265 | static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha) | ||
266 | { | ||
267 | u32 reg_val = 0; | ||
268 | int rval = QLA_SUCCESS; | ||
269 | |||
270 | if (is_qla8022(ha)) | ||
271 | reg_val = readl(&ha->qla4_82xx_reg->host_status); | ||
272 | else if (is_qla8032(ha) || is_qla8042(ha)) | ||
273 | reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); | ||
274 | else | ||
275 | reg_val = readw(&ha->reg->ctrl_status); | ||
276 | |||
277 | if (reg_val == QL4_ISP_REG_DISCONNECT) | ||
278 | rval = QLA_ERROR; | ||
279 | |||
280 | return rval; | ||
281 | } | ||
282 | |||
265 | static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, | 283 | static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num, |
266 | uint32_t iface_type, uint32_t payload_size, | 284 | uint32_t iface_type, uint32_t payload_size, |
267 | uint32_t pid, struct sockaddr *dst_addr) | 285 | uint32_t pid, struct sockaddr *dst_addr) |
@@ -9186,10 +9204,17 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) | |||
9186 | struct srb *srb = NULL; | 9204 | struct srb *srb = NULL; |
9187 | int ret = SUCCESS; | 9205 | int ret = SUCCESS; |
9188 | int wait = 0; | 9206 | int wait = 0; |
9207 | int rval; | ||
9189 | 9208 | ||
9190 | ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", | 9209 | ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", |
9191 | ha->host_no, id, lun, cmd, cmd->cmnd[0]); | 9210 | ha->host_no, id, lun, cmd, cmd->cmnd[0]); |
9192 | 9211 | ||
9212 | rval = qla4xxx_isp_check_reg(ha); | ||
9213 | if (rval != QLA_SUCCESS) { | ||
9214 | ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); | ||
9215 | return FAILED; | ||
9216 | } | ||
9217 | |||
9193 | spin_lock_irqsave(&ha->hardware_lock, flags); | 9218 | spin_lock_irqsave(&ha->hardware_lock, flags); |
9194 | srb = (struct srb *) CMD_SP(cmd); | 9219 | srb = (struct srb *) CMD_SP(cmd); |
9195 | if (!srb) { | 9220 | if (!srb) { |
@@ -9241,6 +9266,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
9241 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); | 9266 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); |
9242 | struct ddb_entry *ddb_entry = cmd->device->hostdata; | 9267 | struct ddb_entry *ddb_entry = cmd->device->hostdata; |
9243 | int ret = FAILED, stat; | 9268 | int ret = FAILED, stat; |
9269 | int rval; | ||
9244 | 9270 | ||
9245 | if (!ddb_entry) | 9271 | if (!ddb_entry) |
9246 | return ret; | 9272 | return ret; |
@@ -9260,6 +9286,12 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) | |||
9260 | cmd, jiffies, cmd->request->timeout / HZ, | 9286 | cmd, jiffies, cmd->request->timeout / HZ, |
9261 | ha->dpc_flags, cmd->result, cmd->allowed)); | 9287 | ha->dpc_flags, cmd->result, cmd->allowed)); |
9262 | 9288 | ||
9289 | rval = qla4xxx_isp_check_reg(ha); | ||
9290 | if (rval != QLA_SUCCESS) { | ||
9291 | ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); | ||
9292 | return FAILED; | ||
9293 | } | ||
9294 | |||
9263 | /* FIXME: wait for hba to go online */ | 9295 | /* FIXME: wait for hba to go online */ |
9264 | stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); | 9296 | stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun); |
9265 | if (stat != QLA_SUCCESS) { | 9297 | if (stat != QLA_SUCCESS) { |
@@ -9303,6 +9335,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) | |||
9303 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); | 9335 | struct scsi_qla_host *ha = to_qla_host(cmd->device->host); |
9304 | struct ddb_entry *ddb_entry = cmd->device->hostdata; | 9336 | struct ddb_entry *ddb_entry = cmd->device->hostdata; |
9305 | int stat, ret; | 9337 | int stat, ret; |
9338 | int rval; | ||
9306 | 9339 | ||
9307 | if (!ddb_entry) | 9340 | if (!ddb_entry) |
9308 | return FAILED; | 9341 | return FAILED; |
@@ -9320,6 +9353,12 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) | |||
9320 | ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, | 9353 | ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, |
9321 | ha->dpc_flags, cmd->result, cmd->allowed)); | 9354 | ha->dpc_flags, cmd->result, cmd->allowed)); |
9322 | 9355 | ||
9356 | rval = qla4xxx_isp_check_reg(ha); | ||
9357 | if (rval != QLA_SUCCESS) { | ||
9358 | ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); | ||
9359 | return FAILED; | ||
9360 | } | ||
9361 | |||
9323 | stat = qla4xxx_reset_target(ha, ddb_entry); | 9362 | stat = qla4xxx_reset_target(ha, ddb_entry); |
9324 | if (stat != QLA_SUCCESS) { | 9363 | if (stat != QLA_SUCCESS) { |
9325 | starget_printk(KERN_INFO, scsi_target(cmd->device), | 9364 | starget_printk(KERN_INFO, scsi_target(cmd->device), |
@@ -9374,9 +9413,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
9374 | { | 9413 | { |
9375 | int return_status = FAILED; | 9414 | int return_status = FAILED; |
9376 | struct scsi_qla_host *ha; | 9415 | struct scsi_qla_host *ha; |
9416 | int rval; | ||
9377 | 9417 | ||
9378 | ha = to_qla_host(cmd->device->host); | 9418 | ha = to_qla_host(cmd->device->host); |
9379 | 9419 | ||
9420 | rval = qla4xxx_isp_check_reg(ha); | ||
9421 | if (rval != QLA_SUCCESS) { | ||
9422 | ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n"); | ||
9423 | return FAILED; | ||
9424 | } | ||
9425 | |||
9380 | if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) | 9426 | if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba) |
9381 | qla4_83xx_set_idc_dontreset(ha); | 9427 | qla4_83xx_set_idc_dontreset(ha); |
9382 | 9428 | ||
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index d042915ce895..ca53a5f785ee 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -223,7 +223,8 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd) | |||
223 | 223 | ||
224 | static void scsi_eh_inc_host_failed(struct rcu_head *head) | 224 | static void scsi_eh_inc_host_failed(struct rcu_head *head) |
225 | { | 225 | { |
226 | struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu); | 226 | struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu); |
227 | struct Scsi_Host *shost = scmd->device->host; | ||
227 | unsigned long flags; | 228 | unsigned long flags; |
228 | 229 | ||
229 | spin_lock_irqsave(shost->host_lock, flags); | 230 | spin_lock_irqsave(shost->host_lock, flags); |
@@ -259,7 +260,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) | |||
259 | * Ensure that all tasks observe the host state change before the | 260 | * Ensure that all tasks observe the host state change before the |
260 | * host_failed change. | 261 | * host_failed change. |
261 | */ | 262 | */ |
262 | call_rcu(&shost->rcu, scsi_eh_inc_host_failed); | 263 | call_rcu(&scmd->rcu, scsi_eh_inc_host_failed); |
263 | } | 264 | } |
264 | 265 | ||
265 | /** | 266 | /** |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index a86df9ca7d1c..c84f931388f2 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -671,6 +671,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error, | |||
671 | if (!blk_rq_is_scsi(req)) { | 671 | if (!blk_rq_is_scsi(req)) { |
672 | WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); | 672 | WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); |
673 | cmd->flags &= ~SCMD_INITIALIZED; | 673 | cmd->flags &= ~SCMD_INITIALIZED; |
674 | destroy_rcu_head(&cmd->rcu); | ||
674 | } | 675 | } |
675 | 676 | ||
676 | if (req->mq_ctx) { | 677 | if (req->mq_ctx) { |
@@ -720,6 +721,8 @@ static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd, | |||
720 | int result) | 721 | int result) |
721 | { | 722 | { |
722 | switch (host_byte(result)) { | 723 | switch (host_byte(result)) { |
724 | case DID_OK: | ||
725 | return BLK_STS_OK; | ||
723 | case DID_TRANSPORT_FAILFAST: | 726 | case DID_TRANSPORT_FAILFAST: |
724 | return BLK_STS_TRANSPORT; | 727 | return BLK_STS_TRANSPORT; |
725 | case DID_TARGET_FAILURE: | 728 | case DID_TARGET_FAILURE: |
@@ -1151,6 +1154,7 @@ static void scsi_initialize_rq(struct request *rq) | |||
1151 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); | 1154 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); |
1152 | 1155 | ||
1153 | scsi_req_init(&cmd->req); | 1156 | scsi_req_init(&cmd->req); |
1157 | init_rcu_head(&cmd->rcu); | ||
1154 | cmd->jiffies_at_alloc = jiffies; | 1158 | cmd->jiffies_at_alloc = jiffies; |
1155 | cmd->retries = 0; | 1159 | cmd->retries = 0; |
1156 | } | 1160 | } |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bff21e636ddd..3541caf3fceb 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -2595,6 +2595,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2595 | int res; | 2595 | int res; |
2596 | struct scsi_device *sdp = sdkp->device; | 2596 | struct scsi_device *sdp = sdkp->device; |
2597 | struct scsi_mode_data data; | 2597 | struct scsi_mode_data data; |
2598 | int disk_ro = get_disk_ro(sdkp->disk); | ||
2598 | int old_wp = sdkp->write_prot; | 2599 | int old_wp = sdkp->write_prot; |
2599 | 2600 | ||
2600 | set_disk_ro(sdkp->disk, 0); | 2601 | set_disk_ro(sdkp->disk, 0); |
@@ -2635,7 +2636,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2635 | "Test WP failed, assume Write Enabled\n"); | 2636 | "Test WP failed, assume Write Enabled\n"); |
2636 | } else { | 2637 | } else { |
2637 | sdkp->write_prot = ((data.device_specific & 0x80) != 0); | 2638 | sdkp->write_prot = ((data.device_specific & 0x80) != 0); |
2638 | set_disk_ro(sdkp->disk, sdkp->write_prot); | 2639 | set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro); |
2639 | if (sdkp->first_scan || old_wp != sdkp->write_prot) { | 2640 | if (sdkp->first_scan || old_wp != sdkp->write_prot) { |
2640 | sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", | 2641 | sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", |
2641 | sdkp->write_prot ? "on" : "off"); | 2642 | sdkp->write_prot ? "on" : "off"); |
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 6c348a211ebb..89cf4498f535 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c | |||
@@ -403,7 +403,7 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf) | |||
403 | */ | 403 | */ |
404 | static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) | 404 | static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) |
405 | { | 405 | { |
406 | u64 zone_blocks; | 406 | u64 zone_blocks = 0; |
407 | sector_t block = 0; | 407 | sector_t block = 0; |
408 | unsigned char *buf; | 408 | unsigned char *buf; |
409 | unsigned char *rec; | 409 | unsigned char *rec; |
@@ -421,10 +421,8 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) | |||
421 | 421 | ||
422 | /* Do a report zone to get the same field */ | 422 | /* Do a report zone to get the same field */ |
423 | ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0); | 423 | ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0); |
424 | if (ret) { | 424 | if (ret) |
425 | zone_blocks = 0; | 425 | goto out_free; |
426 | goto out; | ||
427 | } | ||
428 | 426 | ||
429 | same = buf[4] & 0x0f; | 427 | same = buf[4] & 0x0f; |
430 | if (same > 0) { | 428 | if (same > 0) { |
@@ -464,7 +462,7 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) | |||
464 | ret = sd_zbc_report_zones(sdkp, buf, | 462 | ret = sd_zbc_report_zones(sdkp, buf, |
465 | SD_ZBC_BUF_SIZE, block); | 463 | SD_ZBC_BUF_SIZE, block); |
466 | if (ret) | 464 | if (ret) |
467 | return ret; | 465 | goto out_free; |
468 | } | 466 | } |
469 | 467 | ||
470 | } while (block < sdkp->capacity); | 468 | } while (block < sdkp->capacity); |
@@ -472,35 +470,32 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) | |||
472 | zone_blocks = sdkp->zone_blocks; | 470 | zone_blocks = sdkp->zone_blocks; |
473 | 471 | ||
474 | out: | 472 | out: |
475 | kfree(buf); | ||
476 | |||
477 | if (!zone_blocks) { | 473 | if (!zone_blocks) { |
478 | if (sdkp->first_scan) | 474 | if (sdkp->first_scan) |
479 | sd_printk(KERN_NOTICE, sdkp, | 475 | sd_printk(KERN_NOTICE, sdkp, |
480 | "Devices with non constant zone " | 476 | "Devices with non constant zone " |
481 | "size are not supported\n"); | 477 | "size are not supported\n"); |
482 | return -ENODEV; | 478 | ret = -ENODEV; |
483 | } | 479 | } else if (!is_power_of_2(zone_blocks)) { |
484 | |||
485 | if (!is_power_of_2(zone_blocks)) { | ||
486 | if (sdkp->first_scan) | 480 | if (sdkp->first_scan) |
487 | sd_printk(KERN_NOTICE, sdkp, | 481 | sd_printk(KERN_NOTICE, sdkp, |
488 | "Devices with non power of 2 zone " | 482 | "Devices with non power of 2 zone " |
489 | "size are not supported\n"); | 483 | "size are not supported\n"); |
490 | return -ENODEV; | 484 | ret = -ENODEV; |
491 | } | 485 | } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { |
492 | |||
493 | if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { | ||
494 | if (sdkp->first_scan) | 486 | if (sdkp->first_scan) |
495 | sd_printk(KERN_NOTICE, sdkp, | 487 | sd_printk(KERN_NOTICE, sdkp, |
496 | "Zone size too large\n"); | 488 | "Zone size too large\n"); |
497 | return -ENODEV; | 489 | ret = -ENODEV; |
490 | } else { | ||
491 | sdkp->zone_blocks = zone_blocks; | ||
492 | sdkp->zone_shift = ilog2(zone_blocks); | ||
498 | } | 493 | } |
499 | 494 | ||
500 | sdkp->zone_blocks = zone_blocks; | 495 | out_free: |
501 | sdkp->zone_shift = ilog2(zone_blocks); | 496 | kfree(buf); |
502 | 497 | ||
503 | return 0; | 498 | return ret; |
504 | } | 499 | } |
505 | 500 | ||
506 | /** | 501 | /** |
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 40fc7a590e81..8c51d628b52e 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
@@ -1311,7 +1311,8 @@ static int storvsc_do_io(struct hv_device *device, | |||
1311 | */ | 1311 | */ |
1312 | cpumask_and(&alloced_mask, &stor_device->alloced_cpus, | 1312 | cpumask_and(&alloced_mask, &stor_device->alloced_cpus, |
1313 | cpumask_of_node(cpu_to_node(q_num))); | 1313 | cpumask_of_node(cpu_to_node(q_num))); |
1314 | for_each_cpu(tgt_cpu, &alloced_mask) { | 1314 | for_each_cpu_wrap(tgt_cpu, &alloced_mask, |
1315 | outgoing_channel->target_cpu + 1) { | ||
1315 | if (tgt_cpu != outgoing_channel->target_cpu) { | 1316 | if (tgt_cpu != outgoing_channel->target_cpu) { |
1316 | outgoing_channel = | 1317 | outgoing_channel = |
1317 | stor_device->stor_chns[tgt_cpu]; | 1318 | stor_device->stor_chns[tgt_cpu]; |
@@ -1657,7 +1658,7 @@ static struct scsi_host_template scsi_driver = { | |||
1657 | .eh_timed_out = storvsc_eh_timed_out, | 1658 | .eh_timed_out = storvsc_eh_timed_out, |
1658 | .slave_alloc = storvsc_device_alloc, | 1659 | .slave_alloc = storvsc_device_alloc, |
1659 | .slave_configure = storvsc_device_configure, | 1660 | .slave_configure = storvsc_device_configure, |
1660 | .cmd_per_lun = 255, | 1661 | .cmd_per_lun = 2048, |
1661 | .this_id = -1, | 1662 | .this_id = -1, |
1662 | .use_clustering = ENABLE_CLUSTERING, | 1663 | .use_clustering = ENABLE_CLUSTERING, |
1663 | /* Make sure we dont get a sg segment crosses a page boundary */ | 1664 | /* Make sure we dont get a sg segment crosses a page boundary */ |
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index ca360daa6a25..378af306fda1 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c | |||
@@ -536,7 +536,7 @@ sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fa | |||
536 | * Look for the greatest clock divisor that allows an | 536 | * Look for the greatest clock divisor that allows an |
537 | * input speed faster than the period. | 537 | * input speed faster than the period. |
538 | */ | 538 | */ |
539 | while (div-- > 0) | 539 | while (--div > 0) |
540 | if (kpc >= (div_10M[div] << 2)) break; | 540 | if (kpc >= (div_10M[div] << 2)) break; |
541 | 541 | ||
542 | /* | 542 | /* |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index a355d989b414..c7da2c185990 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
@@ -4352,6 +4352,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) | |||
4352 | /* REPORT SUPPORTED OPERATION CODES is not supported */ | 4352 | /* REPORT SUPPORTED OPERATION CODES is not supported */ |
4353 | sdev->no_report_opcodes = 1; | 4353 | sdev->no_report_opcodes = 1; |
4354 | 4354 | ||
4355 | /* WRITE_SAME command is not supported */ | ||
4356 | sdev->no_write_same = 1; | ||
4355 | 4357 | ||
4356 | ufshcd_set_queue_depth(sdev); | 4358 | ufshcd_set_queue_depth(sdev); |
4357 | 4359 | ||
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index e4f5bb056fd2..ba3cfa8e279b 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c | |||
@@ -2443,39 +2443,21 @@ struct cgr_comp { | |||
2443 | struct completion completion; | 2443 | struct completion completion; |
2444 | }; | 2444 | }; |
2445 | 2445 | ||
2446 | static int qman_delete_cgr_thread(void *p) | 2446 | static void qman_delete_cgr_smp_call(void *p) |
2447 | { | 2447 | { |
2448 | struct cgr_comp *cgr_comp = (struct cgr_comp *)p; | 2448 | qman_delete_cgr((struct qman_cgr *)p); |
2449 | int ret; | ||
2450 | |||
2451 | ret = qman_delete_cgr(cgr_comp->cgr); | ||
2452 | complete(&cgr_comp->completion); | ||
2453 | |||
2454 | return ret; | ||
2455 | } | 2449 | } |
2456 | 2450 | ||
2457 | void qman_delete_cgr_safe(struct qman_cgr *cgr) | 2451 | void qman_delete_cgr_safe(struct qman_cgr *cgr) |
2458 | { | 2452 | { |
2459 | struct task_struct *thread; | ||
2460 | struct cgr_comp cgr_comp; | ||
2461 | |||
2462 | preempt_disable(); | 2453 | preempt_disable(); |
2463 | if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { | 2454 | if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { |
2464 | init_completion(&cgr_comp.completion); | 2455 | smp_call_function_single(qman_cgr_cpus[cgr->cgrid], |
2465 | cgr_comp.cgr = cgr; | 2456 | qman_delete_cgr_smp_call, cgr, true); |
2466 | thread = kthread_create(qman_delete_cgr_thread, &cgr_comp, | ||
2467 | "cgr_del"); | ||
2468 | |||
2469 | if (IS_ERR(thread)) | ||
2470 | goto out; | ||
2471 | |||
2472 | kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]); | ||
2473 | wake_up_process(thread); | ||
2474 | wait_for_completion(&cgr_comp.completion); | ||
2475 | preempt_enable(); | 2457 | preempt_enable(); |
2476 | return; | 2458 | return; |
2477 | } | 2459 | } |
2478 | out: | 2460 | |
2479 | qman_delete_cgr(cgr); | 2461 | qman_delete_cgr(cgr); |
2480 | preempt_enable(); | 2462 | preempt_enable(); |
2481 | } | 2463 | } |
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c index 53f7275d6cbd..750f93197411 100644 --- a/drivers/soc/imx/gpc.c +++ b/drivers/soc/imx/gpc.c | |||
@@ -348,7 +348,7 @@ static int imx_gpc_old_dt_init(struct device *dev, struct regmap *regmap, | |||
348 | if (i == 1) { | 348 | if (i == 1) { |
349 | domain->supply = devm_regulator_get(dev, "pu"); | 349 | domain->supply = devm_regulator_get(dev, "pu"); |
350 | if (IS_ERR(domain->supply)) | 350 | if (IS_ERR(domain->supply)) |
351 | return PTR_ERR(domain->supply);; | 351 | return PTR_ERR(domain->supply); |
352 | 352 | ||
353 | ret = imx_pgc_get_clocks(dev, domain); | 353 | ret = imx_pgc_get_clocks(dev, domain); |
354 | if (ret) | 354 | if (ret) |
@@ -470,13 +470,21 @@ static int imx_gpc_probe(struct platform_device *pdev) | |||
470 | 470 | ||
471 | static int imx_gpc_remove(struct platform_device *pdev) | 471 | static int imx_gpc_remove(struct platform_device *pdev) |
472 | { | 472 | { |
473 | struct device_node *pgc_node; | ||
473 | int ret; | 474 | int ret; |
474 | 475 | ||
476 | pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc"); | ||
477 | |||
478 | /* bail out if DT too old and doesn't provide the necessary info */ | ||
479 | if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") && | ||
480 | !pgc_node) | ||
481 | return 0; | ||
482 | |||
475 | /* | 483 | /* |
476 | * If the old DT binding is used the toplevel driver needs to | 484 | * If the old DT binding is used the toplevel driver needs to |
477 | * de-register the power domains | 485 | * de-register the power domains |
478 | */ | 486 | */ |
479 | if (!of_get_child_by_name(pdev->dev.of_node, "pgc")) { | 487 | if (!pgc_node) { |
480 | of_genpd_del_provider(pdev->dev.of_node); | 488 | of_genpd_del_provider(pdev->dev.of_node); |
481 | 489 | ||
482 | ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base); | 490 | ret = pm_genpd_remove(&imx_gpc_domains[GPC_PGC_DOMAIN_PU].base); |
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index bbdc53b686dd..86580b6df33d 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c | |||
@@ -326,24 +326,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) | |||
326 | mutex_lock(&ashmem_mutex); | 326 | mutex_lock(&ashmem_mutex); |
327 | 327 | ||
328 | if (asma->size == 0) { | 328 | if (asma->size == 0) { |
329 | ret = -EINVAL; | 329 | mutex_unlock(&ashmem_mutex); |
330 | goto out; | 330 | return -EINVAL; |
331 | } | 331 | } |
332 | 332 | ||
333 | if (!asma->file) { | 333 | if (!asma->file) { |
334 | ret = -EBADF; | 334 | mutex_unlock(&ashmem_mutex); |
335 | goto out; | 335 | return -EBADF; |
336 | } | 336 | } |
337 | 337 | ||
338 | mutex_unlock(&ashmem_mutex); | ||
339 | |||
338 | ret = vfs_llseek(asma->file, offset, origin); | 340 | ret = vfs_llseek(asma->file, offset, origin); |
339 | if (ret < 0) | 341 | if (ret < 0) |
340 | goto out; | 342 | return ret; |
341 | 343 | ||
342 | /** Copy f_pos from backing file, since f_ops->llseek() sets it */ | 344 | /** Copy f_pos from backing file, since f_ops->llseek() sets it */ |
343 | file->f_pos = asma->file->f_pos; | 345 | file->f_pos = asma->file->f_pos; |
344 | |||
345 | out: | ||
346 | mutex_unlock(&ashmem_mutex); | ||
347 | return ret; | 346 | return ret; |
348 | } | 347 | } |
349 | 348 | ||
@@ -702,30 +701,30 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, | |||
702 | size_t pgstart, pgend; | 701 | size_t pgstart, pgend; |
703 | int ret = -EINVAL; | 702 | int ret = -EINVAL; |
704 | 703 | ||
705 | if (unlikely(!asma->file)) | ||
706 | return -EINVAL; | ||
707 | |||
708 | if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) | 704 | if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) |
709 | return -EFAULT; | 705 | return -EFAULT; |
710 | 706 | ||
707 | mutex_lock(&ashmem_mutex); | ||
708 | |||
709 | if (unlikely(!asma->file)) | ||
710 | goto out_unlock; | ||
711 | |||
711 | /* per custom, you can pass zero for len to mean "everything onward" */ | 712 | /* per custom, you can pass zero for len to mean "everything onward" */ |
712 | if (!pin.len) | 713 | if (!pin.len) |
713 | pin.len = PAGE_ALIGN(asma->size) - pin.offset; | 714 | pin.len = PAGE_ALIGN(asma->size) - pin.offset; |
714 | 715 | ||
715 | if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) | 716 | if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) |
716 | return -EINVAL; | 717 | goto out_unlock; |
717 | 718 | ||
718 | if (unlikely(((__u32)-1) - pin.offset < pin.len)) | 719 | if (unlikely(((__u32)-1) - pin.offset < pin.len)) |
719 | return -EINVAL; | 720 | goto out_unlock; |
720 | 721 | ||
721 | if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) | 722 | if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) |
722 | return -EINVAL; | 723 | goto out_unlock; |
723 | 724 | ||
724 | pgstart = pin.offset / PAGE_SIZE; | 725 | pgstart = pin.offset / PAGE_SIZE; |
725 | pgend = pgstart + (pin.len / PAGE_SIZE) - 1; | 726 | pgend = pgstart + (pin.len / PAGE_SIZE) - 1; |
726 | 727 | ||
727 | mutex_lock(&ashmem_mutex); | ||
728 | |||
729 | switch (cmd) { | 728 | switch (cmd) { |
730 | case ASHMEM_PIN: | 729 | case ASHMEM_PIN: |
731 | ret = ashmem_pin(asma, pgstart, pgend); | 730 | ret = ashmem_pin(asma, pgstart, pgend); |
@@ -738,6 +737,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, | |||
738 | break; | 737 | break; |
739 | } | 738 | } |
740 | 739 | ||
740 | out_unlock: | ||
741 | mutex_unlock(&ashmem_mutex); | 741 | mutex_unlock(&ashmem_mutex); |
742 | 742 | ||
743 | return ret; | 743 | return ret; |
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index 94e06925c712..49718c96bf9e 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/err.h> | 12 | #include <linux/err.h> |
13 | #include <linux/cma.h> | 13 | #include <linux/cma.h> |
14 | #include <linux/scatterlist.h> | 14 | #include <linux/scatterlist.h> |
15 | #include <linux/highmem.h> | ||
15 | 16 | ||
16 | #include "ion.h" | 17 | #include "ion.h" |
17 | 18 | ||
@@ -42,6 +43,22 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, | |||
42 | if (!pages) | 43 | if (!pages) |
43 | return -ENOMEM; | 44 | return -ENOMEM; |
44 | 45 | ||
46 | if (PageHighMem(pages)) { | ||
47 | unsigned long nr_clear_pages = nr_pages; | ||
48 | struct page *page = pages; | ||
49 | |||
50 | while (nr_clear_pages > 0) { | ||
51 | void *vaddr = kmap_atomic(page); | ||
52 | |||
53 | memset(vaddr, 0, PAGE_SIZE); | ||
54 | kunmap_atomic(vaddr); | ||
55 | page++; | ||
56 | nr_clear_pages--; | ||
57 | } | ||
58 | } else { | ||
59 | memset(page_address(pages), 0, size); | ||
60 | } | ||
61 | |||
45 | table = kmalloc(sizeof(*table), GFP_KERNEL); | 62 | table = kmalloc(sizeof(*table), GFP_KERNEL); |
46 | if (!table) | 63 | if (!table) |
47 | goto err; | 64 | goto err; |
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index e618a87521a3..9d733471ca2e 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c | |||
@@ -475,8 +475,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s, | |||
475 | struct comedi_cmd *cmd = &async->cmd; | 475 | struct comedi_cmd *cmd = &async->cmd; |
476 | 476 | ||
477 | if (cmd->stop_src == TRIG_COUNT) { | 477 | if (cmd->stop_src == TRIG_COUNT) { |
478 | unsigned int nscans = nsamples / cmd->scan_end_arg; | 478 | unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg); |
479 | unsigned int scans_left = __comedi_nscans_left(s, nscans); | ||
480 | unsigned int scan_pos = | 479 | unsigned int scan_pos = |
481 | comedi_bytes_to_samples(s, async->scan_progress); | 480 | comedi_bytes_to_samples(s, async->scan_progress); |
482 | unsigned long long samples_left = 0; | 481 | unsigned long long samples_left = 0; |
diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig index 1f9100049176..b35ef7ee6901 100644 --- a/drivers/staging/fsl-mc/bus/Kconfig +++ b/drivers/staging/fsl-mc/bus/Kconfig | |||
@@ -7,7 +7,7 @@ | |||
7 | 7 | ||
8 | config FSL_MC_BUS | 8 | config FSL_MC_BUS |
9 | bool "QorIQ DPAA2 fsl-mc bus driver" | 9 | bool "QorIQ DPAA2 fsl-mc bus driver" |
10 | depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86 || PPC))) | 10 | depends on OF && (ARCH_LAYERSCAPE || (COMPILE_TEST && (ARM || ARM64 || X86_LOCAL_APIC || PPC))) |
11 | select GENERIC_MSI_IRQ_DOMAIN | 11 | select GENERIC_MSI_IRQ_DOMAIN |
12 | help | 12 | help |
13 | Driver to enable the bus infrastructure for the QorIQ DPAA2 | 13 | Driver to enable the bus infrastructure for the QorIQ DPAA2 |
diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c index 5064d5ddf581..fc2013aade51 100644 --- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c +++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c | |||
@@ -73,6 +73,8 @@ static int __init its_fsl_mc_msi_init(void) | |||
73 | 73 | ||
74 | for (np = of_find_matching_node(NULL, its_device_id); np; | 74 | for (np = of_find_matching_node(NULL, its_device_id); np; |
75 | np = of_find_matching_node(np, its_device_id)) { | 75 | np = of_find_matching_node(np, its_device_id)) { |
76 | if (!of_device_is_available(np)) | ||
77 | continue; | ||
76 | if (!of_property_read_bool(np, "msi-controller")) | 78 | if (!of_property_read_bool(np, "msi-controller")) |
77 | continue; | 79 | continue; |
78 | 80 | ||
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c index f01595593ce2..425e8b82533b 100644 --- a/drivers/staging/iio/adc/ad7192.c +++ b/drivers/staging/iio/adc/ad7192.c | |||
@@ -141,6 +141,8 @@ | |||
141 | #define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */ | 141 | #define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */ |
142 | #define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */ | 142 | #define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */ |
143 | 143 | ||
144 | #define AD7192_EXT_FREQ_MHZ_MIN 2457600 | ||
145 | #define AD7192_EXT_FREQ_MHZ_MAX 5120000 | ||
144 | #define AD7192_INT_FREQ_MHZ 4915200 | 146 | #define AD7192_INT_FREQ_MHZ 4915200 |
145 | 147 | ||
146 | /* NOTE: | 148 | /* NOTE: |
@@ -218,6 +220,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st) | |||
218 | ARRAY_SIZE(ad7192_calib_arr)); | 220 | ARRAY_SIZE(ad7192_calib_arr)); |
219 | } | 221 | } |
220 | 222 | ||
223 | static inline bool ad7192_valid_external_frequency(u32 freq) | ||
224 | { | ||
225 | return (freq >= AD7192_EXT_FREQ_MHZ_MIN && | ||
226 | freq <= AD7192_EXT_FREQ_MHZ_MAX); | ||
227 | } | ||
228 | |||
221 | static int ad7192_setup(struct ad7192_state *st, | 229 | static int ad7192_setup(struct ad7192_state *st, |
222 | const struct ad7192_platform_data *pdata) | 230 | const struct ad7192_platform_data *pdata) |
223 | { | 231 | { |
@@ -243,17 +251,20 @@ static int ad7192_setup(struct ad7192_state *st, | |||
243 | id); | 251 | id); |
244 | 252 | ||
245 | switch (pdata->clock_source_sel) { | 253 | switch (pdata->clock_source_sel) { |
246 | case AD7192_CLK_EXT_MCLK1_2: | ||
247 | case AD7192_CLK_EXT_MCLK2: | ||
248 | st->mclk = AD7192_INT_FREQ_MHZ; | ||
249 | break; | ||
250 | case AD7192_CLK_INT: | 254 | case AD7192_CLK_INT: |
251 | case AD7192_CLK_INT_CO: | 255 | case AD7192_CLK_INT_CO: |
252 | if (pdata->ext_clk_hz) | 256 | st->mclk = AD7192_INT_FREQ_MHZ; |
253 | st->mclk = pdata->ext_clk_hz; | ||
254 | else | ||
255 | st->mclk = AD7192_INT_FREQ_MHZ; | ||
256 | break; | 257 | break; |
258 | case AD7192_CLK_EXT_MCLK1_2: | ||
259 | case AD7192_CLK_EXT_MCLK2: | ||
260 | if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) { | ||
261 | st->mclk = pdata->ext_clk_hz; | ||
262 | break; | ||
263 | } | ||
264 | dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n", | ||
265 | pdata->ext_clk_hz); | ||
266 | ret = -EINVAL; | ||
267 | goto out; | ||
257 | default: | 268 | default: |
258 | ret = -EINVAL; | 269 | ret = -EINVAL; |
259 | goto out; | 270 | goto out; |
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c index 2b28fb9c0048..3bcf49466361 100644 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c | |||
@@ -648,8 +648,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev) | |||
648 | /* Ring buffer functions - here trigger setup related */ | 648 | /* Ring buffer functions - here trigger setup related */ |
649 | indio_dev->setup_ops = &ad5933_ring_setup_ops; | 649 | indio_dev->setup_ops = &ad5933_ring_setup_ops; |
650 | 650 | ||
651 | indio_dev->modes |= INDIO_BUFFER_HARDWARE; | ||
652 | |||
653 | return 0; | 651 | return 0; |
654 | } | 652 | } |
655 | 653 | ||
@@ -762,7 +760,7 @@ static int ad5933_probe(struct i2c_client *client, | |||
762 | indio_dev->dev.parent = &client->dev; | 760 | indio_dev->dev.parent = &client->dev; |
763 | indio_dev->info = &ad5933_info; | 761 | indio_dev->info = &ad5933_info; |
764 | indio_dev->name = id->name; | 762 | indio_dev->name = id->name; |
765 | indio_dev->modes = INDIO_DIRECT_MODE; | 763 | indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE); |
766 | indio_dev->channels = ad5933_channels; | 764 | indio_dev->channels = ad5933_channels; |
767 | indio_dev->num_channels = ARRAY_SIZE(ad5933_channels); | 765 | indio_dev->num_channels = ARRAY_SIZE(ad5933_channels); |
768 | 766 | ||
diff --git a/drivers/staging/ncpfs/ncplib_kernel.c b/drivers/staging/ncpfs/ncplib_kernel.c index 804adfebba2f..3e047eb4cc7c 100644 --- a/drivers/staging/ncpfs/ncplib_kernel.c +++ b/drivers/staging/ncpfs/ncplib_kernel.c | |||
@@ -981,6 +981,10 @@ ncp_read_kernel(struct ncp_server *server, const char *file_id, | |||
981 | goto out; | 981 | goto out; |
982 | } | 982 | } |
983 | *bytes_read = ncp_reply_be16(server, 0); | 983 | *bytes_read = ncp_reply_be16(server, 0); |
984 | if (*bytes_read > to_read) { | ||
985 | result = -EINVAL; | ||
986 | goto out; | ||
987 | } | ||
984 | source = ncp_reply_data(server, 2 + (offset & 1)); | 988 | source = ncp_reply_data(server, 2 + (offset & 1)); |
985 | 989 | ||
986 | memcpy(target, source, *bytes_read); | 990 | memcpy(target, source, *bytes_read); |
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 5c0e59e8fe46..cbe98bc2b998 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
@@ -2180,6 +2180,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, | |||
2180 | } | 2180 | } |
2181 | if (tty_hung_up_p(file)) | 2181 | if (tty_hung_up_p(file)) |
2182 | break; | 2182 | break; |
2183 | /* | ||
2184 | * Abort readers for ttys which never actually | ||
2185 | * get hung up. See __tty_hangup(). | ||
2186 | */ | ||
2187 | if (test_bit(TTY_HUPPING, &tty->flags)) | ||
2188 | break; | ||
2183 | if (!timeout) | 2189 | if (!timeout) |
2184 | break; | 2190 | break; |
2185 | if (file->f_flags & O_NONBLOCK) { | 2191 | if (file->f_flags & O_NONBLOCK) { |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 54adf8d56350..a93f77ab3da0 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -3387,11 +3387,9 @@ static int serial_pci_is_class_communication(struct pci_dev *dev) | |||
3387 | /* | 3387 | /* |
3388 | * If it is not a communications device or the programming | 3388 | * If it is not a communications device or the programming |
3389 | * interface is greater than 6, give up. | 3389 | * interface is greater than 6, give up. |
3390 | * | ||
3391 | * (Should we try to make guesses for multiport serial devices | ||
3392 | * later?) | ||
3393 | */ | 3390 | */ |
3394 | if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) && | 3391 | if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) && |
3392 | ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MULTISERIAL) && | ||
3395 | ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) || | 3393 | ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) || |
3396 | (dev->class & 0xff) > 6) | 3394 | (dev->class & 0xff) > 6) |
3397 | return -ENODEV; | 3395 | return -ENODEV; |
@@ -3428,6 +3426,12 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) | |||
3428 | { | 3426 | { |
3429 | int num_iomem, num_port, first_port = -1, i; | 3427 | int num_iomem, num_port, first_port = -1, i; |
3430 | 3428 | ||
3429 | /* | ||
3430 | * Should we try to make guesses for multiport serial devices later? | ||
3431 | */ | ||
3432 | if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_MULTISERIAL) | ||
3433 | return -ENODEV; | ||
3434 | |||
3431 | num_iomem = num_port = 0; | 3435 | num_iomem = num_port = 0; |
3432 | for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) { | 3436 | for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) { |
3433 | if (pci_resource_flags(dev, i) & IORESOURCE_IO) { | 3437 | if (pci_resource_flags(dev, i) & IORESOURCE_IO) { |
@@ -4699,6 +4703,17 @@ static const struct pci_device_id serial_pci_tbl[] = { | |||
4699 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ | 4703 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ |
4700 | pbn_b2_4_115200 }, | 4704 | pbn_b2_4_115200 }, |
4701 | /* | 4705 | /* |
4706 | * BrainBoxes UC-260 | ||
4707 | */ | ||
4708 | { PCI_VENDOR_ID_INTASHIELD, 0x0D21, | ||
4709 | PCI_ANY_ID, PCI_ANY_ID, | ||
4710 | PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, | ||
4711 | pbn_b2_4_115200 }, | ||
4712 | { PCI_VENDOR_ID_INTASHIELD, 0x0E34, | ||
4713 | PCI_ANY_ID, PCI_ANY_ID, | ||
4714 | PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, | ||
4715 | pbn_b2_4_115200 }, | ||
4716 | /* | ||
4702 | * Perle PCI-RAS cards | 4717 | * Perle PCI-RAS cards |
4703 | */ | 4718 | */ |
4704 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, | 4719 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, |
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index df46a9e88c34..e287fe8f10fc 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
@@ -1734,6 +1734,7 @@ static void atmel_get_ip_name(struct uart_port *port) | |||
1734 | switch (version) { | 1734 | switch (version) { |
1735 | case 0x302: | 1735 | case 0x302: |
1736 | case 0x10213: | 1736 | case 0x10213: |
1737 | case 0x10302: | ||
1737 | dev_dbg(port->dev, "This version is usart\n"); | 1738 | dev_dbg(port->dev, "This version is usart\n"); |
1738 | atmel_port->has_frac_baudrate = true; | 1739 | atmel_port->has_frac_baudrate = true; |
1739 | atmel_port->has_hw_timer = true; | 1740 | atmel_port->has_hw_timer = true; |
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index 870e84fb6e39..a24278380fec 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c | |||
@@ -245,11 +245,12 @@ int __init of_setup_earlycon(const struct earlycon_id *match, | |||
245 | } | 245 | } |
246 | port->mapbase = addr; | 246 | port->mapbase = addr; |
247 | port->uartclk = BASE_BAUD * 16; | 247 | port->uartclk = BASE_BAUD * 16; |
248 | port->membase = earlycon_map(port->mapbase, SZ_4K); | ||
249 | 248 | ||
250 | val = of_get_flat_dt_prop(node, "reg-offset", NULL); | 249 | val = of_get_flat_dt_prop(node, "reg-offset", NULL); |
251 | if (val) | 250 | if (val) |
252 | port->mapbase += be32_to_cpu(*val); | 251 | port->mapbase += be32_to_cpu(*val); |
252 | port->membase = earlycon_map(port->mapbase, SZ_4K); | ||
253 | |||
253 | val = of_get_flat_dt_prop(node, "reg-shift", NULL); | 254 | val = of_get_flat_dt_prop(node, "reg-shift", NULL); |
254 | if (val) | 255 | if (val) |
255 | port->regshift = be32_to_cpu(*val); | 256 | port->regshift = be32_to_cpu(*val); |
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 1d7ca382bc12..a33c685af990 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
@@ -2093,7 +2093,7 @@ static int serial_imx_probe(struct platform_device *pdev) | |||
2093 | uart_get_rs485_mode(&pdev->dev, &sport->port.rs485); | 2093 | uart_get_rs485_mode(&pdev->dev, &sport->port.rs485); |
2094 | 2094 | ||
2095 | if (sport->port.rs485.flags & SER_RS485_ENABLED && | 2095 | if (sport->port.rs485.flags & SER_RS485_ENABLED && |
2096 | (!sport->have_rtscts || !sport->have_rtsgpio)) | 2096 | (!sport->have_rtscts && !sport->have_rtsgpio)) |
2097 | dev_err(&pdev->dev, "no RTS control, disabling rs485\n"); | 2097 | dev_err(&pdev->dev, "no RTS control, disabling rs485\n"); |
2098 | 2098 | ||
2099 | imx_rs485_config(&sport->port, &sport->port.rs485); | 2099 | imx_rs485_config(&sport->port, &sport->port.rs485); |
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index c8dde56b532b..35b9201db3b4 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c | |||
@@ -1144,6 +1144,8 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state) | |||
1144 | uport->ops->config_port(uport, flags); | 1144 | uport->ops->config_port(uport, flags); |
1145 | 1145 | ||
1146 | ret = uart_startup(tty, state, 1); | 1146 | ret = uart_startup(tty, state, 1); |
1147 | if (ret == 0) | ||
1148 | tty_port_set_initialized(port, true); | ||
1147 | if (ret > 0) | 1149 | if (ret > 0) |
1148 | ret = 0; | 1150 | ret = 0; |
1149 | } | 1151 | } |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 7257c078e155..44adf9db38f8 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -885,6 +885,8 @@ static void sci_receive_chars(struct uart_port *port) | |||
885 | /* Tell the rest of the system the news. New characters! */ | 885 | /* Tell the rest of the system the news. New characters! */ |
886 | tty_flip_buffer_push(tport); | 886 | tty_flip_buffer_push(tport); |
887 | } else { | 887 | } else { |
888 | /* TTY buffers full; read from RX reg to prevent lockup */ | ||
889 | serial_port_in(port, SCxRDR); | ||
888 | serial_port_in(port, SCxSR); /* dummy read */ | 890 | serial_port_in(port, SCxSR); /* dummy read */ |
889 | sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); | 891 | sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); |
890 | } | 892 | } |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index eb9133b472f4..63114ea35ec1 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
@@ -586,6 +586,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session) | |||
586 | return; | 586 | return; |
587 | } | 587 | } |
588 | 588 | ||
589 | /* | ||
590 | * Some console devices aren't actually hung up for technical and | ||
591 | * historical reasons, which can lead to indefinite interruptible | ||
592 | * sleep in n_tty_read(). The following explicitly tells | ||
593 | * n_tty_read() to abort readers. | ||
594 | */ | ||
595 | set_bit(TTY_HUPPING, &tty->flags); | ||
596 | |||
589 | /* inuse_filps is protected by the single tty lock, | 597 | /* inuse_filps is protected by the single tty lock, |
590 | this really needs to change if we want to flush the | 598 | this really needs to change if we want to flush the |
591 | workqueue with the lock held */ | 599 | workqueue with the lock held */ |
@@ -640,6 +648,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session) | |||
640 | * from the ldisc side, which is now guaranteed. | 648 | * from the ldisc side, which is now guaranteed. |
641 | */ | 649 | */ |
642 | set_bit(TTY_HUPPED, &tty->flags); | 650 | set_bit(TTY_HUPPED, &tty->flags); |
651 | clear_bit(TTY_HUPPING, &tty->flags); | ||
643 | tty_unlock(tty); | 652 | tty_unlock(tty); |
644 | 653 | ||
645 | if (f) | 654 | if (f) |
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 88b902c525d7..b4e57c5a8bba 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c | |||
@@ -1727,7 +1727,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear) | |||
1727 | default_attr(vc); | 1727 | default_attr(vc); |
1728 | update_attr(vc); | 1728 | update_attr(vc); |
1729 | 1729 | ||
1730 | vc->vc_tab_stop[0] = 0x01010100; | 1730 | vc->vc_tab_stop[0] = |
1731 | vc->vc_tab_stop[1] = | 1731 | vc->vc_tab_stop[1] = |
1732 | vc->vc_tab_stop[2] = | 1732 | vc->vc_tab_stop[2] = |
1733 | vc->vc_tab_stop[3] = | 1733 | vc->vc_tab_stop[3] = |
@@ -1771,7 +1771,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) | |||
1771 | vc->vc_pos -= (vc->vc_x << 1); | 1771 | vc->vc_pos -= (vc->vc_x << 1); |
1772 | while (vc->vc_x < vc->vc_cols - 1) { | 1772 | while (vc->vc_x < vc->vc_cols - 1) { |
1773 | vc->vc_x++; | 1773 | vc->vc_x++; |
1774 | if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31))) | 1774 | if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31))) |
1775 | break; | 1775 | break; |
1776 | } | 1776 | } |
1777 | vc->vc_pos += (vc->vc_x << 1); | 1777 | vc->vc_pos += (vc->vc_x << 1); |
@@ -1831,7 +1831,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) | |||
1831 | lf(vc); | 1831 | lf(vc); |
1832 | return; | 1832 | return; |
1833 | case 'H': | 1833 | case 'H': |
1834 | vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31)); | 1834 | vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31)); |
1835 | return; | 1835 | return; |
1836 | case 'Z': | 1836 | case 'Z': |
1837 | respond_ID(tty); | 1837 | respond_ID(tty); |
@@ -2024,7 +2024,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) | |||
2024 | return; | 2024 | return; |
2025 | case 'g': | 2025 | case 'g': |
2026 | if (!vc->vc_par[0]) | 2026 | if (!vc->vc_par[0]) |
2027 | vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31)); | 2027 | vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31)); |
2028 | else if (vc->vc_par[0] == 3) { | 2028 | else if (vc->vc_par[0] == 3) { |
2029 | vc->vc_tab_stop[0] = | 2029 | vc->vc_tab_stop[0] = |
2030 | vc->vc_tab_stop[1] = | 2030 | vc->vc_tab_stop[1] = |
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index f699abab1787..148f3ee70286 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig | |||
@@ -19,6 +19,12 @@ config USB_EHCI_BIG_ENDIAN_MMIO | |||
19 | config USB_EHCI_BIG_ENDIAN_DESC | 19 | config USB_EHCI_BIG_ENDIAN_DESC |
20 | bool | 20 | bool |
21 | 21 | ||
22 | config USB_UHCI_BIG_ENDIAN_MMIO | ||
23 | bool | ||
24 | |||
25 | config USB_UHCI_BIG_ENDIAN_DESC | ||
26 | bool | ||
27 | |||
22 | menuconfig USB_SUPPORT | 28 | menuconfig USB_SUPPORT |
23 | bool "USB support" | 29 | bool "USB support" |
24 | depends on HAS_IOMEM | 30 | depends on HAS_IOMEM |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 06b3b54a0e68..7b366a6c0b49 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -174,6 +174,7 @@ static int acm_wb_alloc(struct acm *acm) | |||
174 | wb = &acm->wb[wbn]; | 174 | wb = &acm->wb[wbn]; |
175 | if (!wb->use) { | 175 | if (!wb->use) { |
176 | wb->use = 1; | 176 | wb->use = 1; |
177 | wb->len = 0; | ||
177 | return wbn; | 178 | return wbn; |
178 | } | 179 | } |
179 | wbn = (wbn + 1) % ACM_NW; | 180 | wbn = (wbn + 1) % ACM_NW; |
@@ -805,16 +806,18 @@ static int acm_tty_write(struct tty_struct *tty, | |||
805 | static void acm_tty_flush_chars(struct tty_struct *tty) | 806 | static void acm_tty_flush_chars(struct tty_struct *tty) |
806 | { | 807 | { |
807 | struct acm *acm = tty->driver_data; | 808 | struct acm *acm = tty->driver_data; |
808 | struct acm_wb *cur = acm->putbuffer; | 809 | struct acm_wb *cur; |
809 | int err; | 810 | int err; |
810 | unsigned long flags; | 811 | unsigned long flags; |
811 | 812 | ||
813 | spin_lock_irqsave(&acm->write_lock, flags); | ||
814 | |||
815 | cur = acm->putbuffer; | ||
812 | if (!cur) /* nothing to do */ | 816 | if (!cur) /* nothing to do */ |
813 | return; | 817 | goto out; |
814 | 818 | ||
815 | acm->putbuffer = NULL; | 819 | acm->putbuffer = NULL; |
816 | err = usb_autopm_get_interface_async(acm->control); | 820 | err = usb_autopm_get_interface_async(acm->control); |
817 | spin_lock_irqsave(&acm->write_lock, flags); | ||
818 | if (err < 0) { | 821 | if (err < 0) { |
819 | cur->use = 0; | 822 | cur->use = 0; |
820 | acm->putbuffer = cur; | 823 | acm->putbuffer = cur; |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index c64cf6c4a83d..0c11d40a12bc 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -151,6 +151,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, | |||
151 | 151 | ||
152 | ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); | 152 | ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); |
153 | 153 | ||
154 | /* Linger a bit, prior to the next control message. */ | ||
155 | if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) | ||
156 | msleep(200); | ||
157 | |||
154 | kfree(dr); | 158 | kfree(dr); |
155 | 159 | ||
156 | return ret; | 160 | return ret; |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 4024926c1d68..54b019e267c5 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -226,8 +226,12 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
226 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = | 226 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = |
227 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, | 227 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, |
228 | 228 | ||
229 | /* Corsair K70 RGB */ | ||
230 | { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, | ||
231 | |||
229 | /* Corsair Strafe RGB */ | 232 | /* Corsair Strafe RGB */ |
230 | { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, | 233 | { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | |
234 | USB_QUIRK_DELAY_CTRL_MSG }, | ||
231 | 235 | ||
232 | /* Corsair K70 LUX */ | 236 | /* Corsair K70 LUX */ |
233 | { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, | 237 | { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, |
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index e4c3ce0de5de..5bcad1d869b5 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c | |||
@@ -1917,7 +1917,9 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg, | |||
1917 | /* Not specific buffer needed for ep0 ZLP */ | 1917 | /* Not specific buffer needed for ep0 ZLP */ |
1918 | dma_addr_t dma = hs_ep->desc_list_dma; | 1918 | dma_addr_t dma = hs_ep->desc_list_dma; |
1919 | 1919 | ||
1920 | dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep); | 1920 | if (!index) |
1921 | dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep); | ||
1922 | |||
1921 | dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0); | 1923 | dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0); |
1922 | } else { | 1924 | } else { |
1923 | dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | | 1925 | dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) | |
@@ -2974,9 +2976,13 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx, | |||
2974 | if (ints & DXEPINT_STSPHSERCVD) { | 2976 | if (ints & DXEPINT_STSPHSERCVD) { |
2975 | dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__); | 2977 | dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__); |
2976 | 2978 | ||
2977 | /* Move to STATUS IN for DDMA */ | 2979 | /* Safety check EP0 state when STSPHSERCVD asserted */ |
2978 | if (using_desc_dma(hsotg)) | 2980 | if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) { |
2979 | dwc2_hsotg_ep0_zlp(hsotg, true); | 2981 | /* Move to STATUS IN for DDMA */ |
2982 | if (using_desc_dma(hsotg)) | ||
2983 | dwc2_hsotg_ep0_zlp(hsotg, true); | ||
2984 | } | ||
2985 | |||
2980 | } | 2986 | } |
2981 | 2987 | ||
2982 | if (ints & DXEPINT_BACK2BACKSETUP) | 2988 | if (ints & DXEPINT_BACK2BACKSETUP) |
@@ -3375,12 +3381,6 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, | |||
3375 | dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | | 3381 | dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) | |
3376 | DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0); | 3382 | DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0); |
3377 | 3383 | ||
3378 | dwc2_hsotg_enqueue_setup(hsotg); | ||
3379 | |||
3380 | dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", | ||
3381 | dwc2_readl(hsotg->regs + DIEPCTL0), | ||
3382 | dwc2_readl(hsotg->regs + DOEPCTL0)); | ||
3383 | |||
3384 | /* clear global NAKs */ | 3384 | /* clear global NAKs */ |
3385 | val = DCTL_CGOUTNAK | DCTL_CGNPINNAK; | 3385 | val = DCTL_CGOUTNAK | DCTL_CGNPINNAK; |
3386 | if (!is_usb_reset) | 3386 | if (!is_usb_reset) |
@@ -3391,6 +3391,12 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, | |||
3391 | mdelay(3); | 3391 | mdelay(3); |
3392 | 3392 | ||
3393 | hsotg->lx_state = DWC2_L0; | 3393 | hsotg->lx_state = DWC2_L0; |
3394 | |||
3395 | dwc2_hsotg_enqueue_setup(hsotg); | ||
3396 | |||
3397 | dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", | ||
3398 | dwc2_readl(hsotg->regs + DIEPCTL0), | ||
3399 | dwc2_readl(hsotg->regs + DOEPCTL0)); | ||
3394 | } | 3400 | } |
3395 | 3401 | ||
3396 | static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) | 3402 | static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) |
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 03fd20f0b496..c4a47496d2fb 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c | |||
@@ -137,7 +137,7 @@ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg) | |||
137 | p->activate_stm_fs_transceiver = true; | 137 | p->activate_stm_fs_transceiver = true; |
138 | } | 138 | } |
139 | 139 | ||
140 | static void dwc2_set_stm32f7xx_hsotg_params(struct dwc2_hsotg *hsotg) | 140 | static void dwc2_set_stm32f7_hsotg_params(struct dwc2_hsotg *hsotg) |
141 | { | 141 | { |
142 | struct dwc2_core_params *p = &hsotg->params; | 142 | struct dwc2_core_params *p = &hsotg->params; |
143 | 143 | ||
@@ -164,8 +164,8 @@ const struct of_device_id dwc2_of_match_table[] = { | |||
164 | { .compatible = "st,stm32f4x9-fsotg", | 164 | { .compatible = "st,stm32f4x9-fsotg", |
165 | .data = dwc2_set_stm32f4x9_fsotg_params }, | 165 | .data = dwc2_set_stm32f4x9_fsotg_params }, |
166 | { .compatible = "st,stm32f4x9-hsotg" }, | 166 | { .compatible = "st,stm32f4x9-hsotg" }, |
167 | { .compatible = "st,stm32f7xx-hsotg", | 167 | { .compatible = "st,stm32f7-hsotg", |
168 | .data = dwc2_set_stm32f7xx_hsotg_params }, | 168 | .data = dwc2_set_stm32f7_hsotg_params }, |
169 | {}, | 169 | {}, |
170 | }; | 170 | }; |
171 | MODULE_DEVICE_TABLE(of, dwc2_of_match_table); | 171 | MODULE_DEVICE_TABLE(of, dwc2_of_match_table); |
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index ade2ab00d37a..e94bf91cc58a 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c | |||
@@ -100,6 +100,8 @@ static void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) | |||
100 | reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); | 100 | reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); |
101 | reg |= DWC3_GCTL_PRTCAPDIR(mode); | 101 | reg |= DWC3_GCTL_PRTCAPDIR(mode); |
102 | dwc3_writel(dwc->regs, DWC3_GCTL, reg); | 102 | dwc3_writel(dwc->regs, DWC3_GCTL, reg); |
103 | |||
104 | dwc->current_dr_role = mode; | ||
103 | } | 105 | } |
104 | 106 | ||
105 | static void __dwc3_set_mode(struct work_struct *work) | 107 | static void __dwc3_set_mode(struct work_struct *work) |
@@ -133,8 +135,6 @@ static void __dwc3_set_mode(struct work_struct *work) | |||
133 | 135 | ||
134 | dwc3_set_prtcap(dwc, dwc->desired_dr_role); | 136 | dwc3_set_prtcap(dwc, dwc->desired_dr_role); |
135 | 137 | ||
136 | dwc->current_dr_role = dwc->desired_dr_role; | ||
137 | |||
138 | spin_unlock_irqrestore(&dwc->lock, flags); | 138 | spin_unlock_irqrestore(&dwc->lock, flags); |
139 | 139 | ||
140 | switch (dwc->desired_dr_role) { | 140 | switch (dwc->desired_dr_role) { |
@@ -175,7 +175,7 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode) | |||
175 | dwc->desired_dr_role = mode; | 175 | dwc->desired_dr_role = mode; |
176 | spin_unlock_irqrestore(&dwc->lock, flags); | 176 | spin_unlock_irqrestore(&dwc->lock, flags); |
177 | 177 | ||
178 | queue_work(system_power_efficient_wq, &dwc->drd_work); | 178 | queue_work(system_freezable_wq, &dwc->drd_work); |
179 | } | 179 | } |
180 | 180 | ||
181 | u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) | 181 | u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) |
@@ -219,7 +219,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) | |||
219 | * XHCI driver will reset the host block. If dwc3 was configured for | 219 | * XHCI driver will reset the host block. If dwc3 was configured for |
220 | * host-only mode, then we can return early. | 220 | * host-only mode, then we can return early. |
221 | */ | 221 | */ |
222 | if (dwc->dr_mode == USB_DR_MODE_HOST) | 222 | if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) |
223 | return 0; | 223 | return 0; |
224 | 224 | ||
225 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); | 225 | reg = dwc3_readl(dwc->regs, DWC3_DCTL); |
@@ -234,6 +234,9 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) | |||
234 | udelay(1); | 234 | udelay(1); |
235 | } while (--retries); | 235 | } while (--retries); |
236 | 236 | ||
237 | phy_exit(dwc->usb3_generic_phy); | ||
238 | phy_exit(dwc->usb2_generic_phy); | ||
239 | |||
237 | return -ETIMEDOUT; | 240 | return -ETIMEDOUT; |
238 | } | 241 | } |
239 | 242 | ||
@@ -483,6 +486,22 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc) | |||
483 | parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8); | 486 | parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8); |
484 | } | 487 | } |
485 | 488 | ||
489 | static int dwc3_core_ulpi_init(struct dwc3 *dwc) | ||
490 | { | ||
491 | int intf; | ||
492 | int ret = 0; | ||
493 | |||
494 | intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3); | ||
495 | |||
496 | if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI || | ||
497 | (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI && | ||
498 | dwc->hsphy_interface && | ||
499 | !strncmp(dwc->hsphy_interface, "ulpi", 4))) | ||
500 | ret = dwc3_ulpi_init(dwc); | ||
501 | |||
502 | return ret; | ||
503 | } | ||
504 | |||
486 | /** | 505 | /** |
487 | * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core | 506 | * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core |
488 | * @dwc: Pointer to our controller context structure | 507 | * @dwc: Pointer to our controller context structure |
@@ -494,7 +513,6 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc) | |||
494 | static int dwc3_phy_setup(struct dwc3 *dwc) | 513 | static int dwc3_phy_setup(struct dwc3 *dwc) |
495 | { | 514 | { |
496 | u32 reg; | 515 | u32 reg; |
497 | int ret; | ||
498 | 516 | ||
499 | reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); | 517 | reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); |
500 | 518 | ||
@@ -565,9 +583,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc) | |||
565 | } | 583 | } |
566 | /* FALLTHROUGH */ | 584 | /* FALLTHROUGH */ |
567 | case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: | 585 | case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI: |
568 | ret = dwc3_ulpi_init(dwc); | ||
569 | if (ret) | ||
570 | return ret; | ||
571 | /* FALLTHROUGH */ | 586 | /* FALLTHROUGH */ |
572 | default: | 587 | default: |
573 | break; | 588 | break; |
@@ -724,6 +739,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc) | |||
724 | } | 739 | } |
725 | 740 | ||
726 | static int dwc3_core_get_phy(struct dwc3 *dwc); | 741 | static int dwc3_core_get_phy(struct dwc3 *dwc); |
742 | static int dwc3_core_ulpi_init(struct dwc3 *dwc); | ||
727 | 743 | ||
728 | /** | 744 | /** |
729 | * dwc3_core_init - Low-level initialization of DWC3 Core | 745 | * dwc3_core_init - Low-level initialization of DWC3 Core |
@@ -755,17 +771,27 @@ static int dwc3_core_init(struct dwc3 *dwc) | |||
755 | dwc->maximum_speed = USB_SPEED_HIGH; | 771 | dwc->maximum_speed = USB_SPEED_HIGH; |
756 | } | 772 | } |
757 | 773 | ||
758 | ret = dwc3_core_get_phy(dwc); | 774 | ret = dwc3_phy_setup(dwc); |
759 | if (ret) | 775 | if (ret) |
760 | goto err0; | 776 | goto err0; |
761 | 777 | ||
762 | ret = dwc3_core_soft_reset(dwc); | 778 | if (!dwc->ulpi_ready) { |
763 | if (ret) | 779 | ret = dwc3_core_ulpi_init(dwc); |
764 | goto err0; | 780 | if (ret) |
781 | goto err0; | ||
782 | dwc->ulpi_ready = true; | ||
783 | } | ||
765 | 784 | ||
766 | ret = dwc3_phy_setup(dwc); | 785 | if (!dwc->phys_ready) { |
786 | ret = dwc3_core_get_phy(dwc); | ||
787 | if (ret) | ||
788 | goto err0a; | ||
789 | dwc->phys_ready = true; | ||
790 | } | ||
791 | |||
792 | ret = dwc3_core_soft_reset(dwc); | ||
767 | if (ret) | 793 | if (ret) |
768 | goto err0; | 794 | goto err0a; |
769 | 795 | ||
770 | dwc3_core_setup_global_control(dwc); | 796 | dwc3_core_setup_global_control(dwc); |
771 | dwc3_core_num_eps(dwc); | 797 | dwc3_core_num_eps(dwc); |
@@ -838,6 +864,9 @@ err1: | |||
838 | phy_exit(dwc->usb2_generic_phy); | 864 | phy_exit(dwc->usb2_generic_phy); |
839 | phy_exit(dwc->usb3_generic_phy); | 865 | phy_exit(dwc->usb3_generic_phy); |
840 | 866 | ||
867 | err0a: | ||
868 | dwc3_ulpi_exit(dwc); | ||
869 | |||
841 | err0: | 870 | err0: |
842 | return ret; | 871 | return ret; |
843 | } | 872 | } |
@@ -916,7 +945,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) | |||
916 | 945 | ||
917 | switch (dwc->dr_mode) { | 946 | switch (dwc->dr_mode) { |
918 | case USB_DR_MODE_PERIPHERAL: | 947 | case USB_DR_MODE_PERIPHERAL: |
919 | dwc->current_dr_role = DWC3_GCTL_PRTCAP_DEVICE; | ||
920 | dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); | 948 | dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); |
921 | 949 | ||
922 | if (dwc->usb2_phy) | 950 | if (dwc->usb2_phy) |
@@ -932,7 +960,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) | |||
932 | } | 960 | } |
933 | break; | 961 | break; |
934 | case USB_DR_MODE_HOST: | 962 | case USB_DR_MODE_HOST: |
935 | dwc->current_dr_role = DWC3_GCTL_PRTCAP_HOST; | ||
936 | dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); | 963 | dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); |
937 | 964 | ||
938 | if (dwc->usb2_phy) | 965 | if (dwc->usb2_phy) |
@@ -1234,7 +1261,6 @@ err4: | |||
1234 | 1261 | ||
1235 | err3: | 1262 | err3: |
1236 | dwc3_free_event_buffers(dwc); | 1263 | dwc3_free_event_buffers(dwc); |
1237 | dwc3_ulpi_exit(dwc); | ||
1238 | 1264 | ||
1239 | err2: | 1265 | err2: |
1240 | pm_runtime_allow(&pdev->dev); | 1266 | pm_runtime_allow(&pdev->dev); |
@@ -1284,7 +1310,7 @@ static int dwc3_remove(struct platform_device *pdev) | |||
1284 | } | 1310 | } |
1285 | 1311 | ||
1286 | #ifdef CONFIG_PM | 1312 | #ifdef CONFIG_PM |
1287 | static int dwc3_suspend_common(struct dwc3 *dwc) | 1313 | static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) |
1288 | { | 1314 | { |
1289 | unsigned long flags; | 1315 | unsigned long flags; |
1290 | 1316 | ||
@@ -1296,6 +1322,10 @@ static int dwc3_suspend_common(struct dwc3 *dwc) | |||
1296 | dwc3_core_exit(dwc); | 1322 | dwc3_core_exit(dwc); |
1297 | break; | 1323 | break; |
1298 | case DWC3_GCTL_PRTCAP_HOST: | 1324 | case DWC3_GCTL_PRTCAP_HOST: |
1325 | /* do nothing during host runtime_suspend */ | ||
1326 | if (!PMSG_IS_AUTO(msg)) | ||
1327 | dwc3_core_exit(dwc); | ||
1328 | break; | ||
1299 | default: | 1329 | default: |
1300 | /* do nothing */ | 1330 | /* do nothing */ |
1301 | break; | 1331 | break; |
@@ -1304,7 +1334,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc) | |||
1304 | return 0; | 1334 | return 0; |
1305 | } | 1335 | } |
1306 | 1336 | ||
1307 | static int dwc3_resume_common(struct dwc3 *dwc) | 1337 | static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) |
1308 | { | 1338 | { |
1309 | unsigned long flags; | 1339 | unsigned long flags; |
1310 | int ret; | 1340 | int ret; |
@@ -1320,6 +1350,13 @@ static int dwc3_resume_common(struct dwc3 *dwc) | |||
1320 | spin_unlock_irqrestore(&dwc->lock, flags); | 1350 | spin_unlock_irqrestore(&dwc->lock, flags); |
1321 | break; | 1351 | break; |
1322 | case DWC3_GCTL_PRTCAP_HOST: | 1352 | case DWC3_GCTL_PRTCAP_HOST: |
1353 | /* nothing to do on host runtime_resume */ | ||
1354 | if (!PMSG_IS_AUTO(msg)) { | ||
1355 | ret = dwc3_core_init(dwc); | ||
1356 | if (ret) | ||
1357 | return ret; | ||
1358 | } | ||
1359 | break; | ||
1323 | default: | 1360 | default: |
1324 | /* do nothing */ | 1361 | /* do nothing */ |
1325 | break; | 1362 | break; |
@@ -1331,12 +1368,11 @@ static int dwc3_resume_common(struct dwc3 *dwc) | |||
1331 | static int dwc3_runtime_checks(struct dwc3 *dwc) | 1368 | static int dwc3_runtime_checks(struct dwc3 *dwc) |
1332 | { | 1369 | { |
1333 | switch (dwc->current_dr_role) { | 1370 | switch (dwc->current_dr_role) { |
1334 | case USB_DR_MODE_PERIPHERAL: | 1371 | case DWC3_GCTL_PRTCAP_DEVICE: |
1335 | case USB_DR_MODE_OTG: | ||
1336 | if (dwc->connected) | 1372 | if (dwc->connected) |
1337 | return -EBUSY; | 1373 | return -EBUSY; |
1338 | break; | 1374 | break; |
1339 | case USB_DR_MODE_HOST: | 1375 | case DWC3_GCTL_PRTCAP_HOST: |
1340 | default: | 1376 | default: |
1341 | /* do nothing */ | 1377 | /* do nothing */ |
1342 | break; | 1378 | break; |
@@ -1353,7 +1389,7 @@ static int dwc3_runtime_suspend(struct device *dev) | |||
1353 | if (dwc3_runtime_checks(dwc)) | 1389 | if (dwc3_runtime_checks(dwc)) |
1354 | return -EBUSY; | 1390 | return -EBUSY; |
1355 | 1391 | ||
1356 | ret = dwc3_suspend_common(dwc); | 1392 | ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND); |
1357 | if (ret) | 1393 | if (ret) |
1358 | return ret; | 1394 | return ret; |
1359 | 1395 | ||
@@ -1369,7 +1405,7 @@ static int dwc3_runtime_resume(struct device *dev) | |||
1369 | 1405 | ||
1370 | device_init_wakeup(dev, false); | 1406 | device_init_wakeup(dev, false); |
1371 | 1407 | ||
1372 | ret = dwc3_resume_common(dwc); | 1408 | ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME); |
1373 | if (ret) | 1409 | if (ret) |
1374 | return ret; | 1410 | return ret; |
1375 | 1411 | ||
@@ -1416,7 +1452,7 @@ static int dwc3_suspend(struct device *dev) | |||
1416 | struct dwc3 *dwc = dev_get_drvdata(dev); | 1452 | struct dwc3 *dwc = dev_get_drvdata(dev); |
1417 | int ret; | 1453 | int ret; |
1418 | 1454 | ||
1419 | ret = dwc3_suspend_common(dwc); | 1455 | ret = dwc3_suspend_common(dwc, PMSG_SUSPEND); |
1420 | if (ret) | 1456 | if (ret) |
1421 | return ret; | 1457 | return ret; |
1422 | 1458 | ||
@@ -1432,7 +1468,7 @@ static int dwc3_resume(struct device *dev) | |||
1432 | 1468 | ||
1433 | pinctrl_pm_select_default_state(dev); | 1469 | pinctrl_pm_select_default_state(dev); |
1434 | 1470 | ||
1435 | ret = dwc3_resume_common(dwc); | 1471 | ret = dwc3_resume_common(dwc, PMSG_RESUME); |
1436 | if (ret) | 1472 | if (ret) |
1437 | return ret; | 1473 | return ret; |
1438 | 1474 | ||
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 03c7aaaac926..860d2bc184d1 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h | |||
@@ -158,13 +158,15 @@ | |||
158 | #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0) | 158 | #define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0) |
159 | #define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff) | 159 | #define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff) |
160 | 160 | ||
161 | #define DWC3_TXFIFOQ 1 | 161 | #define DWC3_TXFIFOQ 0 |
162 | #define DWC3_RXFIFOQ 3 | 162 | #define DWC3_RXFIFOQ 1 |
163 | #define DWC3_TXREQQ 5 | 163 | #define DWC3_TXREQQ 2 |
164 | #define DWC3_RXREQQ 7 | 164 | #define DWC3_RXREQQ 3 |
165 | #define DWC3_RXINFOQ 9 | 165 | #define DWC3_RXINFOQ 4 |
166 | #define DWC3_DESCFETCHQ 13 | 166 | #define DWC3_PSTATQ 5 |
167 | #define DWC3_EVENTQ 15 | 167 | #define DWC3_DESCFETCHQ 6 |
168 | #define DWC3_EVENTQ 7 | ||
169 | #define DWC3_AUXEVENTQ 8 | ||
168 | 170 | ||
169 | /* Global RX Threshold Configuration Register */ | 171 | /* Global RX Threshold Configuration Register */ |
170 | #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) | 172 | #define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19) |
@@ -795,7 +797,9 @@ struct dwc3_scratchpad_array { | |||
795 | * @usb3_phy: pointer to USB3 PHY | 797 | * @usb3_phy: pointer to USB3 PHY |
796 | * @usb2_generic_phy: pointer to USB2 PHY | 798 | * @usb2_generic_phy: pointer to USB2 PHY |
797 | * @usb3_generic_phy: pointer to USB3 PHY | 799 | * @usb3_generic_phy: pointer to USB3 PHY |
800 | * @phys_ready: flag to indicate that PHYs are ready | ||
798 | * @ulpi: pointer to ulpi interface | 801 | * @ulpi: pointer to ulpi interface |
802 | * @ulpi_ready: flag to indicate that ULPI is initialized | ||
799 | * @u2sel: parameter from Set SEL request. | 803 | * @u2sel: parameter from Set SEL request. |
800 | * @u2pel: parameter from Set SEL request. | 804 | * @u2pel: parameter from Set SEL request. |
801 | * @u1sel: parameter from Set SEL request. | 805 | * @u1sel: parameter from Set SEL request. |
@@ -893,7 +897,10 @@ struct dwc3 { | |||
893 | struct phy *usb2_generic_phy; | 897 | struct phy *usb2_generic_phy; |
894 | struct phy *usb3_generic_phy; | 898 | struct phy *usb3_generic_phy; |
895 | 899 | ||
900 | bool phys_ready; | ||
901 | |||
896 | struct ulpi *ulpi; | 902 | struct ulpi *ulpi; |
903 | bool ulpi_ready; | ||
897 | 904 | ||
898 | void __iomem *regs; | 905 | void __iomem *regs; |
899 | size_t regs_size; | 906 | size_t regs_size; |
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 7ae0eefc7cc7..e54c3622eb28 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c | |||
@@ -143,6 +143,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev) | |||
143 | clk_disable_unprepare(simple->clks[i]); | 143 | clk_disable_unprepare(simple->clks[i]); |
144 | clk_put(simple->clks[i]); | 144 | clk_put(simple->clks[i]); |
145 | } | 145 | } |
146 | simple->num_clocks = 0; | ||
146 | 147 | ||
147 | reset_control_assert(simple->resets); | 148 | reset_control_assert(simple->resets); |
148 | reset_control_put(simple->resets); | 149 | reset_control_put(simple->resets); |
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index a4719e853b85..ed8b86517675 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c | |||
@@ -582,9 +582,25 @@ static int dwc3_omap_resume(struct device *dev) | |||
582 | return 0; | 582 | return 0; |
583 | } | 583 | } |
584 | 584 | ||
585 | static void dwc3_omap_complete(struct device *dev) | ||
586 | { | ||
587 | struct dwc3_omap *omap = dev_get_drvdata(dev); | ||
588 | |||
589 | if (extcon_get_state(omap->edev, EXTCON_USB)) | ||
590 | dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID); | ||
591 | else | ||
592 | dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF); | ||
593 | |||
594 | if (extcon_get_state(omap->edev, EXTCON_USB_HOST)) | ||
595 | dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND); | ||
596 | else | ||
597 | dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT); | ||
598 | } | ||
599 | |||
585 | static const struct dev_pm_ops dwc3_omap_dev_pm_ops = { | 600 | static const struct dev_pm_ops dwc3_omap_dev_pm_ops = { |
586 | 601 | ||
587 | SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume) | 602 | SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume) |
603 | .complete = dwc3_omap_complete, | ||
588 | }; | 604 | }; |
589 | 605 | ||
590 | #define DEV_PM_OPS (&dwc3_omap_dev_pm_ops) | 606 | #define DEV_PM_OPS (&dwc3_omap_dev_pm_ops) |
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 9c2e4a17918e..18be31d5743a 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c | |||
@@ -854,7 +854,12 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, | |||
854 | trb++; | 854 | trb++; |
855 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | 855 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; |
856 | trace_dwc3_complete_trb(ep0, trb); | 856 | trace_dwc3_complete_trb(ep0, trb); |
857 | ep0->trb_enqueue = 0; | 857 | |
858 | if (r->direction) | ||
859 | dwc->eps[1]->trb_enqueue = 0; | ||
860 | else | ||
861 | dwc->eps[0]->trb_enqueue = 0; | ||
862 | |||
858 | dwc->ep0_bounced = false; | 863 | dwc->ep0_bounced = false; |
859 | } | 864 | } |
860 | 865 | ||
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 616ef49ccb49..2bda4eb1e9ac 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -2745,6 +2745,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) | |||
2745 | break; | 2745 | break; |
2746 | } | 2746 | } |
2747 | 2747 | ||
2748 | dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket; | ||
2749 | |||
2748 | /* Enable USB2 LPM Capability */ | 2750 | /* Enable USB2 LPM Capability */ |
2749 | 2751 | ||
2750 | if ((dwc->revision > DWC3_REVISION_194A) && | 2752 | if ((dwc->revision > DWC3_REVISION_194A) && |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 8f2cf3baa19c..d2428a9e8900 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -1538,7 +1538,6 @@ ffs_fs_kill_sb(struct super_block *sb) | |||
1538 | if (sb->s_fs_info) { | 1538 | if (sb->s_fs_info) { |
1539 | ffs_release_dev(sb->s_fs_info); | 1539 | ffs_release_dev(sb->s_fs_info); |
1540 | ffs_data_closed(sb->s_fs_info); | 1540 | ffs_data_closed(sb->s_fs_info); |
1541 | ffs_data_put(sb->s_fs_info); | ||
1542 | } | 1541 | } |
1543 | } | 1542 | } |
1544 | 1543 | ||
@@ -1855,44 +1854,20 @@ static int ffs_func_eps_enable(struct ffs_function *func) | |||
1855 | 1854 | ||
1856 | spin_lock_irqsave(&func->ffs->eps_lock, flags); | 1855 | spin_lock_irqsave(&func->ffs->eps_lock, flags); |
1857 | while(count--) { | 1856 | while(count--) { |
1858 | struct usb_endpoint_descriptor *ds; | ||
1859 | struct usb_ss_ep_comp_descriptor *comp_desc = NULL; | ||
1860 | int needs_comp_desc = false; | ||
1861 | int desc_idx; | ||
1862 | |||
1863 | if (ffs->gadget->speed == USB_SPEED_SUPER) { | ||
1864 | desc_idx = 2; | ||
1865 | needs_comp_desc = true; | ||
1866 | } else if (ffs->gadget->speed == USB_SPEED_HIGH) | ||
1867 | desc_idx = 1; | ||
1868 | else | ||
1869 | desc_idx = 0; | ||
1870 | |||
1871 | /* fall-back to lower speed if desc missing for current speed */ | ||
1872 | do { | ||
1873 | ds = ep->descs[desc_idx]; | ||
1874 | } while (!ds && --desc_idx >= 0); | ||
1875 | |||
1876 | if (!ds) { | ||
1877 | ret = -EINVAL; | ||
1878 | break; | ||
1879 | } | ||
1880 | |||
1881 | ep->ep->driver_data = ep; | 1857 | ep->ep->driver_data = ep; |
1882 | ep->ep->desc = ds; | ||
1883 | 1858 | ||
1884 | if (needs_comp_desc) { | 1859 | ret = config_ep_by_speed(func->gadget, &func->function, ep->ep); |
1885 | comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + | 1860 | if (ret) { |
1886 | USB_DT_ENDPOINT_SIZE); | 1861 | pr_err("%s: config_ep_by_speed(%s) returned %d\n", |
1887 | ep->ep->maxburst = comp_desc->bMaxBurst + 1; | 1862 | __func__, ep->ep->name, ret); |
1888 | ep->ep->comp_desc = comp_desc; | 1863 | break; |
1889 | } | 1864 | } |
1890 | 1865 | ||
1891 | ret = usb_ep_enable(ep->ep); | 1866 | ret = usb_ep_enable(ep->ep); |
1892 | if (likely(!ret)) { | 1867 | if (likely(!ret)) { |
1893 | epfile->ep = ep; | 1868 | epfile->ep = ep; |
1894 | epfile->in = usb_endpoint_dir_in(ds); | 1869 | epfile->in = usb_endpoint_dir_in(ep->ep->desc); |
1895 | epfile->isoc = usb_endpoint_xfer_isoc(ds); | 1870 | epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc); |
1896 | } else { | 1871 | } else { |
1897 | break; | 1872 | break; |
1898 | } | 1873 | } |
@@ -2979,10 +2954,8 @@ static int _ffs_func_bind(struct usb_configuration *c, | |||
2979 | struct ffs_data *ffs = func->ffs; | 2954 | struct ffs_data *ffs = func->ffs; |
2980 | 2955 | ||
2981 | const int full = !!func->ffs->fs_descs_count; | 2956 | const int full = !!func->ffs->fs_descs_count; |
2982 | const int high = gadget_is_dualspeed(func->gadget) && | 2957 | const int high = !!func->ffs->hs_descs_count; |
2983 | func->ffs->hs_descs_count; | 2958 | const int super = !!func->ffs->ss_descs_count; |
2984 | const int super = gadget_is_superspeed(func->gadget) && | ||
2985 | func->ffs->ss_descs_count; | ||
2986 | 2959 | ||
2987 | int fs_len, hs_len, ss_len, ret, i; | 2960 | int fs_len, hs_len, ss_len, ret, i; |
2988 | struct ffs_ep *eps_ptr; | 2961 | struct ffs_ep *eps_ptr; |
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 11fe788b4308..d2dc1f00180b 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c | |||
@@ -524,6 +524,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) | |||
524 | dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); | 524 | dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); |
525 | return ret; | 525 | return ret; |
526 | } | 526 | } |
527 | iad_desc.bFirstInterface = ret; | ||
528 | |||
527 | std_ac_if_desc.bInterfaceNumber = ret; | 529 | std_ac_if_desc.bInterfaceNumber = ret; |
528 | uac2->ac_intf = ret; | 530 | uac2->ac_intf = ret; |
529 | uac2->ac_alt = 0; | 531 | uac2->ac_alt = 0; |
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index 1e9567091d86..0875d38476ee 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig | |||
@@ -274,7 +274,6 @@ config USB_SNP_UDC_PLAT | |||
274 | tristate "Synopsys USB 2.0 Device controller" | 274 | tristate "Synopsys USB 2.0 Device controller" |
275 | depends on USB_GADGET && OF && HAS_DMA | 275 | depends on USB_GADGET && OF && HAS_DMA |
276 | depends on EXTCON || EXTCON=n | 276 | depends on EXTCON || EXTCON=n |
277 | select USB_GADGET_DUALSPEED | ||
278 | select USB_SNP_CORE | 277 | select USB_SNP_CORE |
279 | default ARCH_BCM_IPROC | 278 | default ARCH_BCM_IPROC |
280 | help | 279 | help |
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c index 1e940f054cb8..6dbc489513cd 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_pci.c +++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c | |||
@@ -77,6 +77,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) | |||
77 | if (ret) { | 77 | if (ret) { |
78 | dev_err(&pci->dev, | 78 | dev_err(&pci->dev, |
79 | "couldn't add resources to bdc device\n"); | 79 | "couldn't add resources to bdc device\n"); |
80 | platform_device_put(bdc); | ||
80 | return ret; | 81 | return ret; |
81 | } | 82 | } |
82 | 83 | ||
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 859d5b11ba4c..1f8b19d9cf97 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c | |||
@@ -180,8 +180,8 @@ EXPORT_SYMBOL_GPL(usb_ep_alloc_request); | |||
180 | void usb_ep_free_request(struct usb_ep *ep, | 180 | void usb_ep_free_request(struct usb_ep *ep, |
181 | struct usb_request *req) | 181 | struct usb_request *req) |
182 | { | 182 | { |
183 | ep->ops->free_request(ep, req); | ||
184 | trace_usb_ep_free_request(ep, req, 0); | 183 | trace_usb_ep_free_request(ep, req, 0); |
184 | ep->ops->free_request(ep, req); | ||
185 | } | 185 | } |
186 | EXPORT_SYMBOL_GPL(usb_ep_free_request); | 186 | EXPORT_SYMBOL_GPL(usb_ep_free_request); |
187 | 187 | ||
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index e5b4ee96c4bf..56b517a38865 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c | |||
@@ -1305,7 +1305,7 @@ static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe) | |||
1305 | { | 1305 | { |
1306 | struct fsl_ep *ep = get_ep_by_pipe(udc, pipe); | 1306 | struct fsl_ep *ep = get_ep_by_pipe(udc, pipe); |
1307 | 1307 | ||
1308 | if (ep->name) | 1308 | if (ep->ep.name) |
1309 | nuke(ep, -ESHUTDOWN); | 1309 | nuke(ep, -ESHUTDOWN); |
1310 | } | 1310 | } |
1311 | 1311 | ||
@@ -1693,7 +1693,7 @@ static void dtd_complete_irq(struct fsl_udc *udc) | |||
1693 | curr_ep = get_ep_by_pipe(udc, i); | 1693 | curr_ep = get_ep_by_pipe(udc, i); |
1694 | 1694 | ||
1695 | /* If the ep is configured */ | 1695 | /* If the ep is configured */ |
1696 | if (curr_ep->name == NULL) { | 1696 | if (!curr_ep->ep.name) { |
1697 | WARNING("Invalid EP?"); | 1697 | WARNING("Invalid EP?"); |
1698 | continue; | 1698 | continue; |
1699 | } | 1699 | } |
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 6e87af248367..409cde4e6a51 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c | |||
@@ -2410,7 +2410,7 @@ static int renesas_usb3_remove(struct platform_device *pdev) | |||
2410 | __renesas_usb3_ep_free_request(usb3->ep0_req); | 2410 | __renesas_usb3_ep_free_request(usb3->ep0_req); |
2411 | if (usb3->phy) | 2411 | if (usb3->phy) |
2412 | phy_put(usb3->phy); | 2412 | phy_put(usb3->phy); |
2413 | pm_runtime_disable(usb3_to_dev(usb3)); | 2413 | pm_runtime_disable(&pdev->dev); |
2414 | 2414 | ||
2415 | return 0; | 2415 | return 0; |
2416 | } | 2416 | } |
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 6150bed7cfa8..4fcfb3084b36 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig | |||
@@ -633,14 +633,6 @@ config USB_UHCI_ASPEED | |||
633 | bool | 633 | bool |
634 | default y if ARCH_ASPEED | 634 | default y if ARCH_ASPEED |
635 | 635 | ||
636 | config USB_UHCI_BIG_ENDIAN_MMIO | ||
637 | bool | ||
638 | default y if SPARC_LEON | ||
639 | |||
640 | config USB_UHCI_BIG_ENDIAN_DESC | ||
641 | bool | ||
642 | default y if SPARC_LEON | ||
643 | |||
644 | config USB_FHCI_HCD | 636 | config USB_FHCI_HCD |
645 | tristate "Freescale QE USB Host Controller support" | 637 | tristate "Freescale QE USB Host Controller support" |
646 | depends on OF_GPIO && QE_GPIO && QUICC_ENGINE | 638 | depends on OF_GPIO && QE_GPIO && QUICC_ENGINE |
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index facafdf8fb95..d7641cbdee43 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -774,12 +774,12 @@ static struct urb *request_single_step_set_feature_urb( | |||
774 | atomic_inc(&urb->use_count); | 774 | atomic_inc(&urb->use_count); |
775 | atomic_inc(&urb->dev->urbnum); | 775 | atomic_inc(&urb->dev->urbnum); |
776 | urb->setup_dma = dma_map_single( | 776 | urb->setup_dma = dma_map_single( |
777 | hcd->self.controller, | 777 | hcd->self.sysdev, |
778 | urb->setup_packet, | 778 | urb->setup_packet, |
779 | sizeof(struct usb_ctrlrequest), | 779 | sizeof(struct usb_ctrlrequest), |
780 | DMA_TO_DEVICE); | 780 | DMA_TO_DEVICE); |
781 | urb->transfer_dma = dma_map_single( | 781 | urb->transfer_dma = dma_map_single( |
782 | hcd->self.controller, | 782 | hcd->self.sysdev, |
783 | urb->transfer_buffer, | 783 | urb->transfer_buffer, |
784 | urb->transfer_buffer_length, | 784 | urb->transfer_buffer_length, |
785 | DMA_FROM_DEVICE); | 785 | DMA_FROM_DEVICE); |
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 88158324dcae..327630405695 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
@@ -1188,10 +1188,10 @@ static int submit_single_step_set_feature( | |||
1188 | * 15 secs after the setup | 1188 | * 15 secs after the setup |
1189 | */ | 1189 | */ |
1190 | if (is_setup) { | 1190 | if (is_setup) { |
1191 | /* SETUP pid */ | 1191 | /* SETUP pid, and interrupt after SETUP completion */ |
1192 | qtd_fill(ehci, qtd, urb->setup_dma, | 1192 | qtd_fill(ehci, qtd, urb->setup_dma, |
1193 | sizeof(struct usb_ctrlrequest), | 1193 | sizeof(struct usb_ctrlrequest), |
1194 | token | (2 /* "setup" */ << 8), 8); | 1194 | QTD_IOC | token | (2 /* "setup" */ << 8), 8); |
1195 | 1195 | ||
1196 | submit_async(ehci, urb, &qtd_list, GFP_ATOMIC); | 1196 | submit_async(ehci, urb, &qtd_list, GFP_ATOMIC); |
1197 | return 0; /*Return now; we shall come back after 15 seconds*/ | 1197 | return 0; /*Return now; we shall come back after 15 seconds*/ |
@@ -1228,12 +1228,8 @@ static int submit_single_step_set_feature( | |||
1228 | qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); | 1228 | qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); |
1229 | list_add_tail(&qtd->qtd_list, head); | 1229 | list_add_tail(&qtd->qtd_list, head); |
1230 | 1230 | ||
1231 | /* dont fill any data in such packets */ | 1231 | /* Interrupt after STATUS completion */ |
1232 | qtd_fill(ehci, qtd, 0, 0, token, 0); | 1232 | qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0); |
1233 | |||
1234 | /* by default, enable interrupt on urb completion */ | ||
1235 | if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT))) | ||
1236 | qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC); | ||
1237 | 1233 | ||
1238 | submit_async(ehci, urb, &qtd_list, GFP_KERNEL); | 1234 | submit_async(ehci, urb, &qtd_list, GFP_KERNEL); |
1239 | 1235 | ||
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index ee9676349333..d088c340e4d0 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c | |||
@@ -74,6 +74,7 @@ static const char hcd_name [] = "ohci_hcd"; | |||
74 | 74 | ||
75 | #define STATECHANGE_DELAY msecs_to_jiffies(300) | 75 | #define STATECHANGE_DELAY msecs_to_jiffies(300) |
76 | #define IO_WATCHDOG_DELAY msecs_to_jiffies(275) | 76 | #define IO_WATCHDOG_DELAY msecs_to_jiffies(275) |
77 | #define IO_WATCHDOG_OFF 0xffffff00 | ||
77 | 78 | ||
78 | #include "ohci.h" | 79 | #include "ohci.h" |
79 | #include "pci-quirks.h" | 80 | #include "pci-quirks.h" |
@@ -231,7 +232,7 @@ static int ohci_urb_enqueue ( | |||
231 | } | 232 | } |
232 | 233 | ||
233 | /* Start up the I/O watchdog timer, if it's not running */ | 234 | /* Start up the I/O watchdog timer, if it's not running */ |
234 | if (!timer_pending(&ohci->io_watchdog) && | 235 | if (ohci->prev_frame_no == IO_WATCHDOG_OFF && |
235 | list_empty(&ohci->eds_in_use) && | 236 | list_empty(&ohci->eds_in_use) && |
236 | !(ohci->flags & OHCI_QUIRK_QEMU)) { | 237 | !(ohci->flags & OHCI_QUIRK_QEMU)) { |
237 | ohci->prev_frame_no = ohci_frame_no(ohci); | 238 | ohci->prev_frame_no = ohci_frame_no(ohci); |
@@ -446,7 +447,8 @@ static int ohci_init (struct ohci_hcd *ohci) | |||
446 | struct usb_hcd *hcd = ohci_to_hcd(ohci); | 447 | struct usb_hcd *hcd = ohci_to_hcd(ohci); |
447 | 448 | ||
448 | /* Accept arbitrarily long scatter-gather lists */ | 449 | /* Accept arbitrarily long scatter-gather lists */ |
449 | hcd->self.sg_tablesize = ~0; | 450 | if (!(hcd->driver->flags & HCD_LOCAL_MEM)) |
451 | hcd->self.sg_tablesize = ~0; | ||
450 | 452 | ||
451 | if (distrust_firmware) | 453 | if (distrust_firmware) |
452 | ohci->flags |= OHCI_QUIRK_HUB_POWER; | 454 | ohci->flags |= OHCI_QUIRK_HUB_POWER; |
@@ -501,6 +503,7 @@ static int ohci_init (struct ohci_hcd *ohci) | |||
501 | return 0; | 503 | return 0; |
502 | 504 | ||
503 | timer_setup(&ohci->io_watchdog, io_watchdog_func, 0); | 505 | timer_setup(&ohci->io_watchdog, io_watchdog_func, 0); |
506 | ohci->prev_frame_no = IO_WATCHDOG_OFF; | ||
504 | 507 | ||
505 | ohci->hcca = dma_alloc_coherent (hcd->self.controller, | 508 | ohci->hcca = dma_alloc_coherent (hcd->self.controller, |
506 | sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL); | 509 | sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL); |
@@ -730,7 +733,7 @@ static void io_watchdog_func(struct timer_list *t) | |||
730 | u32 head; | 733 | u32 head; |
731 | struct ed *ed; | 734 | struct ed *ed; |
732 | struct td *td, *td_start, *td_next; | 735 | struct td *td, *td_start, *td_next; |
733 | unsigned frame_no; | 736 | unsigned frame_no, prev_frame_no = IO_WATCHDOG_OFF; |
734 | unsigned long flags; | 737 | unsigned long flags; |
735 | 738 | ||
736 | spin_lock_irqsave(&ohci->lock, flags); | 739 | spin_lock_irqsave(&ohci->lock, flags); |
@@ -835,7 +838,7 @@ static void io_watchdog_func(struct timer_list *t) | |||
835 | } | 838 | } |
836 | } | 839 | } |
837 | if (!list_empty(&ohci->eds_in_use)) { | 840 | if (!list_empty(&ohci->eds_in_use)) { |
838 | ohci->prev_frame_no = frame_no; | 841 | prev_frame_no = frame_no; |
839 | ohci->prev_wdh_cnt = ohci->wdh_cnt; | 842 | ohci->prev_wdh_cnt = ohci->wdh_cnt; |
840 | ohci->prev_donehead = ohci_readl(ohci, | 843 | ohci->prev_donehead = ohci_readl(ohci, |
841 | &ohci->regs->donehead); | 844 | &ohci->regs->donehead); |
@@ -845,6 +848,7 @@ static void io_watchdog_func(struct timer_list *t) | |||
845 | } | 848 | } |
846 | 849 | ||
847 | done: | 850 | done: |
851 | ohci->prev_frame_no = prev_frame_no; | ||
848 | spin_unlock_irqrestore(&ohci->lock, flags); | 852 | spin_unlock_irqrestore(&ohci->lock, flags); |
849 | } | 853 | } |
850 | 854 | ||
@@ -973,6 +977,7 @@ static void ohci_stop (struct usb_hcd *hcd) | |||
973 | if (quirk_nec(ohci)) | 977 | if (quirk_nec(ohci)) |
974 | flush_work(&ohci->nec_work); | 978 | flush_work(&ohci->nec_work); |
975 | del_timer_sync(&ohci->io_watchdog); | 979 | del_timer_sync(&ohci->io_watchdog); |
980 | ohci->prev_frame_no = IO_WATCHDOG_OFF; | ||
976 | 981 | ||
977 | ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); | 982 | ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); |
978 | ohci_usb_reset(ohci); | 983 | ohci_usb_reset(ohci); |
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index fb7aaa3b9d06..634f3c7bf774 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c | |||
@@ -311,8 +311,10 @@ static int ohci_bus_suspend (struct usb_hcd *hcd) | |||
311 | rc = ohci_rh_suspend (ohci, 0); | 311 | rc = ohci_rh_suspend (ohci, 0); |
312 | spin_unlock_irq (&ohci->lock); | 312 | spin_unlock_irq (&ohci->lock); |
313 | 313 | ||
314 | if (rc == 0) | 314 | if (rc == 0) { |
315 | del_timer_sync(&ohci->io_watchdog); | 315 | del_timer_sync(&ohci->io_watchdog); |
316 | ohci->prev_frame_no = IO_WATCHDOG_OFF; | ||
317 | } | ||
316 | return rc; | 318 | return rc; |
317 | } | 319 | } |
318 | 320 | ||
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index b2ec8c399363..4ccb85a67bb3 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c | |||
@@ -1019,6 +1019,8 @@ skip_ed: | |||
1019 | * have modified this list. normally it's just prepending | 1019 | * have modified this list. normally it's just prepending |
1020 | * entries (which we'd ignore), but paranoia won't hurt. | 1020 | * entries (which we'd ignore), but paranoia won't hurt. |
1021 | */ | 1021 | */ |
1022 | *last = ed->ed_next; | ||
1023 | ed->ed_next = NULL; | ||
1022 | modified = 0; | 1024 | modified = 0; |
1023 | 1025 | ||
1024 | /* unlink urbs as requested, but rescan the list after | 1026 | /* unlink urbs as requested, but rescan the list after |
@@ -1077,21 +1079,22 @@ rescan_this: | |||
1077 | goto rescan_this; | 1079 | goto rescan_this; |
1078 | 1080 | ||
1079 | /* | 1081 | /* |
1080 | * If no TDs are queued, take ED off the ed_rm_list. | 1082 | * If no TDs are queued, ED is now idle. |
1081 | * Otherwise, if the HC is running, reschedule. | 1083 | * Otherwise, if the HC is running, reschedule. |
1082 | * If not, leave it on the list for further dequeues. | 1084 | * If the HC isn't running, add ED back to the |
1085 | * start of the list for later processing. | ||
1083 | */ | 1086 | */ |
1084 | if (list_empty(&ed->td_list)) { | 1087 | if (list_empty(&ed->td_list)) { |
1085 | *last = ed->ed_next; | ||
1086 | ed->ed_next = NULL; | ||
1087 | ed->state = ED_IDLE; | 1088 | ed->state = ED_IDLE; |
1088 | list_del(&ed->in_use_list); | 1089 | list_del(&ed->in_use_list); |
1089 | } else if (ohci->rh_state == OHCI_RH_RUNNING) { | 1090 | } else if (ohci->rh_state == OHCI_RH_RUNNING) { |
1090 | *last = ed->ed_next; | ||
1091 | ed->ed_next = NULL; | ||
1092 | ed_schedule(ohci, ed); | 1091 | ed_schedule(ohci, ed); |
1093 | } else { | 1092 | } else { |
1094 | last = &ed->ed_next; | 1093 | ed->ed_next = ohci->ed_rm_list; |
1094 | ohci->ed_rm_list = ed; | ||
1095 | /* Don't loop on the same ED */ | ||
1096 | if (last == &ohci->ed_rm_list) | ||
1097 | last = &ed->ed_next; | ||
1095 | } | 1098 | } |
1096 | 1099 | ||
1097 | if (modified) | 1100 | if (modified) |
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 161536717025..67ad4bb6919a 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
@@ -66,6 +66,23 @@ | |||
66 | #define AX_INDXC 0x30 | 66 | #define AX_INDXC 0x30 |
67 | #define AX_DATAC 0x34 | 67 | #define AX_DATAC 0x34 |
68 | 68 | ||
69 | #define PT_ADDR_INDX 0xE8 | ||
70 | #define PT_READ_INDX 0xE4 | ||
71 | #define PT_SIG_1_ADDR 0xA520 | ||
72 | #define PT_SIG_2_ADDR 0xA521 | ||
73 | #define PT_SIG_3_ADDR 0xA522 | ||
74 | #define PT_SIG_4_ADDR 0xA523 | ||
75 | #define PT_SIG_1_DATA 0x78 | ||
76 | #define PT_SIG_2_DATA 0x56 | ||
77 | #define PT_SIG_3_DATA 0x34 | ||
78 | #define PT_SIG_4_DATA 0x12 | ||
79 | #define PT4_P1_REG 0xB521 | ||
80 | #define PT4_P2_REG 0xB522 | ||
81 | #define PT2_P1_REG 0xD520 | ||
82 | #define PT2_P2_REG 0xD521 | ||
83 | #define PT1_P1_REG 0xD522 | ||
84 | #define PT1_P2_REG 0xD523 | ||
85 | |||
69 | #define NB_PCIE_INDX_ADDR 0xe0 | 86 | #define NB_PCIE_INDX_ADDR 0xe0 |
70 | #define NB_PCIE_INDX_DATA 0xe4 | 87 | #define NB_PCIE_INDX_DATA 0xe4 |
71 | #define PCIE_P_CNTL 0x10040 | 88 | #define PCIE_P_CNTL 0x10040 |
@@ -513,6 +530,98 @@ void usb_amd_dev_put(void) | |||
513 | EXPORT_SYMBOL_GPL(usb_amd_dev_put); | 530 | EXPORT_SYMBOL_GPL(usb_amd_dev_put); |
514 | 531 | ||
515 | /* | 532 | /* |
533 | * Check if port is disabled in BIOS on AMD Promontory host. | ||
534 | * BIOS Disabled ports may wake on connect/disconnect and need | ||
535 | * driver workaround to keep them disabled. | ||
536 | * Returns true if port is marked disabled. | ||
537 | */ | ||
538 | bool usb_amd_pt_check_port(struct device *device, int port) | ||
539 | { | ||
540 | unsigned char value, port_shift; | ||
541 | struct pci_dev *pdev; | ||
542 | u16 reg; | ||
543 | |||
544 | pdev = to_pci_dev(device); | ||
545 | pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR); | ||
546 | |||
547 | pci_read_config_byte(pdev, PT_READ_INDX, &value); | ||
548 | if (value != PT_SIG_1_DATA) | ||
549 | return false; | ||
550 | |||
551 | pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR); | ||
552 | |||
553 | pci_read_config_byte(pdev, PT_READ_INDX, &value); | ||
554 | if (value != PT_SIG_2_DATA) | ||
555 | return false; | ||
556 | |||
557 | pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR); | ||
558 | |||
559 | pci_read_config_byte(pdev, PT_READ_INDX, &value); | ||
560 | if (value != PT_SIG_3_DATA) | ||
561 | return false; | ||
562 | |||
563 | pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR); | ||
564 | |||
565 | pci_read_config_byte(pdev, PT_READ_INDX, &value); | ||
566 | if (value != PT_SIG_4_DATA) | ||
567 | return false; | ||
568 | |||
569 | /* Check disabled port setting, if bit is set port is enabled */ | ||
570 | switch (pdev->device) { | ||
571 | case 0x43b9: | ||
572 | case 0x43ba: | ||
573 | /* | ||
574 | * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba) | ||
575 | * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0 | ||
576 | * PT4_P2_REG bits[6..0] represents ports 13 to 7 | ||
577 | */ | ||
578 | if (port > 6) { | ||
579 | reg = PT4_P2_REG; | ||
580 | port_shift = port - 7; | ||
581 | } else { | ||
582 | reg = PT4_P1_REG; | ||
583 | port_shift = port + 1; | ||
584 | } | ||
585 | break; | ||
586 | case 0x43bb: | ||
587 | /* | ||
588 | * device is AMD_PROMONTORYA_2(0x43bb) | ||
589 | * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0 | ||
590 | * PT2_P2_REG bits[5..0] represents ports 9 to 3 | ||
591 | */ | ||
592 | if (port > 2) { | ||
593 | reg = PT2_P2_REG; | ||
594 | port_shift = port - 3; | ||
595 | } else { | ||
596 | reg = PT2_P1_REG; | ||
597 | port_shift = port + 5; | ||
598 | } | ||
599 | break; | ||
600 | case 0x43bc: | ||
601 | /* | ||
602 | * device is AMD_PROMONTORYA_1(0x43bc) | ||
603 | * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0 | ||
604 | * PT1_P2_REG[5..0] represents ports 9 to 4 | ||
605 | */ | ||
606 | if (port > 3) { | ||
607 | reg = PT1_P2_REG; | ||
608 | port_shift = port - 4; | ||
609 | } else { | ||
610 | reg = PT1_P1_REG; | ||
611 | port_shift = port + 4; | ||
612 | } | ||
613 | break; | ||
614 | default: | ||
615 | return false; | ||
616 | } | ||
617 | pci_write_config_word(pdev, PT_ADDR_INDX, reg); | ||
618 | pci_read_config_byte(pdev, PT_READ_INDX, &value); | ||
619 | |||
620 | return !(value & BIT(port_shift)); | ||
621 | } | ||
622 | EXPORT_SYMBOL_GPL(usb_amd_pt_check_port); | ||
623 | |||
624 | /* | ||
516 | * Make sure the controller is completely inactive, unable to | 625 | * Make sure the controller is completely inactive, unable to |
517 | * generate interrupts or do DMA. | 626 | * generate interrupts or do DMA. |
518 | */ | 627 | */ |
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index b68dcb5dd0fd..4ca0d9b7e463 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h | |||
@@ -17,6 +17,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); | |||
17 | void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); | 17 | void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); |
18 | void sb800_prefetch(struct device *dev, int on); | 18 | void sb800_prefetch(struct device *dev, int on); |
19 | bool usb_xhci_needs_pci_reset(struct pci_dev *pdev); | 19 | bool usb_xhci_needs_pci_reset(struct pci_dev *pdev); |
20 | bool usb_amd_pt_check_port(struct device *device, int port); | ||
20 | #else | 21 | #else |
21 | struct pci_dev; | 22 | struct pci_dev; |
22 | static inline void usb_amd_quirk_pll_disable(void) {} | 23 | static inline void usb_amd_quirk_pll_disable(void) {} |
@@ -25,6 +26,10 @@ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {} | |||
25 | static inline void usb_amd_dev_put(void) {} | 26 | static inline void usb_amd_dev_put(void) {} |
26 | static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} | 27 | static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} |
27 | static inline void sb800_prefetch(struct device *dev, int on) {} | 28 | static inline void sb800_prefetch(struct device *dev, int on) {} |
29 | static inline bool usb_amd_pt_check_port(struct device *device, int port) | ||
30 | { | ||
31 | return false; | ||
32 | } | ||
28 | #endif /* CONFIG_USB_PCI */ | 33 | #endif /* CONFIG_USB_PCI */ |
29 | 34 | ||
30 | #endif /* __LINUX_USB_PCI_QUIRKS_H */ | 35 | #endif /* __LINUX_USB_PCI_QUIRKS_H */ |
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index a1ab8acf39ba..c359bae7b754 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c | |||
@@ -328,13 +328,14 @@ dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req) | |||
328 | int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req, | 328 | int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req, |
329 | gfp_t gfp_flags) | 329 | gfp_t gfp_flags) |
330 | { | 330 | { |
331 | unsigned long flags; | ||
331 | struct xhci_dbc *dbc = dep->dbc; | 332 | struct xhci_dbc *dbc = dep->dbc; |
332 | int ret = -ESHUTDOWN; | 333 | int ret = -ESHUTDOWN; |
333 | 334 | ||
334 | spin_lock(&dbc->lock); | 335 | spin_lock_irqsave(&dbc->lock, flags); |
335 | if (dbc->state == DS_CONFIGURED) | 336 | if (dbc->state == DS_CONFIGURED) |
336 | ret = dbc_ep_do_queue(dep, req); | 337 | ret = dbc_ep_do_queue(dep, req); |
337 | spin_unlock(&dbc->lock); | 338 | spin_unlock_irqrestore(&dbc->lock, flags); |
338 | 339 | ||
339 | mod_delayed_work(system_wq, &dbc->event_work, 0); | 340 | mod_delayed_work(system_wq, &dbc->event_work, 0); |
340 | 341 | ||
@@ -521,15 +522,16 @@ static void xhci_do_dbc_stop(struct xhci_hcd *xhci) | |||
521 | static int xhci_dbc_start(struct xhci_hcd *xhci) | 522 | static int xhci_dbc_start(struct xhci_hcd *xhci) |
522 | { | 523 | { |
523 | int ret; | 524 | int ret; |
525 | unsigned long flags; | ||
524 | struct xhci_dbc *dbc = xhci->dbc; | 526 | struct xhci_dbc *dbc = xhci->dbc; |
525 | 527 | ||
526 | WARN_ON(!dbc); | 528 | WARN_ON(!dbc); |
527 | 529 | ||
528 | pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller); | 530 | pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller); |
529 | 531 | ||
530 | spin_lock(&dbc->lock); | 532 | spin_lock_irqsave(&dbc->lock, flags); |
531 | ret = xhci_do_dbc_start(xhci); | 533 | ret = xhci_do_dbc_start(xhci); |
532 | spin_unlock(&dbc->lock); | 534 | spin_unlock_irqrestore(&dbc->lock, flags); |
533 | 535 | ||
534 | if (ret) { | 536 | if (ret) { |
535 | pm_runtime_put(xhci_to_hcd(xhci)->self.controller); | 537 | pm_runtime_put(xhci_to_hcd(xhci)->self.controller); |
@@ -541,6 +543,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci) | |||
541 | 543 | ||
542 | static void xhci_dbc_stop(struct xhci_hcd *xhci) | 544 | static void xhci_dbc_stop(struct xhci_hcd *xhci) |
543 | { | 545 | { |
546 | unsigned long flags; | ||
544 | struct xhci_dbc *dbc = xhci->dbc; | 547 | struct xhci_dbc *dbc = xhci->dbc; |
545 | struct dbc_port *port = &dbc->port; | 548 | struct dbc_port *port = &dbc->port; |
546 | 549 | ||
@@ -551,9 +554,9 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci) | |||
551 | if (port->registered) | 554 | if (port->registered) |
552 | xhci_dbc_tty_unregister_device(xhci); | 555 | xhci_dbc_tty_unregister_device(xhci); |
553 | 556 | ||
554 | spin_lock(&dbc->lock); | 557 | spin_lock_irqsave(&dbc->lock, flags); |
555 | xhci_do_dbc_stop(xhci); | 558 | xhci_do_dbc_stop(xhci); |
556 | spin_unlock(&dbc->lock); | 559 | spin_unlock_irqrestore(&dbc->lock, flags); |
557 | 560 | ||
558 | pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); | 561 | pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); |
559 | } | 562 | } |
@@ -779,14 +782,15 @@ static void xhci_dbc_handle_events(struct work_struct *work) | |||
779 | int ret; | 782 | int ret; |
780 | enum evtreturn evtr; | 783 | enum evtreturn evtr; |
781 | struct xhci_dbc *dbc; | 784 | struct xhci_dbc *dbc; |
785 | unsigned long flags; | ||
782 | struct xhci_hcd *xhci; | 786 | struct xhci_hcd *xhci; |
783 | 787 | ||
784 | dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work); | 788 | dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work); |
785 | xhci = dbc->xhci; | 789 | xhci = dbc->xhci; |
786 | 790 | ||
787 | spin_lock(&dbc->lock); | 791 | spin_lock_irqsave(&dbc->lock, flags); |
788 | evtr = xhci_dbc_do_handle_events(dbc); | 792 | evtr = xhci_dbc_do_handle_events(dbc); |
789 | spin_unlock(&dbc->lock); | 793 | spin_unlock_irqrestore(&dbc->lock, flags); |
790 | 794 | ||
791 | switch (evtr) { | 795 | switch (evtr) { |
792 | case EVT_GSER: | 796 | case EVT_GSER: |
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index 8d47b6fbf973..75f0b92694ba 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c | |||
@@ -92,21 +92,23 @@ static void dbc_start_rx(struct dbc_port *port) | |||
92 | static void | 92 | static void |
93 | dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req) | 93 | dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req) |
94 | { | 94 | { |
95 | unsigned long flags; | ||
95 | struct xhci_dbc *dbc = xhci->dbc; | 96 | struct xhci_dbc *dbc = xhci->dbc; |
96 | struct dbc_port *port = &dbc->port; | 97 | struct dbc_port *port = &dbc->port; |
97 | 98 | ||
98 | spin_lock(&port->port_lock); | 99 | spin_lock_irqsave(&port->port_lock, flags); |
99 | list_add_tail(&req->list_pool, &port->read_queue); | 100 | list_add_tail(&req->list_pool, &port->read_queue); |
100 | tasklet_schedule(&port->push); | 101 | tasklet_schedule(&port->push); |
101 | spin_unlock(&port->port_lock); | 102 | spin_unlock_irqrestore(&port->port_lock, flags); |
102 | } | 103 | } |
103 | 104 | ||
104 | static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req) | 105 | static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req) |
105 | { | 106 | { |
107 | unsigned long flags; | ||
106 | struct xhci_dbc *dbc = xhci->dbc; | 108 | struct xhci_dbc *dbc = xhci->dbc; |
107 | struct dbc_port *port = &dbc->port; | 109 | struct dbc_port *port = &dbc->port; |
108 | 110 | ||
109 | spin_lock(&port->port_lock); | 111 | spin_lock_irqsave(&port->port_lock, flags); |
110 | list_add(&req->list_pool, &port->write_pool); | 112 | list_add(&req->list_pool, &port->write_pool); |
111 | switch (req->status) { | 113 | switch (req->status) { |
112 | case 0: | 114 | case 0: |
@@ -119,7 +121,7 @@ static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req) | |||
119 | req->status); | 121 | req->status); |
120 | break; | 122 | break; |
121 | } | 123 | } |
122 | spin_unlock(&port->port_lock); | 124 | spin_unlock_irqrestore(&port->port_lock, flags); |
123 | } | 125 | } |
124 | 126 | ||
125 | static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req) | 127 | static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req) |
@@ -327,12 +329,13 @@ static void dbc_rx_push(unsigned long _port) | |||
327 | { | 329 | { |
328 | struct dbc_request *req; | 330 | struct dbc_request *req; |
329 | struct tty_struct *tty; | 331 | struct tty_struct *tty; |
332 | unsigned long flags; | ||
330 | bool do_push = false; | 333 | bool do_push = false; |
331 | bool disconnect = false; | 334 | bool disconnect = false; |
332 | struct dbc_port *port = (void *)_port; | 335 | struct dbc_port *port = (void *)_port; |
333 | struct list_head *queue = &port->read_queue; | 336 | struct list_head *queue = &port->read_queue; |
334 | 337 | ||
335 | spin_lock_irq(&port->port_lock); | 338 | spin_lock_irqsave(&port->port_lock, flags); |
336 | tty = port->port.tty; | 339 | tty = port->port.tty; |
337 | while (!list_empty(queue)) { | 340 | while (!list_empty(queue)) { |
338 | req = list_first_entry(queue, struct dbc_request, list_pool); | 341 | req = list_first_entry(queue, struct dbc_request, list_pool); |
@@ -392,16 +395,17 @@ static void dbc_rx_push(unsigned long _port) | |||
392 | if (!disconnect) | 395 | if (!disconnect) |
393 | dbc_start_rx(port); | 396 | dbc_start_rx(port); |
394 | 397 | ||
395 | spin_unlock_irq(&port->port_lock); | 398 | spin_unlock_irqrestore(&port->port_lock, flags); |
396 | } | 399 | } |
397 | 400 | ||
398 | static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty) | 401 | static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty) |
399 | { | 402 | { |
403 | unsigned long flags; | ||
400 | struct dbc_port *port = container_of(_port, struct dbc_port, port); | 404 | struct dbc_port *port = container_of(_port, struct dbc_port, port); |
401 | 405 | ||
402 | spin_lock_irq(&port->port_lock); | 406 | spin_lock_irqsave(&port->port_lock, flags); |
403 | dbc_start_rx(port); | 407 | dbc_start_rx(port); |
404 | spin_unlock_irq(&port->port_lock); | 408 | spin_unlock_irqrestore(&port->port_lock, flags); |
405 | 409 | ||
406 | return 0; | 410 | return 0; |
407 | } | 411 | } |
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index e26e685d8a57..5851052d4668 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c | |||
@@ -211,7 +211,7 @@ static void xhci_ring_dump_segment(struct seq_file *s, | |||
211 | static int xhci_ring_trb_show(struct seq_file *s, void *unused) | 211 | static int xhci_ring_trb_show(struct seq_file *s, void *unused) |
212 | { | 212 | { |
213 | int i; | 213 | int i; |
214 | struct xhci_ring *ring = s->private; | 214 | struct xhci_ring *ring = *(struct xhci_ring **)s->private; |
215 | struct xhci_segment *seg = ring->first_seg; | 215 | struct xhci_segment *seg = ring->first_seg; |
216 | 216 | ||
217 | for (i = 0; i < ring->num_segs; i++) { | 217 | for (i = 0; i < ring->num_segs; i++) { |
@@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci, | |||
387 | 387 | ||
388 | snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index); | 388 | snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index); |
389 | epriv->root = xhci_debugfs_create_ring_dir(xhci, | 389 | epriv->root = xhci_debugfs_create_ring_dir(xhci, |
390 | &dev->eps[ep_index].new_ring, | 390 | &dev->eps[ep_index].ring, |
391 | epriv->name, | 391 | epriv->name, |
392 | spriv->root); | 392 | spriv->root); |
393 | spriv->eps[ep_index] = epriv; | 393 | spriv->eps[ep_index] = epriv; |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 46d5e08f05f1..72ebbc908e19 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -1224,17 +1224,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, | |||
1224 | temp = readl(port_array[wIndex]); | 1224 | temp = readl(port_array[wIndex]); |
1225 | break; | 1225 | break; |
1226 | } | 1226 | } |
1227 | 1227 | /* Port must be enabled */ | |
1228 | /* Software should not attempt to set | 1228 | if (!(temp & PORT_PE)) { |
1229 | * port link state above '3' (U3) and the port | 1229 | retval = -ENODEV; |
1230 | * must be enabled. | 1230 | break; |
1231 | */ | 1231 | } |
1232 | if ((temp & PORT_PE) == 0 || | 1232 | /* Can't set port link state above '3' (U3) */ |
1233 | (link_state > USB_SS_PORT_LS_U3)) { | 1233 | if (link_state > USB_SS_PORT_LS_U3) { |
1234 | xhci_warn(xhci, "Cannot set link state.\n"); | 1234 | xhci_warn(xhci, "Cannot set port %d link state %d\n", |
1235 | wIndex, link_state); | ||
1235 | goto error; | 1236 | goto error; |
1236 | } | 1237 | } |
1237 | |||
1238 | if (link_state == USB_SS_PORT_LS_U3) { | 1238 | if (link_state == USB_SS_PORT_LS_U3) { |
1239 | slot_id = xhci_find_slot_id_by_port(hcd, xhci, | 1239 | slot_id = xhci_find_slot_id_by_port(hcd, xhci, |
1240 | wIndex + 1); | 1240 | wIndex + 1); |
@@ -1522,6 +1522,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd) | |||
1522 | t2 |= PORT_WKOC_E | PORT_WKCONN_E; | 1522 | t2 |= PORT_WKOC_E | PORT_WKCONN_E; |
1523 | t2 &= ~PORT_WKDISC_E; | 1523 | t2 &= ~PORT_WKDISC_E; |
1524 | } | 1524 | } |
1525 | |||
1526 | if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) && | ||
1527 | (hcd->speed < HCD_USB3)) { | ||
1528 | if (usb_amd_pt_check_port(hcd->self.controller, | ||
1529 | port_index)) | ||
1530 | t2 &= ~PORT_WAKE_BITS; | ||
1531 | } | ||
1525 | } else | 1532 | } else |
1526 | t2 &= ~PORT_WAKE_BITS; | 1533 | t2 &= ~PORT_WAKE_BITS; |
1527 | 1534 | ||
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 6c79037876db..d9f831b67e57 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -42,6 +42,10 @@ | |||
42 | #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 | 42 | #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 |
43 | #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 | 43 | #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 |
44 | 44 | ||
45 | #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 | ||
46 | #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba | ||
47 | #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb | ||
48 | #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc | ||
45 | #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 | 49 | #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 |
46 | 50 | ||
47 | static const char hcd_name[] = "xhci_hcd"; | 51 | static const char hcd_name[] = "xhci_hcd"; |
@@ -122,9 +126,19 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
122 | if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) | 126 | if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) |
123 | xhci->quirks |= XHCI_AMD_PLL_FIX; | 127 | xhci->quirks |= XHCI_AMD_PLL_FIX; |
124 | 128 | ||
129 | if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb) | ||
130 | xhci->quirks |= XHCI_SUSPEND_DELAY; | ||
131 | |||
125 | if (pdev->vendor == PCI_VENDOR_ID_AMD) | 132 | if (pdev->vendor == PCI_VENDOR_ID_AMD) |
126 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | 133 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; |
127 | 134 | ||
135 | if ((pdev->vendor == PCI_VENDOR_ID_AMD) && | ||
136 | ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) || | ||
137 | (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) || | ||
138 | (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) || | ||
139 | (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1))) | ||
140 | xhci->quirks |= XHCI_U2_DISABLE_WAKE; | ||
141 | |||
128 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { | 142 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { |
129 | xhci->quirks |= XHCI_LPM_SUPPORT; | 143 | xhci->quirks |= XHCI_LPM_SUPPORT; |
130 | xhci->quirks |= XHCI_INTEL_HOST; | 144 | xhci->quirks |= XHCI_INTEL_HOST; |
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 6f038306c14d..6652e2d5bd2e 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
@@ -360,7 +360,6 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) | |||
360 | { | 360 | { |
361 | struct usb_hcd *hcd = dev_get_drvdata(dev); | 361 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
362 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 362 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
363 | int ret; | ||
364 | 363 | ||
365 | /* | 364 | /* |
366 | * xhci_suspend() needs `do_wakeup` to know whether host is allowed | 365 | * xhci_suspend() needs `do_wakeup` to know whether host is allowed |
@@ -370,12 +369,7 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) | |||
370 | * reconsider this when xhci_plat_suspend enlarges its scope, e.g., | 369 | * reconsider this when xhci_plat_suspend enlarges its scope, e.g., |
371 | * also applies to runtime suspend. | 370 | * also applies to runtime suspend. |
372 | */ | 371 | */ |
373 | ret = xhci_suspend(xhci, device_may_wakeup(dev)); | 372 | return xhci_suspend(xhci, device_may_wakeup(dev)); |
374 | |||
375 | if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk)) | ||
376 | clk_disable_unprepare(xhci->clk); | ||
377 | |||
378 | return ret; | ||
379 | } | 373 | } |
380 | 374 | ||
381 | static int __maybe_unused xhci_plat_resume(struct device *dev) | 375 | static int __maybe_unused xhci_plat_resume(struct device *dev) |
@@ -384,9 +378,6 @@ static int __maybe_unused xhci_plat_resume(struct device *dev) | |||
384 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 378 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
385 | int ret; | 379 | int ret; |
386 | 380 | ||
387 | if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk)) | ||
388 | clk_prepare_enable(xhci->clk); | ||
389 | |||
390 | ret = xhci_priv_resume_quirk(hcd); | 381 | ret = xhci_priv_resume_quirk(hcd); |
391 | if (ret) | 382 | if (ret) |
392 | return ret; | 383 | return ret; |
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index f0b559660007..f33ffc2bc4ed 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c | |||
@@ -83,6 +83,10 @@ static const struct soc_device_attribute rcar_quirks_match[] = { | |||
83 | .soc_id = "r8a7796", | 83 | .soc_id = "r8a7796", |
84 | .data = (void *)RCAR_XHCI_FIRMWARE_V3, | 84 | .data = (void *)RCAR_XHCI_FIRMWARE_V3, |
85 | }, | 85 | }, |
86 | { | ||
87 | .soc_id = "r8a77965", | ||
88 | .data = (void *)RCAR_XHCI_FIRMWARE_V3, | ||
89 | }, | ||
86 | { /* sentinel */ }, | 90 | { /* sentinel */ }, |
87 | }; | 91 | }; |
88 | 92 | ||
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 1eeb3396300f..5d37700ae4b0 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -646,8 +646,6 @@ static void xhci_stop(struct usb_hcd *hcd) | |||
646 | return; | 646 | return; |
647 | } | 647 | } |
648 | 648 | ||
649 | xhci_debugfs_exit(xhci); | ||
650 | |||
651 | xhci_dbc_exit(xhci); | 649 | xhci_dbc_exit(xhci); |
652 | 650 | ||
653 | spin_lock_irq(&xhci->lock); | 651 | spin_lock_irq(&xhci->lock); |
@@ -680,6 +678,7 @@ static void xhci_stop(struct usb_hcd *hcd) | |||
680 | 678 | ||
681 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); | 679 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); |
682 | xhci_mem_cleanup(xhci); | 680 | xhci_mem_cleanup(xhci); |
681 | xhci_debugfs_exit(xhci); | ||
683 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, | 682 | xhci_dbg_trace(xhci, trace_xhci_dbg_init, |
684 | "xhci_stop completed - status = %x", | 683 | "xhci_stop completed - status = %x", |
685 | readl(&xhci->op_regs->status)); | 684 | readl(&xhci->op_regs->status)); |
@@ -878,6 +877,9 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) | |||
878 | clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); | 877 | clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); |
879 | del_timer_sync(&xhci->shared_hcd->rh_timer); | 878 | del_timer_sync(&xhci->shared_hcd->rh_timer); |
880 | 879 | ||
880 | if (xhci->quirks & XHCI_SUSPEND_DELAY) | ||
881 | usleep_range(1000, 1500); | ||
882 | |||
881 | spin_lock_irq(&xhci->lock); | 883 | spin_lock_irq(&xhci->lock); |
882 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); | 884 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); |
883 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); | 885 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); |
@@ -1014,6 +1016,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
1014 | 1016 | ||
1015 | xhci_dbg(xhci, "cleaning up memory\n"); | 1017 | xhci_dbg(xhci, "cleaning up memory\n"); |
1016 | xhci_mem_cleanup(xhci); | 1018 | xhci_mem_cleanup(xhci); |
1019 | xhci_debugfs_exit(xhci); | ||
1017 | xhci_dbg(xhci, "xhci_stop completed - status = %x\n", | 1020 | xhci_dbg(xhci, "xhci_stop completed - status = %x\n", |
1018 | readl(&xhci->op_regs->status)); | 1021 | readl(&xhci->op_regs->status)); |
1019 | 1022 | ||
@@ -3544,12 +3547,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) | |||
3544 | virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; | 3547 | virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; |
3545 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); | 3548 | del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); |
3546 | } | 3549 | } |
3547 | 3550 | xhci_debugfs_remove_slot(xhci, udev->slot_id); | |
3548 | ret = xhci_disable_slot(xhci, udev->slot_id); | 3551 | ret = xhci_disable_slot(xhci, udev->slot_id); |
3549 | if (ret) { | 3552 | if (ret) |
3550 | xhci_debugfs_remove_slot(xhci, udev->slot_id); | ||
3551 | xhci_free_virt_device(xhci, udev->slot_id); | 3553 | xhci_free_virt_device(xhci, udev->slot_id); |
3552 | } | ||
3553 | } | 3554 | } |
3554 | 3555 | ||
3555 | int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) | 3556 | int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 96099a245c69..866e141d4972 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -718,11 +718,12 @@ struct xhci_ep_ctx { | |||
718 | /* bits 10:14 are Max Primary Streams */ | 718 | /* bits 10:14 are Max Primary Streams */ |
719 | /* bit 15 is Linear Stream Array */ | 719 | /* bit 15 is Linear Stream Array */ |
720 | /* Interval - period between requests to an endpoint - 125u increments. */ | 720 | /* Interval - period between requests to an endpoint - 125u increments. */ |
721 | #define EP_INTERVAL(p) (((p) & 0xff) << 16) | 721 | #define EP_INTERVAL(p) (((p) & 0xff) << 16) |
722 | #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) | 722 | #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) |
723 | #define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff) | 723 | #define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff) |
724 | #define EP_MAXPSTREAMS_MASK (0x1f << 10) | 724 | #define EP_MAXPSTREAMS_MASK (0x1f << 10) |
725 | #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) | 725 | #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) |
726 | #define CTX_TO_EP_MAXPSTREAMS(p) (((p) & EP_MAXPSTREAMS_MASK) >> 10) | ||
726 | /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ | 727 | /* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ |
727 | #define EP_HAS_LSA (1 << 15) | 728 | #define EP_HAS_LSA (1 << 15) |
728 | /* hosts with LEC=1 use bits 31:24 as ESIT high bits. */ | 729 | /* hosts with LEC=1 use bits 31:24 as ESIT high bits. */ |
@@ -1822,9 +1823,10 @@ struct xhci_hcd { | |||
1822 | /* For controller with a broken Port Disable implementation */ | 1823 | /* For controller with a broken Port Disable implementation */ |
1823 | #define XHCI_BROKEN_PORT_PED (1 << 25) | 1824 | #define XHCI_BROKEN_PORT_PED (1 << 25) |
1824 | #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) | 1825 | #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) |
1825 | /* Reserved. It was XHCI_U2_DISABLE_WAKE */ | 1826 | #define XHCI_U2_DISABLE_WAKE (1 << 27) |
1826 | #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) | 1827 | #define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) |
1827 | #define XHCI_HW_LPM_DISABLE (1 << 29) | 1828 | #define XHCI_HW_LPM_DISABLE (1 << 29) |
1829 | #define XHCI_SUSPEND_DELAY (1 << 30) | ||
1828 | 1830 | ||
1829 | unsigned int num_active_eps; | 1831 | unsigned int num_active_eps; |
1830 | unsigned int limit_active_eps; | 1832 | unsigned int limit_active_eps; |
@@ -2549,21 +2551,22 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq, | |||
2549 | u8 burst; | 2551 | u8 burst; |
2550 | u8 cerr; | 2552 | u8 cerr; |
2551 | u8 mult; | 2553 | u8 mult; |
2552 | u8 lsa; | 2554 | |
2553 | u8 hid; | 2555 | bool lsa; |
2556 | bool hid; | ||
2554 | 2557 | ||
2555 | esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 | | 2558 | esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 | |
2556 | CTX_TO_MAX_ESIT_PAYLOAD(tx_info); | 2559 | CTX_TO_MAX_ESIT_PAYLOAD(tx_info); |
2557 | 2560 | ||
2558 | ep_state = info & EP_STATE_MASK; | 2561 | ep_state = info & EP_STATE_MASK; |
2559 | max_pstr = info & EP_MAXPSTREAMS_MASK; | 2562 | max_pstr = CTX_TO_EP_MAXPSTREAMS(info); |
2560 | interval = CTX_TO_EP_INTERVAL(info); | 2563 | interval = CTX_TO_EP_INTERVAL(info); |
2561 | mult = CTX_TO_EP_MULT(info) + 1; | 2564 | mult = CTX_TO_EP_MULT(info) + 1; |
2562 | lsa = info & EP_HAS_LSA; | 2565 | lsa = !!(info & EP_HAS_LSA); |
2563 | 2566 | ||
2564 | cerr = (info2 & (3 << 1)) >> 1; | 2567 | cerr = (info2 & (3 << 1)) >> 1; |
2565 | ep_type = CTX_TO_EP_TYPE(info2); | 2568 | ep_type = CTX_TO_EP_TYPE(info2); |
2566 | hid = info2 & (1 << 7); | 2569 | hid = !!(info2 & (1 << 7)); |
2567 | burst = CTX_TO_MAX_BURST(info2); | 2570 | burst = CTX_TO_MAX_BURST(info2); |
2568 | maxp = MAX_PACKET_DECODED(info2); | 2571 | maxp = MAX_PACKET_DECODED(info2); |
2569 | 2572 | ||
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 63b9e85dc0e9..236a60f53099 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c | |||
@@ -42,6 +42,9 @@ | |||
42 | #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */ | 42 | #define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */ |
43 | #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */ | 43 | #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */ |
44 | #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */ | 44 | #define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */ |
45 | #define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 /* USB Product ID of Power Analyser CASSY */ | ||
46 | #define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 /* USB Product ID of Converter Controller CASSY */ | ||
47 | #define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 /* USB Product ID of Machine Test CASSY */ | ||
45 | #define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */ | 48 | #define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */ |
46 | #define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */ | 49 | #define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */ |
47 | #define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */ | 50 | #define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */ |
@@ -84,6 +87,9 @@ static const struct usb_device_id ld_usb_table[] = { | |||
84 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, | 87 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) }, |
85 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, | 88 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) }, |
86 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, | 89 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) }, |
90 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) }, | ||
91 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) }, | ||
92 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) }, | ||
87 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, | 93 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) }, |
88 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, | 94 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) }, |
89 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, | 95 | { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) }, |
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index f5e1bb5e5217..984f7e12a6a5 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c | |||
@@ -85,6 +85,8 @@ struct mon_reader_text { | |||
85 | 85 | ||
86 | wait_queue_head_t wait; | 86 | wait_queue_head_t wait; |
87 | int printf_size; | 87 | int printf_size; |
88 | size_t printf_offset; | ||
89 | size_t printf_togo; | ||
88 | char *printf_buf; | 90 | char *printf_buf; |
89 | struct mutex printf_lock; | 91 | struct mutex printf_lock; |
90 | 92 | ||
@@ -376,75 +378,103 @@ err_alloc: | |||
376 | return rc; | 378 | return rc; |
377 | } | 379 | } |
378 | 380 | ||
379 | /* | 381 | static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp, |
380 | * For simplicity, we read one record in one system call and throw out | 382 | char __user * const buf, const size_t nbytes) |
381 | * what does not fit. This means that the following does not work: | 383 | { |
382 | * dd if=/dbg/usbmon/0t bs=10 | 384 | const size_t togo = min(nbytes, rp->printf_togo); |
383 | * Also, we do not allow seeks and do not bother advancing the offset. | 385 | |
384 | */ | 386 | if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo)) |
387 | return -EFAULT; | ||
388 | rp->printf_togo -= togo; | ||
389 | rp->printf_offset += togo; | ||
390 | return togo; | ||
391 | } | ||
392 | |||
393 | /* ppos is not advanced since the llseek operation is not permitted. */ | ||
385 | static ssize_t mon_text_read_t(struct file *file, char __user *buf, | 394 | static ssize_t mon_text_read_t(struct file *file, char __user *buf, |
386 | size_t nbytes, loff_t *ppos) | 395 | size_t nbytes, loff_t *ppos) |
387 | { | 396 | { |
388 | struct mon_reader_text *rp = file->private_data; | 397 | struct mon_reader_text *rp = file->private_data; |
389 | struct mon_event_text *ep; | 398 | struct mon_event_text *ep; |
390 | struct mon_text_ptr ptr; | 399 | struct mon_text_ptr ptr; |
400 | ssize_t ret; | ||
391 | 401 | ||
392 | ep = mon_text_read_wait(rp, file); | ||
393 | if (IS_ERR(ep)) | ||
394 | return PTR_ERR(ep); | ||
395 | mutex_lock(&rp->printf_lock); | 402 | mutex_lock(&rp->printf_lock); |
396 | ptr.cnt = 0; | 403 | |
397 | ptr.pbuf = rp->printf_buf; | 404 | if (rp->printf_togo == 0) { |
398 | ptr.limit = rp->printf_size; | 405 | |
399 | 406 | ep = mon_text_read_wait(rp, file); | |
400 | mon_text_read_head_t(rp, &ptr, ep); | 407 | if (IS_ERR(ep)) { |
401 | mon_text_read_statset(rp, &ptr, ep); | 408 | mutex_unlock(&rp->printf_lock); |
402 | ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, | 409 | return PTR_ERR(ep); |
403 | " %d", ep->length); | 410 | } |
404 | mon_text_read_data(rp, &ptr, ep); | 411 | ptr.cnt = 0; |
405 | 412 | ptr.pbuf = rp->printf_buf; | |
406 | if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) | 413 | ptr.limit = rp->printf_size; |
407 | ptr.cnt = -EFAULT; | 414 | |
415 | mon_text_read_head_t(rp, &ptr, ep); | ||
416 | mon_text_read_statset(rp, &ptr, ep); | ||
417 | ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, | ||
418 | " %d", ep->length); | ||
419 | mon_text_read_data(rp, &ptr, ep); | ||
420 | |||
421 | rp->printf_togo = ptr.cnt; | ||
422 | rp->printf_offset = 0; | ||
423 | |||
424 | kmem_cache_free(rp->e_slab, ep); | ||
425 | } | ||
426 | |||
427 | ret = mon_text_copy_to_user(rp, buf, nbytes); | ||
408 | mutex_unlock(&rp->printf_lock); | 428 | mutex_unlock(&rp->printf_lock); |
409 | kmem_cache_free(rp->e_slab, ep); | 429 | return ret; |
410 | return ptr.cnt; | ||
411 | } | 430 | } |
412 | 431 | ||
432 | /* ppos is not advanced since the llseek operation is not permitted. */ | ||
413 | static ssize_t mon_text_read_u(struct file *file, char __user *buf, | 433 | static ssize_t mon_text_read_u(struct file *file, char __user *buf, |
414 | size_t nbytes, loff_t *ppos) | 434 | size_t nbytes, loff_t *ppos) |
415 | { | 435 | { |
416 | struct mon_reader_text *rp = file->private_data; | 436 | struct mon_reader_text *rp = file->private_data; |
417 | struct mon_event_text *ep; | 437 | struct mon_event_text *ep; |
418 | struct mon_text_ptr ptr; | 438 | struct mon_text_ptr ptr; |
439 | ssize_t ret; | ||
419 | 440 | ||
420 | ep = mon_text_read_wait(rp, file); | ||
421 | if (IS_ERR(ep)) | ||
422 | return PTR_ERR(ep); | ||
423 | mutex_lock(&rp->printf_lock); | 441 | mutex_lock(&rp->printf_lock); |
424 | ptr.cnt = 0; | ||
425 | ptr.pbuf = rp->printf_buf; | ||
426 | ptr.limit = rp->printf_size; | ||
427 | 442 | ||
428 | mon_text_read_head_u(rp, &ptr, ep); | 443 | if (rp->printf_togo == 0) { |
429 | if (ep->type == 'E') { | 444 | |
430 | mon_text_read_statset(rp, &ptr, ep); | 445 | ep = mon_text_read_wait(rp, file); |
431 | } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { | 446 | if (IS_ERR(ep)) { |
432 | mon_text_read_isostat(rp, &ptr, ep); | 447 | mutex_unlock(&rp->printf_lock); |
433 | mon_text_read_isodesc(rp, &ptr, ep); | 448 | return PTR_ERR(ep); |
434 | } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { | 449 | } |
435 | mon_text_read_intstat(rp, &ptr, ep); | 450 | ptr.cnt = 0; |
436 | } else { | 451 | ptr.pbuf = rp->printf_buf; |
437 | mon_text_read_statset(rp, &ptr, ep); | 452 | ptr.limit = rp->printf_size; |
453 | |||
454 | mon_text_read_head_u(rp, &ptr, ep); | ||
455 | if (ep->type == 'E') { | ||
456 | mon_text_read_statset(rp, &ptr, ep); | ||
457 | } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { | ||
458 | mon_text_read_isostat(rp, &ptr, ep); | ||
459 | mon_text_read_isodesc(rp, &ptr, ep); | ||
460 | } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { | ||
461 | mon_text_read_intstat(rp, &ptr, ep); | ||
462 | } else { | ||
463 | mon_text_read_statset(rp, &ptr, ep); | ||
464 | } | ||
465 | ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, | ||
466 | " %d", ep->length); | ||
467 | mon_text_read_data(rp, &ptr, ep); | ||
468 | |||
469 | rp->printf_togo = ptr.cnt; | ||
470 | rp->printf_offset = 0; | ||
471 | |||
472 | kmem_cache_free(rp->e_slab, ep); | ||
438 | } | 473 | } |
439 | ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, | ||
440 | " %d", ep->length); | ||
441 | mon_text_read_data(rp, &ptr, ep); | ||
442 | 474 | ||
443 | if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) | 475 | ret = mon_text_copy_to_user(rp, buf, nbytes); |
444 | ptr.cnt = -EFAULT; | ||
445 | mutex_unlock(&rp->printf_lock); | 476 | mutex_unlock(&rp->printf_lock); |
446 | kmem_cache_free(rp->e_slab, ep); | 477 | return ret; |
447 | return ptr.cnt; | ||
448 | } | 478 | } |
449 | 479 | ||
450 | static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, | 480 | static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 968bf1e8b0fe..4d723077be2b 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1756,6 +1756,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
1756 | int vbus; | 1756 | int vbus; |
1757 | u8 devctl; | 1757 | u8 devctl; |
1758 | 1758 | ||
1759 | pm_runtime_get_sync(dev); | ||
1759 | spin_lock_irqsave(&musb->lock, flags); | 1760 | spin_lock_irqsave(&musb->lock, flags); |
1760 | val = musb->a_wait_bcon; | 1761 | val = musb->a_wait_bcon; |
1761 | vbus = musb_platform_get_vbus_status(musb); | 1762 | vbus = musb_platform_get_vbus_status(musb); |
@@ -1769,6 +1770,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
1769 | vbus = 0; | 1770 | vbus = 0; |
1770 | } | 1771 | } |
1771 | spin_unlock_irqrestore(&musb->lock, flags); | 1772 | spin_unlock_irqrestore(&musb->lock, flags); |
1773 | pm_runtime_put_sync(dev); | ||
1772 | 1774 | ||
1773 | return sprintf(buf, "Vbus %s, timeout %lu msec\n", | 1775 | return sprintf(buf, "Vbus %s, timeout %lu msec\n", |
1774 | vbus ? "on" : "off", val); | 1776 | vbus ? "on" : "off", val); |
@@ -2471,11 +2473,11 @@ static int musb_remove(struct platform_device *pdev) | |||
2471 | musb_disable_interrupts(musb); | 2473 | musb_disable_interrupts(musb); |
2472 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | 2474 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); |
2473 | spin_unlock_irqrestore(&musb->lock, flags); | 2475 | spin_unlock_irqrestore(&musb->lock, flags); |
2476 | musb_platform_exit(musb); | ||
2474 | 2477 | ||
2475 | pm_runtime_dont_use_autosuspend(musb->controller); | 2478 | pm_runtime_dont_use_autosuspend(musb->controller); |
2476 | pm_runtime_put_sync(musb->controller); | 2479 | pm_runtime_put_sync(musb->controller); |
2477 | pm_runtime_disable(musb->controller); | 2480 | pm_runtime_disable(musb->controller); |
2478 | musb_platform_exit(musb); | ||
2479 | musb_phy_callback = NULL; | 2481 | musb_phy_callback = NULL; |
2480 | if (musb->dma_controller) | 2482 | if (musb->dma_controller) |
2481 | musb_dma_controller_destroy(musb->dma_controller); | 2483 | musb_dma_controller_destroy(musb->dma_controller); |
@@ -2708,7 +2710,8 @@ static int musb_resume(struct device *dev) | |||
2708 | if ((devctl & mask) != (musb->context.devctl & mask)) | 2710 | if ((devctl & mask) != (musb->context.devctl & mask)) |
2709 | musb->port1_status = 0; | 2711 | musb->port1_status = 0; |
2710 | 2712 | ||
2711 | musb_start(musb); | 2713 | musb_enable_interrupts(musb); |
2714 | musb_platform_enable(musb); | ||
2712 | 2715 | ||
2713 | spin_lock_irqsave(&musb->lock, flags); | 2716 | spin_lock_irqsave(&musb->lock, flags); |
2714 | error = musb_run_resume_work(musb); | 2717 | error = musb_run_resume_work(musb); |
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 394b4ac86161..45ed32c2cba9 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
@@ -391,13 +391,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, | |||
391 | } | 391 | } |
392 | } | 392 | } |
393 | 393 | ||
394 | /* | 394 | if (qh != NULL && qh->is_ready) { |
395 | * The pipe must be broken if current urb->status is set, so don't | ||
396 | * start next urb. | ||
397 | * TODO: to minimize the risk of regression, only check urb->status | ||
398 | * for RX, until we have a test case to understand the behavior of TX. | ||
399 | */ | ||
400 | if ((!status || !is_in) && qh && qh->is_ready) { | ||
401 | musb_dbg(musb, "... next ep%d %cX urb %p", | 395 | musb_dbg(musb, "... next ep%d %cX urb %p", |
402 | hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); | 396 | hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); |
403 | musb_start_urb(musb, is_in, qh); | 397 | musb_start_urb(musb, is_in, qh); |
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index da031c45395a..fbec863350f6 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c | |||
@@ -602,6 +602,9 @@ static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy) | |||
602 | void __iomem *base = phy->io_priv; | 602 | void __iomem *base = phy->io_priv; |
603 | enum usb_charger_type chgr_type = UNKNOWN_TYPE; | 603 | enum usb_charger_type chgr_type = UNKNOWN_TYPE; |
604 | 604 | ||
605 | if (!regmap) | ||
606 | return UNKNOWN_TYPE; | ||
607 | |||
605 | if (mxs_charger_data_contact_detect(mxs_phy)) | 608 | if (mxs_charger_data_contact_detect(mxs_phy)) |
606 | return chgr_type; | 609 | return chgr_type; |
607 | 610 | ||
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 5925d111bd47..39fa2fc1b8b7 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
@@ -982,6 +982,10 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt, | |||
982 | if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1)) | 982 | if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1)) |
983 | goto usbhsf_pio_prepare_pop; | 983 | goto usbhsf_pio_prepare_pop; |
984 | 984 | ||
985 | /* return at this time if the pipe is running */ | ||
986 | if (usbhs_pipe_is_running(pipe)) | ||
987 | return 0; | ||
988 | |||
985 | usbhs_pipe_config_change_bfre(pipe, 1); | 989 | usbhs_pipe_config_change_bfre(pipe, 1); |
986 | 990 | ||
987 | ret = usbhsf_fifo_select(pipe, fifo, 0); | 991 | ret = usbhsf_fifo_select(pipe, fifo, 0); |
@@ -1172,6 +1176,7 @@ static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt, | |||
1172 | usbhsf_fifo_clear(pipe, fifo); | 1176 | usbhsf_fifo_clear(pipe, fifo); |
1173 | pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len); | 1177 | pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len); |
1174 | 1178 | ||
1179 | usbhs_pipe_running(pipe, 0); | ||
1175 | usbhsf_dma_stop(pipe, fifo); | 1180 | usbhsf_dma_stop(pipe, fifo); |
1176 | usbhsf_dma_unmap(pkt); | 1181 | usbhsf_dma_unmap(pkt); |
1177 | usbhsf_fifo_unselect(pipe, pipe->fifo); | 1182 | usbhsf_fifo_unselect(pipe, pipe->fifo); |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 5db8ed517e0e..2d8d9150da0c 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb); | |||
241 | #define QUECTEL_PRODUCT_EC21 0x0121 | 241 | #define QUECTEL_PRODUCT_EC21 0x0121 |
242 | #define QUECTEL_PRODUCT_EC25 0x0125 | 242 | #define QUECTEL_PRODUCT_EC25 0x0125 |
243 | #define QUECTEL_PRODUCT_BG96 0x0296 | 243 | #define QUECTEL_PRODUCT_BG96 0x0296 |
244 | #define QUECTEL_PRODUCT_EP06 0x0306 | ||
244 | 245 | ||
245 | #define CMOTECH_VENDOR_ID 0x16d8 | 246 | #define CMOTECH_VENDOR_ID 0x16d8 |
246 | #define CMOTECH_PRODUCT_6001 0x6001 | 247 | #define CMOTECH_PRODUCT_6001 0x6001 |
@@ -689,6 +690,10 @@ static const struct option_blacklist_info yuga_clm920_nc5_blacklist = { | |||
689 | .reserved = BIT(1) | BIT(4), | 690 | .reserved = BIT(1) | BIT(4), |
690 | }; | 691 | }; |
691 | 692 | ||
693 | static const struct option_blacklist_info quectel_ep06_blacklist = { | ||
694 | .reserved = BIT(4) | BIT(5), | ||
695 | }; | ||
696 | |||
692 | static const struct usb_device_id option_ids[] = { | 697 | static const struct usb_device_id option_ids[] = { |
693 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 698 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
694 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, | 699 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, |
@@ -1203,6 +1208,8 @@ static const struct usb_device_id option_ids[] = { | |||
1203 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 1208 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
1204 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), | 1209 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), |
1205 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 1210 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
1211 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06), | ||
1212 | .driver_info = (kernel_ulong_t)&quectel_ep06_blacklist }, | ||
1206 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, | 1213 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, |
1207 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, | 1214 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, |
1208 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), | 1215 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), |
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 3b1b9695177a..6034c39b67d1 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c | |||
@@ -1076,7 +1076,7 @@ static int uas_post_reset(struct usb_interface *intf) | |||
1076 | return 0; | 1076 | return 0; |
1077 | 1077 | ||
1078 | err = uas_configure_endpoints(devinfo); | 1078 | err = uas_configure_endpoints(devinfo); |
1079 | if (err && err != ENODEV) | 1079 | if (err && err != -ENODEV) |
1080 | shost_printk(KERN_ERR, shost, | 1080 | shost_printk(KERN_ERR, shost, |
1081 | "%s: alloc streams error %d after reset", | 1081 | "%s: alloc streams error %d after reset", |
1082 | __func__, err); | 1082 | __func__, err); |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 264af199aec8..747d3a9596d9 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -2118,6 +2118,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114, | |||
2118 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 2118 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
2119 | US_FL_BROKEN_FUA ), | 2119 | US_FL_BROKEN_FUA ), |
2120 | 2120 | ||
2121 | /* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */ | ||
2122 | UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117, | ||
2123 | "JMicron", | ||
2124 | "USB to ATA/ATAPI Bridge", | ||
2125 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
2126 | US_FL_BROKEN_FUA ), | ||
2127 | |||
2121 | /* Reported-by George Cherian <george.cherian@cavium.com> */ | 2128 | /* Reported-by George Cherian <george.cherian@cavium.com> */ |
2122 | UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, | 2129 | UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, |
2123 | "JMicron", | 2130 | "JMicron", |
diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c index 9ce4756adad6..dcd8ef085b30 100644 --- a/drivers/usb/typec/fusb302/fusb302.c +++ b/drivers/usb/typec/fusb302/fusb302.c | |||
@@ -1857,7 +1857,8 @@ static int fusb302_probe(struct i2c_client *client, | |||
1857 | chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev); | 1857 | chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev); |
1858 | if (IS_ERR(chip->tcpm_port)) { | 1858 | if (IS_ERR(chip->tcpm_port)) { |
1859 | ret = PTR_ERR(chip->tcpm_port); | 1859 | ret = PTR_ERR(chip->tcpm_port); |
1860 | dev_err(dev, "cannot register tcpm port, ret=%d", ret); | 1860 | if (ret != -EPROBE_DEFER) |
1861 | dev_err(dev, "cannot register tcpm port, ret=%d", ret); | ||
1861 | goto destroy_workqueue; | 1862 | goto destroy_workqueue; |
1862 | } | 1863 | } |
1863 | 1864 | ||
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c index f4d563ee7690..8b637a4b474b 100644 --- a/drivers/usb/typec/tcpm.c +++ b/drivers/usb/typec/tcpm.c | |||
@@ -252,9 +252,6 @@ struct tcpm_port { | |||
252 | unsigned int nr_src_pdo; | 252 | unsigned int nr_src_pdo; |
253 | u32 snk_pdo[PDO_MAX_OBJECTS]; | 253 | u32 snk_pdo[PDO_MAX_OBJECTS]; |
254 | unsigned int nr_snk_pdo; | 254 | unsigned int nr_snk_pdo; |
255 | unsigned int nr_fixed; /* number of fixed sink PDOs */ | ||
256 | unsigned int nr_var; /* number of variable sink PDOs */ | ||
257 | unsigned int nr_batt; /* number of battery sink PDOs */ | ||
258 | u32 snk_vdo[VDO_MAX_OBJECTS]; | 255 | u32 snk_vdo[VDO_MAX_OBJECTS]; |
259 | unsigned int nr_snk_vdo; | 256 | unsigned int nr_snk_vdo; |
260 | 257 | ||
@@ -1770,90 +1767,39 @@ static int tcpm_pd_check_request(struct tcpm_port *port) | |||
1770 | return 0; | 1767 | return 0; |
1771 | } | 1768 | } |
1772 | 1769 | ||
1773 | #define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y)) | 1770 | static int tcpm_pd_select_pdo(struct tcpm_port *port) |
1774 | #define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y)) | ||
1775 | |||
1776 | static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo, | ||
1777 | int *src_pdo) | ||
1778 | { | 1771 | { |
1779 | unsigned int i, j, max_mw = 0, max_mv = 0, mw = 0, mv = 0, ma = 0; | 1772 | unsigned int i, max_mw = 0, max_mv = 0; |
1780 | int ret = -EINVAL; | 1773 | int ret = -EINVAL; |
1781 | 1774 | ||
1782 | /* | 1775 | /* |
1783 | * Select the source PDO providing the most power which has a | 1776 | * Select the source PDO providing the most power while staying within |
1784 | * matchig sink cap. | 1777 | * the board's voltage limits. Prefer PDO providing exp |
1785 | */ | 1778 | */ |
1786 | for (i = 0; i < port->nr_source_caps; i++) { | 1779 | for (i = 0; i < port->nr_source_caps; i++) { |
1787 | u32 pdo = port->source_caps[i]; | 1780 | u32 pdo = port->source_caps[i]; |
1788 | enum pd_pdo_type type = pdo_type(pdo); | 1781 | enum pd_pdo_type type = pdo_type(pdo); |
1782 | unsigned int mv, ma, mw; | ||
1789 | 1783 | ||
1790 | if (type == PDO_TYPE_FIXED) { | 1784 | if (type == PDO_TYPE_FIXED) |
1791 | for (j = 0; j < port->nr_fixed; j++) { | 1785 | mv = pdo_fixed_voltage(pdo); |
1792 | if (pdo_fixed_voltage(pdo) == | 1786 | else |
1793 | pdo_fixed_voltage(port->snk_pdo[j])) { | 1787 | mv = pdo_min_voltage(pdo); |
1794 | ma = min_current(pdo, port->snk_pdo[j]); | 1788 | |
1795 | mv = pdo_fixed_voltage(pdo); | 1789 | if (type == PDO_TYPE_BATT) { |
1796 | mw = ma * mv / 1000; | 1790 | mw = pdo_max_power(pdo); |
1797 | if (mw > max_mw || | 1791 | } else { |
1798 | (mw == max_mw && mv > max_mv)) { | 1792 | ma = min(pdo_max_current(pdo), |
1799 | ret = 0; | 1793 | port->max_snk_ma); |
1800 | *src_pdo = i; | 1794 | mw = ma * mv / 1000; |
1801 | *sink_pdo = j; | 1795 | } |
1802 | max_mw = mw; | 1796 | |
1803 | max_mv = mv; | 1797 | /* Perfer higher voltages if available */ |
1804 | } | 1798 | if ((mw > max_mw || (mw == max_mw && mv > max_mv)) && |
1805 | /* There could only be one fixed pdo | 1799 | mv <= port->max_snk_mv) { |
1806 | * at a specific voltage level. | 1800 | ret = i; |
1807 | * So breaking here. | 1801 | max_mw = mw; |
1808 | */ | 1802 | max_mv = mv; |
1809 | break; | ||
1810 | } | ||
1811 | } | ||
1812 | } else if (type == PDO_TYPE_BATT) { | ||
1813 | for (j = port->nr_fixed; | ||
1814 | j < port->nr_fixed + | ||
1815 | port->nr_batt; | ||
1816 | j++) { | ||
1817 | if (pdo_min_voltage(pdo) >= | ||
1818 | pdo_min_voltage(port->snk_pdo[j]) && | ||
1819 | pdo_max_voltage(pdo) <= | ||
1820 | pdo_max_voltage(port->snk_pdo[j])) { | ||
1821 | mw = min_power(pdo, port->snk_pdo[j]); | ||
1822 | mv = pdo_min_voltage(pdo); | ||
1823 | if (mw > max_mw || | ||
1824 | (mw == max_mw && mv > max_mv)) { | ||
1825 | ret = 0; | ||
1826 | *src_pdo = i; | ||
1827 | *sink_pdo = j; | ||
1828 | max_mw = mw; | ||
1829 | max_mv = mv; | ||
1830 | } | ||
1831 | } | ||
1832 | } | ||
1833 | } else if (type == PDO_TYPE_VAR) { | ||
1834 | for (j = port->nr_fixed + | ||
1835 | port->nr_batt; | ||
1836 | j < port->nr_fixed + | ||
1837 | port->nr_batt + | ||
1838 | port->nr_var; | ||
1839 | j++) { | ||
1840 | if (pdo_min_voltage(pdo) >= | ||
1841 | pdo_min_voltage(port->snk_pdo[j]) && | ||
1842 | pdo_max_voltage(pdo) <= | ||
1843 | pdo_max_voltage(port->snk_pdo[j])) { | ||
1844 | ma = min_current(pdo, port->snk_pdo[j]); | ||
1845 | mv = pdo_min_voltage(pdo); | ||
1846 | mw = ma * mv / 1000; | ||
1847 | if (mw > max_mw || | ||
1848 | (mw == max_mw && mv > max_mv)) { | ||
1849 | ret = 0; | ||
1850 | *src_pdo = i; | ||
1851 | *sink_pdo = j; | ||
1852 | max_mw = mw; | ||
1853 | max_mv = mv; | ||
1854 | } | ||
1855 | } | ||
1856 | } | ||
1857 | } | 1803 | } |
1858 | } | 1804 | } |
1859 | 1805 | ||
@@ -1865,14 +1811,13 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo) | |||
1865 | unsigned int mv, ma, mw, flags; | 1811 | unsigned int mv, ma, mw, flags; |
1866 | unsigned int max_ma, max_mw; | 1812 | unsigned int max_ma, max_mw; |
1867 | enum pd_pdo_type type; | 1813 | enum pd_pdo_type type; |
1868 | int src_pdo_index, snk_pdo_index; | 1814 | int index; |
1869 | u32 pdo, matching_snk_pdo; | 1815 | u32 pdo; |
1870 | 1816 | ||
1871 | if (tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index) < 0) | 1817 | index = tcpm_pd_select_pdo(port); |
1818 | if (index < 0) | ||
1872 | return -EINVAL; | 1819 | return -EINVAL; |
1873 | 1820 | pdo = port->source_caps[index]; | |
1874 | pdo = port->source_caps[src_pdo_index]; | ||
1875 | matching_snk_pdo = port->snk_pdo[snk_pdo_index]; | ||
1876 | type = pdo_type(pdo); | 1821 | type = pdo_type(pdo); |
1877 | 1822 | ||
1878 | if (type == PDO_TYPE_FIXED) | 1823 | if (type == PDO_TYPE_FIXED) |
@@ -1880,28 +1825,26 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo) | |||
1880 | else | 1825 | else |
1881 | mv = pdo_min_voltage(pdo); | 1826 | mv = pdo_min_voltage(pdo); |
1882 | 1827 | ||
1883 | /* Select maximum available current within the sink pdo's limit */ | 1828 | /* Select maximum available current within the board's power limit */ |
1884 | if (type == PDO_TYPE_BATT) { | 1829 | if (type == PDO_TYPE_BATT) { |
1885 | mw = min_power(pdo, matching_snk_pdo); | 1830 | mw = pdo_max_power(pdo); |
1886 | ma = 1000 * mw / mv; | 1831 | ma = 1000 * min(mw, port->max_snk_mw) / mv; |
1887 | } else { | 1832 | } else { |
1888 | ma = min_current(pdo, matching_snk_pdo); | 1833 | ma = min(pdo_max_current(pdo), |
1889 | mw = ma * mv / 1000; | 1834 | 1000 * port->max_snk_mw / mv); |
1890 | } | 1835 | } |
1836 | ma = min(ma, port->max_snk_ma); | ||
1891 | 1837 | ||
1892 | flags = RDO_USB_COMM | RDO_NO_SUSPEND; | 1838 | flags = RDO_USB_COMM | RDO_NO_SUSPEND; |
1893 | 1839 | ||
1894 | /* Set mismatch bit if offered power is less than operating power */ | 1840 | /* Set mismatch bit if offered power is less than operating power */ |
1841 | mw = ma * mv / 1000; | ||
1895 | max_ma = ma; | 1842 | max_ma = ma; |
1896 | max_mw = mw; | 1843 | max_mw = mw; |
1897 | if (mw < port->operating_snk_mw) { | 1844 | if (mw < port->operating_snk_mw) { |
1898 | flags |= RDO_CAP_MISMATCH; | 1845 | flags |= RDO_CAP_MISMATCH; |
1899 | if (type == PDO_TYPE_BATT && | 1846 | max_mw = port->operating_snk_mw; |
1900 | (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo))) | 1847 | max_ma = max_mw * 1000 / mv; |
1901 | max_mw = pdo_max_power(matching_snk_pdo); | ||
1902 | else if (pdo_max_current(matching_snk_pdo) > | ||
1903 | pdo_max_current(pdo)) | ||
1904 | max_ma = pdo_max_current(matching_snk_pdo); | ||
1905 | } | 1848 | } |
1906 | 1849 | ||
1907 | tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d", | 1850 | tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d", |
@@ -1910,16 +1853,16 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo) | |||
1910 | port->polarity); | 1853 | port->polarity); |
1911 | 1854 | ||
1912 | if (type == PDO_TYPE_BATT) { | 1855 | if (type == PDO_TYPE_BATT) { |
1913 | *rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags); | 1856 | *rdo = RDO_BATT(index + 1, mw, max_mw, flags); |
1914 | 1857 | ||
1915 | tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s", | 1858 | tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s", |
1916 | src_pdo_index, mv, mw, | 1859 | index, mv, mw, |
1917 | flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); | 1860 | flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); |
1918 | } else { | 1861 | } else { |
1919 | *rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags); | 1862 | *rdo = RDO_FIXED(index + 1, ma, max_ma, flags); |
1920 | 1863 | ||
1921 | tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s", | 1864 | tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s", |
1922 | src_pdo_index, mv, ma, | 1865 | index, mv, ma, |
1923 | flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); | 1866 | flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); |
1924 | } | 1867 | } |
1925 | 1868 | ||
@@ -3650,19 +3593,6 @@ int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo, | |||
3650 | } | 3593 | } |
3651 | EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities); | 3594 | EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities); |
3652 | 3595 | ||
3653 | static int nr_type_pdos(const u32 *pdo, unsigned int nr_pdo, | ||
3654 | enum pd_pdo_type type) | ||
3655 | { | ||
3656 | int count = 0; | ||
3657 | int i; | ||
3658 | |||
3659 | for (i = 0; i < nr_pdo; i++) { | ||
3660 | if (pdo_type(pdo[i]) == type) | ||
3661 | count++; | ||
3662 | } | ||
3663 | return count; | ||
3664 | } | ||
3665 | |||
3666 | struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) | 3596 | struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) |
3667 | { | 3597 | { |
3668 | struct tcpm_port *port; | 3598 | struct tcpm_port *port; |
@@ -3708,15 +3638,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) | |||
3708 | tcpc->config->nr_src_pdo); | 3638 | tcpc->config->nr_src_pdo); |
3709 | port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo, | 3639 | port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo, |
3710 | tcpc->config->nr_snk_pdo); | 3640 | tcpc->config->nr_snk_pdo); |
3711 | port->nr_fixed = nr_type_pdos(port->snk_pdo, | ||
3712 | port->nr_snk_pdo, | ||
3713 | PDO_TYPE_FIXED); | ||
3714 | port->nr_var = nr_type_pdos(port->snk_pdo, | ||
3715 | port->nr_snk_pdo, | ||
3716 | PDO_TYPE_VAR); | ||
3717 | port->nr_batt = nr_type_pdos(port->snk_pdo, | ||
3718 | port->nr_snk_pdo, | ||
3719 | PDO_TYPE_BATT); | ||
3720 | port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo, | 3641 | port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo, |
3721 | tcpc->config->nr_snk_vdo); | 3642 | tcpc->config->nr_snk_vdo); |
3722 | 3643 | ||
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 49e552472c3f..dd8ef36ab10e 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c | |||
@@ -73,6 +73,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a | |||
73 | goto err; | 73 | goto err; |
74 | 74 | ||
75 | sdev->ud.tcp_socket = socket; | 75 | sdev->ud.tcp_socket = socket; |
76 | sdev->ud.sockfd = sockfd; | ||
76 | 77 | ||
77 | spin_unlock_irq(&sdev->ud.lock); | 78 | spin_unlock_irq(&sdev->ud.lock); |
78 | 79 | ||
@@ -172,6 +173,7 @@ static void stub_shutdown_connection(struct usbip_device *ud) | |||
172 | if (ud->tcp_socket) { | 173 | if (ud->tcp_socket) { |
173 | sockfd_put(ud->tcp_socket); | 174 | sockfd_put(ud->tcp_socket); |
174 | ud->tcp_socket = NULL; | 175 | ud->tcp_socket = NULL; |
176 | ud->sockfd = -1; | ||
175 | } | 177 | } |
176 | 178 | ||
177 | /* 3. free used data */ | 179 | /* 3. free used data */ |
@@ -266,6 +268,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev) | |||
266 | sdev->ud.status = SDEV_ST_AVAILABLE; | 268 | sdev->ud.status = SDEV_ST_AVAILABLE; |
267 | spin_lock_init(&sdev->ud.lock); | 269 | spin_lock_init(&sdev->ud.lock); |
268 | sdev->ud.tcp_socket = NULL; | 270 | sdev->ud.tcp_socket = NULL; |
271 | sdev->ud.sockfd = -1; | ||
269 | 272 | ||
270 | INIT_LIST_HEAD(&sdev->priv_init); | 273 | INIT_LIST_HEAD(&sdev->priv_init); |
271 | INIT_LIST_HEAD(&sdev->priv_tx); | 274 | INIT_LIST_HEAD(&sdev->priv_tx); |
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index c3e1008aa491..20e3d4609583 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c | |||
@@ -984,6 +984,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud) | |||
984 | if (vdev->ud.tcp_socket) { | 984 | if (vdev->ud.tcp_socket) { |
985 | sockfd_put(vdev->ud.tcp_socket); | 985 | sockfd_put(vdev->ud.tcp_socket); |
986 | vdev->ud.tcp_socket = NULL; | 986 | vdev->ud.tcp_socket = NULL; |
987 | vdev->ud.sockfd = -1; | ||
987 | } | 988 | } |
988 | pr_info("release socket\n"); | 989 | pr_info("release socket\n"); |
989 | 990 | ||
@@ -1030,6 +1031,7 @@ static void vhci_device_reset(struct usbip_device *ud) | |||
1030 | if (ud->tcp_socket) { | 1031 | if (ud->tcp_socket) { |
1031 | sockfd_put(ud->tcp_socket); | 1032 | sockfd_put(ud->tcp_socket); |
1032 | ud->tcp_socket = NULL; | 1033 | ud->tcp_socket = NULL; |
1034 | ud->sockfd = -1; | ||
1033 | } | 1035 | } |
1034 | ud->status = VDEV_ST_NULL; | 1036 | ud->status = VDEV_ST_NULL; |
1035 | 1037 | ||
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c index d86f72bbbb91..6dcd3ff655c3 100644 --- a/drivers/usb/usbip/vudc_sysfs.c +++ b/drivers/usb/usbip/vudc_sysfs.c | |||
@@ -105,10 +105,14 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a | |||
105 | if (rv != 0) | 105 | if (rv != 0) |
106 | return -EINVAL; | 106 | return -EINVAL; |
107 | 107 | ||
108 | if (!udc) { | ||
109 | dev_err(dev, "no device"); | ||
110 | return -ENODEV; | ||
111 | } | ||
108 | spin_lock_irqsave(&udc->lock, flags); | 112 | spin_lock_irqsave(&udc->lock, flags); |
109 | /* Don't export what we don't have */ | 113 | /* Don't export what we don't have */ |
110 | if (!udc || !udc->driver || !udc->pullup) { | 114 | if (!udc->driver || !udc->pullup) { |
111 | dev_err(dev, "no device or gadget not bound"); | 115 | dev_err(dev, "gadget not bound"); |
112 | ret = -ENODEV; | 116 | ret = -ENODEV; |
113 | goto unlock; | 117 | goto unlock; |
114 | } | 118 | } |
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index b0f759476900..8a1508a8e481 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c | |||
@@ -207,9 +207,6 @@ static bool vfio_pci_nointx(struct pci_dev *pdev) | |||
207 | } | 207 | } |
208 | } | 208 | } |
209 | 209 | ||
210 | if (!pdev->irq) | ||
211 | return true; | ||
212 | |||
213 | return false; | 210 | return false; |
214 | } | 211 | } |
215 | 212 | ||
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index e30e29ae4819..45657e2b1ff7 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
@@ -338,11 +338,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, | |||
338 | { | 338 | { |
339 | struct page *page[1]; | 339 | struct page *page[1]; |
340 | struct vm_area_struct *vma; | 340 | struct vm_area_struct *vma; |
341 | struct vm_area_struct *vmas[1]; | ||
341 | int ret; | 342 | int ret; |
342 | 343 | ||
343 | if (mm == current->mm) { | 344 | if (mm == current->mm) { |
344 | ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), | 345 | ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE), |
345 | page); | 346 | page, vmas); |
346 | } else { | 347 | } else { |
347 | unsigned int flags = 0; | 348 | unsigned int flags = 0; |
348 | 349 | ||
@@ -351,7 +352,18 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, | |||
351 | 352 | ||
352 | down_read(&mm->mmap_sem); | 353 | down_read(&mm->mmap_sem); |
353 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, | 354 | ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page, |
354 | NULL, NULL); | 355 | vmas, NULL); |
356 | /* | ||
357 | * The lifetime of a vaddr_get_pfn() page pin is | ||
358 | * userspace-controlled. In the fs-dax case this could | ||
359 | * lead to indefinite stalls in filesystem operations. | ||
360 | * Disallow attempts to pin fs-dax pages via this | ||
361 | * interface. | ||
362 | */ | ||
363 | if (ret > 0 && vma_is_fsdax(vmas[0])) { | ||
364 | ret = -EOPNOTSUPP; | ||
365 | put_page(page[0]); | ||
366 | } | ||
355 | up_read(&mm->mmap_sem); | 367 | up_read(&mm->mmap_sem); |
356 | } | 368 | } |
357 | 369 | ||
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 610cba276d47..8139bc70ad7d 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -170,7 +170,7 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq) | |||
170 | if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) { | 170 | if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) { |
171 | ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head, | 171 | ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head, |
172 | vhost_net_buf_get_size(rxq), | 172 | vhost_net_buf_get_size(rxq), |
173 | __skb_array_destroy_skb); | 173 | tun_ptr_free); |
174 | rxq->head = rxq->tail = 0; | 174 | rxq->head = rxq->tail = 0; |
175 | } | 175 | } |
176 | } | 176 | } |
@@ -948,6 +948,7 @@ static int vhost_net_open(struct inode *inode, struct file *f) | |||
948 | n->vqs[i].done_idx = 0; | 948 | n->vqs[i].done_idx = 0; |
949 | n->vqs[i].vhost_hlen = 0; | 949 | n->vqs[i].vhost_hlen = 0; |
950 | n->vqs[i].sock_hlen = 0; | 950 | n->vqs[i].sock_hlen = 0; |
951 | n->vqs[i].rx_ring = NULL; | ||
951 | vhost_net_buf_init(&n->vqs[i].rxq); | 952 | vhost_net_buf_init(&n->vqs[i].rxq); |
952 | } | 953 | } |
953 | vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); | 954 | vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); |
@@ -972,6 +973,7 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n, | |||
972 | vhost_net_disable_vq(n, vq); | 973 | vhost_net_disable_vq(n, vq); |
973 | vq->private_data = NULL; | 974 | vq->private_data = NULL; |
974 | vhost_net_buf_unproduce(nvq); | 975 | vhost_net_buf_unproduce(nvq); |
976 | nvq->rx_ring = NULL; | ||
975 | mutex_unlock(&vq->mutex); | 977 | mutex_unlock(&vq->mutex); |
976 | return sock; | 978 | return sock; |
977 | } | 979 | } |
@@ -1161,14 +1163,14 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) | |||
1161 | vhost_net_disable_vq(n, vq); | 1163 | vhost_net_disable_vq(n, vq); |
1162 | vq->private_data = sock; | 1164 | vq->private_data = sock; |
1163 | vhost_net_buf_unproduce(nvq); | 1165 | vhost_net_buf_unproduce(nvq); |
1164 | if (index == VHOST_NET_VQ_RX) | ||
1165 | nvq->rx_ring = get_tap_ptr_ring(fd); | ||
1166 | r = vhost_vq_init_access(vq); | 1166 | r = vhost_vq_init_access(vq); |
1167 | if (r) | 1167 | if (r) |
1168 | goto err_used; | 1168 | goto err_used; |
1169 | r = vhost_net_enable_vq(n, vq); | 1169 | r = vhost_net_enable_vq(n, vq); |
1170 | if (r) | 1170 | if (r) |
1171 | goto err_used; | 1171 | goto err_used; |
1172 | if (index == VHOST_NET_VQ_RX) | ||
1173 | nvq->rx_ring = get_tap_ptr_ring(fd); | ||
1172 | 1174 | ||
1173 | oldubufs = nvq->ubufs; | 1175 | oldubufs = nvq->ubufs; |
1174 | nvq->ubufs = ubufs; | 1176 | nvq->ubufs = ubufs; |
diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c index 6082f653c68a..67773e8bbb95 100644 --- a/drivers/video/fbdev/geode/video_gx.c +++ b/drivers/video/fbdev/geode/video_gx.c | |||
@@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info) | |||
127 | int timeout = 1000; | 127 | int timeout = 1000; |
128 | 128 | ||
129 | /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */ | 129 | /* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */ |
130 | if (cpu_data(0).x86_mask == 1) { | 130 | if (cpu_data(0).x86_stepping == 1) { |
131 | pll_table = gx_pll_table_14MHz; | 131 | pll_table = gx_pll_table_14MHz; |
132 | pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz); | 132 | pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz); |
133 | } else { | 133 | } else { |
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c index af6fc97f4ba4..a436d44f1b7f 100644 --- a/drivers/video/fbdev/sbuslib.c +++ b/drivers/video/fbdev/sbuslib.c | |||
@@ -122,7 +122,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, | |||
122 | unsigned char __user *ured; | 122 | unsigned char __user *ured; |
123 | unsigned char __user *ugreen; | 123 | unsigned char __user *ugreen; |
124 | unsigned char __user *ublue; | 124 | unsigned char __user *ublue; |
125 | int index, count, i; | 125 | unsigned int index, count, i; |
126 | 126 | ||
127 | if (get_user(index, &c->index) || | 127 | if (get_user(index, &c->index) || |
128 | __get_user(count, &c->count) || | 128 | __get_user(count, &c->count) || |
@@ -161,7 +161,7 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg, | |||
161 | unsigned char __user *ugreen; | 161 | unsigned char __user *ugreen; |
162 | unsigned char __user *ublue; | 162 | unsigned char __user *ublue; |
163 | struct fb_cmap *cmap = &info->cmap; | 163 | struct fb_cmap *cmap = &info->cmap; |
164 | int index, count, i; | 164 | unsigned int index, count, i; |
165 | u8 red, green, blue; | 165 | u8 red, green, blue; |
166 | 166 | ||
167 | if (get_user(index, &c->index) || | 167 | if (get_user(index, &c->index) || |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index eb30f3e09a47..71458f493cf8 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -428,8 +428,6 @@ unmap_release: | |||
428 | i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next); | 428 | i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next); |
429 | } | 429 | } |
430 | 430 | ||
431 | vq->vq.num_free += total_sg; | ||
432 | |||
433 | if (indirect) | 431 | if (indirect) |
434 | kfree(desc); | 432 | kfree(desc); |
435 | 433 | ||
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index aff773bcebdb..37460cd6cabb 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -226,6 +226,7 @@ config ZIIRAVE_WATCHDOG | |||
226 | config RAVE_SP_WATCHDOG | 226 | config RAVE_SP_WATCHDOG |
227 | tristate "RAVE SP Watchdog timer" | 227 | tristate "RAVE SP Watchdog timer" |
228 | depends on RAVE_SP_CORE | 228 | depends on RAVE_SP_CORE |
229 | depends on NVMEM || !NVMEM | ||
229 | select WATCHDOG_CORE | 230 | select WATCHDOG_CORE |
230 | help | 231 | help |
231 | Support for the watchdog on RAVE SP device. | 232 | Support for the watchdog on RAVE SP device. |
@@ -903,6 +904,7 @@ config F71808E_WDT | |||
903 | config SP5100_TCO | 904 | config SP5100_TCO |
904 | tristate "AMD/ATI SP5100 TCO Timer/Watchdog" | 905 | tristate "AMD/ATI SP5100 TCO Timer/Watchdog" |
905 | depends on X86 && PCI | 906 | depends on X86 && PCI |
907 | select WATCHDOG_CORE | ||
906 | ---help--- | 908 | ---help--- |
907 | Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO | 909 | Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO |
908 | (Total Cost of Ownership) timer is a watchdog timer that will reboot | 910 | (Total Cost of Ownership) timer is a watchdog timer that will reboot |
@@ -1008,6 +1010,7 @@ config WAFER_WDT | |||
1008 | config I6300ESB_WDT | 1010 | config I6300ESB_WDT |
1009 | tristate "Intel 6300ESB Timer/Watchdog" | 1011 | tristate "Intel 6300ESB Timer/Watchdog" |
1010 | depends on PCI | 1012 | depends on PCI |
1013 | select WATCHDOG_CORE | ||
1011 | ---help--- | 1014 | ---help--- |
1012 | Hardware driver for the watchdog timer built into the Intel | 1015 | Hardware driver for the watchdog timer built into the Intel |
1013 | 6300ESB controller hub. | 1016 | 6300ESB controller hub. |
@@ -1837,6 +1840,7 @@ config WATCHDOG_SUN4V | |||
1837 | config XEN_WDT | 1840 | config XEN_WDT |
1838 | tristate "Xen Watchdog support" | 1841 | tristate "Xen Watchdog support" |
1839 | depends on XEN | 1842 | depends on XEN |
1843 | select WATCHDOG_CORE | ||
1840 | help | 1844 | help |
1841 | Say Y here to support the hypervisor watchdog capability provided | 1845 | Say Y here to support the hypervisor watchdog capability provided |
1842 | by Xen 4.0 and newer. The watchdog timeout period is normally one | 1846 | by Xen 4.0 and newer. The watchdog timeout period is normally one |
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index e0678c14480f..3a33c5344bd5 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c | |||
@@ -566,7 +566,8 @@ static ssize_t watchdog_write(struct file *file, const char __user *buf, | |||
566 | char c; | 566 | char c; |
567 | if (get_user(c, buf + i)) | 567 | if (get_user(c, buf + i)) |
568 | return -EFAULT; | 568 | return -EFAULT; |
569 | expect_close = (c == 'V'); | 569 | if (c == 'V') |
570 | expect_close = true; | ||
570 | } | 571 | } |
571 | 572 | ||
572 | /* Properly order writes across fork()ed processes */ | 573 | /* Properly order writes across fork()ed processes */ |
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index f1f00dfc0e68..b0a158073abd 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
@@ -28,16 +28,7 @@ | |||
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/uaccess.h> | 29 | #include <linux/uaccess.h> |
30 | #include <linux/watchdog.h> | 30 | #include <linux/watchdog.h> |
31 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
32 | #include <linux/dmi.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/nmi.h> | ||
35 | #include <linux/kdebug.h> | ||
36 | #include <linux/notifier.h> | ||
37 | #include <asm/set_memory.h> | ||
38 | #endif /* CONFIG_HPWDT_NMI_DECODING */ | ||
39 | #include <asm/nmi.h> | 31 | #include <asm/nmi.h> |
40 | #include <asm/frame.h> | ||
41 | 32 | ||
42 | #define HPWDT_VERSION "1.4.0" | 33 | #define HPWDT_VERSION "1.4.0" |
43 | #define SECS_TO_TICKS(secs) ((secs) * 1000 / 128) | 34 | #define SECS_TO_TICKS(secs) ((secs) * 1000 / 128) |
@@ -48,6 +39,9 @@ | |||
48 | static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */ | 39 | static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */ |
49 | static unsigned int reload; /* the computed soft_margin */ | 40 | static unsigned int reload; /* the computed soft_margin */ |
50 | static bool nowayout = WATCHDOG_NOWAYOUT; | 41 | static bool nowayout = WATCHDOG_NOWAYOUT; |
42 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
43 | static unsigned int allow_kdump = 1; | ||
44 | #endif | ||
51 | static char expect_release; | 45 | static char expect_release; |
52 | static unsigned long hpwdt_is_open; | 46 | static unsigned long hpwdt_is_open; |
53 | 47 | ||
@@ -63,373 +57,6 @@ static const struct pci_device_id hpwdt_devices[] = { | |||
63 | }; | 57 | }; |
64 | MODULE_DEVICE_TABLE(pci, hpwdt_devices); | 58 | MODULE_DEVICE_TABLE(pci, hpwdt_devices); |
65 | 59 | ||
66 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
67 | #define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */ | ||
68 | #define CRU_BIOS_SIGNATURE_VALUE 0x55524324 | ||
69 | #define PCI_BIOS32_PARAGRAPH_LEN 16 | ||
70 | #define PCI_ROM_BASE1 0x000F0000 | ||
71 | #define ROM_SIZE 0x10000 | ||
72 | |||
73 | struct bios32_service_dir { | ||
74 | u32 signature; | ||
75 | u32 entry_point; | ||
76 | u8 revision; | ||
77 | u8 length; | ||
78 | u8 checksum; | ||
79 | u8 reserved[5]; | ||
80 | }; | ||
81 | |||
82 | /* type 212 */ | ||
83 | struct smbios_cru64_info { | ||
84 | u8 type; | ||
85 | u8 byte_length; | ||
86 | u16 handle; | ||
87 | u32 signature; | ||
88 | u64 physical_address; | ||
89 | u32 double_length; | ||
90 | u32 double_offset; | ||
91 | }; | ||
92 | #define SMBIOS_CRU64_INFORMATION 212 | ||
93 | |||
94 | /* type 219 */ | ||
95 | struct smbios_proliant_info { | ||
96 | u8 type; | ||
97 | u8 byte_length; | ||
98 | u16 handle; | ||
99 | u32 power_features; | ||
100 | u32 omega_features; | ||
101 | u32 reserved; | ||
102 | u32 misc_features; | ||
103 | }; | ||
104 | #define SMBIOS_ICRU_INFORMATION 219 | ||
105 | |||
106 | |||
107 | struct cmn_registers { | ||
108 | union { | ||
109 | struct { | ||
110 | u8 ral; | ||
111 | u8 rah; | ||
112 | u16 rea2; | ||
113 | }; | ||
114 | u32 reax; | ||
115 | } u1; | ||
116 | union { | ||
117 | struct { | ||
118 | u8 rbl; | ||
119 | u8 rbh; | ||
120 | u8 reb2l; | ||
121 | u8 reb2h; | ||
122 | }; | ||
123 | u32 rebx; | ||
124 | } u2; | ||
125 | union { | ||
126 | struct { | ||
127 | u8 rcl; | ||
128 | u8 rch; | ||
129 | u16 rec2; | ||
130 | }; | ||
131 | u32 recx; | ||
132 | } u3; | ||
133 | union { | ||
134 | struct { | ||
135 | u8 rdl; | ||
136 | u8 rdh; | ||
137 | u16 red2; | ||
138 | }; | ||
139 | u32 redx; | ||
140 | } u4; | ||
141 | |||
142 | u32 resi; | ||
143 | u32 redi; | ||
144 | u16 rds; | ||
145 | u16 res; | ||
146 | u32 reflags; | ||
147 | } __attribute__((packed)); | ||
148 | |||
149 | static unsigned int hpwdt_nmi_decoding; | ||
150 | static unsigned int allow_kdump = 1; | ||
151 | static unsigned int is_icru; | ||
152 | static unsigned int is_uefi; | ||
153 | static DEFINE_SPINLOCK(rom_lock); | ||
154 | static void *cru_rom_addr; | ||
155 | static struct cmn_registers cmn_regs; | ||
156 | |||
157 | extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs, | ||
158 | unsigned long *pRomEntry); | ||
159 | |||
160 | #ifdef CONFIG_X86_32 | ||
161 | /* --32 Bit Bios------------------------------------------------------------ */ | ||
162 | |||
163 | #define HPWDT_ARCH 32 | ||
164 | |||
165 | asm(".text \n\t" | ||
166 | ".align 4 \n\t" | ||
167 | ".globl asminline_call \n" | ||
168 | "asminline_call: \n\t" | ||
169 | "pushl %ebp \n\t" | ||
170 | "movl %esp, %ebp \n\t" | ||
171 | "pusha \n\t" | ||
172 | "pushf \n\t" | ||
173 | "push %es \n\t" | ||
174 | "push %ds \n\t" | ||
175 | "pop %es \n\t" | ||
176 | "movl 8(%ebp),%eax \n\t" | ||
177 | "movl 4(%eax),%ebx \n\t" | ||
178 | "movl 8(%eax),%ecx \n\t" | ||
179 | "movl 12(%eax),%edx \n\t" | ||
180 | "movl 16(%eax),%esi \n\t" | ||
181 | "movl 20(%eax),%edi \n\t" | ||
182 | "movl (%eax),%eax \n\t" | ||
183 | "push %cs \n\t" | ||
184 | "call *12(%ebp) \n\t" | ||
185 | "pushf \n\t" | ||
186 | "pushl %eax \n\t" | ||
187 | "movl 8(%ebp),%eax \n\t" | ||
188 | "movl %ebx,4(%eax) \n\t" | ||
189 | "movl %ecx,8(%eax) \n\t" | ||
190 | "movl %edx,12(%eax) \n\t" | ||
191 | "movl %esi,16(%eax) \n\t" | ||
192 | "movl %edi,20(%eax) \n\t" | ||
193 | "movw %ds,24(%eax) \n\t" | ||
194 | "movw %es,26(%eax) \n\t" | ||
195 | "popl %ebx \n\t" | ||
196 | "movl %ebx,(%eax) \n\t" | ||
197 | "popl %ebx \n\t" | ||
198 | "movl %ebx,28(%eax) \n\t" | ||
199 | "pop %es \n\t" | ||
200 | "popf \n\t" | ||
201 | "popa \n\t" | ||
202 | "leave \n\t" | ||
203 | "ret \n\t" | ||
204 | ".previous"); | ||
205 | |||
206 | |||
207 | /* | ||
208 | * cru_detect | ||
209 | * | ||
210 | * Routine Description: | ||
211 | * This function uses the 32-bit BIOS Service Directory record to | ||
212 | * search for a $CRU record. | ||
213 | * | ||
214 | * Return Value: | ||
215 | * 0 : SUCCESS | ||
216 | * <0 : FAILURE | ||
217 | */ | ||
218 | static int cru_detect(unsigned long map_entry, | ||
219 | unsigned long map_offset) | ||
220 | { | ||
221 | void *bios32_map; | ||
222 | unsigned long *bios32_entrypoint; | ||
223 | unsigned long cru_physical_address; | ||
224 | unsigned long cru_length; | ||
225 | unsigned long physical_bios_base = 0; | ||
226 | unsigned long physical_bios_offset = 0; | ||
227 | int retval = -ENODEV; | ||
228 | |||
229 | bios32_map = ioremap(map_entry, (2 * PAGE_SIZE)); | ||
230 | |||
231 | if (bios32_map == NULL) | ||
232 | return -ENODEV; | ||
233 | |||
234 | bios32_entrypoint = bios32_map + map_offset; | ||
235 | |||
236 | cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE; | ||
237 | |||
238 | set_memory_x((unsigned long)bios32_map, 2); | ||
239 | asminline_call(&cmn_regs, bios32_entrypoint); | ||
240 | |||
241 | if (cmn_regs.u1.ral != 0) { | ||
242 | pr_warn("Call succeeded but with an error: 0x%x\n", | ||
243 | cmn_regs.u1.ral); | ||
244 | } else { | ||
245 | physical_bios_base = cmn_regs.u2.rebx; | ||
246 | physical_bios_offset = cmn_regs.u4.redx; | ||
247 | cru_length = cmn_regs.u3.recx; | ||
248 | cru_physical_address = | ||
249 | physical_bios_base + physical_bios_offset; | ||
250 | |||
251 | /* If the values look OK, then map it in. */ | ||
252 | if ((physical_bios_base + physical_bios_offset)) { | ||
253 | cru_rom_addr = | ||
254 | ioremap(cru_physical_address, cru_length); | ||
255 | if (cru_rom_addr) { | ||
256 | set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK, | ||
257 | (cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT); | ||
258 | retval = 0; | ||
259 | } | ||
260 | } | ||
261 | |||
262 | pr_debug("CRU Base Address: 0x%lx\n", physical_bios_base); | ||
263 | pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset); | ||
264 | pr_debug("CRU Length: 0x%lx\n", cru_length); | ||
265 | pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr); | ||
266 | } | ||
267 | iounmap(bios32_map); | ||
268 | return retval; | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * bios_checksum | ||
273 | */ | ||
274 | static int bios_checksum(const char __iomem *ptr, int len) | ||
275 | { | ||
276 | char sum = 0; | ||
277 | int i; | ||
278 | |||
279 | /* | ||
280 | * calculate checksum of size bytes. This should add up | ||
281 | * to zero if we have a valid header. | ||
282 | */ | ||
283 | for (i = 0; i < len; i++) | ||
284 | sum += ptr[i]; | ||
285 | |||
286 | return ((sum == 0) && (len > 0)); | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * bios32_present | ||
291 | * | ||
292 | * Routine Description: | ||
293 | * This function finds the 32-bit BIOS Service Directory | ||
294 | * | ||
295 | * Return Value: | ||
296 | * 0 : SUCCESS | ||
297 | * <0 : FAILURE | ||
298 | */ | ||
299 | static int bios32_present(const char __iomem *p) | ||
300 | { | ||
301 | struct bios32_service_dir *bios_32_ptr; | ||
302 | int length; | ||
303 | unsigned long map_entry, map_offset; | ||
304 | |||
305 | bios_32_ptr = (struct bios32_service_dir *) p; | ||
306 | |||
307 | /* | ||
308 | * Search for signature by checking equal to the swizzled value | ||
309 | * instead of calling another routine to perform a strcmp. | ||
310 | */ | ||
311 | if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) { | ||
312 | length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN; | ||
313 | if (bios_checksum(p, length)) { | ||
314 | /* | ||
315 | * According to the spec, we're looking for the | ||
316 | * first 4KB-aligned address below the entrypoint | ||
317 | * listed in the header. The Service Directory code | ||
318 | * is guaranteed to occupy no more than 2 4KB pages. | ||
319 | */ | ||
320 | map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1); | ||
321 | map_offset = bios_32_ptr->entry_point - map_entry; | ||
322 | |||
323 | return cru_detect(map_entry, map_offset); | ||
324 | } | ||
325 | } | ||
326 | return -ENODEV; | ||
327 | } | ||
328 | |||
329 | static int detect_cru_service(void) | ||
330 | { | ||
331 | char __iomem *p, *q; | ||
332 | int rc = -1; | ||
333 | |||
334 | /* | ||
335 | * Search from 0x0f0000 through 0x0fffff, inclusive. | ||
336 | */ | ||
337 | p = ioremap(PCI_ROM_BASE1, ROM_SIZE); | ||
338 | if (p == NULL) | ||
339 | return -ENOMEM; | ||
340 | |||
341 | for (q = p; q < p + ROM_SIZE; q += 16) { | ||
342 | rc = bios32_present(q); | ||
343 | if (!rc) | ||
344 | break; | ||
345 | } | ||
346 | iounmap(p); | ||
347 | return rc; | ||
348 | } | ||
349 | /* ------------------------------------------------------------------------- */ | ||
350 | #endif /* CONFIG_X86_32 */ | ||
351 | #ifdef CONFIG_X86_64 | ||
352 | /* --64 Bit Bios------------------------------------------------------------ */ | ||
353 | |||
354 | #define HPWDT_ARCH 64 | ||
355 | |||
356 | asm(".text \n\t" | ||
357 | ".align 4 \n\t" | ||
358 | ".globl asminline_call \n\t" | ||
359 | ".type asminline_call, @function \n\t" | ||
360 | "asminline_call: \n\t" | ||
361 | FRAME_BEGIN | ||
362 | "pushq %rax \n\t" | ||
363 | "pushq %rbx \n\t" | ||
364 | "pushq %rdx \n\t" | ||
365 | "pushq %r12 \n\t" | ||
366 | "pushq %r9 \n\t" | ||
367 | "movq %rsi, %r12 \n\t" | ||
368 | "movq %rdi, %r9 \n\t" | ||
369 | "movl 4(%r9),%ebx \n\t" | ||
370 | "movl 8(%r9),%ecx \n\t" | ||
371 | "movl 12(%r9),%edx \n\t" | ||
372 | "movl 16(%r9),%esi \n\t" | ||
373 | "movl 20(%r9),%edi \n\t" | ||
374 | "movl (%r9),%eax \n\t" | ||
375 | "call *%r12 \n\t" | ||
376 | "pushfq \n\t" | ||
377 | "popq %r12 \n\t" | ||
378 | "movl %eax, (%r9) \n\t" | ||
379 | "movl %ebx, 4(%r9) \n\t" | ||
380 | "movl %ecx, 8(%r9) \n\t" | ||
381 | "movl %edx, 12(%r9) \n\t" | ||
382 | "movl %esi, 16(%r9) \n\t" | ||
383 | "movl %edi, 20(%r9) \n\t" | ||
384 | "movq %r12, %rax \n\t" | ||
385 | "movl %eax, 28(%r9) \n\t" | ||
386 | "popq %r9 \n\t" | ||
387 | "popq %r12 \n\t" | ||
388 | "popq %rdx \n\t" | ||
389 | "popq %rbx \n\t" | ||
390 | "popq %rax \n\t" | ||
391 | FRAME_END | ||
392 | "ret \n\t" | ||
393 | ".previous"); | ||
394 | |||
395 | /* | ||
396 | * dmi_find_cru | ||
397 | * | ||
398 | * Routine Description: | ||
399 | * This function checks whether or not a SMBIOS/DMI record is | ||
400 | * the 64bit CRU info or not | ||
401 | */ | ||
402 | static void dmi_find_cru(const struct dmi_header *dm, void *dummy) | ||
403 | { | ||
404 | struct smbios_cru64_info *smbios_cru64_ptr; | ||
405 | unsigned long cru_physical_address; | ||
406 | |||
407 | if (dm->type == SMBIOS_CRU64_INFORMATION) { | ||
408 | smbios_cru64_ptr = (struct smbios_cru64_info *) dm; | ||
409 | if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) { | ||
410 | cru_physical_address = | ||
411 | smbios_cru64_ptr->physical_address + | ||
412 | smbios_cru64_ptr->double_offset; | ||
413 | cru_rom_addr = ioremap(cru_physical_address, | ||
414 | smbios_cru64_ptr->double_length); | ||
415 | set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK, | ||
416 | smbios_cru64_ptr->double_length >> PAGE_SHIFT); | ||
417 | } | ||
418 | } | ||
419 | } | ||
420 | |||
421 | static int detect_cru_service(void) | ||
422 | { | ||
423 | cru_rom_addr = NULL; | ||
424 | |||
425 | dmi_walk(dmi_find_cru, NULL); | ||
426 | |||
427 | /* if cru_rom_addr has been set then we found a CRU service */ | ||
428 | return ((cru_rom_addr != NULL) ? 0 : -ENODEV); | ||
429 | } | ||
430 | /* ------------------------------------------------------------------------- */ | ||
431 | #endif /* CONFIG_X86_64 */ | ||
432 | #endif /* CONFIG_HPWDT_NMI_DECODING */ | ||
433 | 60 | ||
434 | /* | 61 | /* |
435 | * Watchdog operations | 62 | * Watchdog operations |
@@ -486,30 +113,12 @@ static int hpwdt_my_nmi(void) | |||
486 | */ | 113 | */ |
487 | static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs) | 114 | static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs) |
488 | { | 115 | { |
489 | unsigned long rom_pl; | ||
490 | static int die_nmi_called; | ||
491 | |||
492 | if (!hpwdt_nmi_decoding) | ||
493 | return NMI_DONE; | ||
494 | |||
495 | if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi()) | 116 | if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi()) |
496 | return NMI_DONE; | 117 | return NMI_DONE; |
497 | 118 | ||
498 | spin_lock_irqsave(&rom_lock, rom_pl); | ||
499 | if (!die_nmi_called && !is_icru && !is_uefi) | ||
500 | asminline_call(&cmn_regs, cru_rom_addr); | ||
501 | die_nmi_called = 1; | ||
502 | spin_unlock_irqrestore(&rom_lock, rom_pl); | ||
503 | |||
504 | if (allow_kdump) | 119 | if (allow_kdump) |
505 | hpwdt_stop(); | 120 | hpwdt_stop(); |
506 | 121 | ||
507 | if (!is_icru && !is_uefi) { | ||
508 | if (cmn_regs.u1.ral == 0) { | ||
509 | nmi_panic(regs, "An NMI occurred, but unable to determine source.\n"); | ||
510 | return NMI_HANDLED; | ||
511 | } | ||
512 | } | ||
513 | nmi_panic(regs, "An NMI occurred. Depending on your system the reason " | 122 | nmi_panic(regs, "An NMI occurred. Depending on your system the reason " |
514 | "for the NMI is logged in any one of the following " | 123 | "for the NMI is logged in any one of the following " |
515 | "resources:\n" | 124 | "resources:\n" |
@@ -675,84 +284,11 @@ static struct miscdevice hpwdt_miscdev = { | |||
675 | * Init & Exit | 284 | * Init & Exit |
676 | */ | 285 | */ |
677 | 286 | ||
678 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
679 | #ifdef CONFIG_X86_LOCAL_APIC | ||
680 | static void hpwdt_check_nmi_decoding(struct pci_dev *dev) | ||
681 | { | ||
682 | /* | ||
683 | * If nmi_watchdog is turned off then we can turn on | ||
684 | * our nmi decoding capability. | ||
685 | */ | ||
686 | hpwdt_nmi_decoding = 1; | ||
687 | } | ||
688 | #else | ||
689 | static void hpwdt_check_nmi_decoding(struct pci_dev *dev) | ||
690 | { | ||
691 | dev_warn(&dev->dev, "NMI decoding is disabled. " | ||
692 | "Your kernel does not support a NMI Watchdog.\n"); | ||
693 | } | ||
694 | #endif /* CONFIG_X86_LOCAL_APIC */ | ||
695 | |||
696 | /* | ||
697 | * dmi_find_icru | ||
698 | * | ||
699 | * Routine Description: | ||
700 | * This function checks whether or not we are on an iCRU-based server. | ||
701 | * This check is independent of architecture and needs to be made for | ||
702 | * any ProLiant system. | ||
703 | */ | ||
704 | static void dmi_find_icru(const struct dmi_header *dm, void *dummy) | ||
705 | { | ||
706 | struct smbios_proliant_info *smbios_proliant_ptr; | ||
707 | |||
708 | if (dm->type == SMBIOS_ICRU_INFORMATION) { | ||
709 | smbios_proliant_ptr = (struct smbios_proliant_info *) dm; | ||
710 | if (smbios_proliant_ptr->misc_features & 0x01) | ||
711 | is_icru = 1; | ||
712 | if (smbios_proliant_ptr->misc_features & 0x1400) | ||
713 | is_uefi = 1; | ||
714 | } | ||
715 | } | ||
716 | 287 | ||
717 | static int hpwdt_init_nmi_decoding(struct pci_dev *dev) | 288 | static int hpwdt_init_nmi_decoding(struct pci_dev *dev) |
718 | { | 289 | { |
290 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
719 | int retval; | 291 | int retval; |
720 | |||
721 | /* | ||
722 | * On typical CRU-based systems we need to map that service in | ||
723 | * the BIOS. For 32 bit Operating Systems we need to go through | ||
724 | * the 32 Bit BIOS Service Directory. For 64 bit Operating | ||
725 | * Systems we get that service through SMBIOS. | ||
726 | * | ||
727 | * On systems that support the new iCRU service all we need to | ||
728 | * do is call dmi_walk to get the supported flag value and skip | ||
729 | * the old cru detect code. | ||
730 | */ | ||
731 | dmi_walk(dmi_find_icru, NULL); | ||
732 | if (!is_icru && !is_uefi) { | ||
733 | |||
734 | /* | ||
735 | * We need to map the ROM to get the CRU service. | ||
736 | * For 32 bit Operating Systems we need to go through the 32 Bit | ||
737 | * BIOS Service Directory | ||
738 | * For 64 bit Operating Systems we get that service through SMBIOS. | ||
739 | */ | ||
740 | retval = detect_cru_service(); | ||
741 | if (retval < 0) { | ||
742 | dev_warn(&dev->dev, | ||
743 | "Unable to detect the %d Bit CRU Service.\n", | ||
744 | HPWDT_ARCH); | ||
745 | return retval; | ||
746 | } | ||
747 | |||
748 | /* | ||
749 | * We know this is the only CRU call we need to make so lets keep as | ||
750 | * few instructions as possible once the NMI comes in. | ||
751 | */ | ||
752 | cmn_regs.u1.rah = 0x0D; | ||
753 | cmn_regs.u1.ral = 0x02; | ||
754 | } | ||
755 | |||
756 | /* | 292 | /* |
757 | * Only one function can register for NMI_UNKNOWN | 293 | * Only one function can register for NMI_UNKNOWN |
758 | */ | 294 | */ |
@@ -780,45 +316,26 @@ error: | |||
780 | dev_warn(&dev->dev, | 316 | dev_warn(&dev->dev, |
781 | "Unable to register a die notifier (err=%d).\n", | 317 | "Unable to register a die notifier (err=%d).\n", |
782 | retval); | 318 | retval); |
783 | if (cru_rom_addr) | ||
784 | iounmap(cru_rom_addr); | ||
785 | return retval; | 319 | return retval; |
320 | #endif /* CONFIG_HPWDT_NMI_DECODING */ | ||
321 | return 0; | ||
786 | } | 322 | } |
787 | 323 | ||
788 | static void hpwdt_exit_nmi_decoding(void) | 324 | static void hpwdt_exit_nmi_decoding(void) |
789 | { | 325 | { |
326 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
790 | unregister_nmi_handler(NMI_UNKNOWN, "hpwdt"); | 327 | unregister_nmi_handler(NMI_UNKNOWN, "hpwdt"); |
791 | unregister_nmi_handler(NMI_SERR, "hpwdt"); | 328 | unregister_nmi_handler(NMI_SERR, "hpwdt"); |
792 | unregister_nmi_handler(NMI_IO_CHECK, "hpwdt"); | 329 | unregister_nmi_handler(NMI_IO_CHECK, "hpwdt"); |
793 | if (cru_rom_addr) | 330 | #endif |
794 | iounmap(cru_rom_addr); | ||
795 | } | ||
796 | #else /* !CONFIG_HPWDT_NMI_DECODING */ | ||
797 | static void hpwdt_check_nmi_decoding(struct pci_dev *dev) | ||
798 | { | ||
799 | } | ||
800 | |||
801 | static int hpwdt_init_nmi_decoding(struct pci_dev *dev) | ||
802 | { | ||
803 | return 0; | ||
804 | } | 331 | } |
805 | 332 | ||
806 | static void hpwdt_exit_nmi_decoding(void) | ||
807 | { | ||
808 | } | ||
809 | #endif /* CONFIG_HPWDT_NMI_DECODING */ | ||
810 | |||
811 | static int hpwdt_init_one(struct pci_dev *dev, | 333 | static int hpwdt_init_one(struct pci_dev *dev, |
812 | const struct pci_device_id *ent) | 334 | const struct pci_device_id *ent) |
813 | { | 335 | { |
814 | int retval; | 336 | int retval; |
815 | 337 | ||
816 | /* | 338 | /* |
817 | * Check if we can do NMI decoding or not | ||
818 | */ | ||
819 | hpwdt_check_nmi_decoding(dev); | ||
820 | |||
821 | /* | ||
822 | * First let's find out if we are on an iLO2+ server. We will | 339 | * First let's find out if we are on an iLO2+ server. We will |
823 | * not run on a legacy ASM box. | 340 | * not run on a legacy ASM box. |
824 | * So we only support the G5 ProLiant servers and higher. | 341 | * So we only support the G5 ProLiant servers and higher. |
@@ -922,6 +439,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" | |||
922 | #ifdef CONFIG_HPWDT_NMI_DECODING | 439 | #ifdef CONFIG_HPWDT_NMI_DECODING |
923 | module_param(allow_kdump, int, 0); | 440 | module_param(allow_kdump, int, 0); |
924 | MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs"); | 441 | MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs"); |
925 | #endif /* !CONFIG_HPWDT_NMI_DECODING */ | 442 | #endif /* CONFIG_HPWDT_NMI_DECODING */ |
926 | 443 | ||
927 | module_pci_driver(hpwdt_driver); | 444 | module_pci_driver(hpwdt_driver); |
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c index 316c2eb122d2..e8bd9887c566 100644 --- a/drivers/watchdog/sbsa_gwdt.c +++ b/drivers/watchdog/sbsa_gwdt.c | |||
@@ -50,6 +50,7 @@ | |||
50 | */ | 50 | */ |
51 | 51 | ||
52 | #include <linux/io.h> | 52 | #include <linux/io.h> |
53 | #include <linux/io-64-nonatomic-lo-hi.h> | ||
53 | #include <linux/interrupt.h> | 54 | #include <linux/interrupt.h> |
54 | #include <linux/module.h> | 55 | #include <linux/module.h> |
55 | #include <linux/moduleparam.h> | 56 | #include <linux/moduleparam.h> |
@@ -159,7 +160,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd) | |||
159 | !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0)) | 160 | !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0)) |
160 | timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR); | 161 | timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR); |
161 | 162 | ||
162 | timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) - | 163 | timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) - |
163 | arch_counter_get_cntvct(); | 164 | arch_counter_get_cntvct(); |
164 | 165 | ||
165 | do_div(timeleft, gwdt->clk); | 166 | do_div(timeleft, gwdt->clk); |
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c index 6d1fbda0f461..0da9943d405f 100644 --- a/drivers/watchdog/wdat_wdt.c +++ b/drivers/watchdog/wdat_wdt.c | |||
@@ -392,7 +392,7 @@ static int wdat_wdt_probe(struct platform_device *pdev) | |||
392 | 392 | ||
393 | memset(&r, 0, sizeof(r)); | 393 | memset(&r, 0, sizeof(r)); |
394 | r.start = gas->address; | 394 | r.start = gas->address; |
395 | r.end = r.start + gas->access_width; | 395 | r.end = r.start + gas->access_width - 1; |
396 | if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { | 396 | if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { |
397 | r.flags = IORESOURCE_MEM; | 397 | r.flags = IORESOURCE_MEM; |
398 | } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { | 398 | } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 1ab4bd11f5f3..762378f1811c 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
@@ -755,8 +755,8 @@ out: | |||
755 | mutex_unlock(&irq_mapping_update_lock); | 755 | mutex_unlock(&irq_mapping_update_lock); |
756 | return irq; | 756 | return irq; |
757 | error_irq: | 757 | error_irq: |
758 | for (; i >= 0; i--) | 758 | while (nvec--) |
759 | __unbind_from_irq(irq + i); | 759 | __unbind_from_irq(irq + nvec); |
760 | mutex_unlock(&irq_mapping_update_lock); | 760 | mutex_unlock(&irq_mapping_update_lock); |
761 | return ret; | 761 | return ret; |
762 | } | 762 | } |
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 156e5aea36db..b1092fbefa63 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c | |||
@@ -416,7 +416,7 @@ static int pvcalls_back_connect(struct xenbus_device *dev, | |||
416 | sock); | 416 | sock); |
417 | if (!map) { | 417 | if (!map) { |
418 | ret = -EFAULT; | 418 | ret = -EFAULT; |
419 | sock_release(map->sock); | 419 | sock_release(sock); |
420 | } | 420 | } |
421 | 421 | ||
422 | out: | 422 | out: |
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 753d9cb437d0..2f11ca72a281 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c | |||
@@ -60,6 +60,7 @@ struct sock_mapping { | |||
60 | bool active_socket; | 60 | bool active_socket; |
61 | struct list_head list; | 61 | struct list_head list; |
62 | struct socket *sock; | 62 | struct socket *sock; |
63 | atomic_t refcount; | ||
63 | union { | 64 | union { |
64 | struct { | 65 | struct { |
65 | int irq; | 66 | int irq; |
@@ -72,20 +73,25 @@ struct sock_mapping { | |||
72 | wait_queue_head_t inflight_conn_req; | 73 | wait_queue_head_t inflight_conn_req; |
73 | } active; | 74 | } active; |
74 | struct { | 75 | struct { |
75 | /* Socket status */ | 76 | /* |
77 | * Socket status, needs to be 64-bit aligned due to the | ||
78 | * test_and_* functions which have this requirement on arm64. | ||
79 | */ | ||
76 | #define PVCALLS_STATUS_UNINITALIZED 0 | 80 | #define PVCALLS_STATUS_UNINITALIZED 0 |
77 | #define PVCALLS_STATUS_BIND 1 | 81 | #define PVCALLS_STATUS_BIND 1 |
78 | #define PVCALLS_STATUS_LISTEN 2 | 82 | #define PVCALLS_STATUS_LISTEN 2 |
79 | uint8_t status; | 83 | uint8_t status __attribute__((aligned(8))); |
80 | /* | 84 | /* |
81 | * Internal state-machine flags. | 85 | * Internal state-machine flags. |
82 | * Only one accept operation can be inflight for a socket. | 86 | * Only one accept operation can be inflight for a socket. |
83 | * Only one poll operation can be inflight for a given socket. | 87 | * Only one poll operation can be inflight for a given socket. |
88 | * flags needs to be 64-bit aligned due to the test_and_* | ||
89 | * functions which have this requirement on arm64. | ||
84 | */ | 90 | */ |
85 | #define PVCALLS_FLAG_ACCEPT_INFLIGHT 0 | 91 | #define PVCALLS_FLAG_ACCEPT_INFLIGHT 0 |
86 | #define PVCALLS_FLAG_POLL_INFLIGHT 1 | 92 | #define PVCALLS_FLAG_POLL_INFLIGHT 1 |
87 | #define PVCALLS_FLAG_POLL_RET 2 | 93 | #define PVCALLS_FLAG_POLL_RET 2 |
88 | uint8_t flags; | 94 | uint8_t flags __attribute__((aligned(8))); |
89 | uint32_t inflight_req_id; | 95 | uint32_t inflight_req_id; |
90 | struct sock_mapping *accept_map; | 96 | struct sock_mapping *accept_map; |
91 | wait_queue_head_t inflight_accept_req; | 97 | wait_queue_head_t inflight_accept_req; |
@@ -93,6 +99,32 @@ struct sock_mapping { | |||
93 | }; | 99 | }; |
94 | }; | 100 | }; |
95 | 101 | ||
102 | static inline struct sock_mapping *pvcalls_enter_sock(struct socket *sock) | ||
103 | { | ||
104 | struct sock_mapping *map; | ||
105 | |||
106 | if (!pvcalls_front_dev || | ||
107 | dev_get_drvdata(&pvcalls_front_dev->dev) == NULL) | ||
108 | return ERR_PTR(-ENOTCONN); | ||
109 | |||
110 | map = (struct sock_mapping *)sock->sk->sk_send_head; | ||
111 | if (map == NULL) | ||
112 | return ERR_PTR(-ENOTSOCK); | ||
113 | |||
114 | pvcalls_enter(); | ||
115 | atomic_inc(&map->refcount); | ||
116 | return map; | ||
117 | } | ||
118 | |||
119 | static inline void pvcalls_exit_sock(struct socket *sock) | ||
120 | { | ||
121 | struct sock_mapping *map; | ||
122 | |||
123 | map = (struct sock_mapping *)sock->sk->sk_send_head; | ||
124 | atomic_dec(&map->refcount); | ||
125 | pvcalls_exit(); | ||
126 | } | ||
127 | |||
96 | static inline int get_request(struct pvcalls_bedata *bedata, int *req_id) | 128 | static inline int get_request(struct pvcalls_bedata *bedata, int *req_id) |
97 | { | 129 | { |
98 | *req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1); | 130 | *req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1); |
@@ -369,31 +401,23 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr, | |||
369 | if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM) | 401 | if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM) |
370 | return -EOPNOTSUPP; | 402 | return -EOPNOTSUPP; |
371 | 403 | ||
372 | pvcalls_enter(); | 404 | map = pvcalls_enter_sock(sock); |
373 | if (!pvcalls_front_dev) { | 405 | if (IS_ERR(map)) |
374 | pvcalls_exit(); | 406 | return PTR_ERR(map); |
375 | return -ENOTCONN; | ||
376 | } | ||
377 | 407 | ||
378 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); | 408 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); |
379 | 409 | ||
380 | map = (struct sock_mapping *)sock->sk->sk_send_head; | ||
381 | if (!map) { | ||
382 | pvcalls_exit(); | ||
383 | return -ENOTSOCK; | ||
384 | } | ||
385 | |||
386 | spin_lock(&bedata->socket_lock); | 410 | spin_lock(&bedata->socket_lock); |
387 | ret = get_request(bedata, &req_id); | 411 | ret = get_request(bedata, &req_id); |
388 | if (ret < 0) { | 412 | if (ret < 0) { |
389 | spin_unlock(&bedata->socket_lock); | 413 | spin_unlock(&bedata->socket_lock); |
390 | pvcalls_exit(); | 414 | pvcalls_exit_sock(sock); |
391 | return ret; | 415 | return ret; |
392 | } | 416 | } |
393 | ret = create_active(map, &evtchn); | 417 | ret = create_active(map, &evtchn); |
394 | if (ret < 0) { | 418 | if (ret < 0) { |
395 | spin_unlock(&bedata->socket_lock); | 419 | spin_unlock(&bedata->socket_lock); |
396 | pvcalls_exit(); | 420 | pvcalls_exit_sock(sock); |
397 | return ret; | 421 | return ret; |
398 | } | 422 | } |
399 | 423 | ||
@@ -423,7 +447,7 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr, | |||
423 | smp_rmb(); | 447 | smp_rmb(); |
424 | ret = bedata->rsp[req_id].ret; | 448 | ret = bedata->rsp[req_id].ret; |
425 | bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; | 449 | bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; |
426 | pvcalls_exit(); | 450 | pvcalls_exit_sock(sock); |
427 | return ret; | 451 | return ret; |
428 | } | 452 | } |
429 | 453 | ||
@@ -488,23 +512,15 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg, | |||
488 | if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB)) | 512 | if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB)) |
489 | return -EOPNOTSUPP; | 513 | return -EOPNOTSUPP; |
490 | 514 | ||
491 | pvcalls_enter(); | 515 | map = pvcalls_enter_sock(sock); |
492 | if (!pvcalls_front_dev) { | 516 | if (IS_ERR(map)) |
493 | pvcalls_exit(); | 517 | return PTR_ERR(map); |
494 | return -ENOTCONN; | ||
495 | } | ||
496 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); | 518 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); |
497 | 519 | ||
498 | map = (struct sock_mapping *) sock->sk->sk_send_head; | ||
499 | if (!map) { | ||
500 | pvcalls_exit(); | ||
501 | return -ENOTSOCK; | ||
502 | } | ||
503 | |||
504 | mutex_lock(&map->active.out_mutex); | 520 | mutex_lock(&map->active.out_mutex); |
505 | if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) { | 521 | if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) { |
506 | mutex_unlock(&map->active.out_mutex); | 522 | mutex_unlock(&map->active.out_mutex); |
507 | pvcalls_exit(); | 523 | pvcalls_exit_sock(sock); |
508 | return -EAGAIN; | 524 | return -EAGAIN; |
509 | } | 525 | } |
510 | if (len > INT_MAX) | 526 | if (len > INT_MAX) |
@@ -526,7 +542,7 @@ again: | |||
526 | tot_sent = sent; | 542 | tot_sent = sent; |
527 | 543 | ||
528 | mutex_unlock(&map->active.out_mutex); | 544 | mutex_unlock(&map->active.out_mutex); |
529 | pvcalls_exit(); | 545 | pvcalls_exit_sock(sock); |
530 | return tot_sent; | 546 | return tot_sent; |
531 | } | 547 | } |
532 | 548 | ||
@@ -591,19 +607,11 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |||
591 | if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC)) | 607 | if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC)) |
592 | return -EOPNOTSUPP; | 608 | return -EOPNOTSUPP; |
593 | 609 | ||
594 | pvcalls_enter(); | 610 | map = pvcalls_enter_sock(sock); |
595 | if (!pvcalls_front_dev) { | 611 | if (IS_ERR(map)) |
596 | pvcalls_exit(); | 612 | return PTR_ERR(map); |
597 | return -ENOTCONN; | ||
598 | } | ||
599 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); | 613 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); |
600 | 614 | ||
601 | map = (struct sock_mapping *) sock->sk->sk_send_head; | ||
602 | if (!map) { | ||
603 | pvcalls_exit(); | ||
604 | return -ENOTSOCK; | ||
605 | } | ||
606 | |||
607 | mutex_lock(&map->active.in_mutex); | 615 | mutex_lock(&map->active.in_mutex); |
608 | if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER)) | 616 | if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER)) |
609 | len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); | 617 | len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER); |
@@ -623,7 +631,7 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |||
623 | ret = 0; | 631 | ret = 0; |
624 | 632 | ||
625 | mutex_unlock(&map->active.in_mutex); | 633 | mutex_unlock(&map->active.in_mutex); |
626 | pvcalls_exit(); | 634 | pvcalls_exit_sock(sock); |
627 | return ret; | 635 | return ret; |
628 | } | 636 | } |
629 | 637 | ||
@@ -637,24 +645,16 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len) | |||
637 | if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM) | 645 | if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM) |
638 | return -EOPNOTSUPP; | 646 | return -EOPNOTSUPP; |
639 | 647 | ||
640 | pvcalls_enter(); | 648 | map = pvcalls_enter_sock(sock); |
641 | if (!pvcalls_front_dev) { | 649 | if (IS_ERR(map)) |
642 | pvcalls_exit(); | 650 | return PTR_ERR(map); |
643 | return -ENOTCONN; | ||
644 | } | ||
645 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); | 651 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); |
646 | 652 | ||
647 | map = (struct sock_mapping *) sock->sk->sk_send_head; | ||
648 | if (map == NULL) { | ||
649 | pvcalls_exit(); | ||
650 | return -ENOTSOCK; | ||
651 | } | ||
652 | |||
653 | spin_lock(&bedata->socket_lock); | 653 | spin_lock(&bedata->socket_lock); |
654 | ret = get_request(bedata, &req_id); | 654 | ret = get_request(bedata, &req_id); |
655 | if (ret < 0) { | 655 | if (ret < 0) { |
656 | spin_unlock(&bedata->socket_lock); | 656 | spin_unlock(&bedata->socket_lock); |
657 | pvcalls_exit(); | 657 | pvcalls_exit_sock(sock); |
658 | return ret; | 658 | return ret; |
659 | } | 659 | } |
660 | req = RING_GET_REQUEST(&bedata->ring, req_id); | 660 | req = RING_GET_REQUEST(&bedata->ring, req_id); |
@@ -684,7 +684,7 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len) | |||
684 | bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; | 684 | bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; |
685 | 685 | ||
686 | map->passive.status = PVCALLS_STATUS_BIND; | 686 | map->passive.status = PVCALLS_STATUS_BIND; |
687 | pvcalls_exit(); | 687 | pvcalls_exit_sock(sock); |
688 | return 0; | 688 | return 0; |
689 | } | 689 | } |
690 | 690 | ||
@@ -695,21 +695,13 @@ int pvcalls_front_listen(struct socket *sock, int backlog) | |||
695 | struct xen_pvcalls_request *req; | 695 | struct xen_pvcalls_request *req; |
696 | int notify, req_id, ret; | 696 | int notify, req_id, ret; |
697 | 697 | ||
698 | pvcalls_enter(); | 698 | map = pvcalls_enter_sock(sock); |
699 | if (!pvcalls_front_dev) { | 699 | if (IS_ERR(map)) |
700 | pvcalls_exit(); | 700 | return PTR_ERR(map); |
701 | return -ENOTCONN; | ||
702 | } | ||
703 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); | 701 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); |
704 | 702 | ||
705 | map = (struct sock_mapping *) sock->sk->sk_send_head; | ||
706 | if (!map) { | ||
707 | pvcalls_exit(); | ||
708 | return -ENOTSOCK; | ||
709 | } | ||
710 | |||
711 | if (map->passive.status != PVCALLS_STATUS_BIND) { | 703 | if (map->passive.status != PVCALLS_STATUS_BIND) { |
712 | pvcalls_exit(); | 704 | pvcalls_exit_sock(sock); |
713 | return -EOPNOTSUPP; | 705 | return -EOPNOTSUPP; |
714 | } | 706 | } |
715 | 707 | ||
@@ -717,7 +709,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog) | |||
717 | ret = get_request(bedata, &req_id); | 709 | ret = get_request(bedata, &req_id); |
718 | if (ret < 0) { | 710 | if (ret < 0) { |
719 | spin_unlock(&bedata->socket_lock); | 711 | spin_unlock(&bedata->socket_lock); |
720 | pvcalls_exit(); | 712 | pvcalls_exit_sock(sock); |
721 | return ret; | 713 | return ret; |
722 | } | 714 | } |
723 | req = RING_GET_REQUEST(&bedata->ring, req_id); | 715 | req = RING_GET_REQUEST(&bedata->ring, req_id); |
@@ -741,7 +733,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog) | |||
741 | bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; | 733 | bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; |
742 | 734 | ||
743 | map->passive.status = PVCALLS_STATUS_LISTEN; | 735 | map->passive.status = PVCALLS_STATUS_LISTEN; |
744 | pvcalls_exit(); | 736 | pvcalls_exit_sock(sock); |
745 | return ret; | 737 | return ret; |
746 | } | 738 | } |
747 | 739 | ||
@@ -753,21 +745,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) | |||
753 | struct xen_pvcalls_request *req; | 745 | struct xen_pvcalls_request *req; |
754 | int notify, req_id, ret, evtchn, nonblock; | 746 | int notify, req_id, ret, evtchn, nonblock; |
755 | 747 | ||
756 | pvcalls_enter(); | 748 | map = pvcalls_enter_sock(sock); |
757 | if (!pvcalls_front_dev) { | 749 | if (IS_ERR(map)) |
758 | pvcalls_exit(); | 750 | return PTR_ERR(map); |
759 | return -ENOTCONN; | ||
760 | } | ||
761 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); | 751 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); |
762 | 752 | ||
763 | map = (struct sock_mapping *) sock->sk->sk_send_head; | ||
764 | if (!map) { | ||
765 | pvcalls_exit(); | ||
766 | return -ENOTSOCK; | ||
767 | } | ||
768 | |||
769 | if (map->passive.status != PVCALLS_STATUS_LISTEN) { | 753 | if (map->passive.status != PVCALLS_STATUS_LISTEN) { |
770 | pvcalls_exit(); | 754 | pvcalls_exit_sock(sock); |
771 | return -EINVAL; | 755 | return -EINVAL; |
772 | } | 756 | } |
773 | 757 | ||
@@ -785,13 +769,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) | |||
785 | goto received; | 769 | goto received; |
786 | } | 770 | } |
787 | if (nonblock) { | 771 | if (nonblock) { |
788 | pvcalls_exit(); | 772 | pvcalls_exit_sock(sock); |
789 | return -EAGAIN; | 773 | return -EAGAIN; |
790 | } | 774 | } |
791 | if (wait_event_interruptible(map->passive.inflight_accept_req, | 775 | if (wait_event_interruptible(map->passive.inflight_accept_req, |
792 | !test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 776 | !test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
793 | (void *)&map->passive.flags))) { | 777 | (void *)&map->passive.flags))) { |
794 | pvcalls_exit(); | 778 | pvcalls_exit_sock(sock); |
795 | return -EINTR; | 779 | return -EINTR; |
796 | } | 780 | } |
797 | } | 781 | } |
@@ -802,7 +786,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) | |||
802 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 786 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
803 | (void *)&map->passive.flags); | 787 | (void *)&map->passive.flags); |
804 | spin_unlock(&bedata->socket_lock); | 788 | spin_unlock(&bedata->socket_lock); |
805 | pvcalls_exit(); | 789 | pvcalls_exit_sock(sock); |
806 | return ret; | 790 | return ret; |
807 | } | 791 | } |
808 | map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); | 792 | map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); |
@@ -810,7 +794,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) | |||
810 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 794 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
811 | (void *)&map->passive.flags); | 795 | (void *)&map->passive.flags); |
812 | spin_unlock(&bedata->socket_lock); | 796 | spin_unlock(&bedata->socket_lock); |
813 | pvcalls_exit(); | 797 | pvcalls_exit_sock(sock); |
814 | return -ENOMEM; | 798 | return -ENOMEM; |
815 | } | 799 | } |
816 | ret = create_active(map2, &evtchn); | 800 | ret = create_active(map2, &evtchn); |
@@ -819,7 +803,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) | |||
819 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 803 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
820 | (void *)&map->passive.flags); | 804 | (void *)&map->passive.flags); |
821 | spin_unlock(&bedata->socket_lock); | 805 | spin_unlock(&bedata->socket_lock); |
822 | pvcalls_exit(); | 806 | pvcalls_exit_sock(sock); |
823 | return ret; | 807 | return ret; |
824 | } | 808 | } |
825 | list_add_tail(&map2->list, &bedata->socket_mappings); | 809 | list_add_tail(&map2->list, &bedata->socket_mappings); |
@@ -841,13 +825,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) | |||
841 | /* We could check if we have received a response before returning. */ | 825 | /* We could check if we have received a response before returning. */ |
842 | if (nonblock) { | 826 | if (nonblock) { |
843 | WRITE_ONCE(map->passive.inflight_req_id, req_id); | 827 | WRITE_ONCE(map->passive.inflight_req_id, req_id); |
844 | pvcalls_exit(); | 828 | pvcalls_exit_sock(sock); |
845 | return -EAGAIN; | 829 | return -EAGAIN; |
846 | } | 830 | } |
847 | 831 | ||
848 | if (wait_event_interruptible(bedata->inflight_req, | 832 | if (wait_event_interruptible(bedata->inflight_req, |
849 | READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) { | 833 | READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) { |
850 | pvcalls_exit(); | 834 | pvcalls_exit_sock(sock); |
851 | return -EINTR; | 835 | return -EINTR; |
852 | } | 836 | } |
853 | /* read req_id, then the content */ | 837 | /* read req_id, then the content */ |
@@ -862,7 +846,7 @@ received: | |||
862 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, | 846 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, |
863 | (void *)&map->passive.flags); | 847 | (void *)&map->passive.flags); |
864 | pvcalls_front_free_map(bedata, map2); | 848 | pvcalls_front_free_map(bedata, map2); |
865 | pvcalls_exit(); | 849 | pvcalls_exit_sock(sock); |
866 | return -ENOMEM; | 850 | return -ENOMEM; |
867 | } | 851 | } |
868 | newsock->sk->sk_send_head = (void *)map2; | 852 | newsock->sk->sk_send_head = (void *)map2; |
@@ -874,7 +858,7 @@ received: | |||
874 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags); | 858 | clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags); |
875 | wake_up(&map->passive.inflight_accept_req); | 859 | wake_up(&map->passive.inflight_accept_req); |
876 | 860 | ||
877 | pvcalls_exit(); | 861 | pvcalls_exit_sock(sock); |
878 | return ret; | 862 | return ret; |
879 | } | 863 | } |
880 | 864 | ||
@@ -965,23 +949,16 @@ __poll_t pvcalls_front_poll(struct file *file, struct socket *sock, | |||
965 | struct sock_mapping *map; | 949 | struct sock_mapping *map; |
966 | __poll_t ret; | 950 | __poll_t ret; |
967 | 951 | ||
968 | pvcalls_enter(); | 952 | map = pvcalls_enter_sock(sock); |
969 | if (!pvcalls_front_dev) { | 953 | if (IS_ERR(map)) |
970 | pvcalls_exit(); | ||
971 | return EPOLLNVAL; | 954 | return EPOLLNVAL; |
972 | } | ||
973 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); | 955 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); |
974 | 956 | ||
975 | map = (struct sock_mapping *) sock->sk->sk_send_head; | ||
976 | if (!map) { | ||
977 | pvcalls_exit(); | ||
978 | return EPOLLNVAL; | ||
979 | } | ||
980 | if (map->active_socket) | 957 | if (map->active_socket) |
981 | ret = pvcalls_front_poll_active(file, bedata, map, wait); | 958 | ret = pvcalls_front_poll_active(file, bedata, map, wait); |
982 | else | 959 | else |
983 | ret = pvcalls_front_poll_passive(file, bedata, map, wait); | 960 | ret = pvcalls_front_poll_passive(file, bedata, map, wait); |
984 | pvcalls_exit(); | 961 | pvcalls_exit_sock(sock); |
985 | return ret; | 962 | return ret; |
986 | } | 963 | } |
987 | 964 | ||
@@ -995,25 +972,20 @@ int pvcalls_front_release(struct socket *sock) | |||
995 | if (sock->sk == NULL) | 972 | if (sock->sk == NULL) |
996 | return 0; | 973 | return 0; |
997 | 974 | ||
998 | pvcalls_enter(); | 975 | map = pvcalls_enter_sock(sock); |
999 | if (!pvcalls_front_dev) { | 976 | if (IS_ERR(map)) { |
1000 | pvcalls_exit(); | 977 | if (PTR_ERR(map) == -ENOTCONN) |
1001 | return -EIO; | 978 | return -EIO; |
979 | else | ||
980 | return 0; | ||
1002 | } | 981 | } |
1003 | |||
1004 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); | 982 | bedata = dev_get_drvdata(&pvcalls_front_dev->dev); |
1005 | 983 | ||
1006 | map = (struct sock_mapping *) sock->sk->sk_send_head; | ||
1007 | if (map == NULL) { | ||
1008 | pvcalls_exit(); | ||
1009 | return 0; | ||
1010 | } | ||
1011 | |||
1012 | spin_lock(&bedata->socket_lock); | 984 | spin_lock(&bedata->socket_lock); |
1013 | ret = get_request(bedata, &req_id); | 985 | ret = get_request(bedata, &req_id); |
1014 | if (ret < 0) { | 986 | if (ret < 0) { |
1015 | spin_unlock(&bedata->socket_lock); | 987 | spin_unlock(&bedata->socket_lock); |
1016 | pvcalls_exit(); | 988 | pvcalls_exit_sock(sock); |
1017 | return ret; | 989 | return ret; |
1018 | } | 990 | } |
1019 | sock->sk->sk_send_head = NULL; | 991 | sock->sk->sk_send_head = NULL; |
@@ -1043,14 +1015,20 @@ int pvcalls_front_release(struct socket *sock) | |||
1043 | /* | 1015 | /* |
1044 | * We need to make sure that sendmsg/recvmsg on this socket have | 1016 | * We need to make sure that sendmsg/recvmsg on this socket have |
1045 | * not started before we've cleared sk_send_head here. The | 1017 | * not started before we've cleared sk_send_head here. The |
1046 | * easiest (though not optimal) way to guarantee this is to see | 1018 | * easiest way to guarantee this is to see that no pvcalls |
1047 | * that no pvcall (other than us) is in progress. | 1019 | * (other than us) is in progress on this socket. |
1048 | */ | 1020 | */ |
1049 | while (atomic_read(&pvcalls_refcount) > 1) | 1021 | while (atomic_read(&map->refcount) > 1) |
1050 | cpu_relax(); | 1022 | cpu_relax(); |
1051 | 1023 | ||
1052 | pvcalls_front_free_map(bedata, map); | 1024 | pvcalls_front_free_map(bedata, map); |
1053 | } else { | 1025 | } else { |
1026 | wake_up(&bedata->inflight_req); | ||
1027 | wake_up(&map->passive.inflight_accept_req); | ||
1028 | |||
1029 | while (atomic_read(&map->refcount) > 1) | ||
1030 | cpu_relax(); | ||
1031 | |||
1054 | spin_lock(&bedata->socket_lock); | 1032 | spin_lock(&bedata->socket_lock); |
1055 | list_del(&map->list); | 1033 | list_del(&map->list); |
1056 | spin_unlock(&bedata->socket_lock); | 1034 | spin_unlock(&bedata->socket_lock); |
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index bf13d1ec51f3..04e7b3b29bac 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c | |||
@@ -284,6 +284,10 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset, | |||
284 | int pool = tmem_frontswap_poolid; | 284 | int pool = tmem_frontswap_poolid; |
285 | int ret; | 285 | int ret; |
286 | 286 | ||
287 | /* THP isn't supported */ | ||
288 | if (PageTransHuge(page)) | ||
289 | return -1; | ||
290 | |||
287 | if (pool < 0) | 291 | if (pool < 0) |
288 | return -1; | 292 | return -1; |
289 | if (ind64 != ind) | 293 | if (ind64 != ind) |
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h index 149c5e7efc89..092981171df1 100644 --- a/drivers/xen/xenbus/xenbus.h +++ b/drivers/xen/xenbus/xenbus.h | |||
@@ -76,6 +76,7 @@ struct xb_req_data { | |||
76 | struct list_head list; | 76 | struct list_head list; |
77 | wait_queue_head_t wq; | 77 | wait_queue_head_t wq; |
78 | struct xsd_sockmsg msg; | 78 | struct xsd_sockmsg msg; |
79 | uint32_t caller_req_id; | ||
79 | enum xsd_sockmsg_type type; | 80 | enum xsd_sockmsg_type type; |
80 | char *body; | 81 | char *body; |
81 | const struct kvec *vec; | 82 | const struct kvec *vec; |
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index 5b081a01779d..d239fc3c5e3d 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c | |||
@@ -309,6 +309,7 @@ static int process_msg(void) | |||
309 | goto out; | 309 | goto out; |
310 | 310 | ||
311 | if (req->state == xb_req_state_wait_reply) { | 311 | if (req->state == xb_req_state_wait_reply) { |
312 | req->msg.req_id = req->caller_req_id; | ||
312 | req->msg.type = state.msg.type; | 313 | req->msg.type = state.msg.type; |
313 | req->msg.len = state.msg.len; | 314 | req->msg.len = state.msg.len; |
314 | req->body = state.body; | 315 | req->body = state.body; |
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 74888cacd0b0..ec9eb4fba59c 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -466,8 +466,11 @@ int xenbus_probe_node(struct xen_bus_type *bus, | |||
466 | 466 | ||
467 | /* Register with generic device framework. */ | 467 | /* Register with generic device framework. */ |
468 | err = device_register(&xendev->dev); | 468 | err = device_register(&xendev->dev); |
469 | if (err) | 469 | if (err) { |
470 | put_device(&xendev->dev); | ||
471 | xendev = NULL; | ||
470 | goto fail; | 472 | goto fail; |
473 | } | ||
471 | 474 | ||
472 | return 0; | 475 | return 0; |
473 | fail: | 476 | fail: |
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 3e59590c7254..3f3b29398ab8 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c | |||
@@ -227,6 +227,8 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg) | |||
227 | req->state = xb_req_state_queued; | 227 | req->state = xb_req_state_queued; |
228 | init_waitqueue_head(&req->wq); | 228 | init_waitqueue_head(&req->wq); |
229 | 229 | ||
230 | /* Save the caller req_id and restore it later in the reply */ | ||
231 | req->caller_req_id = req->msg.req_id; | ||
230 | req->msg.req_id = xs_request_enter(req); | 232 | req->msg.req_id = xs_request_enter(req); |
231 | 233 | ||
232 | mutex_lock(&xb_write_mutex); | 234 | mutex_lock(&xb_write_mutex); |
@@ -310,6 +312,7 @@ static void *xs_talkv(struct xenbus_transaction t, | |||
310 | req->num_vecs = num_vecs; | 312 | req->num_vecs = num_vecs; |
311 | req->cb = xs_wake_up; | 313 | req->cb = xs_wake_up; |
312 | 314 | ||
315 | msg.req_id = 0; | ||
313 | msg.tx_id = t.id; | 316 | msg.tx_id = t.id; |
314 | msg.type = type; | 317 | msg.type = type; |
315 | msg.len = 0; | 318 | msg.len = 0; |