diff options
Diffstat (limited to 'drivers')
420 files changed, 4098 insertions, 3018 deletions
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index d4551e33fa71..8569b79e8b58 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c | |||
@@ -611,8 +611,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id, | |||
611 | 611 | ||
612 | /* Move to ITS specific data */ | 612 | /* Move to ITS specific data */ |
613 | its = (struct acpi_iort_its_group *)node->node_data; | 613 | its = (struct acpi_iort_its_group *)node->node_data; |
614 | if (idx > its->its_count) { | 614 | if (idx >= its->its_count) { |
615 | dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n", | 615 | dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n", |
616 | idx, its->its_count); | 616 | idx, its->its_count); |
617 | return -ENXIO; | 617 | return -ENXIO; |
618 | } | 618 | } |
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 28cffaaf9d82..f616b16c1f0b 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c | |||
@@ -232,13 +232,15 @@ int acpi_device_set_power(struct acpi_device *device, int state) | |||
232 | if (device->power.flags.power_resources) | 232 | if (device->power.flags.power_resources) |
233 | result = acpi_power_transition(device, target_state); | 233 | result = acpi_power_transition(device, target_state); |
234 | } else { | 234 | } else { |
235 | int cur_state = device->power.state; | ||
236 | |||
235 | if (device->power.flags.power_resources) { | 237 | if (device->power.flags.power_resources) { |
236 | result = acpi_power_transition(device, ACPI_STATE_D0); | 238 | result = acpi_power_transition(device, ACPI_STATE_D0); |
237 | if (result) | 239 | if (result) |
238 | goto end; | 240 | goto end; |
239 | } | 241 | } |
240 | 242 | ||
241 | if (device->power.state == ACPI_STATE_D0) { | 243 | if (cur_state == ACPI_STATE_D0) { |
242 | int psc; | 244 | int psc; |
243 | 245 | ||
244 | /* Nothing to do here if _PSC is not present. */ | 246 | /* Nothing to do here if _PSC is not present. */ |
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index c02fa27dd3f3..1413324982f0 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c | |||
@@ -1282,7 +1282,7 @@ static ssize_t hw_error_scrub_store(struct device *dev, | |||
1282 | if (rc) | 1282 | if (rc) |
1283 | return rc; | 1283 | return rc; |
1284 | 1284 | ||
1285 | device_lock(dev); | 1285 | nfit_device_lock(dev); |
1286 | nd_desc = dev_get_drvdata(dev); | 1286 | nd_desc = dev_get_drvdata(dev); |
1287 | if (nd_desc) { | 1287 | if (nd_desc) { |
1288 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); | 1288 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
@@ -1299,7 +1299,7 @@ static ssize_t hw_error_scrub_store(struct device *dev, | |||
1299 | break; | 1299 | break; |
1300 | } | 1300 | } |
1301 | } | 1301 | } |
1302 | device_unlock(dev); | 1302 | nfit_device_unlock(dev); |
1303 | if (rc) | 1303 | if (rc) |
1304 | return rc; | 1304 | return rc; |
1305 | return size; | 1305 | return size; |
@@ -1319,7 +1319,7 @@ static ssize_t scrub_show(struct device *dev, | |||
1319 | ssize_t rc = -ENXIO; | 1319 | ssize_t rc = -ENXIO; |
1320 | bool busy; | 1320 | bool busy; |
1321 | 1321 | ||
1322 | device_lock(dev); | 1322 | nfit_device_lock(dev); |
1323 | nd_desc = dev_get_drvdata(dev); | 1323 | nd_desc = dev_get_drvdata(dev); |
1324 | if (!nd_desc) { | 1324 | if (!nd_desc) { |
1325 | device_unlock(dev); | 1325 | device_unlock(dev); |
@@ -1339,7 +1339,7 @@ static ssize_t scrub_show(struct device *dev, | |||
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | mutex_unlock(&acpi_desc->init_mutex); | 1341 | mutex_unlock(&acpi_desc->init_mutex); |
1342 | device_unlock(dev); | 1342 | nfit_device_unlock(dev); |
1343 | return rc; | 1343 | return rc; |
1344 | } | 1344 | } |
1345 | 1345 | ||
@@ -1356,14 +1356,14 @@ static ssize_t scrub_store(struct device *dev, | |||
1356 | if (val != 1) | 1356 | if (val != 1) |
1357 | return -EINVAL; | 1357 | return -EINVAL; |
1358 | 1358 | ||
1359 | device_lock(dev); | 1359 | nfit_device_lock(dev); |
1360 | nd_desc = dev_get_drvdata(dev); | 1360 | nd_desc = dev_get_drvdata(dev); |
1361 | if (nd_desc) { | 1361 | if (nd_desc) { |
1362 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); | 1362 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
1363 | 1363 | ||
1364 | rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); | 1364 | rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); |
1365 | } | 1365 | } |
1366 | device_unlock(dev); | 1366 | nfit_device_unlock(dev); |
1367 | if (rc) | 1367 | if (rc) |
1368 | return rc; | 1368 | return rc; |
1369 | return size; | 1369 | return size; |
@@ -1749,9 +1749,9 @@ static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data) | |||
1749 | struct acpi_device *adev = data; | 1749 | struct acpi_device *adev = data; |
1750 | struct device *dev = &adev->dev; | 1750 | struct device *dev = &adev->dev; |
1751 | 1751 | ||
1752 | device_lock(dev->parent); | 1752 | nfit_device_lock(dev->parent); |
1753 | __acpi_nvdimm_notify(dev, event); | 1753 | __acpi_nvdimm_notify(dev, event); |
1754 | device_unlock(dev->parent); | 1754 | nfit_device_unlock(dev->parent); |
1755 | } | 1755 | } |
1756 | 1756 | ||
1757 | static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) | 1757 | static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) |
@@ -3457,8 +3457,8 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) | |||
3457 | struct device *dev = acpi_desc->dev; | 3457 | struct device *dev = acpi_desc->dev; |
3458 | 3458 | ||
3459 | /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ | 3459 | /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ |
3460 | device_lock(dev); | 3460 | nfit_device_lock(dev); |
3461 | device_unlock(dev); | 3461 | nfit_device_unlock(dev); |
3462 | 3462 | ||
3463 | /* Bounce the init_mutex to complete initial registration */ | 3463 | /* Bounce the init_mutex to complete initial registration */ |
3464 | mutex_lock(&acpi_desc->init_mutex); | 3464 | mutex_lock(&acpi_desc->init_mutex); |
@@ -3602,8 +3602,8 @@ void acpi_nfit_shutdown(void *data) | |||
3602 | * acpi_nfit_ars_rescan() submissions have had a chance to | 3602 | * acpi_nfit_ars_rescan() submissions have had a chance to |
3603 | * either submit or see ->cancel set. | 3603 | * either submit or see ->cancel set. |
3604 | */ | 3604 | */ |
3605 | device_lock(bus_dev); | 3605 | nfit_device_lock(bus_dev); |
3606 | device_unlock(bus_dev); | 3606 | nfit_device_unlock(bus_dev); |
3607 | 3607 | ||
3608 | flush_workqueue(nfit_wq); | 3608 | flush_workqueue(nfit_wq); |
3609 | } | 3609 | } |
@@ -3746,9 +3746,9 @@ EXPORT_SYMBOL_GPL(__acpi_nfit_notify); | |||
3746 | 3746 | ||
3747 | static void acpi_nfit_notify(struct acpi_device *adev, u32 event) | 3747 | static void acpi_nfit_notify(struct acpi_device *adev, u32 event) |
3748 | { | 3748 | { |
3749 | device_lock(&adev->dev); | 3749 | nfit_device_lock(&adev->dev); |
3750 | __acpi_nfit_notify(&adev->dev, adev->handle, event); | 3750 | __acpi_nfit_notify(&adev->dev, adev->handle, event); |
3751 | device_unlock(&adev->dev); | 3751 | nfit_device_unlock(&adev->dev); |
3752 | } | 3752 | } |
3753 | 3753 | ||
3754 | static const struct acpi_device_id acpi_nfit_ids[] = { | 3754 | static const struct acpi_device_id acpi_nfit_ids[] = { |
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 6ee2b02af73e..24241941181c 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h | |||
@@ -312,6 +312,30 @@ static inline struct acpi_nfit_desc *to_acpi_desc( | |||
312 | return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); | 312 | return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); |
313 | } | 313 | } |
314 | 314 | ||
315 | #ifdef CONFIG_PROVE_LOCKING | ||
316 | static inline void nfit_device_lock(struct device *dev) | ||
317 | { | ||
318 | device_lock(dev); | ||
319 | mutex_lock(&dev->lockdep_mutex); | ||
320 | } | ||
321 | |||
322 | static inline void nfit_device_unlock(struct device *dev) | ||
323 | { | ||
324 | mutex_unlock(&dev->lockdep_mutex); | ||
325 | device_unlock(dev); | ||
326 | } | ||
327 | #else | ||
328 | static inline void nfit_device_lock(struct device *dev) | ||
329 | { | ||
330 | device_lock(dev); | ||
331 | } | ||
332 | |||
333 | static inline void nfit_device_unlock(struct device *dev) | ||
334 | { | ||
335 | device_unlock(dev); | ||
336 | } | ||
337 | #endif | ||
338 | |||
315 | const guid_t *to_nfit_uuid(enum nfit_uuids id); | 339 | const guid_t *to_nfit_uuid(enum nfit_uuids id); |
316 | int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz); | 340 | int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz); |
317 | void acpi_nfit_shutdown(void *data); | 341 | void acpi_nfit_shutdown(void *data); |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 0e28270b0fd8..aad6be5c0af0 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -2204,6 +2204,12 @@ int __init acpi_scan_init(void) | |||
2204 | acpi_gpe_apply_masked_gpes(); | 2204 | acpi_gpe_apply_masked_gpes(); |
2205 | acpi_update_all_gpes(); | 2205 | acpi_update_all_gpes(); |
2206 | 2206 | ||
2207 | /* | ||
2208 | * Although we call __add_memory() that is documented to require the | ||
2209 | * device_hotplug_lock, it is not necessary here because this is an | ||
2210 | * early code when userspace or any other code path cannot trigger | ||
2211 | * hotplug/hotunplug operations. | ||
2212 | */ | ||
2207 | mutex_lock(&acpi_scan_lock); | 2213 | mutex_lock(&acpi_scan_lock); |
2208 | /* | 2214 | /* |
2209 | * Enumerate devices in the ACPI namespace. | 2215 | * Enumerate devices in the ACPI namespace. |
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 38a59a630cd4..dc1c83eafc22 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
@@ -2988,7 +2988,7 @@ static void binder_transaction(struct binder_proc *proc, | |||
2988 | else | 2988 | else |
2989 | return_error = BR_DEAD_REPLY; | 2989 | return_error = BR_DEAD_REPLY; |
2990 | mutex_unlock(&context->context_mgr_node_lock); | 2990 | mutex_unlock(&context->context_mgr_node_lock); |
2991 | if (target_node && target_proc == proc) { | 2991 | if (target_node && target_proc->pid == proc->pid) { |
2992 | binder_user_error("%d:%d got transaction to context manager from process owning it\n", | 2992 | binder_user_error("%d:%d got transaction to context manager from process owning it\n", |
2993 | proc->pid, thread->pid); | 2993 | proc->pid, thread->pid); |
2994 | return_error = BR_FAILED_REPLY; | 2994 | return_error = BR_FAILED_REPLY; |
@@ -3239,7 +3239,8 @@ static void binder_transaction(struct binder_proc *proc, | |||
3239 | buffer_offset = off_start_offset; | 3239 | buffer_offset = off_start_offset; |
3240 | off_end_offset = off_start_offset + tr->offsets_size; | 3240 | off_end_offset = off_start_offset + tr->offsets_size; |
3241 | sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); | 3241 | sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); |
3242 | sg_buf_end_offset = sg_buf_offset + extra_buffers_size; | 3242 | sg_buf_end_offset = sg_buf_offset + extra_buffers_size - |
3243 | ALIGN(secctx_sz, sizeof(u64)); | ||
3243 | off_min = 0; | 3244 | off_min = 0; |
3244 | for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; | 3245 | for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; |
3245 | buffer_offset += sizeof(binder_size_t)) { | 3246 | buffer_offset += sizeof(binder_size_t)) { |
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 72312ad2e142..9e9583a6bba9 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c | |||
@@ -338,6 +338,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port, | |||
338 | hpriv->phys[port] = NULL; | 338 | hpriv->phys[port] = NULL; |
339 | rc = 0; | 339 | rc = 0; |
340 | break; | 340 | break; |
341 | case -EPROBE_DEFER: | ||
342 | /* Do not complain yet */ | ||
343 | break; | ||
341 | 344 | ||
342 | default: | 345 | default: |
343 | dev_err(dev, | 346 | dev_err(dev, |
@@ -408,7 +411,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev, | |||
408 | hpriv->mmio = devm_ioremap_resource(dev, | 411 | hpriv->mmio = devm_ioremap_resource(dev, |
409 | platform_get_resource(pdev, IORESOURCE_MEM, 0)); | 412 | platform_get_resource(pdev, IORESOURCE_MEM, 0)); |
410 | if (IS_ERR(hpriv->mmio)) { | 413 | if (IS_ERR(hpriv->mmio)) { |
411 | dev_err(dev, "no mmio space\n"); | ||
412 | rc = PTR_ERR(hpriv->mmio); | 414 | rc = PTR_ERR(hpriv->mmio); |
413 | goto err_out; | 415 | goto err_out; |
414 | } | 416 | } |
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c index 173e6f2dd9af..eefda51f97d3 100644 --- a/drivers/ata/libata-zpodd.c +++ b/drivers/ata/libata-zpodd.c | |||
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) | |||
56 | unsigned int ret; | 56 | unsigned int ret; |
57 | struct rm_feature_desc *desc; | 57 | struct rm_feature_desc *desc; |
58 | struct ata_taskfile tf; | 58 | struct ata_taskfile tf; |
59 | static const char cdb[] = { GPCMD_GET_CONFIGURATION, | 59 | static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION, |
60 | 2, /* only 1 feature descriptor requested */ | 60 | 2, /* only 1 feature descriptor requested */ |
61 | 0, 3, /* 3, removable medium feature */ | 61 | 0, 3, /* 3, removable medium feature */ |
62 | 0, 0, 0,/* reserved */ | 62 | 0, 0, 0,/* reserved */ |
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 302cf0ba1600..8c7a996d1f16 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c | |||
@@ -63,6 +63,7 @@ | |||
63 | #include <asm/byteorder.h> | 63 | #include <asm/byteorder.h> |
64 | #include <linux/vmalloc.h> | 64 | #include <linux/vmalloc.h> |
65 | #include <linux/jiffies.h> | 65 | #include <linux/jiffies.h> |
66 | #include <linux/nospec.h> | ||
66 | #include "iphase.h" | 67 | #include "iphase.h" |
67 | #include "suni.h" | 68 | #include "suni.h" |
68 | #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8)) | 69 | #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8)) |
@@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) | |||
2760 | } | 2761 | } |
2761 | if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; | 2762 | if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; |
2762 | board = ia_cmds.status; | 2763 | board = ia_cmds.status; |
2763 | if ((board < 0) || (board > iadev_count)) | 2764 | |
2764 | board = 0; | 2765 | if ((board < 0) || (board > iadev_count)) |
2766 | board = 0; | ||
2767 | board = array_index_nospec(board, iadev_count + 1); | ||
2768 | |||
2765 | iadev = ia_dev[board]; | 2769 | iadev = ia_dev[board]; |
2766 | switch (ia_cmds.cmd) { | 2770 | switch (ia_cmds.cmd) { |
2767 | case MEMDUMP: | 2771 | case MEMDUMP: |
diff --git a/drivers/base/core.c b/drivers/base/core.c index da84a73f2ba6..636058bbf48a 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -1663,6 +1663,9 @@ void device_initialize(struct device *dev) | |||
1663 | kobject_init(&dev->kobj, &device_ktype); | 1663 | kobject_init(&dev->kobj, &device_ktype); |
1664 | INIT_LIST_HEAD(&dev->dma_pools); | 1664 | INIT_LIST_HEAD(&dev->dma_pools); |
1665 | mutex_init(&dev->mutex); | 1665 | mutex_init(&dev->mutex); |
1666 | #ifdef CONFIG_PROVE_LOCKING | ||
1667 | mutex_init(&dev->lockdep_mutex); | ||
1668 | #endif | ||
1666 | lockdep_set_novalidate_class(&dev->mutex); | 1669 | lockdep_set_novalidate_class(&dev->mutex); |
1667 | spin_lock_init(&dev->devres_lock); | 1670 | spin_lock_init(&dev->devres_lock); |
1668 | INIT_LIST_HEAD(&dev->devres_head); | 1671 | INIT_LIST_HEAD(&dev->devres_head); |
@@ -2211,6 +2214,24 @@ void put_device(struct device *dev) | |||
2211 | } | 2214 | } |
2212 | EXPORT_SYMBOL_GPL(put_device); | 2215 | EXPORT_SYMBOL_GPL(put_device); |
2213 | 2216 | ||
2217 | bool kill_device(struct device *dev) | ||
2218 | { | ||
2219 | /* | ||
2220 | * Require the device lock and set the "dead" flag to guarantee that | ||
2221 | * the update behavior is consistent with the other bitfields near | ||
2222 | * it and that we cannot have an asynchronous probe routine trying | ||
2223 | * to run while we are tearing out the bus/class/sysfs from | ||
2224 | * underneath the device. | ||
2225 | */ | ||
2226 | lockdep_assert_held(&dev->mutex); | ||
2227 | |||
2228 | if (dev->p->dead) | ||
2229 | return false; | ||
2230 | dev->p->dead = true; | ||
2231 | return true; | ||
2232 | } | ||
2233 | EXPORT_SYMBOL_GPL(kill_device); | ||
2234 | |||
2214 | /** | 2235 | /** |
2215 | * device_del - delete device from system. | 2236 | * device_del - delete device from system. |
2216 | * @dev: device. | 2237 | * @dev: device. |
@@ -2230,15 +2251,8 @@ void device_del(struct device *dev) | |||
2230 | struct kobject *glue_dir = NULL; | 2251 | struct kobject *glue_dir = NULL; |
2231 | struct class_interface *class_intf; | 2252 | struct class_interface *class_intf; |
2232 | 2253 | ||
2233 | /* | ||
2234 | * Hold the device lock and set the "dead" flag to guarantee that | ||
2235 | * the update behavior is consistent with the other bitfields near | ||
2236 | * it and that we cannot have an asynchronous probe routine trying | ||
2237 | * to run while we are tearing out the bus/class/sysfs from | ||
2238 | * underneath the device. | ||
2239 | */ | ||
2240 | device_lock(dev); | 2254 | device_lock(dev); |
2241 | dev->p->dead = true; | 2255 | kill_device(dev); |
2242 | device_unlock(dev); | 2256 | device_unlock(dev); |
2243 | 2257 | ||
2244 | /* Notify clients of device removal. This call must come | 2258 | /* Notify clients of device removal. This call must come |
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h index 7048a41973ed..7ecd590e67fe 100644 --- a/drivers/base/firmware_loader/firmware.h +++ b/drivers/base/firmware_loader/firmware.h | |||
@@ -141,8 +141,8 @@ int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed); | |||
141 | int fw_map_paged_buf(struct fw_priv *fw_priv); | 141 | int fw_map_paged_buf(struct fw_priv *fw_priv); |
142 | #else | 142 | #else |
143 | static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {} | 143 | static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {} |
144 | int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; } | 144 | static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; } |
145 | int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; } | 145 | static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; } |
146 | #endif | 146 | #endif |
147 | 147 | ||
148 | #endif /* __FIRMWARE_LOADER_H */ | 148 | #endif /* __FIRMWARE_LOADER_H */ |
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 85f20e371f2f..bd7d3bb8b890 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c | |||
@@ -1726,6 +1726,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, | |||
1726 | /* MSch: invalidate default_params */ | 1726 | /* MSch: invalidate default_params */ |
1727 | default_params[drive].blocks = 0; | 1727 | default_params[drive].blocks = 0; |
1728 | set_capacity(floppy->disk, MAX_DISK_SIZE * 2); | 1728 | set_capacity(floppy->disk, MAX_DISK_SIZE * 2); |
1729 | /* Fall through */ | ||
1729 | case FDFMTEND: | 1730 | case FDFMTEND: |
1730 | case FDFLUSH: | 1731 | case FDFLUSH: |
1731 | /* invalidate the buffer track to force a reread */ | 1732 | /* invalidate the buffer track to force a reread */ |
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 90ebfcae0ce6..2b3103c30857 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c | |||
@@ -5417,7 +5417,7 @@ static int drbd_do_auth(struct drbd_connection *connection) | |||
5417 | unsigned int key_len; | 5417 | unsigned int key_len; |
5418 | char secret[SHARED_SECRET_MAX]; /* 64 byte */ | 5418 | char secret[SHARED_SECRET_MAX]; /* 64 byte */ |
5419 | unsigned int resp_size; | 5419 | unsigned int resp_size; |
5420 | SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm); | 5420 | struct shash_desc *desc; |
5421 | struct packet_info pi; | 5421 | struct packet_info pi; |
5422 | struct net_conf *nc; | 5422 | struct net_conf *nc; |
5423 | int err, rv; | 5423 | int err, rv; |
@@ -5430,6 +5430,13 @@ static int drbd_do_auth(struct drbd_connection *connection) | |||
5430 | memcpy(secret, nc->shared_secret, key_len); | 5430 | memcpy(secret, nc->shared_secret, key_len); |
5431 | rcu_read_unlock(); | 5431 | rcu_read_unlock(); |
5432 | 5432 | ||
5433 | desc = kmalloc(sizeof(struct shash_desc) + | ||
5434 | crypto_shash_descsize(connection->cram_hmac_tfm), | ||
5435 | GFP_KERNEL); | ||
5436 | if (!desc) { | ||
5437 | rv = -1; | ||
5438 | goto fail; | ||
5439 | } | ||
5433 | desc->tfm = connection->cram_hmac_tfm; | 5440 | desc->tfm = connection->cram_hmac_tfm; |
5434 | 5441 | ||
5435 | rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len); | 5442 | rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len); |
@@ -5571,7 +5578,10 @@ static int drbd_do_auth(struct drbd_connection *connection) | |||
5571 | kfree(peers_ch); | 5578 | kfree(peers_ch); |
5572 | kfree(response); | 5579 | kfree(response); |
5573 | kfree(right_response); | 5580 | kfree(right_response); |
5574 | shash_desc_zero(desc); | 5581 | if (desc) { |
5582 | shash_desc_zero(desc); | ||
5583 | kfree(desc); | ||
5584 | } | ||
5575 | 5585 | ||
5576 | return rv; | 5586 | return rv; |
5577 | } | 5587 | } |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 44c9985f352a..3036883fc9f8 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -924,6 +924,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, | |||
924 | struct file *file; | 924 | struct file *file; |
925 | struct inode *inode; | 925 | struct inode *inode; |
926 | struct address_space *mapping; | 926 | struct address_space *mapping; |
927 | struct block_device *claimed_bdev = NULL; | ||
927 | int lo_flags = 0; | 928 | int lo_flags = 0; |
928 | int error; | 929 | int error; |
929 | loff_t size; | 930 | loff_t size; |
@@ -942,10 +943,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, | |||
942 | * here to avoid changing device under exclusive owner. | 943 | * here to avoid changing device under exclusive owner. |
943 | */ | 944 | */ |
944 | if (!(mode & FMODE_EXCL)) { | 945 | if (!(mode & FMODE_EXCL)) { |
945 | bdgrab(bdev); | 946 | claimed_bdev = bd_start_claiming(bdev, loop_set_fd); |
946 | error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd); | 947 | if (IS_ERR(claimed_bdev)) { |
947 | if (error) | 948 | error = PTR_ERR(claimed_bdev); |
948 | goto out_putf; | 949 | goto out_putf; |
950 | } | ||
949 | } | 951 | } |
950 | 952 | ||
951 | error = mutex_lock_killable(&loop_ctl_mutex); | 953 | error = mutex_lock_killable(&loop_ctl_mutex); |
@@ -1015,15 +1017,15 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, | |||
1015 | mutex_unlock(&loop_ctl_mutex); | 1017 | mutex_unlock(&loop_ctl_mutex); |
1016 | if (partscan) | 1018 | if (partscan) |
1017 | loop_reread_partitions(lo, bdev); | 1019 | loop_reread_partitions(lo, bdev); |
1018 | if (!(mode & FMODE_EXCL)) | 1020 | if (claimed_bdev) |
1019 | blkdev_put(bdev, mode | FMODE_EXCL); | 1021 | bd_abort_claiming(bdev, claimed_bdev, loop_set_fd); |
1020 | return 0; | 1022 | return 0; |
1021 | 1023 | ||
1022 | out_unlock: | 1024 | out_unlock: |
1023 | mutex_unlock(&loop_ctl_mutex); | 1025 | mutex_unlock(&loop_ctl_mutex); |
1024 | out_bdev: | 1026 | out_bdev: |
1025 | if (!(mode & FMODE_EXCL)) | 1027 | if (claimed_bdev) |
1026 | blkdev_put(bdev, mode | FMODE_EXCL); | 1028 | bd_abort_claiming(bdev, claimed_bdev, loop_set_fd); |
1027 | out_putf: | 1029 | out_putf: |
1028 | fput(file); | 1030 | fput(file); |
1029 | out: | 1031 | out: |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 9bcde2325893..e21d2ded732b 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -1231,7 +1231,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd, | |||
1231 | struct block_device *bdev) | 1231 | struct block_device *bdev) |
1232 | { | 1232 | { |
1233 | sock_shutdown(nbd); | 1233 | sock_shutdown(nbd); |
1234 | kill_bdev(bdev); | 1234 | __invalidate_device(bdev, true); |
1235 | nbd_bdev_reset(bdev); | 1235 | nbd_bdev_reset(bdev); |
1236 | if (test_and_clear_bit(NBD_HAS_CONFIG_REF, | 1236 | if (test_and_clear_bit(NBD_HAS_CONFIG_REF, |
1237 | &nbd->config->runtime_flags)) | 1237 | &nbd->config->runtime_flags)) |
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c index a55be205b91a..dbfe34664633 100644 --- a/drivers/bluetooth/hci_ath.c +++ b/drivers/bluetooth/hci_ath.c | |||
@@ -98,6 +98,9 @@ static int ath_open(struct hci_uart *hu) | |||
98 | 98 | ||
99 | BT_DBG("hu %p", hu); | 99 | BT_DBG("hu %p", hu); |
100 | 100 | ||
101 | if (!hci_uart_has_flow_control(hu)) | ||
102 | return -EOPNOTSUPP; | ||
103 | |||
101 | ath = kzalloc(sizeof(*ath), GFP_KERNEL); | 104 | ath = kzalloc(sizeof(*ath), GFP_KERNEL); |
102 | if (!ath) | 105 | if (!ath) |
103 | return -ENOMEM; | 106 | return -ENOMEM; |
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 8905ad2edde7..ae2624fce913 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c | |||
@@ -406,6 +406,9 @@ static int bcm_open(struct hci_uart *hu) | |||
406 | 406 | ||
407 | bt_dev_dbg(hu->hdev, "hu %p", hu); | 407 | bt_dev_dbg(hu->hdev, "hu %p", hu); |
408 | 408 | ||
409 | if (!hci_uart_has_flow_control(hu)) | ||
410 | return -EOPNOTSUPP; | ||
411 | |||
409 | bcm = kzalloc(sizeof(*bcm), GFP_KERNEL); | 412 | bcm = kzalloc(sizeof(*bcm), GFP_KERNEL); |
410 | if (!bcm) | 413 | if (!bcm) |
411 | return -ENOMEM; | 414 | return -ENOMEM; |
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 207bae5e0d46..31f25153087d 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c | |||
@@ -391,6 +391,9 @@ static int intel_open(struct hci_uart *hu) | |||
391 | 391 | ||
392 | BT_DBG("hu %p", hu); | 392 | BT_DBG("hu %p", hu); |
393 | 393 | ||
394 | if (!hci_uart_has_flow_control(hu)) | ||
395 | return -EOPNOTSUPP; | ||
396 | |||
394 | intel = kzalloc(sizeof(*intel), GFP_KERNEL); | 397 | intel = kzalloc(sizeof(*intel), GFP_KERNEL); |
395 | if (!intel) | 398 | if (!intel) |
396 | return -ENOMEM; | 399 | return -ENOMEM; |
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 8950e07889fe..85a30fb9177b 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c | |||
@@ -292,6 +292,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) | |||
292 | return 0; | 292 | return 0; |
293 | } | 293 | } |
294 | 294 | ||
295 | /* Check the underlying device or tty has flow control support */ | ||
296 | bool hci_uart_has_flow_control(struct hci_uart *hu) | ||
297 | { | ||
298 | /* serdev nodes check if the needed operations are present */ | ||
299 | if (hu->serdev) | ||
300 | return true; | ||
301 | |||
302 | if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset) | ||
303 | return true; | ||
304 | |||
305 | return false; | ||
306 | } | ||
307 | |||
295 | /* Flow control or un-flow control the device */ | 308 | /* Flow control or un-flow control the device */ |
296 | void hci_uart_set_flow_control(struct hci_uart *hu, bool enable) | 309 | void hci_uart_set_flow_control(struct hci_uart *hu, bool enable) |
297 | { | 310 | { |
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c index f98e5cc343b2..fbc3f7c3a5c7 100644 --- a/drivers/bluetooth/hci_mrvl.c +++ b/drivers/bluetooth/hci_mrvl.c | |||
@@ -59,6 +59,9 @@ static int mrvl_open(struct hci_uart *hu) | |||
59 | 59 | ||
60 | BT_DBG("hu %p", hu); | 60 | BT_DBG("hu %p", hu); |
61 | 61 | ||
62 | if (!hci_uart_has_flow_control(hu)) | ||
63 | return -EOPNOTSUPP; | ||
64 | |||
62 | mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL); | 65 | mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL); |
63 | if (!mrvl) | 66 | if (!mrvl) |
64 | return -ENOMEM; | 67 | return -ENOMEM; |
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 9a5c9c1f9484..82a0a3691a63 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c | |||
@@ -473,6 +473,9 @@ static int qca_open(struct hci_uart *hu) | |||
473 | 473 | ||
474 | BT_DBG("hu %p qca_open", hu); | 474 | BT_DBG("hu %p qca_open", hu); |
475 | 475 | ||
476 | if (!hci_uart_has_flow_control(hu)) | ||
477 | return -EOPNOTSUPP; | ||
478 | |||
476 | qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); | 479 | qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); |
477 | if (!qca) | 480 | if (!qca) |
478 | return -ENOMEM; | 481 | return -ENOMEM; |
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index f11af3912ce6..6ab631101019 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h | |||
@@ -104,6 +104,7 @@ int hci_uart_wait_until_sent(struct hci_uart *hu); | |||
104 | int hci_uart_init_ready(struct hci_uart *hu); | 104 | int hci_uart_init_ready(struct hci_uart *hu); |
105 | void hci_uart_init_work(struct work_struct *work); | 105 | void hci_uart_init_work(struct work_struct *work); |
106 | void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed); | 106 | void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed); |
107 | bool hci_uart_has_flow_control(struct hci_uart *hu); | ||
107 | void hci_uart_set_flow_control(struct hci_uart *hu, bool enable); | 108 | void hci_uart_set_flow_control(struct hci_uart *hu, bool enable); |
108 | void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed, | 109 | void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed, |
109 | unsigned int oper_speed); | 110 | unsigned int oper_speed); |
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 5c39f20378b8..9ac6671bb514 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c | |||
@@ -567,8 +567,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets, | |||
567 | unsigned long long m; | 567 | unsigned long long m; |
568 | 568 | ||
569 | m = hpets->hp_tick_freq + (dis >> 1); | 569 | m = hpets->hp_tick_freq + (dis >> 1); |
570 | do_div(m, dis); | 570 | return div64_ul(m, dis); |
571 | return (unsigned long)m; | ||
572 | } | 571 | } |
573 | 572 | ||
574 | static int | 573 | static int |
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c index 57204335c5f5..285e0b8f9a97 100644 --- a/drivers/char/ipmi/ipmb_dev_int.c +++ b/drivers/char/ipmi/ipmb_dev_int.c | |||
@@ -76,7 +76,7 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count, | |||
76 | struct ipmb_dev *ipmb_dev = to_ipmb_dev(file); | 76 | struct ipmb_dev *ipmb_dev = to_ipmb_dev(file); |
77 | struct ipmb_request_elem *queue_elem; | 77 | struct ipmb_request_elem *queue_elem; |
78 | struct ipmb_msg msg; | 78 | struct ipmb_msg msg; |
79 | ssize_t ret; | 79 | ssize_t ret = 0; |
80 | 80 | ||
81 | memset(&msg, 0, sizeof(msg)); | 81 | memset(&msg, 0, sizeof(msg)); |
82 | 82 | ||
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index d47ad10a35fe..4838c6a9f0f2 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c | |||
@@ -77,6 +77,18 @@ static int tpm_go_idle(struct tpm_chip *chip) | |||
77 | return chip->ops->go_idle(chip); | 77 | return chip->ops->go_idle(chip); |
78 | } | 78 | } |
79 | 79 | ||
80 | static void tpm_clk_enable(struct tpm_chip *chip) | ||
81 | { | ||
82 | if (chip->ops->clk_enable) | ||
83 | chip->ops->clk_enable(chip, true); | ||
84 | } | ||
85 | |||
86 | static void tpm_clk_disable(struct tpm_chip *chip) | ||
87 | { | ||
88 | if (chip->ops->clk_enable) | ||
89 | chip->ops->clk_enable(chip, false); | ||
90 | } | ||
91 | |||
80 | /** | 92 | /** |
81 | * tpm_chip_start() - power on the TPM | 93 | * tpm_chip_start() - power on the TPM |
82 | * @chip: a TPM chip to use | 94 | * @chip: a TPM chip to use |
@@ -89,13 +101,12 @@ int tpm_chip_start(struct tpm_chip *chip) | |||
89 | { | 101 | { |
90 | int ret; | 102 | int ret; |
91 | 103 | ||
92 | if (chip->ops->clk_enable) | 104 | tpm_clk_enable(chip); |
93 | chip->ops->clk_enable(chip, true); | ||
94 | 105 | ||
95 | if (chip->locality == -1) { | 106 | if (chip->locality == -1) { |
96 | ret = tpm_request_locality(chip); | 107 | ret = tpm_request_locality(chip); |
97 | if (ret) { | 108 | if (ret) { |
98 | chip->ops->clk_enable(chip, false); | 109 | tpm_clk_disable(chip); |
99 | return ret; | 110 | return ret; |
100 | } | 111 | } |
101 | } | 112 | } |
@@ -103,8 +114,7 @@ int tpm_chip_start(struct tpm_chip *chip) | |||
103 | ret = tpm_cmd_ready(chip); | 114 | ret = tpm_cmd_ready(chip); |
104 | if (ret) { | 115 | if (ret) { |
105 | tpm_relinquish_locality(chip); | 116 | tpm_relinquish_locality(chip); |
106 | if (chip->ops->clk_enable) | 117 | tpm_clk_disable(chip); |
107 | chip->ops->clk_enable(chip, false); | ||
108 | return ret; | 118 | return ret; |
109 | } | 119 | } |
110 | 120 | ||
@@ -124,8 +134,7 @@ void tpm_chip_stop(struct tpm_chip *chip) | |||
124 | { | 134 | { |
125 | tpm_go_idle(chip); | 135 | tpm_go_idle(chip); |
126 | tpm_relinquish_locality(chip); | 136 | tpm_relinquish_locality(chip); |
127 | if (chip->ops->clk_enable) | 137 | tpm_clk_disable(chip); |
128 | chip->ops->clk_enable(chip, false); | ||
129 | } | 138 | } |
130 | EXPORT_SYMBOL_GPL(tpm_chip_stop); | 139 | EXPORT_SYMBOL_GPL(tpm_chip_stop); |
131 | 140 | ||
@@ -545,6 +554,20 @@ static int tpm_add_hwrng(struct tpm_chip *chip) | |||
545 | return hwrng_register(&chip->hwrng); | 554 | return hwrng_register(&chip->hwrng); |
546 | } | 555 | } |
547 | 556 | ||
557 | static int tpm_get_pcr_allocation(struct tpm_chip *chip) | ||
558 | { | ||
559 | int rc; | ||
560 | |||
561 | rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ? | ||
562 | tpm2_get_pcr_allocation(chip) : | ||
563 | tpm1_get_pcr_allocation(chip); | ||
564 | |||
565 | if (rc > 0) | ||
566 | return -ENODEV; | ||
567 | |||
568 | return rc; | ||
569 | } | ||
570 | |||
548 | /* | 571 | /* |
549 | * tpm_chip_register() - create a character device for the TPM chip | 572 | * tpm_chip_register() - create a character device for the TPM chip |
550 | * @chip: TPM chip to use. | 573 | * @chip: TPM chip to use. |
@@ -564,6 +587,12 @@ int tpm_chip_register(struct tpm_chip *chip) | |||
564 | if (rc) | 587 | if (rc) |
565 | return rc; | 588 | return rc; |
566 | rc = tpm_auto_startup(chip); | 589 | rc = tpm_auto_startup(chip); |
590 | if (rc) { | ||
591 | tpm_chip_stop(chip); | ||
592 | return rc; | ||
593 | } | ||
594 | |||
595 | rc = tpm_get_pcr_allocation(chip); | ||
567 | tpm_chip_stop(chip); | 596 | tpm_chip_stop(chip); |
568 | if (rc) | 597 | if (rc) |
569 | return rc; | 598 | return rc; |
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index e503ffc3aa39..a7fea3e0ca86 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h | |||
@@ -394,6 +394,7 @@ int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf); | |||
394 | ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap, | 394 | ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap, |
395 | const char *desc, size_t min_cap_length); | 395 | const char *desc, size_t min_cap_length); |
396 | int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max); | 396 | int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max); |
397 | int tpm1_get_pcr_allocation(struct tpm_chip *chip); | ||
397 | unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); | 398 | unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); |
398 | int tpm_pm_suspend(struct device *dev); | 399 | int tpm_pm_suspend(struct device *dev); |
399 | int tpm_pm_resume(struct device *dev); | 400 | int tpm_pm_resume(struct device *dev); |
@@ -449,6 +450,7 @@ int tpm2_unseal_trusted(struct tpm_chip *chip, | |||
449 | ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, | 450 | ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, |
450 | u32 *value, const char *desc); | 451 | u32 *value, const char *desc); |
451 | 452 | ||
453 | ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip); | ||
452 | int tpm2_auto_startup(struct tpm_chip *chip); | 454 | int tpm2_auto_startup(struct tpm_chip *chip); |
453 | void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); | 455 | void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); |
454 | unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); | 456 | unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); |
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c index faacbe1ffa1a..149e953ca369 100644 --- a/drivers/char/tpm/tpm1-cmd.c +++ b/drivers/char/tpm/tpm1-cmd.c | |||
@@ -699,18 +699,6 @@ int tpm1_auto_startup(struct tpm_chip *chip) | |||
699 | goto out; | 699 | goto out; |
700 | } | 700 | } |
701 | 701 | ||
702 | chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks), | ||
703 | GFP_KERNEL); | ||
704 | if (!chip->allocated_banks) { | ||
705 | rc = -ENOMEM; | ||
706 | goto out; | ||
707 | } | ||
708 | |||
709 | chip->allocated_banks[0].alg_id = TPM_ALG_SHA1; | ||
710 | chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1]; | ||
711 | chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1; | ||
712 | chip->nr_allocated_banks = 1; | ||
713 | |||
714 | return rc; | 702 | return rc; |
715 | out: | 703 | out: |
716 | if (rc > 0) | 704 | if (rc > 0) |
@@ -779,3 +767,27 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr) | |||
779 | return rc; | 767 | return rc; |
780 | } | 768 | } |
781 | 769 | ||
770 | /** | ||
771 | * tpm1_get_pcr_allocation() - initialize the allocated bank | ||
772 | * @chip: TPM chip to use. | ||
773 | * | ||
774 | * The function initializes the SHA1 allocated bank to extend PCR | ||
775 | * | ||
776 | * Return: | ||
777 | * * 0 on success, | ||
778 | * * < 0 on error. | ||
779 | */ | ||
780 | int tpm1_get_pcr_allocation(struct tpm_chip *chip) | ||
781 | { | ||
782 | chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks), | ||
783 | GFP_KERNEL); | ||
784 | if (!chip->allocated_banks) | ||
785 | return -ENOMEM; | ||
786 | |||
787 | chip->allocated_banks[0].alg_id = TPM_ALG_SHA1; | ||
788 | chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1]; | ||
789 | chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1; | ||
790 | chip->nr_allocated_banks = 1; | ||
791 | |||
792 | return 0; | ||
793 | } | ||
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index d103545e4055..ba9acae83bff 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c | |||
@@ -840,7 +840,7 @@ struct tpm2_pcr_selection { | |||
840 | u8 pcr_select[3]; | 840 | u8 pcr_select[3]; |
841 | } __packed; | 841 | } __packed; |
842 | 842 | ||
843 | static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) | 843 | ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) |
844 | { | 844 | { |
845 | struct tpm2_pcr_selection pcr_selection; | 845 | struct tpm2_pcr_selection pcr_selection; |
846 | struct tpm_buf buf; | 846 | struct tpm_buf buf; |
@@ -1040,10 +1040,6 @@ int tpm2_auto_startup(struct tpm_chip *chip) | |||
1040 | goto out; | 1040 | goto out; |
1041 | } | 1041 | } |
1042 | 1042 | ||
1043 | rc = tpm2_get_pcr_allocation(chip); | ||
1044 | if (rc) | ||
1045 | goto out; | ||
1046 | |||
1047 | rc = tpm2_get_cc_attrs_tbl(chip); | 1043 | rc = tpm2_get_cc_attrs_tbl(chip); |
1048 | 1044 | ||
1049 | out: | 1045 | out: |
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c index 44db83a6d01c..44a46dcc0518 100644 --- a/drivers/clk/at91/clk-generated.c +++ b/drivers/clk/at91/clk-generated.c | |||
@@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw, | |||
141 | continue; | 141 | continue; |
142 | 142 | ||
143 | div = DIV_ROUND_CLOSEST(parent_rate, req->rate); | 143 | div = DIV_ROUND_CLOSEST(parent_rate, req->rate); |
144 | if (div > GENERATED_MAX_DIV + 1) | ||
145 | div = GENERATED_MAX_DIV + 1; | ||
144 | 146 | ||
145 | clk_generated_best_diff(req, parent, parent_rate, div, | 147 | clk_generated_best_diff(req, parent, parent_rate, div, |
146 | &best_diff, &best_rate); | 148 | &best_diff, &best_rate); |
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c index 1aa5f4059251..73b7e238eee7 100644 --- a/drivers/clk/mediatek/clk-mt8183.c +++ b/drivers/clk/mediatek/clk-mt8183.c | |||
@@ -25,9 +25,11 @@ static const struct mtk_fixed_clk top_fixed_clks[] = { | |||
25 | FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000), | 25 | FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000), |
26 | }; | 26 | }; |
27 | 27 | ||
28 | static const struct mtk_fixed_factor top_early_divs[] = { | ||
29 | FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2), | ||
30 | }; | ||
31 | |||
28 | static const struct mtk_fixed_factor top_divs[] = { | 32 | static const struct mtk_fixed_factor top_divs[] = { |
29 | FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, | ||
30 | 2), | ||
31 | FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1, | 33 | FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1, |
32 | 2), | 34 | 2), |
33 | FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1, | 35 | FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1, |
@@ -1148,37 +1150,57 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev) | |||
1148 | return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); | 1150 | return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); |
1149 | } | 1151 | } |
1150 | 1152 | ||
1153 | static struct clk_onecell_data *top_clk_data; | ||
1154 | |||
1155 | static void clk_mt8183_top_init_early(struct device_node *node) | ||
1156 | { | ||
1157 | int i; | ||
1158 | |||
1159 | top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); | ||
1160 | |||
1161 | for (i = 0; i < CLK_TOP_NR_CLK; i++) | ||
1162 | top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER); | ||
1163 | |||
1164 | mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), | ||
1165 | top_clk_data); | ||
1166 | |||
1167 | of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data); | ||
1168 | } | ||
1169 | |||
1170 | CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen", | ||
1171 | clk_mt8183_top_init_early); | ||
1172 | |||
1151 | static int clk_mt8183_top_probe(struct platform_device *pdev) | 1173 | static int clk_mt8183_top_probe(struct platform_device *pdev) |
1152 | { | 1174 | { |
1153 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1175 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1154 | void __iomem *base; | 1176 | void __iomem *base; |
1155 | struct clk_onecell_data *clk_data; | ||
1156 | struct device_node *node = pdev->dev.of_node; | 1177 | struct device_node *node = pdev->dev.of_node; |
1157 | 1178 | ||
1158 | base = devm_ioremap_resource(&pdev->dev, res); | 1179 | base = devm_ioremap_resource(&pdev->dev, res); |
1159 | if (IS_ERR(base)) | 1180 | if (IS_ERR(base)) |
1160 | return PTR_ERR(base); | 1181 | return PTR_ERR(base); |
1161 | 1182 | ||
1162 | clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); | ||
1163 | |||
1164 | mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), | 1183 | mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), |
1165 | clk_data); | 1184 | top_clk_data); |
1185 | |||
1186 | mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), | ||
1187 | top_clk_data); | ||
1166 | 1188 | ||
1167 | mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); | 1189 | mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data); |
1168 | 1190 | ||
1169 | mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), | 1191 | mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), |
1170 | node, &mt8183_clk_lock, clk_data); | 1192 | node, &mt8183_clk_lock, top_clk_data); |
1171 | 1193 | ||
1172 | mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes), | 1194 | mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes), |
1173 | base, &mt8183_clk_lock, clk_data); | 1195 | base, &mt8183_clk_lock, top_clk_data); |
1174 | 1196 | ||
1175 | mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), | 1197 | mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), |
1176 | base, &mt8183_clk_lock, clk_data); | 1198 | base, &mt8183_clk_lock, top_clk_data); |
1177 | 1199 | ||
1178 | mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), | 1200 | mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), |
1179 | clk_data); | 1201 | top_clk_data); |
1180 | 1202 | ||
1181 | return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); | 1203 | return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data); |
1182 | } | 1204 | } |
1183 | 1205 | ||
1184 | static int clk_mt8183_infra_probe(struct platform_device *pdev) | 1206 | static int clk_mt8183_infra_probe(struct platform_device *pdev) |
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 52bbb9ce3807..d4075b130674 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c | |||
@@ -572,17 +572,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev, | |||
572 | unsigned int reg = id / 32; | 572 | unsigned int reg = id / 32; |
573 | unsigned int bit = id % 32; | 573 | unsigned int bit = id % 32; |
574 | u32 bitmask = BIT(bit); | 574 | u32 bitmask = BIT(bit); |
575 | unsigned long flags; | ||
576 | u32 value; | ||
577 | 575 | ||
578 | dev_dbg(priv->dev, "reset %u%02u\n", reg, bit); | 576 | dev_dbg(priv->dev, "reset %u%02u\n", reg, bit); |
579 | 577 | ||
580 | /* Reset module */ | 578 | /* Reset module */ |
581 | spin_lock_irqsave(&priv->rmw_lock, flags); | 579 | writel(bitmask, priv->base + SRCR(reg)); |
582 | value = readl(priv->base + SRCR(reg)); | ||
583 | value |= bitmask; | ||
584 | writel(value, priv->base + SRCR(reg)); | ||
585 | spin_unlock_irqrestore(&priv->rmw_lock, flags); | ||
586 | 580 | ||
587 | /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */ | 581 | /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */ |
588 | udelay(35); | 582 | udelay(35); |
@@ -599,16 +593,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id) | |||
599 | unsigned int reg = id / 32; | 593 | unsigned int reg = id / 32; |
600 | unsigned int bit = id % 32; | 594 | unsigned int bit = id % 32; |
601 | u32 bitmask = BIT(bit); | 595 | u32 bitmask = BIT(bit); |
602 | unsigned long flags; | ||
603 | u32 value; | ||
604 | 596 | ||
605 | dev_dbg(priv->dev, "assert %u%02u\n", reg, bit); | 597 | dev_dbg(priv->dev, "assert %u%02u\n", reg, bit); |
606 | 598 | ||
607 | spin_lock_irqsave(&priv->rmw_lock, flags); | 599 | writel(bitmask, priv->base + SRCR(reg)); |
608 | value = readl(priv->base + SRCR(reg)); | ||
609 | value |= bitmask; | ||
610 | writel(value, priv->base + SRCR(reg)); | ||
611 | spin_unlock_irqrestore(&priv->rmw_lock, flags); | ||
612 | return 0; | 600 | return 0; |
613 | } | 601 | } |
614 | 602 | ||
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig index 91d3d721c801..3c219af25100 100644 --- a/drivers/clk/sprd/Kconfig +++ b/drivers/clk/sprd/Kconfig | |||
@@ -3,6 +3,7 @@ config SPRD_COMMON_CLK | |||
3 | tristate "Clock support for Spreadtrum SoCs" | 3 | tristate "Clock support for Spreadtrum SoCs" |
4 | depends on ARCH_SPRD || COMPILE_TEST | 4 | depends on ARCH_SPRD || COMPILE_TEST |
5 | default ARCH_SPRD | 5 | default ARCH_SPRD |
6 | select REGMAP_MMIO | ||
6 | 7 | ||
7 | if SPRD_COMMON_CLK | 8 | if SPRD_COMMON_CLK |
8 | 9 | ||
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c index 93f39a1d4c3d..c66f566a854c 100644 --- a/drivers/cpufreq/pasemi-cpufreq.c +++ b/drivers/cpufreq/pasemi-cpufreq.c | |||
@@ -131,10 +131,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
131 | int err = -ENODEV; | 131 | int err = -ENODEV; |
132 | 132 | ||
133 | cpu = of_get_cpu_node(policy->cpu, NULL); | 133 | cpu = of_get_cpu_node(policy->cpu, NULL); |
134 | if (!cpu) | ||
135 | goto out; | ||
134 | 136 | ||
137 | max_freqp = of_get_property(cpu, "clock-frequency", NULL); | ||
135 | of_node_put(cpu); | 138 | of_node_put(cpu); |
136 | if (!cpu) | 139 | if (!max_freqp) { |
140 | err = -EINVAL; | ||
137 | goto out; | 141 | goto out; |
142 | } | ||
143 | |||
144 | /* we need the freq in kHz */ | ||
145 | max_freq = *max_freqp / 1000; | ||
138 | 146 | ||
139 | dn = of_find_compatible_node(NULL, NULL, "1682m-sdc"); | 147 | dn = of_find_compatible_node(NULL, NULL, "1682m-sdc"); |
140 | if (!dn) | 148 | if (!dn) |
@@ -171,16 +179,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
171 | } | 179 | } |
172 | 180 | ||
173 | pr_debug("init cpufreq on CPU %d\n", policy->cpu); | 181 | pr_debug("init cpufreq on CPU %d\n", policy->cpu); |
174 | |||
175 | max_freqp = of_get_property(cpu, "clock-frequency", NULL); | ||
176 | if (!max_freqp) { | ||
177 | err = -EINVAL; | ||
178 | goto out_unmap_sdcpwr; | ||
179 | } | ||
180 | |||
181 | /* we need the freq in kHz */ | ||
182 | max_freq = *max_freqp / 1000; | ||
183 | |||
184 | pr_debug("max clock-frequency is at %u kHz\n", max_freq); | 182 | pr_debug("max clock-frequency is at %u kHz\n", max_freq); |
185 | pr_debug("initializing frequency table\n"); | 183 | pr_debug("initializing frequency table\n"); |
186 | 184 | ||
@@ -199,9 +197,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
199 | cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency()); | 197 | cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency()); |
200 | return 0; | 198 | return 0; |
201 | 199 | ||
202 | out_unmap_sdcpwr: | ||
203 | iounmap(sdcpwr_mapbase); | ||
204 | |||
205 | out_unmap_sdcasr: | 200 | out_unmap_sdcasr: |
206 | iounmap(sdcasr_mapbase); | 201 | iounmap(sdcasr_mapbase); |
207 | out: | 202 | out: |
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 3dc1cbf849db..b785e936244f 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c | |||
@@ -957,7 +957,7 @@ static void set_broadcast_channel(struct fw_device *device, int generation) | |||
957 | device->bc_implemented = BC_IMPLEMENTED; | 957 | device->bc_implemented = BC_IMPLEMENTED; |
958 | break; | 958 | break; |
959 | } | 959 | } |
960 | /* else fall through to case address error */ | 960 | /* else, fall through - to case address error */ |
961 | case RCODE_ADDRESS_ERROR: | 961 | case RCODE_ADDRESS_ERROR: |
962 | device->bc_implemented = BC_UNIMPLEMENTED; | 962 | device->bc_implemented = BC_UNIMPLEMENTED; |
963 | } | 963 | } |
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 42566b7be8f5..df8a56a979b9 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c | |||
@@ -284,7 +284,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation, | |||
284 | if ((data[0] & bit) == (data[1] & bit)) | 284 | if ((data[0] & bit) == (data[1] & bit)) |
285 | continue; | 285 | continue; |
286 | 286 | ||
287 | /* 1394-1995 IRM, fall through to retry. */ | 287 | /* fall through - It's a 1394-1995 IRM, retry. */ |
288 | default: | 288 | default: |
289 | if (retry) { | 289 | if (retry) { |
290 | retry--; | 290 | retry--; |
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index 46bd22dde535..94a13fca8267 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c | |||
@@ -54,6 +54,7 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count) | |||
54 | switch (port_type) { | 54 | switch (port_type) { |
55 | case SELFID_PORT_CHILD: | 55 | case SELFID_PORT_CHILD: |
56 | (*child_port_count)++; | 56 | (*child_port_count)++; |
57 | /* fall through */ | ||
57 | case SELFID_PORT_PARENT: | 58 | case SELFID_PORT_PARENT: |
58 | case SELFID_PORT_NCONN: | 59 | case SELFID_PORT_NCONN: |
59 | (*total_port_count)++; | 60 | (*total_port_count)++; |
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 53446e39a32c..ba8d3d0ef32c 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig | |||
@@ -157,7 +157,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK | |||
157 | 157 | ||
158 | config ISCSI_IBFT_FIND | 158 | config ISCSI_IBFT_FIND |
159 | bool "iSCSI Boot Firmware Table Attributes" | 159 | bool "iSCSI Boot Firmware Table Attributes" |
160 | depends on X86 && ACPI | 160 | depends on X86 && ISCSI_IBFT |
161 | default n | 161 | default n |
162 | help | 162 | help |
163 | This option enables the kernel to find the region of memory | 163 | This option enables the kernel to find the region of memory |
@@ -168,7 +168,8 @@ config ISCSI_IBFT_FIND | |||
168 | config ISCSI_IBFT | 168 | config ISCSI_IBFT |
169 | tristate "iSCSI Boot Firmware Table Attributes module" | 169 | tristate "iSCSI Boot Firmware Table Attributes module" |
170 | select ISCSI_BOOT_SYSFS | 170 | select ISCSI_BOOT_SYSFS |
171 | depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL | 171 | select ISCSI_IBFT_FIND if X86 |
172 | depends on ACPI && SCSI && SCSI_LOWLEVEL | ||
172 | default n | 173 | default n |
173 | help | 174 | help |
174 | This option enables support for detection and exposing of iSCSI | 175 | This option enables support for detection and exposing of iSCSI |
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index ab3aa3983833..7e12cbdf957c 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c | |||
@@ -84,6 +84,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information"); | |||
84 | MODULE_LICENSE("GPL"); | 84 | MODULE_LICENSE("GPL"); |
85 | MODULE_VERSION(IBFT_ISCSI_VERSION); | 85 | MODULE_VERSION(IBFT_ISCSI_VERSION); |
86 | 86 | ||
87 | #ifndef CONFIG_ISCSI_IBFT_FIND | ||
88 | struct acpi_table_ibft *ibft_addr; | ||
89 | #endif | ||
90 | |||
87 | struct ibft_hdr { | 91 | struct ibft_hdr { |
88 | u8 id; | 92 | u8 id; |
89 | u8 version; | 93 | u8 version; |
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index 474f304ec109..cdd4f73b4869 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig | |||
@@ -40,6 +40,7 @@ config ALTERA_PR_IP_CORE_PLAT | |||
40 | config FPGA_MGR_ALTERA_PS_SPI | 40 | config FPGA_MGR_ALTERA_PS_SPI |
41 | tristate "Altera FPGA Passive Serial over SPI" | 41 | tristate "Altera FPGA Passive Serial over SPI" |
42 | depends on SPI | 42 | depends on SPI |
43 | select BITREVERSE | ||
43 | help | 44 | help |
44 | FPGA manager driver support for Altera Arria/Cyclone/Stratix | 45 | FPGA manager driver support for Altera Arria/Cyclone/Stratix |
45 | using the passive serial interface over SPI. | 46 | using the passive serial interface over SPI. |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 3ee99d070608..f497003f119c 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -956,9 +956,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) | |||
956 | } | 956 | } |
957 | 957 | ||
958 | if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) | 958 | if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) |
959 | irqflags |= IRQF_TRIGGER_RISING; | 959 | irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? |
960 | IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; | ||
960 | if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE) | 961 | if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE) |
961 | irqflags |= IRQF_TRIGGER_FALLING; | 962 | irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? |
963 | IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; | ||
962 | irqflags |= IRQF_ONESHOT; | 964 | irqflags |= IRQF_ONESHOT; |
963 | 965 | ||
964 | INIT_KFIFO(le->events); | 966 | INIT_KFIFO(le->events); |
@@ -1392,12 +1394,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, | |||
1392 | for (i = 0; i < chip->ngpio; i++) { | 1394 | for (i = 0; i < chip->ngpio; i++) { |
1393 | struct gpio_desc *desc = &gdev->descs[i]; | 1395 | struct gpio_desc *desc = &gdev->descs[i]; |
1394 | 1396 | ||
1395 | if (chip->get_direction && gpiochip_line_is_valid(chip, i)) | 1397 | if (chip->get_direction && gpiochip_line_is_valid(chip, i)) { |
1396 | desc->flags = !chip->get_direction(chip, i) ? | 1398 | if (!chip->get_direction(chip, i)) |
1397 | (1 << FLAG_IS_OUT) : 0; | 1399 | set_bit(FLAG_IS_OUT, &desc->flags); |
1398 | else | 1400 | else |
1399 | desc->flags = !chip->direction_input ? | 1401 | clear_bit(FLAG_IS_OUT, &desc->flags); |
1400 | (1 << FLAG_IS_OUT) : 0; | 1402 | } else { |
1403 | if (!chip->direction_input) | ||
1404 | set_bit(FLAG_IS_OUT, &desc->flags); | ||
1405 | else | ||
1406 | clear_bit(FLAG_IS_OUT, &desc->flags); | ||
1407 | } | ||
1401 | } | 1408 | } |
1402 | 1409 | ||
1403 | acpi_gpiochip_add(chip); | 1410 | acpi_gpiochip_add(chip); |
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 1d80222587ad..3c88420e3497 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
@@ -394,7 +394,7 @@ config DRM_R128 | |||
394 | config DRM_I810 | 394 | config DRM_I810 |
395 | tristate "Intel I810" | 395 | tristate "Intel I810" |
396 | # !PREEMPT because of missing ioctl locking | 396 | # !PREEMPT because of missing ioctl locking |
397 | depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN) | 397 | depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN) |
398 | help | 398 | help |
399 | Choose this option if you have an Intel I810 graphics card. If M is | 399 | Choose this option if you have an Intel I810 graphics card. If M is |
400 | selected, the module will be called i810. AGP support is required | 400 | selected, the module will be called i810. AGP support is required |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 1d3ee9c42f7e..6a5c96e519b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | |||
@@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( | |||
1140 | adev->asic_type != CHIP_FIJI && | 1140 | adev->asic_type != CHIP_FIJI && |
1141 | adev->asic_type != CHIP_POLARIS10 && | 1141 | adev->asic_type != CHIP_POLARIS10 && |
1142 | adev->asic_type != CHIP_POLARIS11 && | 1142 | adev->asic_type != CHIP_POLARIS11 && |
1143 | adev->asic_type != CHIP_POLARIS12) ? | 1143 | adev->asic_type != CHIP_POLARIS12 && |
1144 | adev->asic_type != CHIP_VEGAM) ? | ||
1144 | VI_BO_SIZE_ALIGN : 1; | 1145 | VI_BO_SIZE_ALIGN : 1; |
1145 | 1146 | ||
1146 | mapping_flags = AMDGPU_VM_PAGE_READABLE; | 1147 | mapping_flags = AMDGPU_VM_PAGE_READABLE; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index e069de8b54e6..4e4094f842e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, | |||
1044 | return r; | 1044 | return r; |
1045 | } | 1045 | } |
1046 | 1046 | ||
1047 | fence = amdgpu_ctx_get_fence(ctx, entity, | 1047 | fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); |
1048 | deps[i].handle); | 1048 | amdgpu_ctx_put(ctx); |
1049 | |||
1050 | if (IS_ERR(fence)) | ||
1051 | return PTR_ERR(fence); | ||
1052 | else if (!fence) | ||
1053 | continue; | ||
1049 | 1054 | ||
1050 | if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { | 1055 | if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { |
1051 | struct drm_sched_fence *s_fence = to_drm_sched_fence(fence); | 1056 | struct drm_sched_fence *s_fence; |
1052 | struct dma_fence *old = fence; | 1057 | struct dma_fence *old = fence; |
1053 | 1058 | ||
1059 | s_fence = to_drm_sched_fence(fence); | ||
1054 | fence = dma_fence_get(&s_fence->scheduled); | 1060 | fence = dma_fence_get(&s_fence->scheduled); |
1055 | dma_fence_put(old); | 1061 | dma_fence_put(old); |
1056 | } | 1062 | } |
1057 | 1063 | ||
1058 | if (IS_ERR(fence)) { | 1064 | r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); |
1059 | r = PTR_ERR(fence); | 1065 | dma_fence_put(fence); |
1060 | amdgpu_ctx_put(ctx); | 1066 | if (r) |
1061 | return r; | 1067 | return r; |
1062 | } else if (fence) { | ||
1063 | r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, | ||
1064 | true); | ||
1065 | dma_fence_put(fence); | ||
1066 | amdgpu_ctx_put(ctx); | ||
1067 | if (r) | ||
1068 | return r; | ||
1069 | } | ||
1070 | } | 1068 | } |
1071 | return 0; | 1069 | return 0; |
1072 | } | 1070 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 6d54decef7f8..5652cc72ed3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | |||
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, | |||
707 | thread = (*pos & GENMASK_ULL(59, 52)) >> 52; | 707 | thread = (*pos & GENMASK_ULL(59, 52)) >> 52; |
708 | bank = (*pos & GENMASK_ULL(61, 60)) >> 60; | 708 | bank = (*pos & GENMASK_ULL(61, 60)) >> 60; |
709 | 709 | ||
710 | data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); | 710 | data = kcalloc(1024, sizeof(*data), GFP_KERNEL); |
711 | if (!data) | 711 | if (!data) |
712 | return -ENOMEM; | 712 | return -ENOMEM; |
713 | 713 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f2e8b4238efd..5376328d3fd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
@@ -148,7 +148,7 @@ struct amdgpu_mgpu_info mgpu_info = { | |||
148 | .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), | 148 | .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), |
149 | }; | 149 | }; |
150 | int amdgpu_ras_enable = -1; | 150 | int amdgpu_ras_enable = -1; |
151 | uint amdgpu_ras_mask = 0xffffffff; | 151 | uint amdgpu_ras_mask = 0xfffffffb; |
152 | 152 | ||
153 | /** | 153 | /** |
154 | * DOC: vramlimit (int) | 154 | * DOC: vramlimit (int) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8b7efd0a7028..2b546567853b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
@@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, | |||
159 | struct amdgpu_device *adev = ddev->dev_private; | 159 | struct amdgpu_device *adev = ddev->dev_private; |
160 | enum amd_pm_state_type pm; | 160 | enum amd_pm_state_type pm; |
161 | 161 | ||
162 | if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state) | 162 | if (is_support_sw_smu(adev)) { |
163 | pm = amdgpu_smu_get_current_power_state(adev); | 163 | if (adev->smu.ppt_funcs->get_current_power_state) |
164 | else if (adev->powerplay.pp_funcs->get_current_power_state) | 164 | pm = amdgpu_smu_get_current_power_state(adev); |
165 | else | ||
166 | pm = adev->pm.dpm.user_state; | ||
167 | } else if (adev->powerplay.pp_funcs->get_current_power_state) { | ||
165 | pm = amdgpu_dpm_get_current_power_state(adev); | 168 | pm = amdgpu_dpm_get_current_power_state(adev); |
166 | else | 169 | } else { |
167 | pm = adev->pm.dpm.user_state; | 170 | pm = adev->pm.dpm.user_state; |
171 | } | ||
168 | 172 | ||
169 | return snprintf(buf, PAGE_SIZE, "%s\n", | 173 | return snprintf(buf, PAGE_SIZE, "%s\n", |
170 | (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : | 174 | (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : |
@@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, | |||
191 | goto fail; | 195 | goto fail; |
192 | } | 196 | } |
193 | 197 | ||
194 | if (adev->powerplay.pp_funcs->dispatch_tasks) { | 198 | if (is_support_sw_smu(adev)) { |
199 | mutex_lock(&adev->pm.mutex); | ||
200 | adev->pm.dpm.user_state = state; | ||
201 | mutex_unlock(&adev->pm.mutex); | ||
202 | } else if (adev->powerplay.pp_funcs->dispatch_tasks) { | ||
195 | amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); | 203 | amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); |
196 | } else { | 204 | } else { |
197 | mutex_lock(&adev->pm.mutex); | 205 | mutex_lock(&adev->pm.mutex); |
@@ -1734,7 +1742,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, | |||
1734 | return -EINVAL; | 1742 | return -EINVAL; |
1735 | 1743 | ||
1736 | if (is_support_sw_smu(adev)) { | 1744 | if (is_support_sw_smu(adev)) { |
1737 | err = smu_get_current_rpm(&adev->smu, &speed); | 1745 | err = smu_get_fan_speed_rpm(&adev->smu, &speed); |
1738 | if (err) | 1746 | if (err) |
1739 | return err; | 1747 | return err; |
1740 | } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { | 1748 | } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { |
@@ -1794,7 +1802,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, | |||
1794 | return -EINVAL; | 1802 | return -EINVAL; |
1795 | 1803 | ||
1796 | if (is_support_sw_smu(adev)) { | 1804 | if (is_support_sw_smu(adev)) { |
1797 | err = smu_get_current_rpm(&adev->smu, &rpm); | 1805 | err = smu_get_fan_speed_rpm(&adev->smu, &rpm); |
1798 | if (err) | 1806 | if (err) |
1799 | return err; | 1807 | return err; |
1800 | } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { | 1808 | } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { |
@@ -3067,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a | |||
3067 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) | 3075 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) |
3068 | seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); | 3076 | seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); |
3069 | 3077 | ||
3070 | /* UVD clocks */ | 3078 | if (adev->asic_type > CHIP_VEGA20) { |
3071 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { | 3079 | /* VCN clocks */ |
3072 | if (!value) { | 3080 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { |
3073 | seq_printf(m, "UVD: Disabled\n"); | 3081 | if (!value) { |
3074 | } else { | 3082 | seq_printf(m, "VCN: Disabled\n"); |
3075 | seq_printf(m, "UVD: Enabled\n"); | 3083 | } else { |
3076 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) | 3084 | seq_printf(m, "VCN: Enabled\n"); |
3077 | seq_printf(m, "\t%u MHz (DCLK)\n", value/100); | 3085 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) |
3078 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) | 3086 | seq_printf(m, "\t%u MHz (DCLK)\n", value/100); |
3079 | seq_printf(m, "\t%u MHz (VCLK)\n", value/100); | 3087 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) |
3088 | seq_printf(m, "\t%u MHz (VCLK)\n", value/100); | ||
3089 | } | ||
3080 | } | 3090 | } |
3081 | } | 3091 | seq_printf(m, "\n"); |
3082 | seq_printf(m, "\n"); | 3092 | } else { |
3093 | /* UVD clocks */ | ||
3094 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { | ||
3095 | if (!value) { | ||
3096 | seq_printf(m, "UVD: Disabled\n"); | ||
3097 | } else { | ||
3098 | seq_printf(m, "UVD: Enabled\n"); | ||
3099 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) | ||
3100 | seq_printf(m, "\t%u MHz (DCLK)\n", value/100); | ||
3101 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) | ||
3102 | seq_printf(m, "\t%u MHz (VCLK)\n", value/100); | ||
3103 | } | ||
3104 | } | ||
3105 | seq_printf(m, "\n"); | ||
3083 | 3106 | ||
3084 | /* VCE clocks */ | 3107 | /* VCE clocks */ |
3085 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { | 3108 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { |
3086 | if (!value) { | 3109 | if (!value) { |
3087 | seq_printf(m, "VCE: Disabled\n"); | 3110 | seq_printf(m, "VCE: Disabled\n"); |
3088 | } else { | 3111 | } else { |
3089 | seq_printf(m, "VCE: Enabled\n"); | 3112 | seq_printf(m, "VCE: Enabled\n"); |
3090 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) | 3113 | if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) |
3091 | seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); | 3114 | seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); |
3115 | } | ||
3092 | } | 3116 | } |
3093 | } | 3117 | } |
3094 | 3118 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1a4412e47810..fac7aa2c244f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | |||
@@ -136,11 +136,6 @@ static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, | |||
136 | static int amdgpu_ras_release_vram(struct amdgpu_device *adev, | 136 | static int amdgpu_ras_release_vram(struct amdgpu_device *adev, |
137 | struct amdgpu_bo **bo_ptr); | 137 | struct amdgpu_bo **bo_ptr); |
138 | 138 | ||
139 | static void amdgpu_ras_self_test(struct amdgpu_device *adev) | ||
140 | { | ||
141 | /* TODO */ | ||
142 | } | ||
143 | |||
144 | static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, | 139 | static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, |
145 | size_t size, loff_t *pos) | 140 | size_t size, loff_t *pos) |
146 | { | 141 | { |
@@ -689,6 +684,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, | |||
689 | if (!obj) | 684 | if (!obj) |
690 | return -EINVAL; | 685 | return -EINVAL; |
691 | 686 | ||
687 | if (block_info.block_id != TA_RAS_BLOCK__UMC) { | ||
688 | DRM_INFO("%s error injection is not supported yet\n", | ||
689 | ras_block_str(info->head.block)); | ||
690 | return -EINVAL; | ||
691 | } | ||
692 | |||
692 | ret = psp_ras_trigger_error(&adev->psp, &block_info); | 693 | ret = psp_ras_trigger_error(&adev->psp, &block_info); |
693 | if (ret) | 694 | if (ret) |
694 | DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", | 695 | DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", |
@@ -1557,6 +1558,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev) | |||
1557 | 1558 | ||
1558 | amdgpu_ras_check_supported(adev, &con->hw_supported, | 1559 | amdgpu_ras_check_supported(adev, &con->hw_supported, |
1559 | &con->supported); | 1560 | &con->supported); |
1561 | if (!con->hw_supported) { | ||
1562 | amdgpu_ras_set_context(adev, NULL); | ||
1563 | kfree(con); | ||
1564 | return 0; | ||
1565 | } | ||
1566 | |||
1560 | con->features = 0; | 1567 | con->features = 0; |
1561 | INIT_LIST_HEAD(&con->head); | 1568 | INIT_LIST_HEAD(&con->head); |
1562 | /* Might need get this flag from vbios. */ | 1569 | /* Might need get this flag from vbios. */ |
@@ -1570,8 +1577,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev) | |||
1570 | if (amdgpu_ras_fs_init(adev)) | 1577 | if (amdgpu_ras_fs_init(adev)) |
1571 | goto fs_out; | 1578 | goto fs_out; |
1572 | 1579 | ||
1573 | amdgpu_ras_self_test(adev); | ||
1574 | |||
1575 | DRM_INFO("RAS INFO: ras initialized successfully, " | 1580 | DRM_INFO("RAS INFO: ras initialized successfully, " |
1576 | "hardware ability[%x] ras_mask[%x]\n", | 1581 | "hardware ability[%x] ras_mask[%x]\n", |
1577 | con->hw_supported, con->supported); | 1582 | con->hw_supported, con->supported); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 1675d5837c3c..32773b7523d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | |||
@@ -1441,6 +1441,15 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) | |||
1441 | } | 1441 | } |
1442 | nv_grbm_select(adev, 0, 0, 0, 0); | 1442 | nv_grbm_select(adev, 0, 0, 0, 0); |
1443 | mutex_unlock(&adev->srbm_mutex); | 1443 | mutex_unlock(&adev->srbm_mutex); |
1444 | |||
1445 | /* Initialize all compute VMIDs to have no GDS, GWS, or OA | ||
1446 | access. These should be enabled by FW for target VMIDs. */ | ||
1447 | for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { | ||
1448 | WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); | ||
1449 | WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); | ||
1450 | WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); | ||
1451 | WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); | ||
1452 | } | ||
1444 | } | 1453 | } |
1445 | 1454 | ||
1446 | static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) | 1455 | static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) |
@@ -4611,6 +4620,7 @@ gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, | |||
4611 | cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, | 4620 | cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, |
4612 | TIME_STAMP_INT_ENABLE, 0); | 4621 | TIME_STAMP_INT_ENABLE, 0); |
4613 | WREG32(cp_int_cntl_reg, cp_int_cntl); | 4622 | WREG32(cp_int_cntl_reg, cp_int_cntl); |
4623 | break; | ||
4614 | case AMDGPU_IRQ_STATE_ENABLE: | 4624 | case AMDGPU_IRQ_STATE_ENABLE: |
4615 | cp_int_cntl = RREG32(cp_int_cntl_reg); | 4625 | cp_int_cntl = RREG32(cp_int_cntl_reg); |
4616 | cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, | 4626 | cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 0db9f488da7e..21187275dfd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
@@ -1879,6 +1879,15 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) | |||
1879 | } | 1879 | } |
1880 | cik_srbm_select(adev, 0, 0, 0, 0); | 1880 | cik_srbm_select(adev, 0, 0, 0, 0); |
1881 | mutex_unlock(&adev->srbm_mutex); | 1881 | mutex_unlock(&adev->srbm_mutex); |
1882 | |||
1883 | /* Initialize all compute VMIDs to have no GDS, GWS, or OA | ||
1884 | access. These should be enabled by FW for target VMIDs. */ | ||
1885 | for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { | ||
1886 | WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); | ||
1887 | WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); | ||
1888 | WREG32(amdgpu_gds_reg_offset[i].gws, 0); | ||
1889 | WREG32(amdgpu_gds_reg_offset[i].oa, 0); | ||
1890 | } | ||
1882 | } | 1891 | } |
1883 | 1892 | ||
1884 | static void gfx_v7_0_config_init(struct amdgpu_device *adev) | 1893 | static void gfx_v7_0_config_init(struct amdgpu_device *adev) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5f401b41ef7c..751567f78567 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -3706,6 +3706,15 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev) | |||
3706 | } | 3706 | } |
3707 | vi_srbm_select(adev, 0, 0, 0, 0); | 3707 | vi_srbm_select(adev, 0, 0, 0, 0); |
3708 | mutex_unlock(&adev->srbm_mutex); | 3708 | mutex_unlock(&adev->srbm_mutex); |
3709 | |||
3710 | /* Initialize all compute VMIDs to have no GDS, GWS, or OA | ||
3711 | access. These should be enabled by FW for target VMIDs. */ | ||
3712 | for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { | ||
3713 | WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); | ||
3714 | WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); | ||
3715 | WREG32(amdgpu_gds_reg_offset[i].gws, 0); | ||
3716 | WREG32(amdgpu_gds_reg_offset[i].oa, 0); | ||
3717 | } | ||
3709 | } | 3718 | } |
3710 | 3719 | ||
3711 | static void gfx_v8_0_config_init(struct amdgpu_device *adev) | 3720 | static void gfx_v8_0_config_init(struct amdgpu_device *adev) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index f4c4eea62526..1cf639a51178 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | |||
@@ -1918,6 +1918,15 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) | |||
1918 | } | 1918 | } |
1919 | soc15_grbm_select(adev, 0, 0, 0, 0); | 1919 | soc15_grbm_select(adev, 0, 0, 0, 0); |
1920 | mutex_unlock(&adev->srbm_mutex); | 1920 | mutex_unlock(&adev->srbm_mutex); |
1921 | |||
1922 | /* Initialize all compute VMIDs to have no GDS, GWS, or OA | ||
1923 | access. These should be enabled by FW for target VMIDs. */ | ||
1924 | for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { | ||
1925 | WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); | ||
1926 | WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); | ||
1927 | WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); | ||
1928 | WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); | ||
1929 | } | ||
1921 | } | 1930 | } |
1922 | 1931 | ||
1923 | static void gfx_v9_0_constants_init(struct amdgpu_device *adev) | 1932 | static void gfx_v9_0_constants_init(struct amdgpu_device *adev) |
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 988c0adaca91..1cfc2620b2dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | |||
@@ -372,11 +372,8 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) | |||
372 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, | 372 | WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
373 | upper_32_bits(adev->vcn.gpu_addr)); | 373 | upper_32_bits(adev->vcn.gpu_addr)); |
374 | offset = size; | 374 | offset = size; |
375 | /* No signed header for now from firmware | ||
376 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, | 375 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, |
377 | AMDGPU_UVD_FIRMWARE_OFFSET >> 3); | 376 | AMDGPU_UVD_FIRMWARE_OFFSET >> 3); |
378 | */ | ||
379 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0); | ||
380 | } | 377 | } |
381 | 378 | ||
382 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); | 379 | WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 792371442195..4e3fc284f6ac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c | |||
@@ -668,6 +668,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, | |||
668 | case CHIP_RAVEN: | 668 | case CHIP_RAVEN: |
669 | pcache_info = raven_cache_info; | 669 | pcache_info = raven_cache_info; |
670 | num_of_cache_types = ARRAY_SIZE(raven_cache_info); | 670 | num_of_cache_types = ARRAY_SIZE(raven_cache_info); |
671 | break; | ||
671 | case CHIP_NAVI10: | 672 | case CHIP_NAVI10: |
672 | pcache_info = navi10_cache_info; | 673 | pcache_info = navi10_cache_info; |
673 | num_of_cache_types = ARRAY_SIZE(navi10_cache_info); | 674 | num_of_cache_types = ARRAY_SIZE(navi10_cache_info); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 4f8a6ffc5775..9cd3eb2d90bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | |||
@@ -429,7 +429,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, | |||
429 | 429 | ||
430 | switch (type) { | 430 | switch (type) { |
431 | case KFD_MQD_TYPE_CP: | 431 | case KFD_MQD_TYPE_CP: |
432 | pr_debug("%s@%i\n", __func__, __LINE__); | ||
433 | case KFD_MQD_TYPE_COMPUTE: | 432 | case KFD_MQD_TYPE_COMPUTE: |
434 | pr_debug("%s@%i\n", __func__, __LINE__); | 433 | pr_debug("%s@%i\n", __func__, __LINE__); |
435 | mqd->allocate_mqd = allocate_mqd; | 434 | mqd->allocate_mqd = allocate_mqd; |
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index c1a92c16535c..5cc3acccda2a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c | |||
@@ -262,12 +262,12 @@ void dce110_clk_mgr_construct( | |||
262 | struct dc_context *ctx, | 262 | struct dc_context *ctx, |
263 | struct clk_mgr_internal *clk_mgr) | 263 | struct clk_mgr_internal *clk_mgr) |
264 | { | 264 | { |
265 | dce_clk_mgr_construct(ctx, clk_mgr); | ||
266 | |||
265 | memcpy(clk_mgr->max_clks_by_state, | 267 | memcpy(clk_mgr->max_clks_by_state, |
266 | dce110_max_clks_by_state, | 268 | dce110_max_clks_by_state, |
267 | sizeof(dce110_max_clks_by_state)); | 269 | sizeof(dce110_max_clks_by_state)); |
268 | 270 | ||
269 | dce_clk_mgr_construct(ctx, clk_mgr); | ||
270 | |||
271 | clk_mgr->regs = &disp_clk_regs; | 271 | clk_mgr->regs = &disp_clk_regs; |
272 | clk_mgr->clk_mgr_shift = &disp_clk_shift; | 272 | clk_mgr->clk_mgr_shift = &disp_clk_shift; |
273 | clk_mgr->clk_mgr_mask = &disp_clk_mask; | 273 | clk_mgr->clk_mgr_mask = &disp_clk_mask; |
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index 778392c73187..7c746ef1e32e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c | |||
@@ -226,12 +226,12 @@ void dce112_clk_mgr_construct( | |||
226 | struct dc_context *ctx, | 226 | struct dc_context *ctx, |
227 | struct clk_mgr_internal *clk_mgr) | 227 | struct clk_mgr_internal *clk_mgr) |
228 | { | 228 | { |
229 | dce_clk_mgr_construct(ctx, clk_mgr); | ||
230 | |||
229 | memcpy(clk_mgr->max_clks_by_state, | 231 | memcpy(clk_mgr->max_clks_by_state, |
230 | dce112_max_clks_by_state, | 232 | dce112_max_clks_by_state, |
231 | sizeof(dce112_max_clks_by_state)); | 233 | sizeof(dce112_max_clks_by_state)); |
232 | 234 | ||
233 | dce_clk_mgr_construct(ctx, clk_mgr); | ||
234 | |||
235 | clk_mgr->regs = &disp_clk_regs; | 235 | clk_mgr->regs = &disp_clk_regs; |
236 | clk_mgr->clk_mgr_shift = &disp_clk_shift; | 236 | clk_mgr->clk_mgr_shift = &disp_clk_shift; |
237 | clk_mgr->clk_mgr_mask = &disp_clk_mask; | 237 | clk_mgr->clk_mgr_mask = &disp_clk_mask; |
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c index 906310c3e2eb..5399b8cf6b75 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c | |||
@@ -127,12 +127,12 @@ static struct clk_mgr_funcs dce120_funcs = { | |||
127 | 127 | ||
128 | void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) | 128 | void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) |
129 | { | 129 | { |
130 | dce_clk_mgr_construct(ctx, clk_mgr); | ||
131 | |||
130 | memcpy(clk_mgr->max_clks_by_state, | 132 | memcpy(clk_mgr->max_clks_by_state, |
131 | dce120_max_clks_by_state, | 133 | dce120_max_clks_by_state, |
132 | sizeof(dce120_max_clks_by_state)); | 134 | sizeof(dce120_max_clks_by_state)); |
133 | 135 | ||
134 | dce_clk_mgr_construct(ctx, clk_mgr); | ||
135 | |||
136 | clk_mgr->base.dprefclk_khz = 600000; | 136 | clk_mgr->base.dprefclk_khz = 600000; |
137 | clk_mgr->base.funcs = &dce120_funcs; | 137 | clk_mgr->base.funcs = &dce120_funcs; |
138 | } | 138 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 08a774fc7b67..50bfb5921de0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | |||
@@ -301,6 +301,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, | |||
301 | void dcn2_init_clocks(struct clk_mgr *clk_mgr) | 301 | void dcn2_init_clocks(struct clk_mgr *clk_mgr) |
302 | { | 302 | { |
303 | memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); | 303 | memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); |
304 | // Assumption is that boot state always supports pstate | ||
305 | clk_mgr->clks.p_state_change_support = true; | ||
304 | } | 306 | } |
305 | 307 | ||
306 | void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) | 308 | void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) |
@@ -331,6 +333,7 @@ void dcn20_clk_mgr_construct( | |||
331 | struct dccg *dccg) | 333 | struct dccg *dccg) |
332 | { | 334 | { |
333 | clk_mgr->base.ctx = ctx; | 335 | clk_mgr->base.ctx = ctx; |
336 | clk_mgr->pp_smu = pp_smu; | ||
334 | clk_mgr->base.funcs = &dcn2_funcs; | 337 | clk_mgr->base.funcs = &dcn2_funcs; |
335 | clk_mgr->regs = &clk_mgr_regs; | 338 | clk_mgr->regs = &clk_mgr_regs; |
336 | clk_mgr->clk_mgr_shift = &clk_mgr_shift; | 339 | clk_mgr->clk_mgr_shift = &clk_mgr_shift; |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4ef4dc63e221..fa20201eef3a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c | |||
@@ -502,8 +502,10 @@ void dc_stream_set_static_screen_events(struct dc *dc, | |||
502 | 502 | ||
503 | static void destruct(struct dc *dc) | 503 | static void destruct(struct dc *dc) |
504 | { | 504 | { |
505 | dc_release_state(dc->current_state); | 505 | if (dc->current_state) { |
506 | dc->current_state = NULL; | 506 | dc_release_state(dc->current_state); |
507 | dc->current_state = NULL; | ||
508 | } | ||
507 | 509 | ||
508 | destroy_links(dc); | 510 | destroy_links(dc); |
509 | 511 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 8dbf759eba45..355b4ba12796 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
@@ -532,6 +532,7 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) | |||
532 | uint32_t read_dpcd_retry_cnt = 10; | 532 | uint32_t read_dpcd_retry_cnt = 10; |
533 | enum dc_status status = DC_ERROR_UNEXPECTED; | 533 | enum dc_status status = DC_ERROR_UNEXPECTED; |
534 | int i; | 534 | int i; |
535 | union max_down_spread max_down_spread = { {0} }; | ||
535 | 536 | ||
536 | // Read DPCD 00101h to find out the number of lanes currently set | 537 | // Read DPCD 00101h to find out the number of lanes currently set |
537 | for (i = 0; i < read_dpcd_retry_cnt; i++) { | 538 | for (i = 0; i < read_dpcd_retry_cnt; i++) { |
@@ -553,8 +554,6 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) | |||
553 | msleep(8); | 554 | msleep(8); |
554 | } | 555 | } |
555 | 556 | ||
556 | ASSERT(status == DC_OK); | ||
557 | |||
558 | // Read DPCD 00100h to find if standard link rates are set | 557 | // Read DPCD 00100h to find if standard link rates are set |
559 | core_link_read_dpcd(link, DP_LINK_BW_SET, | 558 | core_link_read_dpcd(link, DP_LINK_BW_SET, |
560 | &link_bw_set, sizeof(link_bw_set)); | 559 | &link_bw_set, sizeof(link_bw_set)); |
@@ -576,6 +575,12 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) | |||
576 | link->cur_link_settings.link_rate = link_bw_set; | 575 | link->cur_link_settings.link_rate = link_bw_set; |
577 | link->cur_link_settings.use_link_rate_set = false; | 576 | link->cur_link_settings.use_link_rate_set = false; |
578 | } | 577 | } |
578 | // Read DPCD 00003h to find the max down spread. | ||
579 | core_link_read_dpcd(link, DP_MAX_DOWNSPREAD, | ||
580 | &max_down_spread.raw, sizeof(max_down_spread)); | ||
581 | link->cur_link_settings.link_spread = | ||
582 | max_down_spread.bits.MAX_DOWN_SPREAD ? | ||
583 | LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; | ||
579 | } | 584 | } |
580 | 585 | ||
581 | static bool detect_dp( | 586 | static bool detect_dp( |
@@ -717,13 +722,6 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) | |||
717 | return false; | 722 | return false; |
718 | } | 723 | } |
719 | 724 | ||
720 | if (link->connector_signal == SIGNAL_TYPE_EDP) { | ||
721 | /* On detect, we want to make sure current link settings are | ||
722 | * up to date, especially if link was powered on by GOP. | ||
723 | */ | ||
724 | read_edp_current_link_settings_on_detect(link); | ||
725 | } | ||
726 | |||
727 | prev_sink = link->local_sink; | 725 | prev_sink = link->local_sink; |
728 | if (prev_sink != NULL) { | 726 | if (prev_sink != NULL) { |
729 | dc_sink_retain(prev_sink); | 727 | dc_sink_retain(prev_sink); |
@@ -765,6 +763,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) | |||
765 | } | 763 | } |
766 | 764 | ||
767 | case SIGNAL_TYPE_EDP: { | 765 | case SIGNAL_TYPE_EDP: { |
766 | read_edp_current_link_settings_on_detect(link); | ||
768 | detect_edp_sink_caps(link); | 767 | detect_edp_sink_caps(link); |
769 | sink_caps.transaction_type = | 768 | sink_caps.transaction_type = |
770 | DDC_TRANSACTION_TYPE_I2C_OVER_AUX; | 769 | DDC_TRANSACTION_TYPE_I2C_OVER_AUX; |
@@ -2329,7 +2328,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, | |||
2329 | if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { | 2328 | if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { |
2330 | if (core_dc->current_state->res_ctx. | 2329 | if (core_dc->current_state->res_ctx. |
2331 | pipe_ctx[i].stream->link | 2330 | pipe_ctx[i].stream->link |
2332 | == link) | 2331 | == link) { |
2333 | /* DMCU -1 for all controller id values, | 2332 | /* DMCU -1 for all controller id values, |
2334 | * therefore +1 here | 2333 | * therefore +1 here |
2335 | */ | 2334 | */ |
@@ -2337,6 +2336,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link, | |||
2337 | core_dc->current_state-> | 2336 | core_dc->current_state-> |
2338 | res_ctx.pipe_ctx[i].stream_res.tg->inst + | 2337 | res_ctx.pipe_ctx[i].stream_res.tg->inst + |
2339 | 1; | 2338 | 1; |
2339 | |||
2340 | /* Disable brightness ramping when the display is blanked | ||
2341 | * as it can hang the DMCU | ||
2342 | */ | ||
2343 | if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL) | ||
2344 | frame_ramp = 0; | ||
2345 | } | ||
2340 | } | 2346 | } |
2341 | } | 2347 | } |
2342 | abm->funcs->set_backlight_level_pwm( | 2348 | abm->funcs->set_backlight_level_pwm( |
@@ -2984,8 +2990,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc, | |||
2984 | 2990 | ||
2985 | /* Retrain with preferred link settings only relevant for | 2991 | /* Retrain with preferred link settings only relevant for |
2986 | * DP signal type | 2992 | * DP signal type |
2993 | * Check for non-DP signal or if passive dongle present | ||
2987 | */ | 2994 | */ |
2988 | if (!dc_is_dp_signal(link->connector_signal)) | 2995 | if (!dc_is_dp_signal(link->connector_signal) || |
2996 | link->dongle_max_pix_clk > 0) | ||
2989 | return; | 2997 | return; |
2990 | 2998 | ||
2991 | for (i = 0; i < MAX_PIPES; i++) { | 2999 | for (i = 0; i < MAX_PIPES; i++) { |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 056be4c34a98..2c7aaed907b9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | |||
@@ -2230,18 +2230,25 @@ static void get_active_converter_info( | |||
2230 | link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; | 2230 | link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; |
2231 | ddc_service_set_dongle_type(link->ddc, | 2231 | ddc_service_set_dongle_type(link->ddc, |
2232 | link->dpcd_caps.dongle_type); | 2232 | link->dpcd_caps.dongle_type); |
2233 | link->dpcd_caps.is_branch_dev = false; | ||
2233 | return; | 2234 | return; |
2234 | } | 2235 | } |
2235 | 2236 | ||
2236 | /* DPCD 0x5 bit 0 = 1, it indicates it's a branch device */ | 2237 | /* DPCD 0x5 bit 0 = 1, it indicates it's a branch device */ |
2237 | link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; | 2238 | if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) { |
2239 | link->dpcd_caps.is_branch_dev = false; | ||
2240 | } | ||
2241 | |||
2242 | else { | ||
2243 | link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; | ||
2244 | } | ||
2238 | 2245 | ||
2239 | switch (ds_port.fields.PORT_TYPE) { | 2246 | switch (ds_port.fields.PORT_TYPE) { |
2240 | case DOWNSTREAM_VGA: | 2247 | case DOWNSTREAM_VGA: |
2241 | link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; | 2248 | link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; |
2242 | break; | 2249 | break; |
2243 | case DOWNSTREAM_DVI_HDMI: | 2250 | case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS: |
2244 | /* At this point we don't know if it is DVI or HDMI, | 2251 | /* At this point we don't know if it is DVI or HDMI or DP++, |
2245 | * assume DVI.*/ | 2252 | * assume DVI.*/ |
2246 | link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; | 2253 | link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; |
2247 | break; | 2254 | break; |
@@ -2258,6 +2265,10 @@ static void get_active_converter_info( | |||
2258 | det_caps, sizeof(det_caps)); | 2265 | det_caps, sizeof(det_caps)); |
2259 | 2266 | ||
2260 | switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { | 2267 | switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { |
2268 | /*Handle DP case as DONGLE_NONE*/ | ||
2269 | case DOWN_STREAM_DETAILED_DP: | ||
2270 | link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; | ||
2271 | break; | ||
2261 | case DOWN_STREAM_DETAILED_VGA: | 2272 | case DOWN_STREAM_DETAILED_VGA: |
2262 | link->dpcd_caps.dongle_type = | 2273 | link->dpcd_caps.dongle_type = |
2263 | DISPLAY_DONGLE_DP_VGA_CONVERTER; | 2274 | DISPLAY_DONGLE_DP_VGA_CONVERTER; |
@@ -2267,6 +2278,8 @@ static void get_active_converter_info( | |||
2267 | DISPLAY_DONGLE_DP_DVI_CONVERTER; | 2278 | DISPLAY_DONGLE_DP_DVI_CONVERTER; |
2268 | break; | 2279 | break; |
2269 | case DOWN_STREAM_DETAILED_HDMI: | 2280 | case DOWN_STREAM_DETAILED_HDMI: |
2281 | case DOWN_STREAM_DETAILED_DP_PLUS_PLUS: | ||
2282 | /*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/ | ||
2270 | link->dpcd_caps.dongle_type = | 2283 | link->dpcd_caps.dongle_type = |
2271 | DISPLAY_DONGLE_DP_HDMI_CONVERTER; | 2284 | DISPLAY_DONGLE_DP_HDMI_CONVERTER; |
2272 | 2285 | ||
@@ -2282,14 +2295,18 @@ static void get_active_converter_info( | |||
2282 | 2295 | ||
2283 | link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = | 2296 | link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = |
2284 | hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; | 2297 | hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; |
2285 | link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = | 2298 | /*YCBCR capability only for HDMI case*/ |
2286 | hdmi_caps.bits.YCrCr422_PASS_THROUGH; | 2299 | if (port_caps->bits.DWN_STRM_PORTX_TYPE |
2287 | link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = | 2300 | == DOWN_STREAM_DETAILED_HDMI) { |
2288 | hdmi_caps.bits.YCrCr420_PASS_THROUGH; | 2301 | link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = |
2289 | link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = | 2302 | hdmi_caps.bits.YCrCr422_PASS_THROUGH; |
2290 | hdmi_caps.bits.YCrCr422_CONVERSION; | 2303 | link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = |
2291 | link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = | 2304 | hdmi_caps.bits.YCrCr420_PASS_THROUGH; |
2292 | hdmi_caps.bits.YCrCr420_CONVERSION; | 2305 | link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = |
2306 | hdmi_caps.bits.YCrCr422_CONVERSION; | ||
2307 | link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = | ||
2308 | hdmi_caps.bits.YCrCr420_CONVERSION; | ||
2309 | } | ||
2293 | 2310 | ||
2294 | link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = | 2311 | link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = |
2295 | translate_dpcd_max_bpc( | 2312 | translate_dpcd_max_bpc( |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 51a78283a86d..2ceaab4fb5de 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c | |||
@@ -258,7 +258,7 @@ bool resource_construct( | |||
258 | * PORT_CONNECTIVITY == 1 (as instructed by HW team). | 258 | * PORT_CONNECTIVITY == 1 (as instructed by HW team). |
259 | */ | 259 | */ |
260 | update_num_audio(&straps, &num_audio, &pool->audio_support); | 260 | update_num_audio(&straps, &num_audio, &pool->audio_support); |
261 | for (i = 0; i < pool->pipe_count && i < num_audio; i++) { | 261 | for (i = 0; i < caps->num_audio; i++) { |
262 | struct audio *aud = create_funcs->create_audio(ctx, i); | 262 | struct audio *aud = create_funcs->create_audio(ctx, i); |
263 | 263 | ||
264 | if (aud == NULL) { | 264 | if (aud == NULL) { |
@@ -1669,6 +1669,12 @@ static struct audio *find_first_free_audio( | |||
1669 | return pool->audios[i]; | 1669 | return pool->audios[i]; |
1670 | } | 1670 | } |
1671 | } | 1671 | } |
1672 | |||
1673 | /* use engine id to find free audio */ | ||
1674 | if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) { | ||
1675 | return pool->audios[id]; | ||
1676 | } | ||
1677 | |||
1672 | /*not found the matching one, first come first serve*/ | 1678 | /*not found the matching one, first come first serve*/ |
1673 | for (i = 0; i < pool->audio_count; i++) { | 1679 | for (i = 0; i < pool->audio_count; i++) { |
1674 | if (res_ctx->is_audio_acquired[i] == false) { | 1680 | if (res_ctx->is_audio_acquired[i] == false) { |
@@ -1833,6 +1839,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing) | |||
1833 | pix_clk /= 2; | 1839 | pix_clk /= 2; |
1834 | if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) { | 1840 | if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) { |
1835 | switch (timing->display_color_depth) { | 1841 | switch (timing->display_color_depth) { |
1842 | case COLOR_DEPTH_666: | ||
1836 | case COLOR_DEPTH_888: | 1843 | case COLOR_DEPTH_888: |
1837 | normalized_pix_clk = pix_clk; | 1844 | normalized_pix_clk = pix_clk; |
1838 | break; | 1845 | break; |
@@ -1979,7 +1986,7 @@ enum dc_status resource_map_pool_resources( | |||
1979 | /* TODO: Add check if ASIC support and EDID audio */ | 1986 | /* TODO: Add check if ASIC support and EDID audio */ |
1980 | if (!stream->converter_disable_audio && | 1987 | if (!stream->converter_disable_audio && |
1981 | dc_is_audio_capable_signal(pipe_ctx->stream->signal) && | 1988 | dc_is_audio_capable_signal(pipe_ctx->stream->signal) && |
1982 | stream->audio_info.mode_count) { | 1989 | stream->audio_info.mode_count && stream->audio_info.flags.all) { |
1983 | pipe_ctx->stream_res.audio = find_first_free_audio( | 1990 | pipe_ctx->stream_res.audio = find_first_free_audio( |
1984 | &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); | 1991 | &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); |
1985 | 1992 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index af7f8be230f7..352862370390 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c | |||
@@ -612,7 +612,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, | |||
612 | 612 | ||
613 | pipe_ctx->stream->dmdata_address = attr->address; | 613 | pipe_ctx->stream->dmdata_address = attr->address; |
614 | 614 | ||
615 | if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) { | 615 | if (pipe_ctx->stream_res.stream_enc && |
616 | pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) { | ||
616 | if (pipe_ctx->stream->dmdata_address.quad_part != 0) { | 617 | if (pipe_ctx->stream->dmdata_address.quad_part != 0) { |
617 | /* if using dynamic meta, don't set up generic infopackets */ | 618 | /* if using dynamic meta, don't set up generic infopackets */ |
618 | pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; | 619 | pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index f8903bcabe49..58bd131d5b48 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | |||
@@ -239,6 +239,10 @@ static void dmcu_set_backlight_level( | |||
239 | s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); | 239 | s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); |
240 | 240 | ||
241 | REG_WRITE(BIOS_SCRATCH_2, s2); | 241 | REG_WRITE(BIOS_SCRATCH_2, s2); |
242 | |||
243 | /* waitDMCUReadyForCmd */ | ||
244 | REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, | ||
245 | 0, 1, 80000); | ||
242 | } | 246 | } |
243 | 247 | ||
244 | static void dce_abm_init(struct abm *abm) | 248 | static void dce_abm_init(struct abm *abm) |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 858a58856ebd..fafb4b470140 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
@@ -965,11 +965,17 @@ void hwss_edp_backlight_control( | |||
965 | void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) | 965 | void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) |
966 | { | 966 | { |
967 | /* notify audio driver for audio modes of monitor */ | 967 | /* notify audio driver for audio modes of monitor */ |
968 | struct dc *core_dc = pipe_ctx->stream->ctx->dc; | 968 | struct dc *core_dc; |
969 | struct pp_smu_funcs *pp_smu = NULL; | 969 | struct pp_smu_funcs *pp_smu = NULL; |
970 | struct clk_mgr *clk_mgr = core_dc->clk_mgr; | 970 | struct clk_mgr *clk_mgr; |
971 | unsigned int i, num_audio = 1; | 971 | unsigned int i, num_audio = 1; |
972 | 972 | ||
973 | if (!pipe_ctx->stream) | ||
974 | return; | ||
975 | |||
976 | core_dc = pipe_ctx->stream->ctx->dc; | ||
977 | clk_mgr = core_dc->clk_mgr; | ||
978 | |||
973 | if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) | 979 | if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) |
974 | return; | 980 | return; |
975 | 981 | ||
@@ -999,9 +1005,15 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) | |||
999 | 1005 | ||
1000 | void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) | 1006 | void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) |
1001 | { | 1007 | { |
1002 | struct dc *dc = pipe_ctx->stream->ctx->dc; | 1008 | struct dc *dc; |
1003 | struct pp_smu_funcs *pp_smu = NULL; | 1009 | struct pp_smu_funcs *pp_smu = NULL; |
1004 | struct clk_mgr *clk_mgr = dc->clk_mgr; | 1010 | struct clk_mgr *clk_mgr; |
1011 | |||
1012 | if (!pipe_ctx || !pipe_ctx->stream) | ||
1013 | return; | ||
1014 | |||
1015 | dc = pipe_ctx->stream->ctx->dc; | ||
1016 | clk_mgr = dc->clk_mgr; | ||
1005 | 1017 | ||
1006 | if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false) | 1018 | if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false) |
1007 | return; | 1019 | return; |
@@ -1009,6 +1021,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) | |||
1009 | pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( | 1021 | pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( |
1010 | pipe_ctx->stream_res.stream_enc, true); | 1022 | pipe_ctx->stream_res.stream_enc, true); |
1011 | if (pipe_ctx->stream_res.audio) { | 1023 | if (pipe_ctx->stream_res.audio) { |
1024 | pipe_ctx->stream_res.audio->enabled = false; | ||
1025 | |||
1012 | if (dc->res_pool->pp_smu) | 1026 | if (dc->res_pool->pp_smu) |
1013 | pp_smu = dc->res_pool->pp_smu; | 1027 | pp_smu = dc->res_pool->pp_smu; |
1014 | 1028 | ||
@@ -1039,8 +1053,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) | |||
1039 | /* dal_audio_disable_azalia_audio_jack_presence(stream->audio, | 1053 | /* dal_audio_disable_azalia_audio_jack_presence(stream->audio, |
1040 | * stream->stream_engine_id); | 1054 | * stream->stream_engine_id); |
1041 | */ | 1055 | */ |
1042 | if (pipe_ctx->stream_res.audio) | ||
1043 | pipe_ctx->stream_res.audio->enabled = false; | ||
1044 | } | 1056 | } |
1045 | } | 1057 | } |
1046 | 1058 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index e50a696fcb5d..2118ea21d7e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | |||
@@ -1195,16 +1195,7 @@ static void dcn10_init_hw(struct dc *dc) | |||
1195 | * everything down. | 1195 | * everything down. |
1196 | */ | 1196 | */ |
1197 | if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { | 1197 | if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { |
1198 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | 1198 | dc->hwss.init_pipes(dc, dc->current_state); |
1199 | struct hubp *hubp = dc->res_pool->hubps[i]; | ||
1200 | struct dpp *dpp = dc->res_pool->dpps[i]; | ||
1201 | |||
1202 | hubp->funcs->hubp_init(hubp); | ||
1203 | dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; | ||
1204 | plane_atomic_power_down(dc, dpp, hubp); | ||
1205 | } | ||
1206 | |||
1207 | apply_DEGVIDCN10_253_wa(dc); | ||
1208 | } | 1199 | } |
1209 | 1200 | ||
1210 | for (i = 0; i < dc->res_pool->audio_count; i++) { | 1201 | for (i = 0; i < dc->res_pool->audio_count; i++) { |
@@ -1375,10 +1366,6 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, | |||
1375 | return result; | 1366 | return result; |
1376 | } | 1367 | } |
1377 | 1368 | ||
1378 | |||
1379 | |||
1380 | |||
1381 | |||
1382 | static bool | 1369 | static bool |
1383 | dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, | 1370 | dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, |
1384 | const struct dc_stream_state *stream) | 1371 | const struct dc_stream_state *stream) |
@@ -2516,6 +2503,12 @@ static void dcn10_apply_ctx_for_surface( | |||
2516 | if (removed_pipe[i]) | 2503 | if (removed_pipe[i]) |
2517 | dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); | 2504 | dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); |
2518 | 2505 | ||
2506 | for (i = 0; i < dc->res_pool->pipe_count; i++) | ||
2507 | if (removed_pipe[i]) { | ||
2508 | dc->hwss.optimize_bandwidth(dc, context); | ||
2509 | break; | ||
2510 | } | ||
2511 | |||
2519 | if (dc->hwseq->wa.DEGVIDCN10_254) | 2512 | if (dc->hwseq->wa.DEGVIDCN10_254) |
2520 | hubbub1_wm_change_req_wa(dc->res_pool->hubbub); | 2513 | hubbub1_wm_change_req_wa(dc->res_pool->hubbub); |
2521 | } | 2514 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 1a20461c2937..a12530a3ab9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | |||
@@ -508,7 +508,7 @@ static const struct resource_caps rv2_res_cap = { | |||
508 | .num_audio = 3, | 508 | .num_audio = 3, |
509 | .num_stream_encoder = 3, | 509 | .num_stream_encoder = 3, |
510 | .num_pll = 3, | 510 | .num_pll = 3, |
511 | .num_ddc = 3, | 511 | .num_ddc = 4, |
512 | }; | 512 | }; |
513 | 513 | ||
514 | static const struct dc_plane_cap plane_cap = { | 514 | static const struct dc_plane_cap plane_cap = { |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 51a3dfe97f0e..31aa6ee5cd5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c | |||
@@ -102,14 +102,19 @@ void dccg2_init(struct dccg *dccg) | |||
102 | switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) { | 102 | switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) { |
103 | case 6: | 103 | case 6: |
104 | REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1); | 104 | REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1); |
105 | /* Fall through */ | ||
105 | case 5: | 106 | case 5: |
106 | REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1); | 107 | REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1); |
108 | /* Fall through */ | ||
107 | case 4: | 109 | case 4: |
108 | REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1); | 110 | REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1); |
111 | /* Fall through */ | ||
109 | case 3: | 112 | case 3: |
110 | REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1); | 113 | REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1); |
114 | /* Fall through */ | ||
111 | case 2: | 115 | case 2: |
112 | REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1); | 116 | REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1); |
117 | /* Fall through */ | ||
113 | case 1: | 118 | case 1: |
114 | REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1); | 119 | REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1); |
115 | break; | 120 | break; |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index ece6e136437b..6e2dbd03f9bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | |||
@@ -337,6 +337,7 @@ static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigne | |||
337 | break; | 337 | break; |
338 | default: | 338 | default: |
339 | ASSERT(false); | 339 | ASSERT(false); |
340 | block_size = page_table_block_size; | ||
340 | break; | 341 | break; |
341 | } | 342 | } |
342 | 343 | ||
@@ -366,25 +367,24 @@ int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub, | |||
366 | struct dcn_vmid_page_table_config phys_config; | 367 | struct dcn_vmid_page_table_config phys_config; |
367 | 368 | ||
368 | REG_SET(DCN_VM_FB_LOCATION_BASE, 0, | 369 | REG_SET(DCN_VM_FB_LOCATION_BASE, 0, |
369 | FB_BASE, pa_config->system_aperture.fb_base); | 370 | FB_BASE, pa_config->system_aperture.fb_base >> 24); |
370 | REG_SET(DCN_VM_FB_LOCATION_TOP, 0, | 371 | REG_SET(DCN_VM_FB_LOCATION_TOP, 0, |
371 | FB_TOP, pa_config->system_aperture.fb_top); | 372 | FB_TOP, pa_config->system_aperture.fb_top >> 24); |
372 | REG_SET(DCN_VM_FB_OFFSET, 0, | 373 | REG_SET(DCN_VM_FB_OFFSET, 0, |
373 | FB_OFFSET, pa_config->system_aperture.fb_offset); | 374 | FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); |
374 | REG_SET(DCN_VM_AGP_BOT, 0, | 375 | REG_SET(DCN_VM_AGP_BOT, 0, |
375 | AGP_BOT, pa_config->system_aperture.agp_bot); | 376 | AGP_BOT, pa_config->system_aperture.agp_bot >> 24); |
376 | REG_SET(DCN_VM_AGP_TOP, 0, | 377 | REG_SET(DCN_VM_AGP_TOP, 0, |
377 | AGP_TOP, pa_config->system_aperture.agp_top); | 378 | AGP_TOP, pa_config->system_aperture.agp_top >> 24); |
378 | REG_SET(DCN_VM_AGP_BASE, 0, | 379 | REG_SET(DCN_VM_AGP_BASE, 0, |
379 | AGP_BASE, pa_config->system_aperture.agp_base); | 380 | AGP_BASE, pa_config->system_aperture.agp_base >> 24); |
380 | 381 | ||
381 | if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { | 382 | if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { |
382 | phys_config.depth = 1; | ||
383 | phys_config.block_size = 4096; | ||
384 | phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; | 383 | phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; |
385 | phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; | 384 | phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; |
386 | phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; | 385 | phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; |
387 | 386 | phys_config.depth = 0; | |
387 | phys_config.block_size = 0; | ||
388 | // Init VMID 0 based on PA config | 388 | // Init VMID 0 based on PA config |
389 | dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); | 389 | dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); |
390 | } | 390 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 0b84a322b8a2..d810c8940129 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | |||
@@ -1153,8 +1153,8 @@ void dcn20_enable_plane( | |||
1153 | 1153 | ||
1154 | apt.sys_default.quad_part = 0; | 1154 | apt.sys_default.quad_part = 0; |
1155 | 1155 | ||
1156 | apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.start_addr; | 1156 | apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr; |
1157 | apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.end_addr; | 1157 | apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr; |
1158 | 1158 | ||
1159 | // Program system aperture settings | 1159 | // Program system aperture settings |
1160 | pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt); | 1160 | pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt); |
@@ -1242,6 +1242,8 @@ void dcn20_pipe_control_lock_global( | |||
1242 | CRTC_STATE_VACTIVE); | 1242 | CRTC_STATE_VACTIVE); |
1243 | pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, | 1243 | pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, |
1244 | CRTC_STATE_VBLANK); | 1244 | CRTC_STATE_VBLANK); |
1245 | pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, | ||
1246 | CRTC_STATE_VACTIVE); | ||
1245 | pipe->stream_res.tg->funcs->lock_doublebuffer_disable( | 1247 | pipe->stream_res.tg->funcs->lock_doublebuffer_disable( |
1246 | pipe->stream_res.tg); | 1248 | pipe->stream_res.tg); |
1247 | } | 1249 | } |
@@ -1263,6 +1265,17 @@ void dcn20_pipe_control_lock( | |||
1263 | if (pipe->plane_state != NULL) | 1265 | if (pipe->plane_state != NULL) |
1264 | flip_immediate = pipe->plane_state->flip_immediate; | 1266 | flip_immediate = pipe->plane_state->flip_immediate; |
1265 | 1267 | ||
1268 | if (flip_immediate && lock) { | ||
1269 | while (pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) { | ||
1270 | udelay(1); | ||
1271 | } | ||
1272 | |||
1273 | if (pipe->bottom_pipe != NULL) | ||
1274 | while (pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) { | ||
1275 | udelay(1); | ||
1276 | } | ||
1277 | } | ||
1278 | |||
1266 | /* In flip immediate and pipe splitting case, we need to use GSL | 1279 | /* In flip immediate and pipe splitting case, we need to use GSL |
1267 | * for synchronization. Only do setup on locking and on flip type change. | 1280 | * for synchronization. Only do setup on locking and on flip type change. |
1268 | */ | 1281 | */ |
@@ -1740,8 +1753,11 @@ static void dcn20_reset_back_end_for_pipe( | |||
1740 | else if (pipe_ctx->stream_res.audio) { | 1753 | else if (pipe_ctx->stream_res.audio) { |
1741 | dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE); | 1754 | dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE); |
1742 | } | 1755 | } |
1743 | |||
1744 | } | 1756 | } |
1757 | #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT | ||
1758 | else if (pipe_ctx->stream_res.dsc) | ||
1759 | dp_set_dsc_enable(pipe_ctx, false); | ||
1760 | #endif | ||
1745 | 1761 | ||
1746 | /* by upper caller loop, parent pipe: pipe0, will be reset last. | 1762 | /* by upper caller loop, parent pipe: pipe0, will be reset last. |
1747 | * back end share by all pipes and will be disable only when disable | 1763 | * back end share by all pipes and will be disable only when disable |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 26a66ccf6e72..1ae973962d53 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | |||
@@ -535,7 +535,7 @@ void dcn20_timing_generator_init(struct optc *optc1) | |||
535 | optc1->min_h_blank = 32; | 535 | optc1->min_h_blank = 32; |
536 | optc1->min_v_blank = 3; | 536 | optc1->min_v_blank = 3; |
537 | optc1->min_v_blank_interlace = 5; | 537 | optc1->min_v_blank_interlace = 5; |
538 | optc1->min_h_sync_width = 8; | 538 | optc1->min_h_sync_width = 4;// Minimum HSYNC = 8 pixels asked By HW in the first place for no actual reason. Oculus Rift S will not light up with 8 as it's hsyncWidth is 6. Changing it to 4 to fix that issue. |
539 | optc1->min_v_sync_width = 1; | 539 | optc1->min_v_sync_width = 1; |
540 | optc1->comb_opp_id = 0xf; | 540 | optc1->comb_opp_id = 0xf; |
541 | } | 541 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index d200bc3cec71..b949e202d6cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | |||
@@ -2643,6 +2643,10 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ | |||
2643 | 2643 | ||
2644 | if (dc->bb_overrides.min_dcfclk_mhz > 0) | 2644 | if (dc->bb_overrides.min_dcfclk_mhz > 0) |
2645 | min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; | 2645 | min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; |
2646 | else | ||
2647 | // Accounting for SOC/DCF relationship, we can go as high as | ||
2648 | // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506. | ||
2649 | min_dcfclk = 507; | ||
2646 | 2650 | ||
2647 | for (i = 0; i < num_states; i++) { | 2651 | for (i = 0; i < num_states; i++) { |
2648 | int min_fclk_required_by_uclk; | 2652 | int min_fclk_required_by_uclk; |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c index 27679ef6ebe8..96c263223315 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c | |||
@@ -23,6 +23,8 @@ | |||
23 | * | 23 | * |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/delay.h> | ||
27 | |||
26 | #include "dcn20_vmid.h" | 28 | #include "dcn20_vmid.h" |
27 | #include "reg_helper.h" | 29 | #include "reg_helper.h" |
28 | 30 | ||
@@ -36,6 +38,38 @@ | |||
36 | #define FN(reg_name, field_name) \ | 38 | #define FN(reg_name, field_name) \ |
37 | vmid->shifts->field_name, vmid->masks->field_name | 39 | vmid->shifts->field_name, vmid->masks->field_name |
38 | 40 | ||
41 | static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid) | ||
42 | { | ||
43 | /* According the hardware spec, we need to poll for the lowest | ||
44 | * bit of PAGE_TABLE_BASE_ADDR_LO32 = 1 any time a GPUVM | ||
45 | * context is updated. We can't use REG_WAIT here since we | ||
46 | * don't have a seperate field to wait on. | ||
47 | * | ||
48 | * TODO: Confirm timeout / poll interval with hardware team | ||
49 | */ | ||
50 | |||
51 | int max_times = 10000; | ||
52 | int delay_us = 5; | ||
53 | int i; | ||
54 | |||
55 | for (i = 0; i < max_times; ++i) { | ||
56 | uint32_t entry_lo32; | ||
57 | |||
58 | REG_GET(PAGE_TABLE_BASE_ADDR_LO32, | ||
59 | VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, | ||
60 | &entry_lo32); | ||
61 | |||
62 | if (entry_lo32 & 0x1) | ||
63 | return; | ||
64 | |||
65 | udelay(delay_us); | ||
66 | } | ||
67 | |||
68 | /* VM setup timed out */ | ||
69 | DC_LOG_WARNING("Timeout while waiting for GPUVM context update\n"); | ||
70 | ASSERT(0); | ||
71 | } | ||
72 | |||
39 | void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config) | 73 | void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config) |
40 | { | 74 | { |
41 | REG_SET(PAGE_TABLE_START_ADDR_HI32, 0, | 75 | REG_SET(PAGE_TABLE_START_ADDR_HI32, 0, |
@@ -54,6 +88,9 @@ void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_ | |||
54 | 88 | ||
55 | REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0, | 89 | REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0, |
56 | VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF); | 90 | VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF); |
91 | /* Note: per hardware spec PAGE_TABLE_BASE_ADDR_LO32 must be programmed last in sequence */ | ||
57 | REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0, | 92 | REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0, |
58 | VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF); | 93 | VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF); |
94 | |||
95 | dcn20_wait_for_vmid_ready(vmid); | ||
59 | } | 96 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c index 67089765780b..340ef4d41ebd 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c | |||
@@ -377,6 +377,12 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) | |||
377 | vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; | 377 | vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; |
378 | vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; | 378 | vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; |
379 | 379 | ||
380 | /* As per DSC spec v1.2a recommendation: */ | ||
381 | if (vdsc_cfg->native_420) | ||
382 | vdsc_cfg->second_line_offset_adj = 512; | ||
383 | else | ||
384 | vdsc_cfg->second_line_offset_adj = 0; | ||
385 | |||
380 | return 0; | 386 | return 0; |
381 | } | 387 | } |
382 | EXPORT_SYMBOL(drm_dsc_compute_rc_parameters); | 388 | EXPORT_SYMBOL(drm_dsc_compute_rc_parameters); |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index c89393c19232..a148ffde8b12 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h | |||
@@ -212,7 +212,7 @@ struct resource_pool { | |||
212 | struct clock_source *clock_sources[MAX_CLOCK_SOURCES]; | 212 | struct clock_source *clock_sources[MAX_CLOCK_SOURCES]; |
213 | unsigned int clk_src_count; | 213 | unsigned int clk_src_count; |
214 | 214 | ||
215 | struct audio *audios[MAX_PIPES]; | 215 | struct audio *audios[MAX_AUDIOS]; |
216 | unsigned int audio_count; | 216 | unsigned int audio_count; |
217 | struct audio_support audio_support; | 217 | struct audio_support audio_support; |
218 | 218 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 959f5b654611..9502478c4a1b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | |||
@@ -61,8 +61,8 @@ enum dcn_hubbub_page_table_depth { | |||
61 | }; | 61 | }; |
62 | 62 | ||
63 | enum dcn_hubbub_page_table_block_size { | 63 | enum dcn_hubbub_page_table_block_size { |
64 | DCN_PAGE_TABLE_BLOCK_SIZE_4KB, | 64 | DCN_PAGE_TABLE_BLOCK_SIZE_4KB = 0, |
65 | DCN_PAGE_TABLE_BLOCK_SIZE_64KB | 65 | DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4, |
66 | }; | 66 | }; |
67 | 67 | ||
68 | struct dcn_hubbub_phys_addr_config { | 68 | struct dcn_hubbub_phys_addr_config { |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 8759ec03aede..f82365e2d03c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | |||
@@ -34,6 +34,7 @@ | |||
34 | * Data types shared between different Virtual HW blocks | 34 | * Data types shared between different Virtual HW blocks |
35 | ******************************************************************************/ | 35 | ******************************************************************************/ |
36 | 36 | ||
37 | #define MAX_AUDIOS 7 | ||
37 | #define MAX_PIPES 6 | 38 | #define MAX_PIPES 6 |
38 | #if defined(CONFIG_DRM_AMD_DC_DCN2_0) | 39 | #if defined(CONFIG_DRM_AMD_DC_DCN2_0) |
39 | #define MAX_DWB_PIPES 1 | 40 | #define MAX_DWB_PIPES 1 |
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index 1c66166d0a94..2c90d1b46c8b 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h | |||
@@ -43,7 +43,7 @@ enum dpcd_revision { | |||
43 | enum dpcd_downstream_port_type { | 43 | enum dpcd_downstream_port_type { |
44 | DOWNSTREAM_DP = 0, | 44 | DOWNSTREAM_DP = 0, |
45 | DOWNSTREAM_VGA, | 45 | DOWNSTREAM_VGA, |
46 | DOWNSTREAM_DVI_HDMI, | 46 | DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS,/* DVI, HDMI, DP++ */ |
47 | DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */ | 47 | DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */ |
48 | }; | 48 | }; |
49 | 49 | ||
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 9f661bf96ed0..5b1ebb7f995a 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h | |||
@@ -123,6 +123,7 @@ enum amd_pp_sensors { | |||
123 | AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, | 123 | AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, |
124 | AMDGPU_PP_SENSOR_MIN_FAN_RPM, | 124 | AMDGPU_PP_SENSOR_MIN_FAN_RPM, |
125 | AMDGPU_PP_SENSOR_MAX_FAN_RPM, | 125 | AMDGPU_PP_SENSOR_MAX_FAN_RPM, |
126 | AMDGPU_PP_SENSOR_VCN_POWER_STATE, | ||
126 | }; | 127 | }; |
127 | 128 | ||
128 | enum amd_pp_task { | 129 | enum amd_pp_task { |
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index f1565c448de5..0685a3388e38 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | |||
@@ -137,12 +137,37 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, | |||
137 | { | 137 | { |
138 | int ret = 0, clk_id = 0; | 138 | int ret = 0, clk_id = 0; |
139 | uint32_t param = 0; | 139 | uint32_t param = 0; |
140 | uint32_t clock_limit; | ||
140 | 141 | ||
141 | if (!min && !max) | 142 | if (!min && !max) |
142 | return -EINVAL; | 143 | return -EINVAL; |
143 | 144 | ||
144 | if (!smu_clk_dpm_is_enabled(smu, clk_type)) | 145 | if (!smu_clk_dpm_is_enabled(smu, clk_type)) { |
146 | switch (clk_type) { | ||
147 | case SMU_MCLK: | ||
148 | case SMU_UCLK: | ||
149 | clock_limit = smu->smu_table.boot_values.uclk; | ||
150 | break; | ||
151 | case SMU_GFXCLK: | ||
152 | case SMU_SCLK: | ||
153 | clock_limit = smu->smu_table.boot_values.gfxclk; | ||
154 | break; | ||
155 | case SMU_SOCCLK: | ||
156 | clock_limit = smu->smu_table.boot_values.socclk; | ||
157 | break; | ||
158 | default: | ||
159 | clock_limit = 0; | ||
160 | break; | ||
161 | } | ||
162 | |||
163 | /* clock in Mhz unit */ | ||
164 | if (min) | ||
165 | *min = clock_limit / 100; | ||
166 | if (max) | ||
167 | *max = clock_limit / 100; | ||
168 | |||
145 | return 0; | 169 | return 0; |
170 | } | ||
146 | 171 | ||
147 | mutex_lock(&smu->mutex); | 172 | mutex_lock(&smu->mutex); |
148 | clk_id = smu_clk_get_index(smu, clk_type); | 173 | clk_id = smu_clk_get_index(smu, clk_type); |
@@ -281,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu, | |||
281 | 306 | ||
282 | /* not support power state */ | 307 | /* not support power state */ |
283 | memset(state_info, 0, sizeof(struct pp_states_info)); | 308 | memset(state_info, 0, sizeof(struct pp_states_info)); |
284 | state_info->nums = 0; | 309 | state_info->nums = 1; |
310 | state_info->states[0] = POWER_STATE_TYPE_DEFAULT; | ||
285 | 311 | ||
286 | return 0; | 312 | return 0; |
287 | } | 313 | } |
@@ -312,6 +338,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, | |||
312 | *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; | 338 | *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; |
313 | *size = 4; | 339 | *size = 4; |
314 | break; | 340 | break; |
341 | case AMDGPU_PP_SENSOR_VCN_POWER_STATE: | ||
342 | *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0; | ||
343 | *size = 4; | ||
344 | break; | ||
315 | default: | 345 | default: |
316 | ret = -EINVAL; | 346 | ret = -EINVAL; |
317 | break; | 347 | break; |
@@ -698,6 +728,12 @@ static int smu_sw_init(void *handle) | |||
698 | return ret; | 728 | return ret; |
699 | } | 729 | } |
700 | 730 | ||
731 | ret = smu_register_irq_handler(smu); | ||
732 | if (ret) { | ||
733 | pr_err("Failed to register smc irq handler!\n"); | ||
734 | return ret; | ||
735 | } | ||
736 | |||
701 | return 0; | 737 | return 0; |
702 | } | 738 | } |
703 | 739 | ||
@@ -707,6 +743,9 @@ static int smu_sw_fini(void *handle) | |||
707 | struct smu_context *smu = &adev->smu; | 743 | struct smu_context *smu = &adev->smu; |
708 | int ret; | 744 | int ret; |
709 | 745 | ||
746 | kfree(smu->irq_source); | ||
747 | smu->irq_source = NULL; | ||
748 | |||
710 | ret = smu_smc_table_sw_fini(smu); | 749 | ret = smu_smc_table_sw_fini(smu); |
711 | if (ret) { | 750 | if (ret) { |
712 | pr_err("Failed to sw fini smc table!\n"); | 751 | pr_err("Failed to sw fini smc table!\n"); |
@@ -1063,10 +1102,6 @@ static int smu_hw_init(void *handle) | |||
1063 | if (ret) | 1102 | if (ret) |
1064 | goto failed; | 1103 | goto failed; |
1065 | 1104 | ||
1066 | ret = smu_register_irq_handler(smu); | ||
1067 | if (ret) | ||
1068 | goto failed; | ||
1069 | |||
1070 | if (!smu->pm_enabled) | 1105 | if (!smu->pm_enabled) |
1071 | adev->pm.dpm_enabled = false; | 1106 | adev->pm.dpm_enabled = false; |
1072 | else | 1107 | else |
@@ -1096,9 +1131,6 @@ static int smu_hw_fini(void *handle) | |||
1096 | kfree(table_context->overdrive_table); | 1131 | kfree(table_context->overdrive_table); |
1097 | table_context->overdrive_table = NULL; | 1132 | table_context->overdrive_table = NULL; |
1098 | 1133 | ||
1099 | kfree(smu->irq_source); | ||
1100 | smu->irq_source = NULL; | ||
1101 | |||
1102 | ret = smu_fini_fb_allocations(smu); | 1134 | ret = smu_fini_fb_allocations(smu); |
1103 | if (ret) | 1135 | if (ret) |
1104 | return ret; | 1136 | return ret; |
@@ -1349,13 +1381,49 @@ static int smu_enable_umd_pstate(void *handle, | |||
1349 | return 0; | 1381 | return 0; |
1350 | } | 1382 | } |
1351 | 1383 | ||
1384 | static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) | ||
1385 | { | ||
1386 | int ret = 0; | ||
1387 | uint32_t sclk_mask, mclk_mask, soc_mask; | ||
1388 | |||
1389 | switch (level) { | ||
1390 | case AMD_DPM_FORCED_LEVEL_HIGH: | ||
1391 | ret = smu_force_dpm_limit_value(smu, true); | ||
1392 | break; | ||
1393 | case AMD_DPM_FORCED_LEVEL_LOW: | ||
1394 | ret = smu_force_dpm_limit_value(smu, false); | ||
1395 | break; | ||
1396 | case AMD_DPM_FORCED_LEVEL_AUTO: | ||
1397 | case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: | ||
1398 | ret = smu_unforce_dpm_levels(smu); | ||
1399 | break; | ||
1400 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: | ||
1401 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: | ||
1402 | case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: | ||
1403 | ret = smu_get_profiling_clk_mask(smu, level, | ||
1404 | &sclk_mask, | ||
1405 | &mclk_mask, | ||
1406 | &soc_mask); | ||
1407 | if (ret) | ||
1408 | return ret; | ||
1409 | smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); | ||
1410 | smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); | ||
1411 | smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); | ||
1412 | break; | ||
1413 | case AMD_DPM_FORCED_LEVEL_MANUAL: | ||
1414 | case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: | ||
1415 | default: | ||
1416 | break; | ||
1417 | } | ||
1418 | return ret; | ||
1419 | } | ||
1420 | |||
1352 | int smu_adjust_power_state_dynamic(struct smu_context *smu, | 1421 | int smu_adjust_power_state_dynamic(struct smu_context *smu, |
1353 | enum amd_dpm_forced_level level, | 1422 | enum amd_dpm_forced_level level, |
1354 | bool skip_display_settings) | 1423 | bool skip_display_settings) |
1355 | { | 1424 | { |
1356 | int ret = 0; | 1425 | int ret = 0; |
1357 | int index = 0; | 1426 | int index = 0; |
1358 | uint32_t sclk_mask, mclk_mask, soc_mask; | ||
1359 | long workload; | 1427 | long workload; |
1360 | struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); | 1428 | struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); |
1361 | 1429 | ||
@@ -1386,39 +1454,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, | |||
1386 | } | 1454 | } |
1387 | 1455 | ||
1388 | if (smu_dpm_ctx->dpm_level != level) { | 1456 | if (smu_dpm_ctx->dpm_level != level) { |
1389 | switch (level) { | 1457 | ret = smu_asic_set_performance_level(smu, level); |
1390 | case AMD_DPM_FORCED_LEVEL_HIGH: | 1458 | if (ret) { |
1391 | ret = smu_force_dpm_limit_value(smu, true); | 1459 | ret = smu_default_set_performance_level(smu, level); |
1392 | break; | ||
1393 | case AMD_DPM_FORCED_LEVEL_LOW: | ||
1394 | ret = smu_force_dpm_limit_value(smu, false); | ||
1395 | break; | ||
1396 | |||
1397 | case AMD_DPM_FORCED_LEVEL_AUTO: | ||
1398 | case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: | ||
1399 | ret = smu_unforce_dpm_levels(smu); | ||
1400 | break; | ||
1401 | |||
1402 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: | ||
1403 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: | ||
1404 | case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: | ||
1405 | ret = smu_get_profiling_clk_mask(smu, level, | ||
1406 | &sclk_mask, | ||
1407 | &mclk_mask, | ||
1408 | &soc_mask); | ||
1409 | if (ret) | ||
1410 | return ret; | ||
1411 | smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); | ||
1412 | smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); | ||
1413 | smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); | ||
1414 | break; | ||
1415 | |||
1416 | case AMD_DPM_FORCED_LEVEL_MANUAL: | ||
1417 | case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: | ||
1418 | default: | ||
1419 | break; | ||
1420 | } | 1460 | } |
1421 | |||
1422 | if (!ret) | 1461 | if (!ret) |
1423 | smu_dpm_ctx->dpm_level = level; | 1462 | smu_dpm_ctx->dpm_level = level; |
1424 | } | 1463 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index e32ae9d3373c..18e780f566fa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | |||
@@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr) | |||
1111 | static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, | 1111 | static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, |
1112 | void *value, int *size) | 1112 | void *value, int *size) |
1113 | { | 1113 | { |
1114 | struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); | ||
1114 | uint32_t sclk, mclk; | 1115 | uint32_t sclk, mclk; |
1115 | int ret = 0; | 1116 | int ret = 0; |
1116 | 1117 | ||
@@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, | |||
1132 | case AMDGPU_PP_SENSOR_GPU_TEMP: | 1133 | case AMDGPU_PP_SENSOR_GPU_TEMP: |
1133 | *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr); | 1134 | *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr); |
1134 | break; | 1135 | break; |
1136 | case AMDGPU_PP_SENSOR_VCN_POWER_STATE: | ||
1137 | *(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1; | ||
1138 | *size = 4; | ||
1139 | break; | ||
1135 | default: | 1140 | default: |
1136 | ret = -EINVAL; | 1141 | ret = -EINVAL; |
1137 | break; | 1142 | break; |
@@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate) | |||
1175 | 1180 | ||
1176 | static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) | 1181 | static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) |
1177 | { | 1182 | { |
1183 | struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); | ||
1184 | |||
1178 | if (bgate) { | 1185 | if (bgate) { |
1179 | amdgpu_device_ip_set_powergating_state(hwmgr->adev, | 1186 | amdgpu_device_ip_set_powergating_state(hwmgr->adev, |
1180 | AMD_IP_BLOCK_TYPE_VCN, | 1187 | AMD_IP_BLOCK_TYPE_VCN, |
1181 | AMD_PG_STATE_GATE); | 1188 | AMD_PG_STATE_GATE); |
1182 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1189 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1183 | PPSMC_MSG_PowerDownVcn, 0); | 1190 | PPSMC_MSG_PowerDownVcn, 0); |
1191 | smu10_data->vcn_power_gated = true; | ||
1184 | } else { | 1192 | } else { |
1185 | smum_send_msg_to_smc_with_parameter(hwmgr, | 1193 | smum_send_msg_to_smc_with_parameter(hwmgr, |
1186 | PPSMC_MSG_PowerUpVcn, 0); | 1194 | PPSMC_MSG_PowerUpVcn, 0); |
1187 | amdgpu_device_ip_set_powergating_state(hwmgr->adev, | 1195 | amdgpu_device_ip_set_powergating_state(hwmgr->adev, |
1188 | AMD_IP_BLOCK_TYPE_VCN, | 1196 | AMD_IP_BLOCK_TYPE_VCN, |
1189 | AMD_PG_STATE_UNGATE); | 1197 | AMD_PG_STATE_UNGATE); |
1198 | smu10_data->vcn_power_gated = false; | ||
1190 | } | 1199 | } |
1191 | } | 1200 | } |
1192 | 1201 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 1af992fb0bde..208e6711d506 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | |||
@@ -429,7 +429,6 @@ struct smu_table_context | |||
429 | struct smu_table *tables; | 429 | struct smu_table *tables; |
430 | uint32_t table_count; | 430 | uint32_t table_count; |
431 | struct smu_table memory_pool; | 431 | struct smu_table memory_pool; |
432 | uint16_t software_shutdown_temp; | ||
433 | uint8_t thermal_controller_type; | 432 | uint8_t thermal_controller_type; |
434 | uint16_t TDPODLimit; | 433 | uint16_t TDPODLimit; |
435 | 434 | ||
@@ -613,6 +612,7 @@ struct pptable_funcs { | |||
613 | int (*tables_init)(struct smu_context *smu, struct smu_table *tables); | 612 | int (*tables_init)(struct smu_context *smu, struct smu_table *tables); |
614 | int (*set_thermal_fan_table)(struct smu_context *smu); | 613 | int (*set_thermal_fan_table)(struct smu_context *smu); |
615 | int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); | 614 | int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); |
615 | int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed); | ||
616 | int (*set_watermarks_table)(struct smu_context *smu, void *watermarks, | 616 | int (*set_watermarks_table)(struct smu_context *smu, void *watermarks, |
617 | struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); | 617 | struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); |
618 | int (*get_current_clk_freq_by_table)(struct smu_context *smu, | 618 | int (*get_current_clk_freq_by_table)(struct smu_context *smu, |
@@ -621,6 +621,7 @@ struct pptable_funcs { | |||
621 | int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range); | 621 | int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range); |
622 | int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states); | 622 | int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states); |
623 | int (*set_default_od_settings)(struct smu_context *smu, bool initialize); | 623 | int (*set_default_od_settings)(struct smu_context *smu, bool initialize); |
624 | int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); | ||
624 | }; | 625 | }; |
625 | 626 | ||
626 | struct smu_funcs | 627 | struct smu_funcs |
@@ -685,7 +686,6 @@ struct smu_funcs | |||
685 | int (*set_watermarks_for_clock_ranges)(struct smu_context *smu, | 686 | int (*set_watermarks_for_clock_ranges)(struct smu_context *smu, |
686 | struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); | 687 | struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); |
687 | int (*conv_power_profile_to_pplib_workload)(int power_profile); | 688 | int (*conv_power_profile_to_pplib_workload)(int power_profile); |
688 | int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed); | ||
689 | uint32_t (*get_fan_control_mode)(struct smu_context *smu); | 689 | uint32_t (*get_fan_control_mode)(struct smu_context *smu); |
690 | int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); | 690 | int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); |
691 | int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); | 691 | int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); |
@@ -751,8 +751,6 @@ struct smu_funcs | |||
751 | ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0) | 751 | ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0) |
752 | #define smu_set_default_od_settings(smu, initialize) \ | 752 | #define smu_set_default_od_settings(smu, initialize) \ |
753 | ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) | 753 | ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) |
754 | #define smu_get_current_rpm(smu, speed) \ | ||
755 | ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0) | ||
756 | #define smu_set_fan_speed_rpm(smu, speed) \ | 754 | #define smu_set_fan_speed_rpm(smu, speed) \ |
757 | ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0) | 755 | ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0) |
758 | #define smu_send_smc_msg(smu, msg) \ | 756 | #define smu_send_smc_msg(smu, msg) \ |
@@ -841,6 +839,8 @@ struct smu_funcs | |||
841 | ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0) | 839 | ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0) |
842 | #define smu_set_fan_speed_percent(smu, speed) \ | 840 | #define smu_set_fan_speed_percent(smu, speed) \ |
843 | ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0) | 841 | ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0) |
842 | #define smu_get_fan_speed_rpm(smu, speed) \ | ||
843 | ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0) | ||
844 | 844 | ||
845 | #define smu_msg_get_index(smu, msg) \ | 845 | #define smu_msg_get_index(smu, msg) \ |
846 | ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) | 846 | ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) |
@@ -918,6 +918,9 @@ struct smu_funcs | |||
918 | ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0) | 918 | ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0) |
919 | #define smu_baco_reset(smu) \ | 919 | #define smu_baco_reset(smu) \ |
920 | ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0) | 920 | ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0) |
921 | #define smu_asic_set_performance_level(smu, level) \ | ||
922 | ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL); | ||
923 | |||
921 | 924 | ||
922 | extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, | 925 | extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, |
923 | uint16_t *size, uint8_t *frev, uint8_t *crev, | 926 | uint16_t *size, uint8_t *frev, uint8_t *crev, |
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 2dae0ae0829e..cc0a3b2256af 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c | |||
@@ -23,6 +23,7 @@ | |||
23 | 23 | ||
24 | #include "pp_debug.h" | 24 | #include "pp_debug.h" |
25 | #include <linux/firmware.h> | 25 | #include <linux/firmware.h> |
26 | #include <linux/pci.h> | ||
26 | #include "amdgpu.h" | 27 | #include "amdgpu.h" |
27 | #include "amdgpu_smu.h" | 28 | #include "amdgpu_smu.h" |
28 | #include "atomfirmware.h" | 29 | #include "atomfirmware.h" |
@@ -577,28 +578,20 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) | |||
577 | static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) | 578 | static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) |
578 | { | 579 | { |
579 | int ret = 0; | 580 | int ret = 0; |
580 | struct smu_power_context *smu_power = &smu->smu_power; | ||
581 | struct smu_power_gate *power_gate = &smu_power->power_gate; | ||
582 | 581 | ||
583 | if (enable && power_gate->uvd_gated) { | 582 | if (enable) { |
584 | if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { | 583 | ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); |
585 | ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); | 584 | if (ret) |
586 | if (ret) | 585 | return ret; |
587 | return ret; | ||
588 | } | ||
589 | power_gate->uvd_gated = false; | ||
590 | } else { | 586 | } else { |
591 | if (!enable && !power_gate->uvd_gated) { | 587 | ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); |
592 | if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { | 588 | if (ret) |
593 | ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); | 589 | return ret; |
594 | if (ret) | ||
595 | return ret; | ||
596 | } | ||
597 | power_gate->uvd_gated = true; | ||
598 | } | ||
599 | } | 590 | } |
600 | 591 | ||
601 | return 0; | 592 | ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable); |
593 | |||
594 | return ret; | ||
602 | } | 595 | } |
603 | 596 | ||
604 | static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, | 597 | static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, |
@@ -626,11 +619,26 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, | |||
626 | return ret; | 619 | return ret; |
627 | } | 620 | } |
628 | 621 | ||
622 | static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type) | ||
623 | { | ||
624 | PPTable_t *pptable = smu->smu_table.driver_pptable; | ||
625 | DpmDescriptor_t *dpm_desc = NULL; | ||
626 | uint32_t clk_index = 0; | ||
627 | |||
628 | clk_index = smu_clk_get_index(smu, clk_type); | ||
629 | dpm_desc = &pptable->DpmDescriptor[clk_index]; | ||
630 | |||
631 | /* 0 - Fine grained DPM, 1 - Discrete DPM */ | ||
632 | return dpm_desc->SnapToDiscrete == 0 ? true : false; | ||
633 | } | ||
634 | |||
629 | static int navi10_print_clk_levels(struct smu_context *smu, | 635 | static int navi10_print_clk_levels(struct smu_context *smu, |
630 | enum smu_clk_type clk_type, char *buf) | 636 | enum smu_clk_type clk_type, char *buf) |
631 | { | 637 | { |
632 | int i, size = 0, ret = 0; | 638 | int i, size = 0, ret = 0; |
633 | uint32_t cur_value = 0, value = 0, count = 0; | 639 | uint32_t cur_value = 0, value = 0, count = 0; |
640 | uint32_t freq_values[3] = {0}; | ||
641 | uint32_t mark_index = 0; | ||
634 | 642 | ||
635 | switch (clk_type) { | 643 | switch (clk_type) { |
636 | case SMU_GFXCLK: | 644 | case SMU_GFXCLK: |
@@ -643,22 +651,42 @@ static int navi10_print_clk_levels(struct smu_context *smu, | |||
643 | ret = smu_get_current_clk_freq(smu, clk_type, &cur_value); | 651 | ret = smu_get_current_clk_freq(smu, clk_type, &cur_value); |
644 | if (ret) | 652 | if (ret) |
645 | return size; | 653 | return size; |
654 | |||
646 | /* 10KHz -> MHz */ | 655 | /* 10KHz -> MHz */ |
647 | cur_value = cur_value / 100; | 656 | cur_value = cur_value / 100; |
648 | 657 | ||
649 | size += sprintf(buf, "current clk: %uMhz\n", cur_value); | ||
650 | |||
651 | ret = smu_get_dpm_level_count(smu, clk_type, &count); | 658 | ret = smu_get_dpm_level_count(smu, clk_type, &count); |
652 | if (ret) | 659 | if (ret) |
653 | return size; | 660 | return size; |
654 | 661 | ||
655 | for (i = 0; i < count; i++) { | 662 | if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) { |
656 | ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value); | 663 | for (i = 0; i < count; i++) { |
664 | ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value); | ||
665 | if (ret) | ||
666 | return size; | ||
667 | |||
668 | size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, | ||
669 | cur_value == value ? "*" : ""); | ||
670 | } | ||
671 | } else { | ||
672 | ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]); | ||
673 | if (ret) | ||
674 | return size; | ||
675 | ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]); | ||
657 | if (ret) | 676 | if (ret) |
658 | return size; | 677 | return size; |
659 | 678 | ||
660 | size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, | 679 | freq_values[1] = cur_value; |
661 | cur_value == value ? "*" : ""); | 680 | mark_index = cur_value == freq_values[0] ? 0 : |
681 | cur_value == freq_values[2] ? 2 : 1; | ||
682 | if (mark_index != 1) | ||
683 | freq_values[1] = (freq_values[0] + freq_values[2]) / 2; | ||
684 | |||
685 | for (i = 0; i < 3; i++) { | ||
686 | size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i], | ||
687 | i == mark_index ? "*" : ""); | ||
688 | } | ||
689 | |||
662 | } | 690 | } |
663 | break; | 691 | break; |
664 | default: | 692 | default: |
@@ -919,12 +947,13 @@ static bool navi10_is_dpm_running(struct smu_context *smu) | |||
919 | return !!(feature_enabled & SMC_DPM_FEATURE); | 947 | return !!(feature_enabled & SMC_DPM_FEATURE); |
920 | } | 948 | } |
921 | 949 | ||
922 | static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value) | 950 | static int navi10_get_fan_speed_rpm(struct smu_context *smu, |
951 | uint32_t *speed) | ||
923 | { | 952 | { |
924 | SmuMetrics_t metrics; | 953 | SmuMetrics_t metrics; |
925 | int ret = 0; | 954 | int ret = 0; |
926 | 955 | ||
927 | if (!value) | 956 | if (!speed) |
928 | return -EINVAL; | 957 | return -EINVAL; |
929 | 958 | ||
930 | memset(&metrics, 0, sizeof(metrics)); | 959 | memset(&metrics, 0, sizeof(metrics)); |
@@ -934,7 +963,7 @@ static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value) | |||
934 | if (ret) | 963 | if (ret) |
935 | return ret; | 964 | return ret; |
936 | 965 | ||
937 | *value = metrics.CurrFanSpeed; | 966 | *speed = metrics.CurrFanSpeed; |
938 | 967 | ||
939 | return ret; | 968 | return ret; |
940 | } | 969 | } |
@@ -944,10 +973,10 @@ static int navi10_get_fan_speed_percent(struct smu_context *smu, | |||
944 | { | 973 | { |
945 | int ret = 0; | 974 | int ret = 0; |
946 | uint32_t percent = 0; | 975 | uint32_t percent = 0; |
947 | uint16_t current_rpm; | 976 | uint32_t current_rpm; |
948 | PPTable_t *pptable = smu->smu_table.driver_pptable; | 977 | PPTable_t *pptable = smu->smu_table.driver_pptable; |
949 | 978 | ||
950 | ret = navi10_get_fan_speed(smu, ¤t_rpm); | 979 | ret = navi10_get_fan_speed_rpm(smu, ¤t_rpm); |
951 | if (ret) | 980 | if (ret) |
952 | return ret; | 981 | return ret; |
953 | 982 | ||
@@ -1530,6 +1559,76 @@ static int navi10_set_ppfeature_status(struct smu_context *smu, | |||
1530 | return 0; | 1559 | return 0; |
1531 | } | 1560 | } |
1532 | 1561 | ||
1562 | static int navi10_set_peak_clock_by_device(struct smu_context *smu) | ||
1563 | { | ||
1564 | struct amdgpu_device *adev = smu->adev; | ||
1565 | int ret = 0; | ||
1566 | uint32_t sclk_freq = 0, uclk_freq = 0; | ||
1567 | uint32_t uclk_level = 0; | ||
1568 | |||
1569 | switch (adev->pdev->revision) { | ||
1570 | case 0xf0: /* XTX */ | ||
1571 | case 0xc0: | ||
1572 | sclk_freq = NAVI10_PEAK_SCLK_XTX; | ||
1573 | break; | ||
1574 | case 0xf1: /* XT */ | ||
1575 | case 0xc1: | ||
1576 | sclk_freq = NAVI10_PEAK_SCLK_XT; | ||
1577 | break; | ||
1578 | default: /* XL */ | ||
1579 | sclk_freq = NAVI10_PEAK_SCLK_XL; | ||
1580 | break; | ||
1581 | } | ||
1582 | |||
1583 | ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level); | ||
1584 | if (ret) | ||
1585 | return ret; | ||
1586 | ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq); | ||
1587 | if (ret) | ||
1588 | return ret; | ||
1589 | |||
1590 | ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); | ||
1591 | if (ret) | ||
1592 | return ret; | ||
1593 | ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); | ||
1594 | if (ret) | ||
1595 | return ret; | ||
1596 | |||
1597 | return ret; | ||
1598 | } | ||
1599 | |||
1600 | static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) | ||
1601 | { | ||
1602 | int ret = 0; | ||
1603 | |||
1604 | switch (level) { | ||
1605 | case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: | ||
1606 | ret = navi10_set_peak_clock_by_device(smu); | ||
1607 | break; | ||
1608 | default: | ||
1609 | ret = -EINVAL; | ||
1610 | break; | ||
1611 | } | ||
1612 | |||
1613 | return ret; | ||
1614 | } | ||
1615 | |||
1616 | static int navi10_get_thermal_temperature_range(struct smu_context *smu, | ||
1617 | struct smu_temperature_range *range) | ||
1618 | { | ||
1619 | struct smu_table_context *table_context = &smu->smu_table; | ||
1620 | struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table; | ||
1621 | |||
1622 | if (!range || !powerplay_table) | ||
1623 | return -EINVAL; | ||
1624 | |||
1625 | /* The unit is temperature */ | ||
1626 | range->min = 0; | ||
1627 | range->max = powerplay_table->software_shutdown_temp; | ||
1628 | |||
1629 | return 0; | ||
1630 | } | ||
1631 | |||
1533 | static const struct pptable_funcs navi10_ppt_funcs = { | 1632 | static const struct pptable_funcs navi10_ppt_funcs = { |
1534 | .tables_init = navi10_tables_init, | 1633 | .tables_init = navi10_tables_init, |
1535 | .alloc_dpm_context = navi10_allocate_dpm_context, | 1634 | .alloc_dpm_context = navi10_allocate_dpm_context, |
@@ -1557,6 +1656,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { | |||
1557 | .unforce_dpm_levels = navi10_unforce_dpm_levels, | 1656 | .unforce_dpm_levels = navi10_unforce_dpm_levels, |
1558 | .is_dpm_running = navi10_is_dpm_running, | 1657 | .is_dpm_running = navi10_is_dpm_running, |
1559 | .get_fan_speed_percent = navi10_get_fan_speed_percent, | 1658 | .get_fan_speed_percent = navi10_get_fan_speed_percent, |
1659 | .get_fan_speed_rpm = navi10_get_fan_speed_rpm, | ||
1560 | .get_power_profile_mode = navi10_get_power_profile_mode, | 1660 | .get_power_profile_mode = navi10_get_power_profile_mode, |
1561 | .set_power_profile_mode = navi10_set_power_profile_mode, | 1661 | .set_power_profile_mode = navi10_set_power_profile_mode, |
1562 | .get_profiling_clk_mask = navi10_get_profiling_clk_mask, | 1662 | .get_profiling_clk_mask = navi10_get_profiling_clk_mask, |
@@ -1565,6 +1665,8 @@ static const struct pptable_funcs navi10_ppt_funcs = { | |||
1565 | .get_uclk_dpm_states = navi10_get_uclk_dpm_states, | 1665 | .get_uclk_dpm_states = navi10_get_uclk_dpm_states, |
1566 | .get_ppfeature_status = navi10_get_ppfeature_status, | 1666 | .get_ppfeature_status = navi10_get_ppfeature_status, |
1567 | .set_ppfeature_status = navi10_set_ppfeature_status, | 1667 | .set_ppfeature_status = navi10_set_ppfeature_status, |
1668 | .set_performance_level = navi10_set_performance_level, | ||
1669 | .get_thermal_temperature_range = navi10_get_thermal_temperature_range, | ||
1568 | }; | 1670 | }; |
1569 | 1671 | ||
1570 | void navi10_set_ppt_funcs(struct smu_context *smu) | 1672 | void navi10_set_ppt_funcs(struct smu_context *smu) |
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h index 957288e22f47..620ff17c2fef 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h | |||
@@ -23,6 +23,10 @@ | |||
23 | #ifndef __NAVI10_PPT_H__ | 23 | #ifndef __NAVI10_PPT_H__ |
24 | #define __NAVI10_PPT_H__ | 24 | #define __NAVI10_PPT_H__ |
25 | 25 | ||
26 | #define NAVI10_PEAK_SCLK_XTX (1830) | ||
27 | #define NAVI10_PEAK_SCLK_XT (1755) | ||
28 | #define NAVI10_PEAK_SCLK_XL (1625) | ||
29 | |||
26 | extern void navi10_set_ppt_funcs(struct smu_context *smu); | 30 | extern void navi10_set_ppt_funcs(struct smu_context *smu); |
27 | 31 | ||
28 | #endif | 32 | #endif |
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 95c7c4dae523..ac5b26228e75 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c | |||
@@ -1124,10 +1124,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, | |||
1124 | struct smu_temperature_range *range) | 1124 | struct smu_temperature_range *range) |
1125 | { | 1125 | { |
1126 | struct amdgpu_device *adev = smu->adev; | 1126 | struct amdgpu_device *adev = smu->adev; |
1127 | int low = SMU_THERMAL_MINIMUM_ALERT_TEMP * | 1127 | int low = SMU_THERMAL_MINIMUM_ALERT_TEMP; |
1128 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; | 1128 | int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP; |
1129 | int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP * | ||
1130 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; | ||
1131 | uint32_t val; | 1129 | uint32_t val; |
1132 | 1130 | ||
1133 | if (!range) | 1131 | if (!range) |
@@ -1138,6 +1136,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, | |||
1138 | if (high > range->max) | 1136 | if (high > range->max) |
1139 | high = range->max; | 1137 | high = range->max; |
1140 | 1138 | ||
1139 | low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min); | ||
1140 | high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max); | ||
1141 | |||
1141 | if (low > high) | 1142 | if (low > high) |
1142 | return -EINVAL; | 1143 | return -EINVAL; |
1143 | 1144 | ||
@@ -1146,8 +1147,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, | |||
1146 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); | 1147 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); |
1147 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); | 1148 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); |
1148 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); | 1149 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); |
1149 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); | 1150 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); |
1150 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); | 1151 | val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); |
1151 | val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); | 1152 | val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); |
1152 | 1153 | ||
1153 | WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); | 1154 | WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); |
@@ -1186,7 +1187,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) | |||
1186 | 1187 | ||
1187 | if (!smu->pm_enabled) | 1188 | if (!smu->pm_enabled) |
1188 | return ret; | 1189 | return ret; |
1190 | |||
1189 | ret = smu_get_thermal_temperature_range(smu, &range); | 1191 | ret = smu_get_thermal_temperature_range(smu, &range); |
1192 | if (ret) | ||
1193 | return ret; | ||
1190 | 1194 | ||
1191 | if (smu->smu_table.thermal_controller_type) { | 1195 | if (smu->smu_table.thermal_controller_type) { |
1192 | ret = smu_v11_0_set_thermal_range(smu, &range); | 1196 | ret = smu_v11_0_set_thermal_range(smu, &range); |
@@ -1202,15 +1206,17 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) | |||
1202 | return ret; | 1206 | return ret; |
1203 | } | 1207 | } |
1204 | 1208 | ||
1205 | adev->pm.dpm.thermal.min_temp = range.min; | 1209 | adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
1206 | adev->pm.dpm.thermal.max_temp = range.max; | 1210 | adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
1207 | adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max; | 1211 | adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
1208 | adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min; | 1212 | adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
1209 | adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max; | 1213 | adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
1210 | adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max; | 1214 | adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
1211 | adev->pm.dpm.thermal.min_mem_temp = range.mem_min; | 1215 | adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
1212 | adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max; | 1216 | adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
1213 | adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max; | 1217 | adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; |
1218 | adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; | ||
1219 | adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; | ||
1214 | 1220 | ||
1215 | return ret; | 1221 | return ret; |
1216 | } | 1222 | } |
@@ -1371,23 +1377,6 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) | |||
1371 | return ret; | 1377 | return ret; |
1372 | } | 1378 | } |
1373 | 1379 | ||
1374 | static int smu_v11_0_get_current_rpm(struct smu_context *smu, | ||
1375 | uint32_t *current_rpm) | ||
1376 | { | ||
1377 | int ret; | ||
1378 | |||
1379 | ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm); | ||
1380 | |||
1381 | if (ret) { | ||
1382 | pr_err("Attempt to get current RPM from SMC Failed!\n"); | ||
1383 | return ret; | ||
1384 | } | ||
1385 | |||
1386 | smu_read_smc_arg(smu, current_rpm); | ||
1387 | |||
1388 | return 0; | ||
1389 | } | ||
1390 | |||
1391 | static uint32_t | 1380 | static uint32_t |
1392 | smu_v11_0_get_fan_control_mode(struct smu_context *smu) | 1381 | smu_v11_0_get_fan_control_mode(struct smu_context *smu) |
1393 | { | 1382 | { |
@@ -1773,7 +1762,6 @@ static const struct smu_funcs smu_v11_0_funcs = { | |||
1773 | .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, | 1762 | .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, |
1774 | .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, | 1763 | .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, |
1775 | .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges, | 1764 | .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges, |
1776 | .get_current_rpm = smu_v11_0_get_current_rpm, | ||
1777 | .get_fan_control_mode = smu_v11_0_get_fan_control_mode, | 1765 | .get_fan_control_mode = smu_v11_0_get_fan_control_mode, |
1778 | .set_fan_control_mode = smu_v11_0_set_fan_control_mode, | 1766 | .set_fan_control_mode = smu_v11_0_set_fan_control_mode, |
1779 | .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, | 1767 | .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, |
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index bb9bb09cfc7a..dd6fd1c8bf24 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c | |||
@@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu) | |||
450 | memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable, | 450 | memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable, |
451 | sizeof(PPTable_t)); | 451 | sizeof(PPTable_t)); |
452 | 452 | ||
453 | table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp; | ||
454 | table_context->thermal_controller_type = powerplay_table->ucThermalControllerType; | 453 | table_context->thermal_controller_type = powerplay_table->ucThermalControllerType; |
455 | table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]); | 454 | table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]); |
456 | 455 | ||
@@ -3015,6 +3014,23 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu) | |||
3015 | return ret; | 3014 | return ret; |
3016 | } | 3015 | } |
3017 | 3016 | ||
3017 | static int vega20_get_fan_speed_rpm(struct smu_context *smu, | ||
3018 | uint32_t *speed) | ||
3019 | { | ||
3020 | int ret; | ||
3021 | |||
3022 | ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm); | ||
3023 | |||
3024 | if (ret) { | ||
3025 | pr_err("Attempt to get current RPM from SMC Failed!\n"); | ||
3026 | return ret; | ||
3027 | } | ||
3028 | |||
3029 | smu_read_smc_arg(smu, speed); | ||
3030 | |||
3031 | return 0; | ||
3032 | } | ||
3033 | |||
3018 | static int vega20_get_fan_speed_percent(struct smu_context *smu, | 3034 | static int vega20_get_fan_speed_percent(struct smu_context *smu, |
3019 | uint32_t *speed) | 3035 | uint32_t *speed) |
3020 | { | 3036 | { |
@@ -3022,7 +3038,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu, | |||
3022 | uint32_t current_rpm = 0, percent = 0; | 3038 | uint32_t current_rpm = 0, percent = 0; |
3023 | PPTable_t *pptable = smu->smu_table.driver_pptable; | 3039 | PPTable_t *pptable = smu->smu_table.driver_pptable; |
3024 | 3040 | ||
3025 | ret = smu_get_current_rpm(smu, ¤t_rpm); | 3041 | ret = vega20_get_fan_speed_rpm(smu, ¤t_rpm); |
3026 | if (ret) | 3042 | if (ret) |
3027 | return ret; | 3043 | return ret; |
3028 | 3044 | ||
@@ -3217,35 +3233,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu, | |||
3217 | return 0; | 3233 | return 0; |
3218 | } | 3234 | } |
3219 | 3235 | ||
3220 | static const struct smu_temperature_range vega20_thermal_policy[] = | ||
3221 | { | ||
3222 | {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, | ||
3223 | { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, | ||
3224 | }; | ||
3225 | |||
3226 | static int vega20_get_thermal_temperature_range(struct smu_context *smu, | 3236 | static int vega20_get_thermal_temperature_range(struct smu_context *smu, |
3227 | struct smu_temperature_range *range) | 3237 | struct smu_temperature_range *range) |
3228 | { | 3238 | { |
3229 | 3239 | struct smu_table_context *table_context = &smu->smu_table; | |
3240 | ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table; | ||
3230 | PPTable_t *pptable = smu->smu_table.driver_pptable; | 3241 | PPTable_t *pptable = smu->smu_table.driver_pptable; |
3231 | 3242 | ||
3232 | if (!range) | 3243 | if (!range || !powerplay_table) |
3233 | return -EINVAL; | 3244 | return -EINVAL; |
3234 | 3245 | ||
3235 | memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range)); | 3246 | /* The unit is temperature */ |
3236 | 3247 | range->min = 0; | |
3237 | range->max = pptable->TedgeLimit * | 3248 | range->max = powerplay_table->usSoftwareShutdownTemp; |
3238 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; | 3249 | range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE); |
3239 | range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) * | 3250 | range->hotspot_crit_max = pptable->ThotspotLimit; |
3240 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; | 3251 | range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT); |
3241 | range->hotspot_crit_max = pptable->ThotspotLimit * | 3252 | range->mem_crit_max = pptable->ThbmLimit; |
3242 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; | 3253 | range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM); |
3243 | range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) * | ||
3244 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; | ||
3245 | range->mem_crit_max = pptable->ThbmLimit * | ||
3246 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; | ||
3247 | range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)* | ||
3248 | SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; | ||
3249 | 3254 | ||
3250 | 3255 | ||
3251 | return 0; | 3256 | return 0; |
@@ -3293,6 +3298,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { | |||
3293 | .is_dpm_running = vega20_is_dpm_running, | 3298 | .is_dpm_running = vega20_is_dpm_running, |
3294 | .set_thermal_fan_table = vega20_set_thermal_fan_table, | 3299 | .set_thermal_fan_table = vega20_set_thermal_fan_table, |
3295 | .get_fan_speed_percent = vega20_get_fan_speed_percent, | 3300 | .get_fan_speed_percent = vega20_get_fan_speed_percent, |
3301 | .get_fan_speed_rpm = vega20_get_fan_speed_rpm, | ||
3296 | .set_watermarks_table = vega20_set_watermarks_table, | 3302 | .set_watermarks_table = vega20_set_watermarks_table, |
3297 | .get_thermal_temperature_range = vega20_get_thermal_temperature_range | 3303 | .get_thermal_temperature_range = vega20_get_thermal_temperature_range |
3298 | }; | 3304 | }; |
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index bc19dbd531ef..359030d5d818 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c | |||
@@ -191,6 +191,7 @@ int bochs_kms_init(struct bochs_device *bochs) | |||
191 | bochs->dev->mode_config.fb_base = bochs->fb_base; | 191 | bochs->dev->mode_config.fb_base = bochs->fb_base; |
192 | bochs->dev->mode_config.preferred_depth = 24; | 192 | bochs->dev->mode_config.preferred_depth = 24; |
193 | bochs->dev->mode_config.prefer_shadow = 0; | 193 | bochs->dev->mode_config.prefer_shadow = 0; |
194 | bochs->dev->mode_config.prefer_shadow_fbdev = 1; | ||
194 | bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; | 195 | bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; |
195 | 196 | ||
196 | bochs->dev->mode_config.funcs = &bochs_mode_funcs; | 197 | bochs->dev->mode_config.funcs = &bochs_mode_funcs; |
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index ee777469293a..e4e22bbae2a7 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig | |||
@@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC | |||
48 | config DRM_LVDS_ENCODER | 48 | config DRM_LVDS_ENCODER |
49 | tristate "Transparent parallel to LVDS encoder support" | 49 | tristate "Transparent parallel to LVDS encoder support" |
50 | depends on OF | 50 | depends on OF |
51 | select DRM_KMS_HELPER | ||
51 | select DRM_PANEL_BRIDGE | 52 | select DRM_PANEL_BRIDGE |
52 | help | 53 | help |
53 | Support for transparent parallel to LVDS encoders that don't require | 54 | Support for transparent parallel to LVDS encoders that don't require |
@@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024 | |||
116 | 117 | ||
117 | config DRM_TOSHIBA_TC358764 | 118 | config DRM_TOSHIBA_TC358764 |
118 | tristate "TC358764 DSI/LVDS bridge" | 119 | tristate "TC358764 DSI/LVDS bridge" |
119 | depends on DRM && DRM_PANEL | ||
120 | depends on OF | 120 | depends on OF |
121 | select DRM_MIPI_DSI | 121 | select DRM_MIPI_DSI |
122 | select DRM_KMS_HELPER | ||
123 | select DRM_PANEL | ||
122 | help | 124 | help |
123 | Toshiba TC358764 DSI/LVDS bridge driver. | 125 | Toshiba TC358764 DSI/LVDS bridge driver. |
124 | 126 | ||
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 410572f14257..e1dafb0cc5e2 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c | |||
@@ -254,7 +254,6 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u | |||
254 | struct drm_device *dev = client->dev; | 254 | struct drm_device *dev = client->dev; |
255 | struct drm_client_buffer *buffer; | 255 | struct drm_client_buffer *buffer; |
256 | struct drm_gem_object *obj; | 256 | struct drm_gem_object *obj; |
257 | void *vaddr; | ||
258 | int ret; | 257 | int ret; |
259 | 258 | ||
260 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); | 259 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); |
@@ -281,6 +280,36 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u | |||
281 | 280 | ||
282 | buffer->gem = obj; | 281 | buffer->gem = obj; |
283 | 282 | ||
283 | return buffer; | ||
284 | |||
285 | err_delete: | ||
286 | drm_client_buffer_delete(buffer); | ||
287 | |||
288 | return ERR_PTR(ret); | ||
289 | } | ||
290 | |||
291 | /** | ||
292 | * drm_client_buffer_vmap - Map DRM client buffer into address space | ||
293 | * @buffer: DRM client buffer | ||
294 | * | ||
295 | * This function maps a client buffer into kernel address space. If the | ||
296 | * buffer is already mapped, it returns the mapping's address. | ||
297 | * | ||
298 | * Client buffer mappings are not ref'counted. Each call to | ||
299 | * drm_client_buffer_vmap() should be followed by a call to | ||
300 | * drm_client_buffer_vunmap(); or the client buffer should be mapped | ||
301 | * throughout its lifetime. | ||
302 | * | ||
303 | * Returns: | ||
304 | * The mapped memory's address | ||
305 | */ | ||
306 | void *drm_client_buffer_vmap(struct drm_client_buffer *buffer) | ||
307 | { | ||
308 | void *vaddr; | ||
309 | |||
310 | if (buffer->vaddr) | ||
311 | return buffer->vaddr; | ||
312 | |||
284 | /* | 313 | /* |
285 | * FIXME: The dependency on GEM here isn't required, we could | 314 | * FIXME: The dependency on GEM here isn't required, we could |
286 | * convert the driver handle to a dma-buf instead and use the | 315 | * convert the driver handle to a dma-buf instead and use the |
@@ -289,21 +318,30 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u | |||
289 | * fd_install step out of the driver backend hooks, to make that | 318 | * fd_install step out of the driver backend hooks, to make that |
290 | * final step optional for internal users. | 319 | * final step optional for internal users. |
291 | */ | 320 | */ |
292 | vaddr = drm_gem_vmap(obj); | 321 | vaddr = drm_gem_vmap(buffer->gem); |
293 | if (IS_ERR(vaddr)) { | 322 | if (IS_ERR(vaddr)) |
294 | ret = PTR_ERR(vaddr); | 323 | return vaddr; |
295 | goto err_delete; | ||
296 | } | ||
297 | 324 | ||
298 | buffer->vaddr = vaddr; | 325 | buffer->vaddr = vaddr; |
299 | 326 | ||
300 | return buffer; | 327 | return vaddr; |
301 | 328 | } | |
302 | err_delete: | 329 | EXPORT_SYMBOL(drm_client_buffer_vmap); |
303 | drm_client_buffer_delete(buffer); | ||
304 | 330 | ||
305 | return ERR_PTR(ret); | 331 | /** |
332 | * drm_client_buffer_vunmap - Unmap DRM client buffer | ||
333 | * @buffer: DRM client buffer | ||
334 | * | ||
335 | * This function removes a client buffer's memory mapping. Calling this | ||
336 | * function is only required by clients that manage their buffer mappings | ||
337 | * by themselves. | ||
338 | */ | ||
339 | void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) | ||
340 | { | ||
341 | drm_gem_vunmap(buffer->gem, buffer->vaddr); | ||
342 | buffer->vaddr = NULL; | ||
306 | } | 343 | } |
344 | EXPORT_SYMBOL(drm_client_buffer_vunmap); | ||
307 | 345 | ||
308 | static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer) | 346 | static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer) |
309 | { | 347 | { |
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 56d36779d213..c8922b7cac09 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c | |||
@@ -859,7 +859,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation) | |||
859 | * simple XOR between the two handle the addition nicely. | 859 | * simple XOR between the two handle the addition nicely. |
860 | */ | 860 | */ |
861 | cmdline = &connector->cmdline_mode; | 861 | cmdline = &connector->cmdline_mode; |
862 | if (cmdline->specified) { | 862 | if (cmdline->specified && cmdline->rotation_reflection) { |
863 | unsigned int cmdline_rest, panel_rest; | 863 | unsigned int cmdline_rest, panel_rest; |
864 | unsigned int cmdline_rot, panel_rot; | 864 | unsigned int cmdline_rot, panel_rot; |
865 | unsigned int sum_rot, sum_rest; | 865 | unsigned int sum_rot, sum_rest; |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1984e5c54d58..a7ba5b4902d6 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -403,6 +403,7 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) | |||
403 | struct drm_clip_rect *clip = &helper->dirty_clip; | 403 | struct drm_clip_rect *clip = &helper->dirty_clip; |
404 | struct drm_clip_rect clip_copy; | 404 | struct drm_clip_rect clip_copy; |
405 | unsigned long flags; | 405 | unsigned long flags; |
406 | void *vaddr; | ||
406 | 407 | ||
407 | spin_lock_irqsave(&helper->dirty_lock, flags); | 408 | spin_lock_irqsave(&helper->dirty_lock, flags); |
408 | clip_copy = *clip; | 409 | clip_copy = *clip; |
@@ -412,10 +413,20 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) | |||
412 | 413 | ||
413 | /* call dirty callback only when it has been really touched */ | 414 | /* call dirty callback only when it has been really touched */ |
414 | if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) { | 415 | if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) { |
416 | |||
415 | /* Generic fbdev uses a shadow buffer */ | 417 | /* Generic fbdev uses a shadow buffer */ |
416 | if (helper->buffer) | 418 | if (helper->buffer) { |
419 | vaddr = drm_client_buffer_vmap(helper->buffer); | ||
420 | if (IS_ERR(vaddr)) | ||
421 | return; | ||
417 | drm_fb_helper_dirty_blit_real(helper, &clip_copy); | 422 | drm_fb_helper_dirty_blit_real(helper, &clip_copy); |
418 | helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); | 423 | } |
424 | if (helper->fb->funcs->dirty) | ||
425 | helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, | ||
426 | &clip_copy, 1); | ||
427 | |||
428 | if (helper->buffer) | ||
429 | drm_client_buffer_vunmap(helper->buffer); | ||
419 | } | 430 | } |
420 | } | 431 | } |
421 | 432 | ||
@@ -604,6 +615,16 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper) | |||
604 | } | 615 | } |
605 | EXPORT_SYMBOL(drm_fb_helper_unlink_fbi); | 616 | EXPORT_SYMBOL(drm_fb_helper_unlink_fbi); |
606 | 617 | ||
618 | static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper) | ||
619 | { | ||
620 | struct drm_device *dev = fb_helper->dev; | ||
621 | struct drm_framebuffer *fb = fb_helper->fb; | ||
622 | |||
623 | return dev->mode_config.prefer_shadow_fbdev || | ||
624 | dev->mode_config.prefer_shadow || | ||
625 | fb->funcs->dirty; | ||
626 | } | ||
627 | |||
607 | static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, | 628 | static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, |
608 | u32 width, u32 height) | 629 | u32 width, u32 height) |
609 | { | 630 | { |
@@ -611,7 +632,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, | |||
611 | struct drm_clip_rect *clip = &helper->dirty_clip; | 632 | struct drm_clip_rect *clip = &helper->dirty_clip; |
612 | unsigned long flags; | 633 | unsigned long flags; |
613 | 634 | ||
614 | if (!helper->fb->funcs->dirty) | 635 | if (!drm_fbdev_use_shadow_fb(helper)) |
615 | return; | 636 | return; |
616 | 637 | ||
617 | spin_lock_irqsave(&helper->dirty_lock, flags); | 638 | spin_lock_irqsave(&helper->dirty_lock, flags); |
@@ -2178,6 +2199,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, | |||
2178 | struct drm_framebuffer *fb; | 2199 | struct drm_framebuffer *fb; |
2179 | struct fb_info *fbi; | 2200 | struct fb_info *fbi; |
2180 | u32 format; | 2201 | u32 format; |
2202 | void *vaddr; | ||
2181 | 2203 | ||
2182 | DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n", | 2204 | DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n", |
2183 | sizes->surface_width, sizes->surface_height, | 2205 | sizes->surface_width, sizes->surface_height, |
@@ -2200,16 +2222,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, | |||
2200 | fbi->fbops = &drm_fbdev_fb_ops; | 2222 | fbi->fbops = &drm_fbdev_fb_ops; |
2201 | fbi->screen_size = fb->height * fb->pitches[0]; | 2223 | fbi->screen_size = fb->height * fb->pitches[0]; |
2202 | fbi->fix.smem_len = fbi->screen_size; | 2224 | fbi->fix.smem_len = fbi->screen_size; |
2203 | fbi->screen_buffer = buffer->vaddr; | 2225 | |
2204 | /* Shamelessly leak the physical address to user-space */ | ||
2205 | #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) | ||
2206 | if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0) | ||
2207 | fbi->fix.smem_start = | ||
2208 | page_to_phys(virt_to_page(fbi->screen_buffer)); | ||
2209 | #endif | ||
2210 | drm_fb_helper_fill_info(fbi, fb_helper, sizes); | 2226 | drm_fb_helper_fill_info(fbi, fb_helper, sizes); |
2211 | 2227 | ||
2212 | if (fb->funcs->dirty) { | 2228 | if (drm_fbdev_use_shadow_fb(fb_helper)) { |
2213 | struct fb_ops *fbops; | 2229 | struct fb_ops *fbops; |
2214 | void *shadow; | 2230 | void *shadow; |
2215 | 2231 | ||
@@ -2231,6 +2247,19 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, | |||
2231 | fbi->fbdefio = &drm_fbdev_defio; | 2247 | fbi->fbdefio = &drm_fbdev_defio; |
2232 | 2248 | ||
2233 | fb_deferred_io_init(fbi); | 2249 | fb_deferred_io_init(fbi); |
2250 | } else { | ||
2251 | /* buffer is mapped for HW framebuffer */ | ||
2252 | vaddr = drm_client_buffer_vmap(fb_helper->buffer); | ||
2253 | if (IS_ERR(vaddr)) | ||
2254 | return PTR_ERR(vaddr); | ||
2255 | |||
2256 | fbi->screen_buffer = vaddr; | ||
2257 | /* Shamelessly leak the physical address to user-space */ | ||
2258 | #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) | ||
2259 | if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0) | ||
2260 | fbi->fix.smem_start = | ||
2261 | page_to_phys(virt_to_page(fbi->screen_buffer)); | ||
2262 | #endif | ||
2234 | } | 2263 | } |
2235 | 2264 | ||
2236 | return 0; | 2265 | return 0; |
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 0b72468e8131..57564318ceea 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c | |||
@@ -835,7 +835,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb) | |||
835 | struct drm_device *dev = fb->dev; | 835 | struct drm_device *dev = fb->dev; |
836 | struct drm_atomic_state *state; | 836 | struct drm_atomic_state *state; |
837 | struct drm_plane *plane; | 837 | struct drm_plane *plane; |
838 | struct drm_connector *conn; | 838 | struct drm_connector *conn __maybe_unused; |
839 | struct drm_connector_state *conn_state; | 839 | struct drm_connector_state *conn_state; |
840 | int i, ret; | 840 | int i, ret; |
841 | unsigned plane_mask; | 841 | unsigned plane_mask; |
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 74a5739df506..80fcd5dc1558 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
@@ -1686,7 +1686,7 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len, | |||
1686 | * | 1686 | * |
1687 | * Additionals options can be provided following the mode, using a comma to | 1687 | * Additionals options can be provided following the mode, using a comma to |
1688 | * separate each option. Valid options can be found in | 1688 | * separate each option. Valid options can be found in |
1689 | * Documentation/fb/modedb.txt. | 1689 | * Documentation/fb/modedb.rst. |
1690 | * | 1690 | * |
1691 | * The intermediate drm_cmdline_mode structure is required to store additional | 1691 | * The intermediate drm_cmdline_mode structure is required to store additional |
1692 | * options from the command line modline like the force-enable/disable flag. | 1692 | * options from the command line modline like the force-enable/disable flag. |
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 60ce4a8ad9e1..6f7d3b3b3628 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig | |||
@@ -2,6 +2,7 @@ | |||
2 | config DRM_EXYNOS | 2 | config DRM_EXYNOS |
3 | tristate "DRM Support for Samsung SoC EXYNOS Series" | 3 | tristate "DRM Support for Samsung SoC EXYNOS Series" |
4 | depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST) | 4 | depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST) |
5 | depends on MMU | ||
5 | select DRM_KMS_HELPER | 6 | select DRM_KMS_HELPER |
6 | select VIDEOMODE_HELPERS | 7 | select VIDEOMODE_HELPERS |
7 | select SND_SOC_HDMI_CODEC if SND_SOC | 8 | select SND_SOC_HDMI_CODEC if SND_SOC |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index a594ab7be2c0..164d914cbe9a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c | |||
@@ -44,7 +44,7 @@ static unsigned int fimc_mask = 0xc; | |||
44 | module_param_named(fimc_devs, fimc_mask, uint, 0644); | 44 | module_param_named(fimc_devs, fimc_mask, uint, 0644); |
45 | MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM"); | 45 | MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM"); |
46 | 46 | ||
47 | #define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev)) | 47 | #define get_fimc_context(dev) dev_get_drvdata(dev) |
48 | 48 | ||
49 | enum { | 49 | enum { |
50 | FIMC_CLK_LCLK, | 50 | FIMC_CLK_LCLK, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 50904eee96f7..2a3382d43bc9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
@@ -267,7 +267,7 @@ static inline void g2d_hw_reset(struct g2d_data *g2d) | |||
267 | static int g2d_init_cmdlist(struct g2d_data *g2d) | 267 | static int g2d_init_cmdlist(struct g2d_data *g2d) |
268 | { | 268 | { |
269 | struct device *dev = g2d->dev; | 269 | struct device *dev = g2d->dev; |
270 | struct g2d_cmdlist_node *node = g2d->cmdlist_node; | 270 | struct g2d_cmdlist_node *node; |
271 | int nr; | 271 | int nr; |
272 | int ret; | 272 | int ret; |
273 | struct g2d_buf_info *buf_info; | 273 | struct g2d_buf_info *buf_info; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 1e4b21c49a06..1c524db9570f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c | |||
@@ -58,7 +58,7 @@ | |||
58 | #define GSC_COEF_DEPTH 3 | 58 | #define GSC_COEF_DEPTH 3 |
59 | #define GSC_AUTOSUSPEND_DELAY 2000 | 59 | #define GSC_AUTOSUSPEND_DELAY 2000 |
60 | 60 | ||
61 | #define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev)) | 61 | #define get_gsc_context(dev) dev_get_drvdata(dev) |
62 | #define gsc_read(offset) readl(ctx->regs + (offset)) | 62 | #define gsc_read(offset) readl(ctx->regs + (offset)) |
63 | #define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset)) | 63 | #define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset)) |
64 | 64 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 9af096479e1c..b24ba948b725 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c | |||
@@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler) | |||
94 | scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG); | 94 | scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG); |
95 | do { | 95 | do { |
96 | cpu_relax(); | 96 | cpu_relax(); |
97 | } while (retry > 1 && | 97 | } while (--retry > 1 && |
98 | scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET); | 98 | scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET); |
99 | do { | 99 | do { |
100 | cpu_relax(); | 100 | cpu_relax(); |
101 | scaler_write(1, SCALER_INT_EN); | 101 | scaler_write(1, SCALER_INT_EN); |
102 | } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1); | 102 | } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1); |
103 | 103 | ||
104 | return retry ? 0 : -EIO; | 104 | return retry ? 0 : -EIO; |
105 | } | 105 | } |
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 91355c2ea8a5..8cace65f50ce 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -16,7 +16,6 @@ subdir-ccflags-y := -Wall -Wextra | |||
16 | subdir-ccflags-y += $(call cc-disable-warning, unused-parameter) | 16 | subdir-ccflags-y += $(call cc-disable-warning, unused-parameter) |
17 | subdir-ccflags-y += $(call cc-disable-warning, type-limits) | 17 | subdir-ccflags-y += $(call cc-disable-warning, type-limits) |
18 | subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) | 18 | subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) |
19 | subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough) | ||
20 | subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) | 19 | subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) |
21 | # clang warnings | 20 | # clang warnings |
22 | subdir-ccflags-y += $(call cc-disable-warning, sign-compare) | 21 | subdir-ccflags-y += $(call cc-disable-warning, sign-compare) |
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index c4710889cb32..3ef4e9f573cf 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c | |||
@@ -765,7 +765,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) | |||
765 | } | 765 | } |
766 | 766 | ||
767 | if (bdb->version >= 226) { | 767 | if (bdb->version >= 226) { |
768 | u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time; | 768 | u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time; |
769 | 769 | ||
770 | wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3; | 770 | wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3; |
771 | switch (wakeup_time) { | 771 | switch (wakeup_time) { |
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 753ac3165061..7b908e10d32e 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c | |||
@@ -178,6 +178,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv) | |||
178 | clpchgroup = (sa->deburst * deinterleave / num_channels) << i; | 178 | clpchgroup = (sa->deburst * deinterleave / num_channels) << i; |
179 | bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; | 179 | bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; |
180 | 180 | ||
181 | bi->num_qgv_points = qi.num_points; | ||
182 | |||
181 | for (j = 0; j < qi.num_points; j++) { | 183 | for (j = 0; j < qi.num_points; j++) { |
182 | const struct intel_qgv_point *sp = &qi.points[j]; | 184 | const struct intel_qgv_point *sp = &qi.points[j]; |
183 | int ct, bw; | 185 | int ct, bw; |
@@ -195,7 +197,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv) | |||
195 | bi->deratedbw[j] = min(maxdebw, | 197 | bi->deratedbw[j] = min(maxdebw, |
196 | bw * 9 / 10); /* 90% */ | 198 | bw * 9 / 10); /* 90% */ |
197 | 199 | ||
198 | DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n", | 200 | DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n", |
199 | i, j, bi->num_planes, bi->deratedbw[j]); | 201 | i, j, bi->num_planes, bi->deratedbw[j]); |
200 | } | 202 | } |
201 | 203 | ||
@@ -211,14 +213,17 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, | |||
211 | { | 213 | { |
212 | int i; | 214 | int i; |
213 | 215 | ||
214 | /* Did we initialize the bw limits successfully? */ | ||
215 | if (dev_priv->max_bw[0].num_planes == 0) | ||
216 | return UINT_MAX; | ||
217 | |||
218 | for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { | 216 | for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { |
219 | const struct intel_bw_info *bi = | 217 | const struct intel_bw_info *bi = |
220 | &dev_priv->max_bw[i]; | 218 | &dev_priv->max_bw[i]; |
221 | 219 | ||
220 | /* | ||
221 | * Pcode will not expose all QGV points when | ||
222 | * SAGV is forced to off/min/med/max. | ||
223 | */ | ||
224 | if (qgv_point >= bi->num_qgv_points) | ||
225 | return UINT_MAX; | ||
226 | |||
222 | if (num_planes >= bi->num_planes) | 227 | if (num_planes >= bi->num_planes) |
223 | return bi->deratedbw[qgv_point]; | 228 | return bi->deratedbw[qgv_point]; |
224 | } | 229 | } |
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 8993ab283562..0d19bbd08122 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c | |||
@@ -2240,6 +2240,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) | |||
2240 | min_cdclk = max(2 * 96000, min_cdclk); | 2240 | min_cdclk = max(2 * 96000, min_cdclk); |
2241 | 2241 | ||
2242 | /* | 2242 | /* |
2243 | * "For DP audio configuration, cdclk frequency shall be set to | ||
2244 | * meet the following requirements: | ||
2245 | * DP Link Frequency(MHz) | Cdclk frequency(MHz) | ||
2246 | * 270 | 320 or higher | ||
2247 | * 162 | 200 or higher" | ||
2248 | */ | ||
2249 | if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && | ||
2250 | intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio) | ||
2251 | min_cdclk = max(crtc_state->port_clock, min_cdclk); | ||
2252 | |||
2253 | /* | ||
2243 | * On Valleyview some DSI panels lose (v|h)sync when the clock is lower | 2254 | * On Valleyview some DSI panels lose (v|h)sync when the clock is lower |
2244 | * than 320000KHz. | 2255 | * than 320000KHz. |
2245 | */ | 2256 | */ |
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 8592a7d422de..592b92782fab 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c | |||
@@ -1839,7 +1839,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) | |||
1839 | /* FIXME: assert CPU port conditions for SNB+ */ | 1839 | /* FIXME: assert CPU port conditions for SNB+ */ |
1840 | } | 1840 | } |
1841 | 1841 | ||
1842 | trace_intel_pipe_enable(dev_priv, pipe); | 1842 | trace_intel_pipe_enable(crtc); |
1843 | 1843 | ||
1844 | reg = PIPECONF(cpu_transcoder); | 1844 | reg = PIPECONF(cpu_transcoder); |
1845 | val = I915_READ(reg); | 1845 | val = I915_READ(reg); |
@@ -1880,7 +1880,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) | |||
1880 | */ | 1880 | */ |
1881 | assert_planes_disabled(crtc); | 1881 | assert_planes_disabled(crtc); |
1882 | 1882 | ||
1883 | trace_intel_pipe_disable(dev_priv, pipe); | 1883 | trace_intel_pipe_disable(crtc); |
1884 | 1884 | ||
1885 | reg = PIPECONF(cpu_transcoder); | 1885 | reg = PIPECONF(cpu_transcoder); |
1886 | val = I915_READ(reg); | 1886 | val = I915_READ(reg); |
@@ -12042,7 +12042,7 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state) | |||
12042 | case INTEL_OUTPUT_DDI: | 12042 | case INTEL_OUTPUT_DDI: |
12043 | if (WARN_ON(!HAS_DDI(to_i915(dev)))) | 12043 | if (WARN_ON(!HAS_DDI(to_i915(dev)))) |
12044 | break; | 12044 | break; |
12045 | /* else: fall through */ | 12045 | /* else, fall through */ |
12046 | case INTEL_OUTPUT_DP: | 12046 | case INTEL_OUTPUT_DP: |
12047 | case INTEL_OUTPUT_HDMI: | 12047 | case INTEL_OUTPUT_HDMI: |
12048 | case INTEL_OUTPUT_EDP: | 12048 | case INTEL_OUTPUT_EDP: |
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index c93ad512014c..2d1939db108f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c | |||
@@ -438,16 +438,23 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, | |||
438 | #define ICL_AUX_PW_TO_CH(pw_idx) \ | 438 | #define ICL_AUX_PW_TO_CH(pw_idx) \ |
439 | ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) | 439 | ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) |
440 | 440 | ||
441 | #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ | ||
442 | ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) | ||
443 | |||
441 | static void | 444 | static void |
442 | icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, | 445 | icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, |
443 | struct i915_power_well *power_well) | 446 | struct i915_power_well *power_well) |
444 | { | 447 | { |
445 | enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx); | 448 | int pw_idx = power_well->desc->hsw.idx; |
449 | bool is_tbt = power_well->desc->hsw.is_tc_tbt; | ||
450 | enum aux_ch aux_ch; | ||
446 | u32 val; | 451 | u32 val; |
447 | 452 | ||
453 | aux_ch = is_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) : | ||
454 | ICL_AUX_PW_TO_CH(pw_idx); | ||
448 | val = I915_READ(DP_AUX_CH_CTL(aux_ch)); | 455 | val = I915_READ(DP_AUX_CH_CTL(aux_ch)); |
449 | val &= ~DP_AUX_CH_CTL_TBT_IO; | 456 | val &= ~DP_AUX_CH_CTL_TBT_IO; |
450 | if (power_well->desc->hsw.is_tc_tbt) | 457 | if (is_tbt) |
451 | val |= DP_AUX_CH_CTL_TBT_IO; | 458 | val |= DP_AUX_CH_CTL_TBT_IO; |
452 | I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); | 459 | I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); |
453 | 460 | ||
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 4336df46fe78..d0fc34826771 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c | |||
@@ -231,6 +231,7 @@ static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp) | |||
231 | switch (lane_info) { | 231 | switch (lane_info) { |
232 | default: | 232 | default: |
233 | MISSING_CASE(lane_info); | 233 | MISSING_CASE(lane_info); |
234 | /* fall through */ | ||
234 | case 1: | 235 | case 1: |
235 | case 2: | 236 | case 2: |
236 | case 4: | 237 | case 4: |
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 2f4894e9a03d..5ddbe71ab423 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h | |||
@@ -478,13 +478,13 @@ struct psr_table { | |||
478 | /* TP wake up time in multiple of 100 */ | 478 | /* TP wake up time in multiple of 100 */ |
479 | u16 tp1_wakeup_time; | 479 | u16 tp1_wakeup_time; |
480 | u16 tp2_tp3_wakeup_time; | 480 | u16 tp2_tp3_wakeup_time; |
481 | |||
482 | /* PSR2 TP2/TP3 wakeup time for 16 panels */ | ||
483 | u32 psr2_tp2_tp3_wakeup_time; | ||
484 | } __packed; | 481 | } __packed; |
485 | 482 | ||
486 | struct bdb_psr { | 483 | struct bdb_psr { |
487 | struct psr_table psr_table[16]; | 484 | struct psr_table psr_table[16]; |
485 | |||
486 | /* PSR2 TP2/TP3 wakeup time for 16 panels */ | ||
487 | u32 psr2_tp2_tp3_wakeup_time; | ||
488 | } __packed; | 488 | } __packed; |
489 | 489 | ||
490 | /* | 490 | /* |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 391621ee3cbb..39a661927d8e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c | |||
@@ -341,7 +341,7 @@ err: | |||
341 | */ | 341 | */ |
342 | if (!i915_terminally_wedged(i915)) | 342 | if (!i915_terminally_wedged(i915)) |
343 | return VM_FAULT_SIGBUS; | 343 | return VM_FAULT_SIGBUS; |
344 | /* else: fall through */ | 344 | /* else, fall through */ |
345 | case -EAGAIN: | 345 | case -EAGAIN: |
346 | /* | 346 | /* |
347 | * EAGAIN means the gpu is hung and we'll wait for the error | 347 | * EAGAIN means the gpu is hung and we'll wait for the error |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index b36ad269f4ea..65eb430cedba 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c | |||
@@ -268,7 +268,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, | |||
268 | switch (type) { | 268 | switch (type) { |
269 | default: | 269 | default: |
270 | MISSING_CASE(type); | 270 | MISSING_CASE(type); |
271 | /* fallthrough to use PAGE_KERNEL anyway */ | 271 | /* fallthrough - to use PAGE_KERNEL anyway */ |
272 | case I915_MAP_WB: | 272 | case I915_MAP_WB: |
273 | pgprot = PAGE_KERNEL; | 273 | pgprot = PAGE_KERNEL; |
274 | break; | 274 | break; |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 05011d4a3b88..914b5d4112bb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c | |||
@@ -253,14 +253,15 @@ void i915_gem_resume(struct drm_i915_private *i915) | |||
253 | i915_gem_restore_gtt_mappings(i915); | 253 | i915_gem_restore_gtt_mappings(i915); |
254 | i915_gem_restore_fences(i915); | 254 | i915_gem_restore_fences(i915); |
255 | 255 | ||
256 | if (i915_gem_init_hw(i915)) | ||
257 | goto err_wedged; | ||
258 | |||
256 | /* | 259 | /* |
257 | * As we didn't flush the kernel context before suspend, we cannot | 260 | * As we didn't flush the kernel context before suspend, we cannot |
258 | * guarantee that the context image is complete. So let's just reset | 261 | * guarantee that the context image is complete. So let's just reset |
259 | * it and start again. | 262 | * it and start again. |
260 | */ | 263 | */ |
261 | intel_gt_resume(i915); | 264 | if (intel_gt_resume(i915)) |
262 | |||
263 | if (i915_gem_init_hw(i915)) | ||
264 | goto err_wedged; | 265 | goto err_wedged; |
265 | 266 | ||
266 | intel_uc_resume(i915); | 267 | intel_uc_resume(i915); |
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 528b61678334..2caa594322bc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c | |||
@@ -664,7 +664,15 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, | |||
664 | 664 | ||
665 | for_each_sgt_page(page, sgt_iter, pages) { | 665 | for_each_sgt_page(page, sgt_iter, pages) { |
666 | if (obj->mm.dirty) | 666 | if (obj->mm.dirty) |
667 | set_page_dirty(page); | 667 | /* |
668 | * As this may not be anonymous memory (e.g. shmem) | ||
669 | * but exist on a real mapping, we have to lock | ||
670 | * the page in order to dirty it -- holding | ||
671 | * the page reference is not sufficient to | ||
672 | * prevent the inode from being truncated. | ||
673 | * Play safe and take the lock. | ||
674 | */ | ||
675 | set_page_dirty_lock(page); | ||
668 | 676 | ||
669 | mark_page_accessed(page); | 677 | mark_page_accessed(page); |
670 | put_page(page); | 678 | put_page(page); |
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 2c454f227c2e..23120901c55f 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c | |||
@@ -126,6 +126,7 @@ static void intel_context_retire(struct i915_active *active) | |||
126 | if (ce->state) | 126 | if (ce->state) |
127 | __context_unpin_state(ce->state); | 127 | __context_unpin_state(ce->state); |
128 | 128 | ||
129 | intel_ring_unpin(ce->ring); | ||
129 | intel_context_put(ce); | 130 | intel_context_put(ce); |
130 | } | 131 | } |
131 | 132 | ||
@@ -160,27 +161,35 @@ int intel_context_active_acquire(struct intel_context *ce, unsigned long flags) | |||
160 | 161 | ||
161 | intel_context_get(ce); | 162 | intel_context_get(ce); |
162 | 163 | ||
164 | err = intel_ring_pin(ce->ring); | ||
165 | if (err) | ||
166 | goto err_put; | ||
167 | |||
163 | if (!ce->state) | 168 | if (!ce->state) |
164 | return 0; | 169 | return 0; |
165 | 170 | ||
166 | err = __context_pin_state(ce->state, flags); | 171 | err = __context_pin_state(ce->state, flags); |
167 | if (err) { | 172 | if (err) |
168 | i915_active_cancel(&ce->active); | 173 | goto err_ring; |
169 | intel_context_put(ce); | ||
170 | return err; | ||
171 | } | ||
172 | 174 | ||
173 | /* Preallocate tracking nodes */ | 175 | /* Preallocate tracking nodes */ |
174 | if (!i915_gem_context_is_kernel(ce->gem_context)) { | 176 | if (!i915_gem_context_is_kernel(ce->gem_context)) { |
175 | err = i915_active_acquire_preallocate_barrier(&ce->active, | 177 | err = i915_active_acquire_preallocate_barrier(&ce->active, |
176 | ce->engine); | 178 | ce->engine); |
177 | if (err) { | 179 | if (err) |
178 | i915_active_release(&ce->active); | 180 | goto err_state; |
179 | return err; | ||
180 | } | ||
181 | } | 181 | } |
182 | 182 | ||
183 | return 0; | 183 | return 0; |
184 | |||
185 | err_state: | ||
186 | __context_unpin_state(ce->state); | ||
187 | err_ring: | ||
188 | intel_ring_unpin(ce->ring); | ||
189 | err_put: | ||
190 | intel_context_put(ce); | ||
191 | i915_active_cancel(&ce->active); | ||
192 | return err; | ||
184 | } | 193 | } |
185 | 194 | ||
186 | void intel_context_active_release(struct intel_context *ce) | 195 | void intel_context_active_release(struct intel_context *ce) |
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 7fd33e81c2d9..f25632c9b292 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c | |||
@@ -969,9 +969,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) | |||
969 | u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) | 969 | u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) |
970 | { | 970 | { |
971 | const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; | 971 | const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; |
972 | unsigned int slice = fls(sseu->slice_mask) - 1; | ||
973 | unsigned int subslice; | ||
972 | u32 mcr_s_ss_select; | 974 | u32 mcr_s_ss_select; |
973 | u32 slice = fls(sseu->slice_mask); | 975 | |
974 | u32 subslice = fls(sseu->subslice_mask[slice]); | 976 | GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); |
977 | subslice = fls(sseu->subslice_mask[slice]); | ||
978 | GEM_BUG_ON(!subslice); | ||
979 | subslice--; | ||
975 | 980 | ||
976 | if (IS_GEN(dev_priv, 10)) | 981 | if (IS_GEN(dev_priv, 10)) |
977 | mcr_s_ss_select = GEN8_MCR_SLICE(slice) | | 982 | mcr_s_ss_select = GEN8_MCR_SLICE(slice) | |
@@ -1471,6 +1476,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, | |||
1471 | struct i915_gpu_error * const error = &engine->i915->gpu_error; | 1476 | struct i915_gpu_error * const error = &engine->i915->gpu_error; |
1472 | struct i915_request *rq; | 1477 | struct i915_request *rq; |
1473 | intel_wakeref_t wakeref; | 1478 | intel_wakeref_t wakeref; |
1479 | unsigned long flags; | ||
1474 | 1480 | ||
1475 | if (header) { | 1481 | if (header) { |
1476 | va_list ap; | 1482 | va_list ap; |
@@ -1490,10 +1496,9 @@ void intel_engine_dump(struct intel_engine_cs *engine, | |||
1490 | i915_reset_engine_count(error, engine), | 1496 | i915_reset_engine_count(error, engine), |
1491 | i915_reset_count(error)); | 1497 | i915_reset_count(error)); |
1492 | 1498 | ||
1493 | rcu_read_lock(); | ||
1494 | |||
1495 | drm_printf(m, "\tRequests:\n"); | 1499 | drm_printf(m, "\tRequests:\n"); |
1496 | 1500 | ||
1501 | spin_lock_irqsave(&engine->active.lock, flags); | ||
1497 | rq = intel_engine_find_active_request(engine); | 1502 | rq = intel_engine_find_active_request(engine); |
1498 | if (rq) { | 1503 | if (rq) { |
1499 | print_request(m, rq, "\t\tactive "); | 1504 | print_request(m, rq, "\t\tactive "); |
@@ -1513,8 +1518,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, | |||
1513 | 1518 | ||
1514 | print_request_ring(m, rq); | 1519 | print_request_ring(m, rq); |
1515 | } | 1520 | } |
1516 | 1521 | spin_unlock_irqrestore(&engine->active.lock, flags); | |
1517 | rcu_read_unlock(); | ||
1518 | 1522 | ||
1519 | wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm); | 1523 | wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm); |
1520 | if (wakeref) { | 1524 | if (wakeref) { |
@@ -1672,7 +1676,6 @@ struct i915_request * | |||
1672 | intel_engine_find_active_request(struct intel_engine_cs *engine) | 1676 | intel_engine_find_active_request(struct intel_engine_cs *engine) |
1673 | { | 1677 | { |
1674 | struct i915_request *request, *active = NULL; | 1678 | struct i915_request *request, *active = NULL; |
1675 | unsigned long flags; | ||
1676 | 1679 | ||
1677 | /* | 1680 | /* |
1678 | * We are called by the error capture, reset and to dump engine | 1681 | * We are called by the error capture, reset and to dump engine |
@@ -1685,7 +1688,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) | |||
1685 | * At all other times, we must assume the GPU is still running, but | 1688 | * At all other times, we must assume the GPU is still running, but |
1686 | * we only care about the snapshot of this moment. | 1689 | * we only care about the snapshot of this moment. |
1687 | */ | 1690 | */ |
1688 | spin_lock_irqsave(&engine->active.lock, flags); | 1691 | lockdep_assert_held(&engine->active.lock); |
1689 | list_for_each_entry(request, &engine->active.requests, sched.link) { | 1692 | list_for_each_entry(request, &engine->active.requests, sched.link) { |
1690 | if (i915_request_completed(request)) | 1693 | if (i915_request_completed(request)) |
1691 | continue; | 1694 | continue; |
@@ -1700,7 +1703,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) | |||
1700 | active = request; | 1703 | active = request; |
1701 | break; | 1704 | break; |
1702 | } | 1705 | } |
1703 | spin_unlock_irqrestore(&engine->active.lock, flags); | ||
1704 | 1706 | ||
1705 | return active; | 1707 | return active; |
1706 | } | 1708 | } |
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 2ce00d3dc42a..ae5b6baf6dff 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c | |||
@@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine) | |||
142 | { | 142 | { |
143 | intel_wakeref_init(&engine->wakeref); | 143 | intel_wakeref_init(&engine->wakeref); |
144 | } | 144 | } |
145 | |||
146 | int intel_engines_resume(struct drm_i915_private *i915) | ||
147 | { | ||
148 | struct intel_engine_cs *engine; | ||
149 | enum intel_engine_id id; | ||
150 | int err = 0; | ||
151 | |||
152 | intel_gt_pm_get(i915); | ||
153 | for_each_engine(engine, i915, id) { | ||
154 | intel_engine_pm_get(engine); | ||
155 | engine->serial++; /* kernel context lost */ | ||
156 | err = engine->resume(engine); | ||
157 | intel_engine_pm_put(engine); | ||
158 | if (err) { | ||
159 | dev_err(i915->drm.dev, | ||
160 | "Failed to restart %s (%d)\n", | ||
161 | engine->name, err); | ||
162 | break; | ||
163 | } | ||
164 | } | ||
165 | intel_gt_pm_put(i915); | ||
166 | |||
167 | return err; | ||
168 | } | ||
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h index b326cd993d60..a11c893f64c6 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h | |||
@@ -7,16 +7,22 @@ | |||
7 | #ifndef INTEL_ENGINE_PM_H | 7 | #ifndef INTEL_ENGINE_PM_H |
8 | #define INTEL_ENGINE_PM_H | 8 | #define INTEL_ENGINE_PM_H |
9 | 9 | ||
10 | #include "intel_engine_types.h" | ||
11 | #include "intel_wakeref.h" | ||
12 | |||
10 | struct drm_i915_private; | 13 | struct drm_i915_private; |
11 | struct intel_engine_cs; | ||
12 | 14 | ||
13 | void intel_engine_pm_get(struct intel_engine_cs *engine); | 15 | void intel_engine_pm_get(struct intel_engine_cs *engine); |
14 | void intel_engine_pm_put(struct intel_engine_cs *engine); | 16 | void intel_engine_pm_put(struct intel_engine_cs *engine); |
15 | 17 | ||
18 | static inline bool | ||
19 | intel_engine_pm_get_if_awake(struct intel_engine_cs *engine) | ||
20 | { | ||
21 | return intel_wakeref_get_if_active(&engine->wakeref); | ||
22 | } | ||
23 | |||
16 | void intel_engine_park(struct intel_engine_cs *engine); | 24 | void intel_engine_park(struct intel_engine_cs *engine); |
17 | 25 | ||
18 | void intel_engine_init__pm(struct intel_engine_cs *engine); | 26 | void intel_engine_init__pm(struct intel_engine_cs *engine); |
19 | 27 | ||
20 | int intel_engines_resume(struct drm_i915_private *i915); | ||
21 | |||
22 | #endif /* INTEL_ENGINE_PM_H */ | 28 | #endif /* INTEL_ENGINE_PM_H */ |
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 868b220214f8..43e975a26016 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h | |||
@@ -70,6 +70,18 @@ struct intel_ring { | |||
70 | struct list_head request_list; | 70 | struct list_head request_list; |
71 | struct list_head active_link; | 71 | struct list_head active_link; |
72 | 72 | ||
73 | /* | ||
74 | * As we have two types of rings, one global to the engine used | ||
75 | * by ringbuffer submission and those that are exclusive to a | ||
76 | * context used by execlists, we have to play safe and allow | ||
77 | * atomic updates to the pin_count. However, the actual pinning | ||
78 | * of the context is either done during initialisation for | ||
79 | * ringbuffer submission or serialised as part of the context | ||
80 | * pinning for execlists, and so we do not need a mutex ourselves | ||
81 | * to serialise intel_ring_pin/intel_ring_unpin. | ||
82 | */ | ||
83 | atomic_t pin_count; | ||
84 | |||
73 | u32 head; | 85 | u32 head; |
74 | u32 tail; | 86 | u32 tail; |
75 | u32 emit; | 87 | u32 emit; |
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 7b5967751762..9f8f7f54191f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c | |||
@@ -5,6 +5,7 @@ | |||
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include "i915_drv.h" | 7 | #include "i915_drv.h" |
8 | #include "intel_engine_pm.h" | ||
8 | #include "intel_gt_pm.h" | 9 | #include "intel_gt_pm.h" |
9 | #include "intel_pm.h" | 10 | #include "intel_pm.h" |
10 | #include "intel_wakeref.h" | 11 | #include "intel_wakeref.h" |
@@ -118,10 +119,11 @@ void intel_gt_sanitize(struct drm_i915_private *i915, bool force) | |||
118 | intel_engine_reset(engine, false); | 119 | intel_engine_reset(engine, false); |
119 | } | 120 | } |
120 | 121 | ||
121 | void intel_gt_resume(struct drm_i915_private *i915) | 122 | int intel_gt_resume(struct drm_i915_private *i915) |
122 | { | 123 | { |
123 | struct intel_engine_cs *engine; | 124 | struct intel_engine_cs *engine; |
124 | enum intel_engine_id id; | 125 | enum intel_engine_id id; |
126 | int err = 0; | ||
125 | 127 | ||
126 | /* | 128 | /* |
127 | * After resume, we may need to poke into the pinned kernel | 129 | * After resume, we may need to poke into the pinned kernel |
@@ -129,9 +131,12 @@ void intel_gt_resume(struct drm_i915_private *i915) | |||
129 | * Only the kernel contexts should remain pinned over suspend, | 131 | * Only the kernel contexts should remain pinned over suspend, |
130 | * allowing us to fixup the user contexts on their first pin. | 132 | * allowing us to fixup the user contexts on their first pin. |
131 | */ | 133 | */ |
134 | intel_gt_pm_get(i915); | ||
132 | for_each_engine(engine, i915, id) { | 135 | for_each_engine(engine, i915, id) { |
133 | struct intel_context *ce; | 136 | struct intel_context *ce; |
134 | 137 | ||
138 | intel_engine_pm_get(engine); | ||
139 | |||
135 | ce = engine->kernel_context; | 140 | ce = engine->kernel_context; |
136 | if (ce) | 141 | if (ce) |
137 | ce->ops->reset(ce); | 142 | ce->ops->reset(ce); |
@@ -139,5 +144,19 @@ void intel_gt_resume(struct drm_i915_private *i915) | |||
139 | ce = engine->preempt_context; | 144 | ce = engine->preempt_context; |
140 | if (ce) | 145 | if (ce) |
141 | ce->ops->reset(ce); | 146 | ce->ops->reset(ce); |
147 | |||
148 | engine->serial++; /* kernel context lost */ | ||
149 | err = engine->resume(engine); | ||
150 | |||
151 | intel_engine_pm_put(engine); | ||
152 | if (err) { | ||
153 | dev_err(i915->drm.dev, | ||
154 | "Failed to restart %s (%d)\n", | ||
155 | engine->name, err); | ||
156 | break; | ||
157 | } | ||
142 | } | 158 | } |
159 | intel_gt_pm_put(i915); | ||
160 | |||
161 | return err; | ||
143 | } | 162 | } |
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 7dd1130a19a4..53f342b20181 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h | |||
@@ -22,6 +22,6 @@ void intel_gt_pm_put(struct drm_i915_private *i915); | |||
22 | void intel_gt_pm_init(struct drm_i915_private *i915); | 22 | void intel_gt_pm_init(struct drm_i915_private *i915); |
23 | 23 | ||
24 | void intel_gt_sanitize(struct drm_i915_private *i915, bool force); | 24 | void intel_gt_sanitize(struct drm_i915_private *i915, bool force); |
25 | void intel_gt_resume(struct drm_i915_private *i915); | 25 | int intel_gt_resume(struct drm_i915_private *i915); |
26 | 26 | ||
27 | #endif /* INTEL_GT_PM_H */ | 27 | #endif /* INTEL_GT_PM_H */ |
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index b42b5f158295..82b7ace62d97 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c | |||
@@ -1414,6 +1414,7 @@ static void execlists_context_destroy(struct kref *kref) | |||
1414 | { | 1414 | { |
1415 | struct intel_context *ce = container_of(kref, typeof(*ce), ref); | 1415 | struct intel_context *ce = container_of(kref, typeof(*ce), ref); |
1416 | 1416 | ||
1417 | GEM_BUG_ON(!i915_active_is_idle(&ce->active)); | ||
1417 | GEM_BUG_ON(intel_context_is_pinned(ce)); | 1418 | GEM_BUG_ON(intel_context_is_pinned(ce)); |
1418 | 1419 | ||
1419 | if (ce->state) | 1420 | if (ce->state) |
@@ -1426,7 +1427,6 @@ static void execlists_context_unpin(struct intel_context *ce) | |||
1426 | { | 1427 | { |
1427 | i915_gem_context_unpin_hw_id(ce->gem_context); | 1428 | i915_gem_context_unpin_hw_id(ce->gem_context); |
1428 | i915_gem_object_unpin_map(ce->state->obj); | 1429 | i915_gem_object_unpin_map(ce->state->obj); |
1429 | intel_ring_unpin(ce->ring); | ||
1430 | } | 1430 | } |
1431 | 1431 | ||
1432 | static void | 1432 | static void |
@@ -1478,13 +1478,9 @@ __execlists_context_pin(struct intel_context *ce, | |||
1478 | goto unpin_active; | 1478 | goto unpin_active; |
1479 | } | 1479 | } |
1480 | 1480 | ||
1481 | ret = intel_ring_pin(ce->ring); | ||
1482 | if (ret) | ||
1483 | goto unpin_map; | ||
1484 | |||
1485 | ret = i915_gem_context_pin_hw_id(ce->gem_context); | 1481 | ret = i915_gem_context_pin_hw_id(ce->gem_context); |
1486 | if (ret) | 1482 | if (ret) |
1487 | goto unpin_ring; | 1483 | goto unpin_map; |
1488 | 1484 | ||
1489 | ce->lrc_desc = lrc_descriptor(ce, engine); | 1485 | ce->lrc_desc = lrc_descriptor(ce, engine); |
1490 | ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; | 1486 | ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; |
@@ -1492,8 +1488,6 @@ __execlists_context_pin(struct intel_context *ce, | |||
1492 | 1488 | ||
1493 | return 0; | 1489 | return 0; |
1494 | 1490 | ||
1495 | unpin_ring: | ||
1496 | intel_ring_unpin(ce->ring); | ||
1497 | unpin_map: | 1491 | unpin_map: |
1498 | i915_gem_object_unpin_map(ce->state->obj); | 1492 | i915_gem_object_unpin_map(ce->state->obj); |
1499 | unpin_active: | 1493 | unpin_active: |
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 4c478b38e420..3f907701ef4d 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c | |||
@@ -687,7 +687,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine) | |||
687 | * written to the powercontext is undefined and so we may lose | 687 | * written to the powercontext is undefined and so we may lose |
688 | * GPU state upon resume, i.e. fail to restart after a reset. | 688 | * GPU state upon resume, i.e. fail to restart after a reset. |
689 | */ | 689 | */ |
690 | intel_engine_pm_get(engine); | ||
691 | intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); | 690 | intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); |
692 | engine->reset.prepare(engine); | 691 | engine->reset.prepare(engine); |
693 | } | 692 | } |
@@ -718,16 +717,21 @@ static void revoke_mmaps(struct drm_i915_private *i915) | |||
718 | } | 717 | } |
719 | } | 718 | } |
720 | 719 | ||
721 | static void reset_prepare(struct drm_i915_private *i915) | 720 | static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915) |
722 | { | 721 | { |
723 | struct intel_engine_cs *engine; | 722 | struct intel_engine_cs *engine; |
723 | intel_engine_mask_t awake = 0; | ||
724 | enum intel_engine_id id; | 724 | enum intel_engine_id id; |
725 | 725 | ||
726 | intel_gt_pm_get(i915); | 726 | for_each_engine(engine, i915, id) { |
727 | for_each_engine(engine, i915, id) | 727 | if (intel_engine_pm_get_if_awake(engine)) |
728 | awake |= engine->mask; | ||
728 | reset_prepare_engine(engine); | 729 | reset_prepare_engine(engine); |
730 | } | ||
729 | 731 | ||
730 | intel_uc_reset_prepare(i915); | 732 | intel_uc_reset_prepare(i915); |
733 | |||
734 | return awake; | ||
731 | } | 735 | } |
732 | 736 | ||
733 | static void gt_revoke(struct drm_i915_private *i915) | 737 | static void gt_revoke(struct drm_i915_private *i915) |
@@ -761,20 +765,22 @@ static int gt_reset(struct drm_i915_private *i915, | |||
761 | static void reset_finish_engine(struct intel_engine_cs *engine) | 765 | static void reset_finish_engine(struct intel_engine_cs *engine) |
762 | { | 766 | { |
763 | engine->reset.finish(engine); | 767 | engine->reset.finish(engine); |
764 | intel_engine_pm_put(engine); | ||
765 | intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); | 768 | intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); |
769 | |||
770 | intel_engine_signal_breadcrumbs(engine); | ||
766 | } | 771 | } |
767 | 772 | ||
768 | static void reset_finish(struct drm_i915_private *i915) | 773 | static void reset_finish(struct drm_i915_private *i915, |
774 | intel_engine_mask_t awake) | ||
769 | { | 775 | { |
770 | struct intel_engine_cs *engine; | 776 | struct intel_engine_cs *engine; |
771 | enum intel_engine_id id; | 777 | enum intel_engine_id id; |
772 | 778 | ||
773 | for_each_engine(engine, i915, id) { | 779 | for_each_engine(engine, i915, id) { |
774 | reset_finish_engine(engine); | 780 | reset_finish_engine(engine); |
775 | intel_engine_signal_breadcrumbs(engine); | 781 | if (awake & engine->mask) |
782 | intel_engine_pm_put(engine); | ||
776 | } | 783 | } |
777 | intel_gt_pm_put(i915); | ||
778 | } | 784 | } |
779 | 785 | ||
780 | static void nop_submit_request(struct i915_request *request) | 786 | static void nop_submit_request(struct i915_request *request) |
@@ -798,6 +804,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) | |||
798 | { | 804 | { |
799 | struct i915_gpu_error *error = &i915->gpu_error; | 805 | struct i915_gpu_error *error = &i915->gpu_error; |
800 | struct intel_engine_cs *engine; | 806 | struct intel_engine_cs *engine; |
807 | intel_engine_mask_t awake; | ||
801 | enum intel_engine_id id; | 808 | enum intel_engine_id id; |
802 | 809 | ||
803 | if (test_bit(I915_WEDGED, &error->flags)) | 810 | if (test_bit(I915_WEDGED, &error->flags)) |
@@ -817,7 +824,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) | |||
817 | * rolling the global seqno forward (since this would complete requests | 824 | * rolling the global seqno forward (since this would complete requests |
818 | * for which we haven't set the fence error to EIO yet). | 825 | * for which we haven't set the fence error to EIO yet). |
819 | */ | 826 | */ |
820 | reset_prepare(i915); | 827 | awake = reset_prepare(i915); |
821 | 828 | ||
822 | /* Even if the GPU reset fails, it should still stop the engines */ | 829 | /* Even if the GPU reset fails, it should still stop the engines */ |
823 | if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) | 830 | if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) |
@@ -841,7 +848,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) | |||
841 | for_each_engine(engine, i915, id) | 848 | for_each_engine(engine, i915, id) |
842 | engine->cancel_requests(engine); | 849 | engine->cancel_requests(engine); |
843 | 850 | ||
844 | reset_finish(i915); | 851 | reset_finish(i915, awake); |
845 | 852 | ||
846 | GEM_TRACE("end\n"); | 853 | GEM_TRACE("end\n"); |
847 | } | 854 | } |
@@ -951,6 +958,21 @@ static int do_reset(struct drm_i915_private *i915, | |||
951 | return gt_reset(i915, stalled_mask); | 958 | return gt_reset(i915, stalled_mask); |
952 | } | 959 | } |
953 | 960 | ||
961 | static int resume(struct drm_i915_private *i915) | ||
962 | { | ||
963 | struct intel_engine_cs *engine; | ||
964 | enum intel_engine_id id; | ||
965 | int ret; | ||
966 | |||
967 | for_each_engine(engine, i915, id) { | ||
968 | ret = engine->resume(engine); | ||
969 | if (ret) | ||
970 | return ret; | ||
971 | } | ||
972 | |||
973 | return 0; | ||
974 | } | ||
975 | |||
954 | /** | 976 | /** |
955 | * i915_reset - reset chip after a hang | 977 | * i915_reset - reset chip after a hang |
956 | * @i915: #drm_i915_private to reset | 978 | * @i915: #drm_i915_private to reset |
@@ -973,6 +995,7 @@ void i915_reset(struct drm_i915_private *i915, | |||
973 | const char *reason) | 995 | const char *reason) |
974 | { | 996 | { |
975 | struct i915_gpu_error *error = &i915->gpu_error; | 997 | struct i915_gpu_error *error = &i915->gpu_error; |
998 | intel_engine_mask_t awake; | ||
976 | int ret; | 999 | int ret; |
977 | 1000 | ||
978 | GEM_TRACE("flags=%lx\n", error->flags); | 1001 | GEM_TRACE("flags=%lx\n", error->flags); |
@@ -989,7 +1012,7 @@ void i915_reset(struct drm_i915_private *i915, | |||
989 | dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); | 1012 | dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); |
990 | error->reset_count++; | 1013 | error->reset_count++; |
991 | 1014 | ||
992 | reset_prepare(i915); | 1015 | awake = reset_prepare(i915); |
993 | 1016 | ||
994 | if (!intel_has_gpu_reset(i915)) { | 1017 | if (!intel_has_gpu_reset(i915)) { |
995 | if (i915_modparams.reset) | 1018 | if (i915_modparams.reset) |
@@ -1024,13 +1047,17 @@ void i915_reset(struct drm_i915_private *i915, | |||
1024 | if (ret) { | 1047 | if (ret) { |
1025 | DRM_ERROR("Failed to initialise HW following reset (%d)\n", | 1048 | DRM_ERROR("Failed to initialise HW following reset (%d)\n", |
1026 | ret); | 1049 | ret); |
1027 | goto error; | 1050 | goto taint; |
1028 | } | 1051 | } |
1029 | 1052 | ||
1053 | ret = resume(i915); | ||
1054 | if (ret) | ||
1055 | goto taint; | ||
1056 | |||
1030 | i915_queue_hangcheck(i915); | 1057 | i915_queue_hangcheck(i915); |
1031 | 1058 | ||
1032 | finish: | 1059 | finish: |
1033 | reset_finish(i915); | 1060 | reset_finish(i915, awake); |
1034 | unlock: | 1061 | unlock: |
1035 | mutex_unlock(&error->wedge_mutex); | 1062 | mutex_unlock(&error->wedge_mutex); |
1036 | return; | 1063 | return; |
@@ -1081,7 +1108,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) | |||
1081 | GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); | 1108 | GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); |
1082 | GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); | 1109 | GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); |
1083 | 1110 | ||
1084 | if (!intel_wakeref_active(&engine->wakeref)) | 1111 | if (!intel_engine_pm_get_if_awake(engine)) |
1085 | return 0; | 1112 | return 0; |
1086 | 1113 | ||
1087 | reset_prepare_engine(engine); | 1114 | reset_prepare_engine(engine); |
@@ -1116,12 +1143,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) | |||
1116 | * process to program RING_MODE, HWSP and re-enable submission. | 1143 | * process to program RING_MODE, HWSP and re-enable submission. |
1117 | */ | 1144 | */ |
1118 | ret = engine->resume(engine); | 1145 | ret = engine->resume(engine); |
1119 | if (ret) | ||
1120 | goto out; | ||
1121 | 1146 | ||
1122 | out: | 1147 | out: |
1123 | intel_engine_cancel_stop_cs(engine); | 1148 | intel_engine_cancel_stop_cs(engine); |
1124 | reset_finish_engine(engine); | 1149 | reset_finish_engine(engine); |
1150 | intel_engine_pm_put(engine); | ||
1125 | return ret; | 1151 | return ret; |
1126 | } | 1152 | } |
1127 | 1153 | ||
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index c6023bc9452d..12010e798868 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c | |||
@@ -1149,16 +1149,16 @@ i915_emit_bb_start(struct i915_request *rq, | |||
1149 | int intel_ring_pin(struct intel_ring *ring) | 1149 | int intel_ring_pin(struct intel_ring *ring) |
1150 | { | 1150 | { |
1151 | struct i915_vma *vma = ring->vma; | 1151 | struct i915_vma *vma = ring->vma; |
1152 | enum i915_map_type map = i915_coherent_map_type(vma->vm->i915); | ||
1153 | unsigned int flags; | 1152 | unsigned int flags; |
1154 | void *addr; | 1153 | void *addr; |
1155 | int ret; | 1154 | int ret; |
1156 | 1155 | ||
1157 | GEM_BUG_ON(ring->vaddr); | 1156 | if (atomic_fetch_inc(&ring->pin_count)) |
1157 | return 0; | ||
1158 | 1158 | ||
1159 | ret = i915_timeline_pin(ring->timeline); | 1159 | ret = i915_timeline_pin(ring->timeline); |
1160 | if (ret) | 1160 | if (ret) |
1161 | return ret; | 1161 | goto err_unpin; |
1162 | 1162 | ||
1163 | flags = PIN_GLOBAL; | 1163 | flags = PIN_GLOBAL; |
1164 | 1164 | ||
@@ -1172,26 +1172,31 @@ int intel_ring_pin(struct intel_ring *ring) | |||
1172 | 1172 | ||
1173 | ret = i915_vma_pin(vma, 0, 0, flags); | 1173 | ret = i915_vma_pin(vma, 0, 0, flags); |
1174 | if (unlikely(ret)) | 1174 | if (unlikely(ret)) |
1175 | goto unpin_timeline; | 1175 | goto err_timeline; |
1176 | 1176 | ||
1177 | if (i915_vma_is_map_and_fenceable(vma)) | 1177 | if (i915_vma_is_map_and_fenceable(vma)) |
1178 | addr = (void __force *)i915_vma_pin_iomap(vma); | 1178 | addr = (void __force *)i915_vma_pin_iomap(vma); |
1179 | else | 1179 | else |
1180 | addr = i915_gem_object_pin_map(vma->obj, map); | 1180 | addr = i915_gem_object_pin_map(vma->obj, |
1181 | i915_coherent_map_type(vma->vm->i915)); | ||
1181 | if (IS_ERR(addr)) { | 1182 | if (IS_ERR(addr)) { |
1182 | ret = PTR_ERR(addr); | 1183 | ret = PTR_ERR(addr); |
1183 | goto unpin_ring; | 1184 | goto err_ring; |
1184 | } | 1185 | } |
1185 | 1186 | ||
1186 | vma->obj->pin_global++; | 1187 | vma->obj->pin_global++; |
1187 | 1188 | ||
1189 | GEM_BUG_ON(ring->vaddr); | ||
1188 | ring->vaddr = addr; | 1190 | ring->vaddr = addr; |
1191 | |||
1189 | return 0; | 1192 | return 0; |
1190 | 1193 | ||
1191 | unpin_ring: | 1194 | err_ring: |
1192 | i915_vma_unpin(vma); | 1195 | i915_vma_unpin(vma); |
1193 | unpin_timeline: | 1196 | err_timeline: |
1194 | i915_timeline_unpin(ring->timeline); | 1197 | i915_timeline_unpin(ring->timeline); |
1198 | err_unpin: | ||
1199 | atomic_dec(&ring->pin_count); | ||
1195 | return ret; | 1200 | return ret; |
1196 | } | 1201 | } |
1197 | 1202 | ||
@@ -1207,16 +1212,19 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail) | |||
1207 | 1212 | ||
1208 | void intel_ring_unpin(struct intel_ring *ring) | 1213 | void intel_ring_unpin(struct intel_ring *ring) |
1209 | { | 1214 | { |
1210 | GEM_BUG_ON(!ring->vma); | 1215 | if (!atomic_dec_and_test(&ring->pin_count)) |
1211 | GEM_BUG_ON(!ring->vaddr); | 1216 | return; |
1212 | 1217 | ||
1213 | /* Discard any unused bytes beyond that submitted to hw. */ | 1218 | /* Discard any unused bytes beyond that submitted to hw. */ |
1214 | intel_ring_reset(ring, ring->tail); | 1219 | intel_ring_reset(ring, ring->tail); |
1215 | 1220 | ||
1221 | GEM_BUG_ON(!ring->vma); | ||
1216 | if (i915_vma_is_map_and_fenceable(ring->vma)) | 1222 | if (i915_vma_is_map_and_fenceable(ring->vma)) |
1217 | i915_vma_unpin_iomap(ring->vma); | 1223 | i915_vma_unpin_iomap(ring->vma); |
1218 | else | 1224 | else |
1219 | i915_gem_object_unpin_map(ring->vma->obj); | 1225 | i915_gem_object_unpin_map(ring->vma->obj); |
1226 | |||
1227 | GEM_BUG_ON(!ring->vaddr); | ||
1220 | ring->vaddr = NULL; | 1228 | ring->vaddr = NULL; |
1221 | 1229 | ||
1222 | ring->vma->obj->pin_global--; | 1230 | ring->vma->obj->pin_global--; |
@@ -2081,10 +2089,11 @@ static void ring_destroy(struct intel_engine_cs *engine) | |||
2081 | WARN_ON(INTEL_GEN(dev_priv) > 2 && | 2089 | WARN_ON(INTEL_GEN(dev_priv) > 2 && |
2082 | (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); | 2090 | (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); |
2083 | 2091 | ||
2092 | intel_engine_cleanup_common(engine); | ||
2093 | |||
2084 | intel_ring_unpin(engine->buffer); | 2094 | intel_ring_unpin(engine->buffer); |
2085 | intel_ring_put(engine->buffer); | 2095 | intel_ring_put(engine->buffer); |
2086 | 2096 | ||
2087 | intel_engine_cleanup_common(engine); | ||
2088 | kfree(engine); | 2097 | kfree(engine); |
2089 | } | 2098 | } |
2090 | 2099 | ||
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 15e90fd2cfdc..98dfb086320f 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c | |||
@@ -1098,10 +1098,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine) | |||
1098 | 1098 | ||
1099 | static void cfl_whitelist_build(struct intel_engine_cs *engine) | 1099 | static void cfl_whitelist_build(struct intel_engine_cs *engine) |
1100 | { | 1100 | { |
1101 | struct i915_wa_list *w = &engine->whitelist; | ||
1102 | |||
1101 | if (engine->class != RENDER_CLASS) | 1103 | if (engine->class != RENDER_CLASS) |
1102 | return; | 1104 | return; |
1103 | 1105 | ||
1104 | gen9_whitelist_build(&engine->whitelist); | 1106 | gen9_whitelist_build(w); |
1107 | |||
1108 | /* | ||
1109 | * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml | ||
1110 | * | ||
1111 | * This covers 4 register which are next to one another : | ||
1112 | * - PS_INVOCATION_COUNT | ||
1113 | * - PS_INVOCATION_COUNT_UDW | ||
1114 | * - PS_DEPTH_COUNT | ||
1115 | * - PS_DEPTH_COUNT_UDW | ||
1116 | */ | ||
1117 | whitelist_reg_ext(w, PS_INVOCATION_COUNT, | ||
1118 | RING_FORCE_TO_NONPRIV_RD | | ||
1119 | RING_FORCE_TO_NONPRIV_RANGE_4); | ||
1105 | } | 1120 | } |
1106 | 1121 | ||
1107 | static void cnl_whitelist_build(struct intel_engine_cs *engine) | 1122 | static void cnl_whitelist_build(struct intel_engine_cs *engine) |
@@ -1129,6 +1144,19 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) | |||
1129 | 1144 | ||
1130 | /* WaEnableStateCacheRedirectToCS:icl */ | 1145 | /* WaEnableStateCacheRedirectToCS:icl */ |
1131 | whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); | 1146 | whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); |
1147 | |||
1148 | /* | ||
1149 | * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl | ||
1150 | * | ||
1151 | * This covers 4 register which are next to one another : | ||
1152 | * - PS_INVOCATION_COUNT | ||
1153 | * - PS_INVOCATION_COUNT_UDW | ||
1154 | * - PS_DEPTH_COUNT | ||
1155 | * - PS_DEPTH_COUNT_UDW | ||
1156 | */ | ||
1157 | whitelist_reg_ext(w, PS_INVOCATION_COUNT, | ||
1158 | RING_FORCE_TO_NONPRIV_RD | | ||
1159 | RING_FORCE_TO_NONPRIV_RANGE_4); | ||
1132 | break; | 1160 | break; |
1133 | 1161 | ||
1134 | case VIDEO_DECODE_CLASS: | 1162 | case VIDEO_DECODE_CLASS: |
@@ -1258,8 +1286,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) | |||
1258 | if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) | 1286 | if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) |
1259 | wa_write_or(wal, | 1287 | wa_write_or(wal, |
1260 | GEN7_SARCHKMD, | 1288 | GEN7_SARCHKMD, |
1261 | GEN7_DISABLE_DEMAND_PREFETCH | | 1289 | GEN7_DISABLE_DEMAND_PREFETCH); |
1262 | GEN7_DISABLE_SAMPLER_PREFETCH); | 1290 | |
1291 | /* Wa_1606682166:icl */ | ||
1292 | wa_write_or(wal, | ||
1293 | GEN7_SARCHKMD, | ||
1294 | GEN7_DISABLE_SAMPLER_PREFETCH); | ||
1263 | } | 1295 | } |
1264 | 1296 | ||
1265 | if (IS_GEN_RANGE(i915, 9, 11)) { | 1297 | if (IS_GEN_RANGE(i915, 9, 11)) { |
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 086801b51441..486c6953dcb1 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c | |||
@@ -66,6 +66,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) | |||
66 | ring->base.effective_size = sz; | 66 | ring->base.effective_size = sz; |
67 | ring->base.vaddr = (void *)(ring + 1); | 67 | ring->base.vaddr = (void *)(ring + 1); |
68 | ring->base.timeline = &ring->timeline; | 68 | ring->base.timeline = &ring->timeline; |
69 | atomic_set(&ring->base.pin_count, 1); | ||
69 | 70 | ||
70 | INIT_LIST_HEAD(&ring->base.request_list); | 71 | INIT_LIST_HEAD(&ring->base.request_list); |
71 | intel_ring_update_space(&ring->base); | 72 | intel_ring_update_space(&ring->base); |
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 89da9e7cc1ba..b5c590c9ccba 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c | |||
@@ -71,13 +71,16 @@ static int igt_atomic_reset(void *arg) | |||
71 | goto unlock; | 71 | goto unlock; |
72 | 72 | ||
73 | for (p = igt_atomic_phases; p->name; p++) { | 73 | for (p = igt_atomic_phases; p->name; p++) { |
74 | intel_engine_mask_t awake; | ||
75 | |||
74 | GEM_TRACE("intel_gpu_reset under %s\n", p->name); | 76 | GEM_TRACE("intel_gpu_reset under %s\n", p->name); |
75 | 77 | ||
78 | awake = reset_prepare(i915); | ||
76 | p->critical_section_begin(); | 79 | p->critical_section_begin(); |
77 | reset_prepare(i915); | 80 | reset_prepare(i915); |
78 | err = intel_gpu_reset(i915, ALL_ENGINES); | 81 | err = intel_gpu_reset(i915, ALL_ENGINES); |
79 | reset_finish(i915); | ||
80 | p->critical_section_end(); | 82 | p->critical_section_end(); |
83 | reset_finish(i915, awake); | ||
81 | 84 | ||
82 | if (err) { | 85 | if (err) { |
83 | pr_err("intel_gpu_reset failed under %s\n", p->name); | 86 | pr_err("intel_gpu_reset failed under %s\n", p->name); |
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 9eaf030affd0..44becd9538be 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c | |||
@@ -925,7 +925,12 @@ check_whitelisted_registers(struct intel_engine_cs *engine, | |||
925 | 925 | ||
926 | err = 0; | 926 | err = 0; |
927 | for (i = 0; i < engine->whitelist.count; i++) { | 927 | for (i = 0; i < engine->whitelist.count; i++) { |
928 | if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg)) | 928 | const struct i915_wa *wa = &engine->whitelist.list[i]; |
929 | |||
930 | if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD) | ||
931 | continue; | ||
932 | |||
933 | if (!fn(engine, a[i], b[i], wa->reg)) | ||
929 | err = -EINVAL; | 934 | err = -EINVAL; |
930 | } | 935 | } |
931 | 936 | ||
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 6ea88270c818..b09dc315e2da 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
@@ -2674,11 +2674,6 @@ static int scan_workload(struct intel_vgpu_workload *workload) | |||
2674 | gma_head == gma_tail) | 2674 | gma_head == gma_tail) |
2675 | return 0; | 2675 | return 0; |
2676 | 2676 | ||
2677 | if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { | ||
2678 | ret = -EINVAL; | ||
2679 | goto out; | ||
2680 | } | ||
2681 | |||
2682 | ret = ip_gma_set(&s, gma_head); | 2677 | ret = ip_gma_set(&s, gma_head); |
2683 | if (ret) | 2678 | if (ret) |
2684 | goto out; | 2679 | goto out; |
@@ -2724,11 +2719,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
2724 | s.workload = workload; | 2719 | s.workload = workload; |
2725 | s.is_ctx_wa = true; | 2720 | s.is_ctx_wa = true; |
2726 | 2721 | ||
2727 | if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { | ||
2728 | ret = -EINVAL; | ||
2729 | goto out; | ||
2730 | } | ||
2731 | |||
2732 | ret = ip_gma_set(&s, gma_head); | 2722 | ret = ip_gma_set(&s, gma_head); |
2733 | if (ret) | 2723 | if (ret) |
2734 | goto out; | 2724 | goto out; |
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 65e847392aea..8bb292b01271 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c | |||
@@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, | |||
245 | plane->hw_format = fmt; | 245 | plane->hw_format = fmt; |
246 | 246 | ||
247 | plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; | 247 | plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; |
248 | if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) | 248 | if (!vgpu_gmadr_is_valid(vgpu, plane->base)) |
249 | return -EINVAL; | 249 | return -EINVAL; |
250 | 250 | ||
251 | plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); | 251 | plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); |
@@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, | |||
368 | alpha_plane, alpha_force); | 368 | alpha_plane, alpha_force); |
369 | 369 | ||
370 | plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; | 370 | plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; |
371 | if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) | 371 | if (!vgpu_gmadr_is_valid(vgpu, plane->base)) |
372 | return -EINVAL; | 372 | return -EINVAL; |
373 | 373 | ||
374 | plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); | 374 | plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); |
@@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, | |||
472 | plane->drm_format = drm_format; | 472 | plane->drm_format = drm_format; |
473 | 473 | ||
474 | plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; | 474 | plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; |
475 | if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) | 475 | if (!vgpu_gmadr_is_valid(vgpu, plane->base)) |
476 | return -EINVAL; | 476 | return -EINVAL; |
477 | 477 | ||
478 | plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); | 478 | plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 53115bdae12b..4b04af569c05 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
@@ -2141,11 +2141,20 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, | |||
2141 | struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; | 2141 | struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; |
2142 | const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; | 2142 | const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; |
2143 | unsigned long index = off >> info->gtt_entry_size_shift; | 2143 | unsigned long index = off >> info->gtt_entry_size_shift; |
2144 | unsigned long gma; | ||
2144 | struct intel_gvt_gtt_entry e; | 2145 | struct intel_gvt_gtt_entry e; |
2145 | 2146 | ||
2146 | if (bytes != 4 && bytes != 8) | 2147 | if (bytes != 4 && bytes != 8) |
2147 | return -EINVAL; | 2148 | return -EINVAL; |
2148 | 2149 | ||
2150 | gma = index << I915_GTT_PAGE_SHIFT; | ||
2151 | if (!intel_gvt_ggtt_validate_range(vgpu, | ||
2152 | gma, 1 << I915_GTT_PAGE_SHIFT)) { | ||
2153 | gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma); | ||
2154 | memset(p_data, 0, bytes); | ||
2155 | return 0; | ||
2156 | } | ||
2157 | |||
2149 | ggtt_get_guest_entry(ggtt_mm, &e, index); | 2158 | ggtt_get_guest_entry(ggtt_mm, &e, index); |
2150 | memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), | 2159 | memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), |
2151 | bytes); | 2160 | bytes); |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 144301b778df..23aa3e50cbf8 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
@@ -1911,6 +1911,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, | |||
1911 | ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); | 1911 | ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); |
1912 | if (ret) | 1912 | if (ret) |
1913 | goto err_unmap; | 1913 | goto err_unmap; |
1914 | } else if (entry->size != size) { | ||
1915 | /* the same gfn with different size: unmap and re-map */ | ||
1916 | gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size); | ||
1917 | __gvt_cache_remove_entry(vgpu, entry); | ||
1918 | |||
1919 | ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); | ||
1920 | if (ret) | ||
1921 | goto err_unlock; | ||
1922 | |||
1923 | ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); | ||
1924 | if (ret) | ||
1925 | goto err_unmap; | ||
1914 | } else { | 1926 | } else { |
1915 | kref_get(&entry->ref); | 1927 | kref_get(&entry->ref); |
1916 | *dma_addr = entry->dma_addr; | 1928 | *dma_addr = entry->dma_addr; |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 2144fb46d0e1..9f3fd7d96a69 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
@@ -364,16 +364,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
364 | wa_ctx->indirect_ctx.shadow_va = NULL; | 364 | wa_ctx->indirect_ctx.shadow_va = NULL; |
365 | } | 365 | } |
366 | 366 | ||
367 | static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, | 367 | static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, |
368 | struct i915_gem_context *ctx) | 368 | struct i915_gem_context *ctx) |
369 | { | 369 | { |
370 | struct intel_vgpu_mm *mm = workload->shadow_mm; | 370 | struct intel_vgpu_mm *mm = workload->shadow_mm; |
371 | struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm); | 371 | struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm); |
372 | int i = 0; | 372 | int i = 0; |
373 | 373 | ||
374 | if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) | ||
375 | return -EINVAL; | ||
376 | |||
377 | if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { | 374 | if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { |
378 | px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0]; | 375 | px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0]; |
379 | } else { | 376 | } else { |
@@ -384,8 +381,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, | |||
384 | px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; | 381 | px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; |
385 | } | 382 | } |
386 | } | 383 | } |
387 | |||
388 | return 0; | ||
389 | } | 384 | } |
390 | 385 | ||
391 | static int | 386 | static int |
@@ -614,6 +609,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) | |||
614 | static int prepare_workload(struct intel_vgpu_workload *workload) | 609 | static int prepare_workload(struct intel_vgpu_workload *workload) |
615 | { | 610 | { |
616 | struct intel_vgpu *vgpu = workload->vgpu; | 611 | struct intel_vgpu *vgpu = workload->vgpu; |
612 | struct intel_vgpu_submission *s = &vgpu->submission; | ||
613 | int ring = workload->ring_id; | ||
617 | int ret = 0; | 614 | int ret = 0; |
618 | 615 | ||
619 | ret = intel_vgpu_pin_mm(workload->shadow_mm); | 616 | ret = intel_vgpu_pin_mm(workload->shadow_mm); |
@@ -622,8 +619,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload) | |||
622 | return ret; | 619 | return ret; |
623 | } | 620 | } |
624 | 621 | ||
622 | if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT || | ||
623 | !workload->shadow_mm->ppgtt_mm.shadowed) { | ||
624 | gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); | ||
625 | return -EINVAL; | ||
626 | } | ||
627 | |||
625 | update_shadow_pdps(workload); | 628 | update_shadow_pdps(workload); |
626 | 629 | ||
630 | set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context); | ||
631 | |||
627 | ret = intel_vgpu_sync_oos_pages(workload->vgpu); | 632 | ret = intel_vgpu_sync_oos_pages(workload->vgpu); |
628 | if (ret) { | 633 | if (ret) { |
629 | gvt_vgpu_err("fail to vgpu sync oos pages\n"); | 634 | gvt_vgpu_err("fail to vgpu sync oos pages\n"); |
@@ -674,7 +679,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) | |||
674 | { | 679 | { |
675 | struct intel_vgpu *vgpu = workload->vgpu; | 680 | struct intel_vgpu *vgpu = workload->vgpu; |
676 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | 681 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
677 | struct intel_vgpu_submission *s = &vgpu->submission; | ||
678 | struct i915_request *rq; | 682 | struct i915_request *rq; |
679 | int ring_id = workload->ring_id; | 683 | int ring_id = workload->ring_id; |
680 | int ret; | 684 | int ret; |
@@ -685,13 +689,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) | |||
685 | mutex_lock(&vgpu->vgpu_lock); | 689 | mutex_lock(&vgpu->vgpu_lock); |
686 | mutex_lock(&dev_priv->drm.struct_mutex); | 690 | mutex_lock(&dev_priv->drm.struct_mutex); |
687 | 691 | ||
688 | ret = set_context_ppgtt_from_shadow(workload, | ||
689 | s->shadow[ring_id]->gem_context); | ||
690 | if (ret < 0) { | ||
691 | gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); | ||
692 | goto err_req; | ||
693 | } | ||
694 | |||
695 | ret = intel_gvt_workload_req_alloc(workload); | 692 | ret = intel_gvt_workload_req_alloc(workload); |
696 | if (ret) | 693 | if (ret) |
697 | goto err_req; | 694 | goto err_req; |
@@ -990,6 +987,7 @@ static int workload_thread(void *priv) | |||
990 | int ret; | 987 | int ret; |
991 | bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9); | 988 | bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9); |
992 | DEFINE_WAIT_FUNC(wait, woken_wake_function); | 989 | DEFINE_WAIT_FUNC(wait, woken_wake_function); |
990 | struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm; | ||
993 | 991 | ||
994 | kfree(p); | 992 | kfree(p); |
995 | 993 | ||
@@ -1013,6 +1011,8 @@ static int workload_thread(void *priv) | |||
1013 | workload->ring_id, workload, | 1011 | workload->ring_id, workload, |
1014 | workload->vgpu->id); | 1012 | workload->vgpu->id); |
1015 | 1013 | ||
1014 | intel_runtime_pm_get(rpm); | ||
1015 | |||
1016 | gvt_dbg_sched("ring id %d will dispatch workload %p\n", | 1016 | gvt_dbg_sched("ring id %d will dispatch workload %p\n", |
1017 | workload->ring_id, workload); | 1017 | workload->ring_id, workload); |
1018 | 1018 | ||
@@ -1042,6 +1042,7 @@ complete: | |||
1042 | intel_uncore_forcewake_put(&gvt->dev_priv->uncore, | 1042 | intel_uncore_forcewake_put(&gvt->dev_priv->uncore, |
1043 | FORCEWAKE_ALL); | 1043 | FORCEWAKE_ALL); |
1044 | 1044 | ||
1045 | intel_runtime_pm_put_unchecked(rpm); | ||
1045 | if (ret && (vgpu_is_vm_unhealthy(ret))) | 1046 | if (ret && (vgpu_is_vm_unhealthy(ret))) |
1046 | enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); | 1047 | enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); |
1047 | } | 1048 | } |
@@ -1492,6 +1493,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, | |||
1492 | intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + | 1493 | intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + |
1493 | RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); | 1494 | RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); |
1494 | 1495 | ||
1496 | if (!intel_gvt_ggtt_validate_range(vgpu, start, | ||
1497 | _RING_CTL_BUF_SIZE(ctl))) { | ||
1498 | gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start); | ||
1499 | return ERR_PTR(-EINVAL); | ||
1500 | } | ||
1501 | |||
1495 | workload = alloc_workload(vgpu); | 1502 | workload = alloc_workload(vgpu); |
1496 | if (IS_ERR(workload)) | 1503 | if (IS_ERR(workload)) |
1497 | return workload; | 1504 | return workload; |
@@ -1516,9 +1523,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, | |||
1516 | workload->wa_ctx.indirect_ctx.size = | 1523 | workload->wa_ctx.indirect_ctx.size = |
1517 | (indirect_ctx & INDIRECT_CTX_SIZE_MASK) * | 1524 | (indirect_ctx & INDIRECT_CTX_SIZE_MASK) * |
1518 | CACHELINE_BYTES; | 1525 | CACHELINE_BYTES; |
1526 | |||
1527 | if (workload->wa_ctx.indirect_ctx.size != 0) { | ||
1528 | if (!intel_gvt_ggtt_validate_range(vgpu, | ||
1529 | workload->wa_ctx.indirect_ctx.guest_gma, | ||
1530 | workload->wa_ctx.indirect_ctx.size)) { | ||
1531 | kmem_cache_free(s->workloads, workload); | ||
1532 | gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n", | ||
1533 | workload->wa_ctx.indirect_ctx.guest_gma); | ||
1534 | return ERR_PTR(-EINVAL); | ||
1535 | } | ||
1536 | } | ||
1537 | |||
1519 | workload->wa_ctx.per_ctx.guest_gma = | 1538 | workload->wa_ctx.per_ctx.guest_gma = |
1520 | per_ctx & PER_CTX_ADDR_MASK; | 1539 | per_ctx & PER_CTX_ADDR_MASK; |
1521 | workload->wa_ctx.per_ctx.valid = per_ctx & 1; | 1540 | workload->wa_ctx.per_ctx.valid = per_ctx & 1; |
1541 | if (workload->wa_ctx.per_ctx.valid) { | ||
1542 | if (!intel_gvt_ggtt_validate_range(vgpu, | ||
1543 | workload->wa_ctx.per_ctx.guest_gma, | ||
1544 | CACHELINE_BYTES)) { | ||
1545 | kmem_cache_free(s->workloads, workload); | ||
1546 | gvt_vgpu_err("invalid per_ctx at: 0x%lx\n", | ||
1547 | workload->wa_ctx.per_ctx.guest_gma); | ||
1548 | return ERR_PTR(-EINVAL); | ||
1549 | } | ||
1550 | } | ||
1522 | } | 1551 | } |
1523 | 1552 | ||
1524 | gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", | 1553 | gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", |
diff --git a/drivers/gpu/drm/i915/gvt/trace_points.c b/drivers/gpu/drm/i915/gvt/trace_points.c index a3deed692b9c..fe552e877e09 100644 --- a/drivers/gpu/drm/i915/gvt/trace_points.c +++ b/drivers/gpu/drm/i915/gvt/trace_points.c | |||
@@ -28,8 +28,6 @@ | |||
28 | * | 28 | * |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include "trace.h" | ||
32 | |||
33 | #ifndef __CHECKER__ | 31 | #ifndef __CHECKER__ |
34 | #define CREATE_TRACE_POINTS | 32 | #define CREATE_TRACE_POINTS |
35 | #include "trace.h" | 33 | #include "trace.h" |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bc909ec5d9c3..fe7a6ec2c199 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1674,8 +1674,9 @@ struct drm_i915_private { | |||
1674 | } dram_info; | 1674 | } dram_info; |
1675 | 1675 | ||
1676 | struct intel_bw_info { | 1676 | struct intel_bw_info { |
1677 | int num_planes; | 1677 | unsigned int deratedbw[3]; /* for each QGV point */ |
1678 | int deratedbw[3]; | 1678 | u8 num_qgv_points; |
1679 | u8 num_planes; | ||
1679 | } max_bw[6]; | 1680 | } max_bw[6]; |
1680 | 1681 | ||
1681 | struct drm_private_obj bw_obj; | 1682 | struct drm_private_obj bw_obj; |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 190ad54fb072..8a659d3d7435 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -46,7 +46,6 @@ | |||
46 | #include "gem/i915_gem_ioctls.h" | 46 | #include "gem/i915_gem_ioctls.h" |
47 | #include "gem/i915_gem_pm.h" | 47 | #include "gem/i915_gem_pm.h" |
48 | #include "gem/i915_gemfs.h" | 48 | #include "gem/i915_gemfs.h" |
49 | #include "gt/intel_engine_pm.h" | ||
50 | #include "gt/intel_gt_pm.h" | 49 | #include "gt/intel_gt_pm.h" |
51 | #include "gt/intel_mocs.h" | 50 | #include "gt/intel_mocs.h" |
52 | #include "gt/intel_reset.h" | 51 | #include "gt/intel_reset.h" |
@@ -1307,21 +1306,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) | |||
1307 | 1306 | ||
1308 | intel_mocs_init_l3cc_table(dev_priv); | 1307 | intel_mocs_init_l3cc_table(dev_priv); |
1309 | 1308 | ||
1310 | /* Only when the HW is re-initialised, can we replay the requests */ | ||
1311 | ret = intel_engines_resume(dev_priv); | ||
1312 | if (ret) | ||
1313 | goto cleanup_uc; | ||
1314 | |||
1315 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); | 1309 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
1316 | 1310 | ||
1317 | intel_engines_set_scheduler_caps(dev_priv); | 1311 | intel_engines_set_scheduler_caps(dev_priv); |
1318 | return 0; | 1312 | return 0; |
1319 | 1313 | ||
1320 | cleanup_uc: | ||
1321 | intel_uc_fini_hw(dev_priv); | ||
1322 | out: | 1314 | out: |
1323 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); | 1315 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
1324 | |||
1325 | return ret; | 1316 | return ret; |
1326 | } | 1317 | } |
1327 | 1318 | ||
@@ -1580,6 +1571,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv) | |||
1580 | if (ret) | 1571 | if (ret) |
1581 | goto err_uc_init; | 1572 | goto err_uc_init; |
1582 | 1573 | ||
1574 | /* Only when the HW is re-initialised, can we replay the requests */ | ||
1575 | ret = intel_gt_resume(dev_priv); | ||
1576 | if (ret) | ||
1577 | goto err_init_hw; | ||
1578 | |||
1583 | /* | 1579 | /* |
1584 | * Despite its name intel_init_clock_gating applies both display | 1580 | * Despite its name intel_init_clock_gating applies both display |
1585 | * clock gating workarounds; GT mmio workarounds and the occasional | 1581 | * clock gating workarounds; GT mmio workarounds and the occasional |
@@ -1593,20 +1589,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv) | |||
1593 | 1589 | ||
1594 | ret = intel_engines_verify_workarounds(dev_priv); | 1590 | ret = intel_engines_verify_workarounds(dev_priv); |
1595 | if (ret) | 1591 | if (ret) |
1596 | goto err_init_hw; | 1592 | goto err_gt; |
1597 | 1593 | ||
1598 | ret = __intel_engines_record_defaults(dev_priv); | 1594 | ret = __intel_engines_record_defaults(dev_priv); |
1599 | if (ret) | 1595 | if (ret) |
1600 | goto err_init_hw; | 1596 | goto err_gt; |
1601 | 1597 | ||
1602 | if (i915_inject_load_failure()) { | 1598 | if (i915_inject_load_failure()) { |
1603 | ret = -ENODEV; | 1599 | ret = -ENODEV; |
1604 | goto err_init_hw; | 1600 | goto err_gt; |
1605 | } | 1601 | } |
1606 | 1602 | ||
1607 | if (i915_inject_load_failure()) { | 1603 | if (i915_inject_load_failure()) { |
1608 | ret = -EIO; | 1604 | ret = -EIO; |
1609 | goto err_init_hw; | 1605 | goto err_gt; |
1610 | } | 1606 | } |
1611 | 1607 | ||
1612 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); | 1608 | intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); |
@@ -1620,7 +1616,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) | |||
1620 | * HW as irrevisibly wedged, but keep enough state around that the | 1616 | * HW as irrevisibly wedged, but keep enough state around that the |
1621 | * driver doesn't explode during runtime. | 1617 | * driver doesn't explode during runtime. |
1622 | */ | 1618 | */ |
1623 | err_init_hw: | 1619 | err_gt: |
1624 | mutex_unlock(&dev_priv->drm.struct_mutex); | 1620 | mutex_unlock(&dev_priv->drm.struct_mutex); |
1625 | 1621 | ||
1626 | i915_gem_set_wedged(dev_priv); | 1622 | i915_gem_set_wedged(dev_priv); |
@@ -1630,6 +1626,7 @@ err_init_hw: | |||
1630 | i915_gem_drain_workqueue(dev_priv); | 1626 | i915_gem_drain_workqueue(dev_priv); |
1631 | 1627 | ||
1632 | mutex_lock(&dev_priv->drm.struct_mutex); | 1628 | mutex_lock(&dev_priv->drm.struct_mutex); |
1629 | err_init_hw: | ||
1633 | intel_uc_fini_hw(dev_priv); | 1630 | intel_uc_fini_hw(dev_priv); |
1634 | err_uc_init: | 1631 | err_uc_init: |
1635 | intel_uc_fini(dev_priv); | 1632 | intel_uc_fini(dev_priv); |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 8ab820145ea6..7015a97b1097 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -1444,9 +1444,11 @@ unwind_pd: | |||
1444 | spin_lock(&pdp->lock); | 1444 | spin_lock(&pdp->lock); |
1445 | if (atomic_dec_and_test(&pd->used)) { | 1445 | if (atomic_dec_and_test(&pd->used)) { |
1446 | gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); | 1446 | gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); |
1447 | pdp->entry[pdpe] = vm->scratch_pd; | ||
1447 | GEM_BUG_ON(!atomic_read(&pdp->used)); | 1448 | GEM_BUG_ON(!atomic_read(&pdp->used)); |
1448 | atomic_dec(&pdp->used); | 1449 | atomic_dec(&pdp->used); |
1449 | free_pd(vm, pd); | 1450 | GEM_BUG_ON(alloc); |
1451 | alloc = pd; /* defer the free to after the lock */ | ||
1450 | } | 1452 | } |
1451 | spin_unlock(&pdp->lock); | 1453 | spin_unlock(&pdp->lock); |
1452 | unwind: | 1454 | unwind: |
@@ -1515,7 +1517,9 @@ unwind_pdp: | |||
1515 | spin_lock(&pml4->lock); | 1517 | spin_lock(&pml4->lock); |
1516 | if (atomic_dec_and_test(&pdp->used)) { | 1518 | if (atomic_dec_and_test(&pdp->used)) { |
1517 | gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); | 1519 | gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); |
1518 | free_pd(vm, pdp); | 1520 | pml4->entry[pml4e] = vm->scratch_pdp; |
1521 | GEM_BUG_ON(alloc); | ||
1522 | alloc = pdp; /* defer the free until after the lock */ | ||
1519 | } | 1523 | } |
1520 | spin_unlock(&pml4->lock); | 1524 | spin_unlock(&pml4->lock); |
1521 | unwind: | 1525 | unwind: |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index b7e9fddef270..8bc76fcff70d 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -1194,6 +1194,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error, | |||
1194 | switch (engine->id) { | 1194 | switch (engine->id) { |
1195 | default: | 1195 | default: |
1196 | MISSING_CASE(engine->id); | 1196 | MISSING_CASE(engine->id); |
1197 | /* fall through */ | ||
1197 | case RCS0: | 1198 | case RCS0: |
1198 | mmio = RENDER_HWS_PGA_GEN7; | 1199 | mmio = RENDER_HWS_PGA_GEN7; |
1199 | break; | 1200 | break; |
@@ -1417,6 +1418,7 @@ static void gem_record_rings(struct i915_gpu_state *error) | |||
1417 | struct intel_engine_cs *engine = i915->engine[i]; | 1418 | struct intel_engine_cs *engine = i915->engine[i]; |
1418 | struct drm_i915_error_engine *ee = &error->engine[i]; | 1419 | struct drm_i915_error_engine *ee = &error->engine[i]; |
1419 | struct i915_request *request; | 1420 | struct i915_request *request; |
1421 | unsigned long flags; | ||
1420 | 1422 | ||
1421 | ee->engine_id = -1; | 1423 | ee->engine_id = -1; |
1422 | 1424 | ||
@@ -1428,10 +1430,11 @@ static void gem_record_rings(struct i915_gpu_state *error) | |||
1428 | error_record_engine_registers(error, engine, ee); | 1430 | error_record_engine_registers(error, engine, ee); |
1429 | error_record_engine_execlists(engine, ee); | 1431 | error_record_engine_execlists(engine, ee); |
1430 | 1432 | ||
1433 | spin_lock_irqsave(&engine->active.lock, flags); | ||
1431 | request = intel_engine_find_active_request(engine); | 1434 | request = intel_engine_find_active_request(engine); |
1432 | if (request) { | 1435 | if (request) { |
1433 | struct i915_gem_context *ctx = request->gem_context; | 1436 | struct i915_gem_context *ctx = request->gem_context; |
1434 | struct intel_ring *ring; | 1437 | struct intel_ring *ring = request->ring; |
1435 | 1438 | ||
1436 | ee->vm = ctx->vm ?: &ggtt->vm; | 1439 | ee->vm = ctx->vm ?: &ggtt->vm; |
1437 | 1440 | ||
@@ -1461,7 +1464,6 @@ static void gem_record_rings(struct i915_gpu_state *error) | |||
1461 | ee->rq_post = request->postfix; | 1464 | ee->rq_post = request->postfix; |
1462 | ee->rq_tail = request->tail; | 1465 | ee->rq_tail = request->tail; |
1463 | 1466 | ||
1464 | ring = request->ring; | ||
1465 | ee->cpu_ring_head = ring->head; | 1467 | ee->cpu_ring_head = ring->head; |
1466 | ee->cpu_ring_tail = ring->tail; | 1468 | ee->cpu_ring_tail = ring->tail; |
1467 | ee->ringbuffer = | 1469 | ee->ringbuffer = |
@@ -1469,6 +1471,7 @@ static void gem_record_rings(struct i915_gpu_state *error) | |||
1469 | 1471 | ||
1470 | engine_record_requests(engine, request, ee); | 1472 | engine_record_requests(engine, request, ee); |
1471 | } | 1473 | } |
1474 | spin_unlock_irqrestore(&engine->active.lock, flags); | ||
1472 | 1475 | ||
1473 | ee->hws_page = | 1476 | ee->hws_page = |
1474 | i915_error_object_create(i915, | 1477 | i915_error_object_create(i915, |
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index a700c5c3d167..5140017f9a39 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c | |||
@@ -1567,28 +1567,10 @@ static void config_oa_regs(struct drm_i915_private *dev_priv, | |||
1567 | } | 1567 | } |
1568 | } | 1568 | } |
1569 | 1569 | ||
1570 | static int hsw_enable_metric_set(struct i915_perf_stream *stream) | 1570 | static void delay_after_mux(void) |
1571 | { | 1571 | { |
1572 | struct drm_i915_private *dev_priv = stream->dev_priv; | 1572 | /* |
1573 | const struct i915_oa_config *oa_config = stream->oa_config; | 1573 | * It apparently takes a fairly long time for a new MUX |
1574 | |||
1575 | /* PRM: | ||
1576 | * | ||
1577 | * OA unit is using “crclk” for its functionality. When trunk | ||
1578 | * level clock gating takes place, OA clock would be gated, | ||
1579 | * unable to count the events from non-render clock domain. | ||
1580 | * Render clock gating must be disabled when OA is enabled to | ||
1581 | * count the events from non-render domain. Unit level clock | ||
1582 | * gating for RCS should also be disabled. | ||
1583 | */ | ||
1584 | I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & | ||
1585 | ~GEN7_DOP_CLOCK_GATE_ENABLE)); | ||
1586 | I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | | ||
1587 | GEN6_CSUNIT_CLOCK_GATE_DISABLE)); | ||
1588 | |||
1589 | config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); | ||
1590 | |||
1591 | /* It apparently takes a fairly long time for a new MUX | ||
1592 | * configuration to be be applied after these register writes. | 1574 | * configuration to be be applied after these register writes. |
1593 | * This delay duration was derived empirically based on the | 1575 | * This delay duration was derived empirically based on the |
1594 | * render_basic config but hopefully it covers the maximum | 1576 | * render_basic config but hopefully it covers the maximum |
@@ -1610,6 +1592,30 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream) | |||
1610 | * a delay at this location would mitigate any invalid reports. | 1592 | * a delay at this location would mitigate any invalid reports. |
1611 | */ | 1593 | */ |
1612 | usleep_range(15000, 20000); | 1594 | usleep_range(15000, 20000); |
1595 | } | ||
1596 | |||
1597 | static int hsw_enable_metric_set(struct i915_perf_stream *stream) | ||
1598 | { | ||
1599 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1600 | const struct i915_oa_config *oa_config = stream->oa_config; | ||
1601 | |||
1602 | /* | ||
1603 | * PRM: | ||
1604 | * | ||
1605 | * OA unit is using “crclk” for its functionality. When trunk | ||
1606 | * level clock gating takes place, OA clock would be gated, | ||
1607 | * unable to count the events from non-render clock domain. | ||
1608 | * Render clock gating must be disabled when OA is enabled to | ||
1609 | * count the events from non-render domain. Unit level clock | ||
1610 | * gating for RCS should also be disabled. | ||
1611 | */ | ||
1612 | I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & | ||
1613 | ~GEN7_DOP_CLOCK_GATE_ENABLE)); | ||
1614 | I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | | ||
1615 | GEN6_CSUNIT_CLOCK_GATE_DISABLE)); | ||
1616 | |||
1617 | config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); | ||
1618 | delay_after_mux(); | ||
1613 | 1619 | ||
1614 | config_oa_regs(dev_priv, oa_config->b_counter_regs, | 1620 | config_oa_regs(dev_priv, oa_config->b_counter_regs, |
1615 | oa_config->b_counter_regs_len); | 1621 | oa_config->b_counter_regs_len); |
@@ -1835,6 +1841,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream) | |||
1835 | return ret; | 1841 | return ret; |
1836 | 1842 | ||
1837 | config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); | 1843 | config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); |
1844 | delay_after_mux(); | ||
1838 | 1845 | ||
1839 | config_oa_regs(dev_priv, oa_config->b_counter_regs, | 1846 | config_oa_regs(dev_priv, oa_config->b_counter_regs, |
1840 | oa_config->b_counter_regs_len); | 1847 | oa_config->b_counter_regs_len); |
@@ -2515,6 +2522,9 @@ static int i915_perf_release(struct inode *inode, struct file *file) | |||
2515 | i915_perf_destroy_locked(stream); | 2522 | i915_perf_destroy_locked(stream); |
2516 | mutex_unlock(&dev_priv->perf.lock); | 2523 | mutex_unlock(&dev_priv->perf.lock); |
2517 | 2524 | ||
2525 | /* Release the reference the perf stream kept on the driver. */ | ||
2526 | drm_dev_put(&dev_priv->drm); | ||
2527 | |||
2518 | return 0; | 2528 | return 0; |
2519 | } | 2529 | } |
2520 | 2530 | ||
@@ -2650,6 +2660,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, | |||
2650 | if (!(param->flags & I915_PERF_FLAG_DISABLED)) | 2660 | if (!(param->flags & I915_PERF_FLAG_DISABLED)) |
2651 | i915_perf_enable_locked(stream); | 2661 | i915_perf_enable_locked(stream); |
2652 | 2662 | ||
2663 | /* Take a reference on the driver that will be kept with stream_fd | ||
2664 | * until its release. | ||
2665 | */ | ||
2666 | drm_dev_get(&dev_priv->drm); | ||
2667 | |||
2653 | return stream_fd; | 2668 | return stream_fd; |
2654 | 2669 | ||
2655 | err_open: | 2670 | err_open: |
@@ -3477,9 +3492,13 @@ void i915_perf_init(struct drm_i915_private *dev_priv) | |||
3477 | dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set; | 3492 | dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set; |
3478 | dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set; | 3493 | dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set; |
3479 | 3494 | ||
3480 | dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; | 3495 | if (IS_GEN(dev_priv, 10)) { |
3481 | dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; | 3496 | dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; |
3482 | 3497 | dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; | |
3498 | } else { | ||
3499 | dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124; | ||
3500 | dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e; | ||
3501 | } | ||
3483 | dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); | 3502 | dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); |
3484 | } | 3503 | } |
3485 | } | 3504 | } |
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index f4ce643b3bc3..cce426b23a24 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -21,24 +21,22 @@ | |||
21 | /* watermark/fifo updates */ | 21 | /* watermark/fifo updates */ |
22 | 22 | ||
23 | TRACE_EVENT(intel_pipe_enable, | 23 | TRACE_EVENT(intel_pipe_enable, |
24 | TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), | 24 | TP_PROTO(struct intel_crtc *crtc), |
25 | TP_ARGS(dev_priv, pipe), | 25 | TP_ARGS(crtc), |
26 | 26 | ||
27 | TP_STRUCT__entry( | 27 | TP_STRUCT__entry( |
28 | __array(u32, frame, 3) | 28 | __array(u32, frame, 3) |
29 | __array(u32, scanline, 3) | 29 | __array(u32, scanline, 3) |
30 | __field(enum pipe, pipe) | 30 | __field(enum pipe, pipe) |
31 | ), | 31 | ), |
32 | |||
33 | TP_fast_assign( | 32 | TP_fast_assign( |
34 | enum pipe _pipe; | 33 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
35 | for_each_pipe(dev_priv, _pipe) { | 34 | struct intel_crtc *it__; |
36 | __entry->frame[_pipe] = | 35 | for_each_intel_crtc(&dev_priv->drm, it__) { |
37 | dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); | 36 | __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); |
38 | __entry->scanline[_pipe] = | 37 | __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); |
39 | intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); | ||
40 | } | 38 | } |
41 | __entry->pipe = pipe; | 39 | __entry->pipe = crtc->pipe; |
42 | ), | 40 | ), |
43 | 41 | ||
44 | TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", | 42 | TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", |
@@ -49,8 +47,8 @@ TRACE_EVENT(intel_pipe_enable, | |||
49 | ); | 47 | ); |
50 | 48 | ||
51 | TRACE_EVENT(intel_pipe_disable, | 49 | TRACE_EVENT(intel_pipe_disable, |
52 | TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), | 50 | TP_PROTO(struct intel_crtc *crtc), |
53 | TP_ARGS(dev_priv, pipe), | 51 | TP_ARGS(crtc), |
54 | 52 | ||
55 | TP_STRUCT__entry( | 53 | TP_STRUCT__entry( |
56 | __array(u32, frame, 3) | 54 | __array(u32, frame, 3) |
@@ -59,14 +57,13 @@ TRACE_EVENT(intel_pipe_disable, | |||
59 | ), | 57 | ), |
60 | 58 | ||
61 | TP_fast_assign( | 59 | TP_fast_assign( |
62 | enum pipe _pipe; | 60 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
63 | for_each_pipe(dev_priv, _pipe) { | 61 | struct intel_crtc *it__; |
64 | __entry->frame[_pipe] = | 62 | for_each_intel_crtc(&dev_priv->drm, it__) { |
65 | dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); | 63 | __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); |
66 | __entry->scanline[_pipe] = | 64 | __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); |
67 | intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); | ||
68 | } | 65 | } |
69 | __entry->pipe = pipe; | 66 | __entry->pipe = crtc->pipe; |
70 | ), | 67 | ), |
71 | 68 | ||
72 | TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", | 69 | TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", |
@@ -89,8 +86,7 @@ TRACE_EVENT(intel_pipe_crc, | |||
89 | 86 | ||
90 | TP_fast_assign( | 87 | TP_fast_assign( |
91 | __entry->pipe = crtc->pipe; | 88 | __entry->pipe = crtc->pipe; |
92 | __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, | 89 | __entry->frame = intel_crtc_get_vblank_counter(crtc); |
93 | crtc->pipe); | ||
94 | __entry->scanline = intel_get_crtc_scanline(crtc); | 90 | __entry->scanline = intel_get_crtc_scanline(crtc); |
95 | memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); | 91 | memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); |
96 | ), | 92 | ), |
@@ -112,9 +108,10 @@ TRACE_EVENT(intel_cpu_fifo_underrun, | |||
112 | ), | 108 | ), |
113 | 109 | ||
114 | TP_fast_assign( | 110 | TP_fast_assign( |
111 | struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); | ||
115 | __entry->pipe = pipe; | 112 | __entry->pipe = pipe; |
116 | __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); | 113 | __entry->frame = intel_crtc_get_vblank_counter(crtc); |
117 | __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); | 114 | __entry->scanline = intel_get_crtc_scanline(crtc); |
118 | ), | 115 | ), |
119 | 116 | ||
120 | TP_printk("pipe %c, frame=%u, scanline=%u", | 117 | TP_printk("pipe %c, frame=%u, scanline=%u", |
@@ -134,9 +131,10 @@ TRACE_EVENT(intel_pch_fifo_underrun, | |||
134 | 131 | ||
135 | TP_fast_assign( | 132 | TP_fast_assign( |
136 | enum pipe pipe = pch_transcoder; | 133 | enum pipe pipe = pch_transcoder; |
134 | struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); | ||
137 | __entry->pipe = pipe; | 135 | __entry->pipe = pipe; |
138 | __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); | 136 | __entry->frame = intel_crtc_get_vblank_counter(crtc); |
139 | __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); | 137 | __entry->scanline = intel_get_crtc_scanline(crtc); |
140 | ), | 138 | ), |
141 | 139 | ||
142 | TP_printk("pch transcoder %c, frame=%u, scanline=%u", | 140 | TP_printk("pch transcoder %c, frame=%u, scanline=%u", |
@@ -156,12 +154,10 @@ TRACE_EVENT(intel_memory_cxsr, | |||
156 | ), | 154 | ), |
157 | 155 | ||
158 | TP_fast_assign( | 156 | TP_fast_assign( |
159 | enum pipe pipe; | 157 | struct intel_crtc *crtc; |
160 | for_each_pipe(dev_priv, pipe) { | 158 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
161 | __entry->frame[pipe] = | 159 | __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); |
162 | dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); | 160 | __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); |
163 | __entry->scanline[pipe] = | ||
164 | intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); | ||
165 | } | 161 | } |
166 | __entry->old = old; | 162 | __entry->old = old; |
167 | __entry->new = new; | 163 | __entry->new = new; |
@@ -198,8 +194,7 @@ TRACE_EVENT(g4x_wm, | |||
198 | 194 | ||
199 | TP_fast_assign( | 195 | TP_fast_assign( |
200 | __entry->pipe = crtc->pipe; | 196 | __entry->pipe = crtc->pipe; |
201 | __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, | 197 | __entry->frame = intel_crtc_get_vblank_counter(crtc); |
202 | crtc->pipe); | ||
203 | __entry->scanline = intel_get_crtc_scanline(crtc); | 198 | __entry->scanline = intel_get_crtc_scanline(crtc); |
204 | __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; | 199 | __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; |
205 | __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; | 200 | __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; |
@@ -243,8 +238,7 @@ TRACE_EVENT(vlv_wm, | |||
243 | 238 | ||
244 | TP_fast_assign( | 239 | TP_fast_assign( |
245 | __entry->pipe = crtc->pipe; | 240 | __entry->pipe = crtc->pipe; |
246 | __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, | 241 | __entry->frame = intel_crtc_get_vblank_counter(crtc); |
247 | crtc->pipe); | ||
248 | __entry->scanline = intel_get_crtc_scanline(crtc); | 242 | __entry->scanline = intel_get_crtc_scanline(crtc); |
249 | __entry->level = wm->level; | 243 | __entry->level = wm->level; |
250 | __entry->cxsr = wm->cxsr; | 244 | __entry->cxsr = wm->cxsr; |
@@ -278,8 +272,7 @@ TRACE_EVENT(vlv_fifo_size, | |||
278 | 272 | ||
279 | TP_fast_assign( | 273 | TP_fast_assign( |
280 | __entry->pipe = crtc->pipe; | 274 | __entry->pipe = crtc->pipe; |
281 | __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, | 275 | __entry->frame = intel_crtc_get_vblank_counter(crtc); |
282 | crtc->pipe); | ||
283 | __entry->scanline = intel_get_crtc_scanline(crtc); | 276 | __entry->scanline = intel_get_crtc_scanline(crtc); |
284 | __entry->sprite0_start = sprite0_start; | 277 | __entry->sprite0_start = sprite0_start; |
285 | __entry->sprite1_start = sprite1_start; | 278 | __entry->sprite1_start = sprite1_start; |
@@ -310,8 +303,7 @@ TRACE_EVENT(intel_update_plane, | |||
310 | TP_fast_assign( | 303 | TP_fast_assign( |
311 | __entry->pipe = crtc->pipe; | 304 | __entry->pipe = crtc->pipe; |
312 | __entry->name = plane->name; | 305 | __entry->name = plane->name; |
313 | __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, | 306 | __entry->frame = intel_crtc_get_vblank_counter(crtc); |
314 | crtc->pipe); | ||
315 | __entry->scanline = intel_get_crtc_scanline(crtc); | 307 | __entry->scanline = intel_get_crtc_scanline(crtc); |
316 | memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); | 308 | memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); |
317 | memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); | 309 | memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); |
@@ -338,8 +330,7 @@ TRACE_EVENT(intel_disable_plane, | |||
338 | TP_fast_assign( | 330 | TP_fast_assign( |
339 | __entry->pipe = crtc->pipe; | 331 | __entry->pipe = crtc->pipe; |
340 | __entry->name = plane->name; | 332 | __entry->name = plane->name; |
341 | __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, | 333 | __entry->frame = intel_crtc_get_vblank_counter(crtc); |
342 | crtc->pipe); | ||
343 | __entry->scanline = intel_get_crtc_scanline(crtc); | 334 | __entry->scanline = intel_get_crtc_scanline(crtc); |
344 | ), | 335 | ), |
345 | 336 | ||
@@ -364,8 +355,7 @@ TRACE_EVENT(i915_pipe_update_start, | |||
364 | 355 | ||
365 | TP_fast_assign( | 356 | TP_fast_assign( |
366 | __entry->pipe = crtc->pipe; | 357 | __entry->pipe = crtc->pipe; |
367 | __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, | 358 | __entry->frame = intel_crtc_get_vblank_counter(crtc); |
368 | crtc->pipe); | ||
369 | __entry->scanline = intel_get_crtc_scanline(crtc); | 359 | __entry->scanline = intel_get_crtc_scanline(crtc); |
370 | __entry->min = crtc->debug.min_vbl; | 360 | __entry->min = crtc->debug.min_vbl; |
371 | __entry->max = crtc->debug.max_vbl; | 361 | __entry->max = crtc->debug.max_vbl; |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 502c54428570..8d1aebc3e857 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -221,13 +221,11 @@ __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug, | |||
221 | static void | 221 | static void |
222 | dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug) | 222 | dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug) |
223 | { | 223 | { |
224 | struct drm_printer p; | 224 | if (debug->count) { |
225 | struct drm_printer p = drm_debug_printer("i915"); | ||
225 | 226 | ||
226 | if (!debug->count) | 227 | __print_intel_runtime_pm_wakeref(&p, debug); |
227 | return; | 228 | } |
228 | |||
229 | p = drm_debug_printer("i915"); | ||
230 | __print_intel_runtime_pm_wakeref(&p, debug); | ||
231 | 229 | ||
232 | kfree(debug->owners); | 230 | kfree(debug->owners); |
233 | } | 231 | } |
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 9cbb2ebf575b..38275310b196 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h | |||
@@ -66,6 +66,21 @@ intel_wakeref_get(struct intel_runtime_pm *rpm, | |||
66 | } | 66 | } |
67 | 67 | ||
68 | /** | 68 | /** |
69 | * intel_wakeref_get_if_in_use: Acquire the wakeref | ||
70 | * @wf: the wakeref | ||
71 | * | ||
72 | * Acquire a hold on the wakeref, but only if the wakeref is already | ||
73 | * active. | ||
74 | * | ||
75 | * Returns: true if the wakeref was acquired, false otherwise. | ||
76 | */ | ||
77 | static inline bool | ||
78 | intel_wakeref_get_if_active(struct intel_wakeref *wf) | ||
79 | { | ||
80 | return atomic_inc_not_zero(&wf->count); | ||
81 | } | ||
82 | |||
83 | /** | ||
69 | * intel_wakeref_put: Release the wakeref | 84 | * intel_wakeref_put: Release the wakeref |
70 | * @i915: the drm_i915_private device | 85 | * @i915: the drm_i915_private device |
71 | * @wf: the wakeref | 86 | * @wf: the wakeref |
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 1671db47aa57..e9c55d1d6c04 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c | |||
@@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit | |||
59 | case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: | 59 | case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: |
60 | if (priv->lastctx == ctx) | 60 | if (priv->lastctx == ctx) |
61 | break; | 61 | break; |
62 | /* fall-thru */ | ||
62 | case MSM_SUBMIT_CMD_BUF: | 63 | case MSM_SUBMIT_CMD_BUF: |
63 | /* copy commands into RB: */ | 64 | /* copy commands into RB: */ |
64 | obj = submit->bos[submit->cmd[i].idx].obj; | 65 | obj = submit->bos[submit->cmd[i].idx].obj; |
@@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | |||
149 | case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: | 150 | case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: |
150 | if (priv->lastctx == ctx) | 151 | if (priv->lastctx == ctx) |
151 | break; | 152 | break; |
153 | /* fall-thru */ | ||
152 | case MSM_SUBMIT_CMD_BUF: | 154 | case MSM_SUBMIT_CMD_BUF: |
153 | OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); | 155 | OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); |
154 | OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); | 156 | OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); |
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index be39cf01e51e..dc8ec2c94301 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c | |||
@@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | |||
115 | case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: | 115 | case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: |
116 | if (priv->lastctx == ctx) | 116 | if (priv->lastctx == ctx) |
117 | break; | 117 | break; |
118 | /* fall-thru */ | ||
118 | case MSM_SUBMIT_CMD_BUF: | 119 | case MSM_SUBMIT_CMD_BUF: |
119 | OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); | 120 | OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); |
120 | OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); | 121 | OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 9acbbc0f3232..048c8be426f3 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
@@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, | |||
428 | /* ignore if there has not been a ctx switch: */ | 428 | /* ignore if there has not been a ctx switch: */ |
429 | if (priv->lastctx == ctx) | 429 | if (priv->lastctx == ctx) |
430 | break; | 430 | break; |
431 | /* fall-thru */ | ||
431 | case MSM_SUBMIT_CMD_BUF: | 432 | case MSM_SUBMIT_CMD_BUF: |
432 | OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ? | 433 | OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ? |
433 | CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); | 434 | CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 0e2f74163a16..0aa8a12c9952 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | |||
@@ -2221,8 +2221,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, | |||
2221 | if (ret) | 2221 | if (ret) |
2222 | goto fail; | 2222 | goto fail; |
2223 | 2223 | ||
2224 | spin_lock_init(&dpu_enc->enc_spinlock); | ||
2225 | |||
2226 | atomic_set(&dpu_enc->frame_done_timeout_ms, 0); | 2224 | atomic_set(&dpu_enc->frame_done_timeout_ms, 0); |
2227 | timer_setup(&dpu_enc->frame_done_timer, | 2225 | timer_setup(&dpu_enc->frame_done_timer, |
2228 | dpu_encoder_frame_done_timeout, 0); | 2226 | dpu_encoder_frame_done_timeout, 0); |
@@ -2276,6 +2274,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, | |||
2276 | 2274 | ||
2277 | drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); | 2275 | drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); |
2278 | 2276 | ||
2277 | spin_lock_init(&dpu_enc->enc_spinlock); | ||
2279 | dpu_enc->enabled = false; | 2278 | dpu_enc->enabled = false; |
2280 | 2279 | ||
2281 | return &dpu_enc->base; | 2280 | return &dpu_enc->base; |
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index ff14555372d0..78d5fa230c16 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | |||
@@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc, | |||
439 | mdp5_crtc->enabled = false; | 439 | mdp5_crtc->enabled = false; |
440 | } | 440 | } |
441 | 441 | ||
442 | static void mdp5_crtc_vblank_on(struct drm_crtc *crtc) | ||
443 | { | ||
444 | struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); | ||
445 | struct mdp5_interface *intf = mdp5_cstate->pipeline.intf; | ||
446 | u32 count; | ||
447 | |||
448 | count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff; | ||
449 | drm_crtc_set_max_vblank_count(crtc, count); | ||
450 | |||
451 | drm_crtc_vblank_on(crtc); | ||
452 | } | ||
453 | |||
442 | static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, | 454 | static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, |
443 | struct drm_crtc_state *old_state) | 455 | struct drm_crtc_state *old_state) |
444 | { | 456 | { |
@@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, | |||
475 | } | 487 | } |
476 | 488 | ||
477 | /* Restore vblank irq handling after power is enabled */ | 489 | /* Restore vblank irq handling after power is enabled */ |
478 | drm_crtc_vblank_on(crtc); | 490 | mdp5_crtc_vblank_on(crtc); |
479 | 491 | ||
480 | mdp5_crtc_mode_set_nofb(crtc); | 492 | mdp5_crtc_mode_set_nofb(crtc); |
481 | 493 | ||
@@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc) | |||
1028 | mdp5_crtc_destroy_state(crtc, crtc->state); | 1040 | mdp5_crtc_destroy_state(crtc, crtc->state); |
1029 | 1041 | ||
1030 | __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base); | 1042 | __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base); |
1043 | |||
1044 | drm_crtc_vblank_reset(crtc); | ||
1031 | } | 1045 | } |
1032 | 1046 | ||
1033 | static const struct drm_crtc_funcs mdp5_crtc_funcs = { | 1047 | static const struct drm_crtc_funcs mdp5_crtc_funcs = { |
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 4a60f5fca6b0..fec6ef1ae3b9 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | |||
@@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) | |||
740 | dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; | 740 | dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; |
741 | dev->driver->get_scanout_position = mdp5_get_scanoutpos; | 741 | dev->driver->get_scanout_position = mdp5_get_scanoutpos; |
742 | dev->driver->get_vblank_counter = mdp5_get_vblank_counter; | 742 | dev->driver->get_vblank_counter = mdp5_get_vblank_counter; |
743 | dev->max_vblank_count = 0xffffffff; | 743 | dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */ |
744 | dev->vblank_disable_immediate = true; | 744 | dev->vblank_disable_immediate = true; |
745 | 745 | ||
746 | return kms; | 746 | return kms; |
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index ab64ab470de7..c356f5ccf253 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
@@ -619,7 +619,7 @@ static int context_init(struct drm_device *dev, struct drm_file *file) | |||
619 | 619 | ||
620 | msm_submitqueue_init(dev, ctx); | 620 | msm_submitqueue_init(dev, ctx); |
621 | 621 | ||
622 | ctx->aspace = priv->gpu->aspace; | 622 | ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL; |
623 | file->driver_priv = ctx; | 623 | file->driver_priv = ctx; |
624 | 624 | ||
625 | return 0; | 625 | return 0; |
@@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev, | |||
1279 | if (!np) | 1279 | if (!np) |
1280 | return 0; | 1280 | return 0; |
1281 | 1281 | ||
1282 | drm_of_component_match_add(dev, matchptr, compare_of, np); | 1282 | if (of_device_is_available(np)) |
1283 | drm_of_component_match_add(dev, matchptr, compare_of, np); | ||
1283 | 1284 | ||
1284 | of_node_put(np); | 1285 | of_node_put(np); |
1285 | 1286 | ||
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 8b78554cfde3..8cf6362e64bf 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj) | |||
32 | return !msm_obj->vram_node; | 32 | return !msm_obj->vram_node; |
33 | } | 33 | } |
34 | 34 | ||
35 | /* | ||
36 | * Cache sync.. this is a bit over-complicated, to fit dma-mapping | ||
37 | * API. Really GPU cache is out of scope here (handled on cmdstream) | ||
38 | * and all we need to do is invalidate newly allocated pages before | ||
39 | * mapping to CPU as uncached/writecombine. | ||
40 | * | ||
41 | * On top of this, we have the added headache, that depending on | ||
42 | * display generation, the display's iommu may be wired up to either | ||
43 | * the toplevel drm device (mdss), or to the mdp sub-node, meaning | ||
44 | * that here we either have dma-direct or iommu ops. | ||
45 | * | ||
46 | * Let this be a cautionary tail of abstraction gone wrong. | ||
47 | */ | ||
48 | |||
49 | static void sync_for_device(struct msm_gem_object *msm_obj) | ||
50 | { | ||
51 | struct device *dev = msm_obj->base.dev->dev; | ||
52 | |||
53 | if (get_dma_ops(dev)) { | ||
54 | dma_sync_sg_for_device(dev, msm_obj->sgt->sgl, | ||
55 | msm_obj->sgt->nents, DMA_BIDIRECTIONAL); | ||
56 | } else { | ||
57 | dma_map_sg(dev, msm_obj->sgt->sgl, | ||
58 | msm_obj->sgt->nents, DMA_BIDIRECTIONAL); | ||
59 | } | ||
60 | } | ||
61 | |||
62 | static void sync_for_cpu(struct msm_gem_object *msm_obj) | ||
63 | { | ||
64 | struct device *dev = msm_obj->base.dev->dev; | ||
65 | |||
66 | if (get_dma_ops(dev)) { | ||
67 | dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl, | ||
68 | msm_obj->sgt->nents, DMA_BIDIRECTIONAL); | ||
69 | } else { | ||
70 | dma_unmap_sg(dev, msm_obj->sgt->sgl, | ||
71 | msm_obj->sgt->nents, DMA_BIDIRECTIONAL); | ||
72 | } | ||
73 | } | ||
74 | |||
35 | /* allocate pages from VRAM carveout, used when no IOMMU: */ | 75 | /* allocate pages from VRAM carveout, used when no IOMMU: */ |
36 | static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) | 76 | static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) |
37 | { | 77 | { |
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj) | |||
97 | * because display controller, GPU, etc. are not coherent: | 137 | * because display controller, GPU, etc. are not coherent: |
98 | */ | 138 | */ |
99 | if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) | 139 | if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) |
100 | dma_map_sg(dev->dev, msm_obj->sgt->sgl, | 140 | sync_for_device(msm_obj); |
101 | msm_obj->sgt->nents, DMA_BIDIRECTIONAL); | ||
102 | } | 141 | } |
103 | 142 | ||
104 | return msm_obj->pages; | 143 | return msm_obj->pages; |
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj) | |||
127 | * GPU, etc. are not coherent: | 166 | * GPU, etc. are not coherent: |
128 | */ | 167 | */ |
129 | if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) | 168 | if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) |
130 | dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, | 169 | sync_for_cpu(msm_obj); |
131 | msm_obj->sgt->nents, | ||
132 | DMA_BIDIRECTIONAL); | ||
133 | 170 | ||
134 | sg_free_table(msm_obj->sgt); | 171 | sg_free_table(msm_obj->sgt); |
135 | kfree(msm_obj->sgt); | 172 | kfree(msm_obj->sgt); |
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 8497768f1b41..126703816794 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c | |||
@@ -780,7 +780,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, | |||
780 | drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, | 780 | drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, |
781 | connector->display_info.bpc * 3); | 781 | connector->display_info.bpc * 3); |
782 | 782 | ||
783 | if (drm_atomic_crtc_needs_modeset(crtc_state)) { | 783 | if (crtc_state->mode_changed) { |
784 | slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, | 784 | slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, |
785 | mstc->port, | 785 | mstc->port, |
786 | asyh->dp.pbn); | 786 | asyh->dp.pbn); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 8c92374afcf2..a835cebb6d90 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c | |||
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm, | |||
475 | fault->inst, fault->addr, fault->access); | 475 | fault->inst, fault->addr, fault->access); |
476 | } | 476 | } |
477 | 477 | ||
478 | static inline bool | ||
479 | nouveau_range_done(struct hmm_range *range) | ||
480 | { | ||
481 | bool ret = hmm_range_valid(range); | ||
482 | |||
483 | hmm_range_unregister(range); | ||
484 | return ret; | ||
485 | } | ||
486 | |||
487 | static int | ||
488 | nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range) | ||
489 | { | ||
490 | long ret; | ||
491 | |||
492 | range->default_flags = 0; | ||
493 | range->pfn_flags_mask = -1UL; | ||
494 | |||
495 | ret = hmm_range_register(range, mirror, | ||
496 | range->start, range->end, | ||
497 | PAGE_SHIFT); | ||
498 | if (ret) { | ||
499 | up_read(&range->vma->vm_mm->mmap_sem); | ||
500 | return (int)ret; | ||
501 | } | ||
502 | |||
503 | if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { | ||
504 | up_read(&range->vma->vm_mm->mmap_sem); | ||
505 | return -EAGAIN; | ||
506 | } | ||
507 | |||
508 | ret = hmm_range_fault(range, true); | ||
509 | if (ret <= 0) { | ||
510 | if (ret == 0) | ||
511 | ret = -EBUSY; | ||
512 | up_read(&range->vma->vm_mm->mmap_sem); | ||
513 | hmm_range_unregister(range); | ||
514 | return ret; | ||
515 | } | ||
516 | return 0; | ||
517 | } | ||
518 | |||
478 | static int | 519 | static int |
479 | nouveau_svm_fault(struct nvif_notify *notify) | 520 | nouveau_svm_fault(struct nvif_notify *notify) |
480 | { | 521 | { |
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify) | |||
649 | range.values = nouveau_svm_pfn_values; | 690 | range.values = nouveau_svm_pfn_values; |
650 | range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT; | 691 | range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT; |
651 | again: | 692 | again: |
652 | ret = hmm_vma_fault(&svmm->mirror, &range, true); | 693 | ret = nouveau_range_fault(&svmm->mirror, &range); |
653 | if (ret == 0) { | 694 | if (ret == 0) { |
654 | mutex_lock(&svmm->mutex); | 695 | mutex_lock(&svmm->mutex); |
655 | if (!hmm_vma_range_done(&range)) { | 696 | if (!nouveau_range_done(&range)) { |
656 | mutex_unlock(&svmm->mutex); | 697 | mutex_unlock(&svmm->mutex); |
657 | goto again; | 698 | goto again; |
658 | } | 699 | } |
@@ -666,8 +707,8 @@ again: | |||
666 | NULL); | 707 | NULL); |
667 | svmm->vmm->vmm.object.client->super = false; | 708 | svmm->vmm->vmm.object.client->super = false; |
668 | mutex_unlock(&svmm->mutex); | 709 | mutex_unlock(&svmm->mutex); |
710 | up_read(&svmm->mm->mmap_sem); | ||
669 | } | 711 | } |
670 | up_read(&svmm->mm->mmap_sem); | ||
671 | 712 | ||
672 | /* Cancel any faults in the window whose pages didn't manage | 713 | /* Cancel any faults in the window whose pages didn't manage |
673 | * to keep their valid bit, or stay writeable when required. | 714 | * to keep their valid bit, or stay writeable when required. |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index d594f7520b7b..7d78e6deac89 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | |||
@@ -285,9 +285,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool, | |||
285 | 285 | ||
286 | static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) | 286 | static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) |
287 | { | 287 | { |
288 | unsigned long attrs = 0; | ||
288 | dma_addr_t dma = d_page->dma; | 289 | dma_addr_t dma = d_page->dma; |
289 | d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL; | 290 | d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL; |
290 | dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma); | 291 | if (pool->type & IS_HUGE) |
292 | attrs = DMA_ATTR_NO_WARN; | ||
293 | |||
294 | dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs); | ||
291 | 295 | ||
292 | kfree(d_page); | 296 | kfree(d_page); |
293 | d_page = NULL; | 297 | d_page = NULL; |
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c index 98bf694626f7..3a8c4a5971f7 100644 --- a/drivers/hid/hid-a4tech.c +++ b/drivers/hid/hid-a4tech.c | |||
@@ -23,12 +23,36 @@ | |||
23 | #define A4_2WHEEL_MOUSE_HACK_7 0x01 | 23 | #define A4_2WHEEL_MOUSE_HACK_7 0x01 |
24 | #define A4_2WHEEL_MOUSE_HACK_B8 0x02 | 24 | #define A4_2WHEEL_MOUSE_HACK_B8 0x02 |
25 | 25 | ||
26 | #define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8) | ||
27 | |||
26 | struct a4tech_sc { | 28 | struct a4tech_sc { |
27 | unsigned long quirks; | 29 | unsigned long quirks; |
28 | unsigned int hw_wheel; | 30 | unsigned int hw_wheel; |
29 | __s32 delayed_value; | 31 | __s32 delayed_value; |
30 | }; | 32 | }; |
31 | 33 | ||
34 | static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi, | ||
35 | struct hid_field *field, struct hid_usage *usage, | ||
36 | unsigned long **bit, int *max) | ||
37 | { | ||
38 | struct a4tech_sc *a4 = hid_get_drvdata(hdev); | ||
39 | |||
40 | if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 && | ||
41 | usage->hid == A4_WHEEL_ORIENTATION) { | ||
42 | /* | ||
43 | * We do not want to have this usage mapped to anything as it's | ||
44 | * nonstandard and doesn't really behave like an HID report. | ||
45 | * It's only selecting the orientation (vertical/horizontal) of | ||
46 | * the previous mouse wheel report. The input_events will be | ||
47 | * generated once both reports are recorded in a4_event(). | ||
48 | */ | ||
49 | return -1; | ||
50 | } | ||
51 | |||
52 | return 0; | ||
53 | |||
54 | } | ||
55 | |||
32 | static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi, | 56 | static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi, |
33 | struct hid_field *field, struct hid_usage *usage, | 57 | struct hid_field *field, struct hid_usage *usage, |
34 | unsigned long **bit, int *max) | 58 | unsigned long **bit, int *max) |
@@ -52,8 +76,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field, | |||
52 | struct a4tech_sc *a4 = hid_get_drvdata(hdev); | 76 | struct a4tech_sc *a4 = hid_get_drvdata(hdev); |
53 | struct input_dev *input; | 77 | struct input_dev *input; |
54 | 78 | ||
55 | if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput || | 79 | if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput) |
56 | !usage->type) | ||
57 | return 0; | 80 | return 0; |
58 | 81 | ||
59 | input = field->hidinput->input; | 82 | input = field->hidinput->input; |
@@ -64,7 +87,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field, | |||
64 | return 1; | 87 | return 1; |
65 | } | 88 | } |
66 | 89 | ||
67 | if (usage->hid == 0x000100b8) { | 90 | if (usage->hid == A4_WHEEL_ORIENTATION) { |
68 | input_event(input, EV_REL, value ? REL_HWHEEL : | 91 | input_event(input, EV_REL, value ? REL_HWHEEL : |
69 | REL_WHEEL, a4->delayed_value); | 92 | REL_WHEEL, a4->delayed_value); |
70 | input_event(input, EV_REL, value ? REL_HWHEEL_HI_RES : | 93 | input_event(input, EV_REL, value ? REL_HWHEEL_HI_RES : |
@@ -131,6 +154,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices); | |||
131 | static struct hid_driver a4_driver = { | 154 | static struct hid_driver a4_driver = { |
132 | .name = "a4tech", | 155 | .name = "a4tech", |
133 | .id_table = a4_devices, | 156 | .id_table = a4_devices, |
157 | .input_mapping = a4_input_mapping, | ||
134 | .input_mapped = a4_input_mapped, | 158 | .input_mapped = a4_input_mapped, |
135 | .event = a4_event, | 159 | .event = a4_event, |
136 | .probe = a4_probe, | 160 | .probe = a4_probe, |
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c index b3d502421b79..0a38e8e9bc78 100644 --- a/drivers/hid/hid-holtek-kbd.c +++ b/drivers/hid/hid-holtek-kbd.c | |||
@@ -123,9 +123,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type, | |||
123 | 123 | ||
124 | /* Locate the boot interface, to receive the LED change events */ | 124 | /* Locate the boot interface, to receive the LED change events */ |
125 | struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0); | 125 | struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0); |
126 | struct hid_device *boot_hid; | ||
127 | struct hid_input *boot_hid_input; | ||
126 | 128 | ||
127 | struct hid_device *boot_hid = usb_get_intfdata(boot_interface); | 129 | if (unlikely(boot_interface == NULL)) |
128 | struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs, | 130 | return -ENODEV; |
131 | |||
132 | boot_hid = usb_get_intfdata(boot_interface); | ||
133 | boot_hid_input = list_first_entry(&boot_hid->inputs, | ||
129 | struct hid_input, list); | 134 | struct hid_input, list); |
130 | 135 | ||
131 | return boot_hid_input->input->event(boot_hid_input->input, type, code, | 136 | return boot_hid_input->input->event(boot_hid_input->input, type, code, |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 0d695f8e1b2c..0a00be19f7a0 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -568,6 +568,7 @@ | |||
568 | #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a | 568 | #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a |
569 | #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a | 569 | #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a |
570 | #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a | 570 | #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a |
571 | #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641 | ||
571 | 572 | ||
572 | #define USB_VENDOR_ID_HUION 0x256c | 573 | #define USB_VENDOR_ID_HUION 0x256c |
573 | #define USB_DEVICE_ID_HUION_TABLET 0x006e | 574 | #define USB_DEVICE_ID_HUION_TABLET 0x006e |
@@ -768,7 +769,8 @@ | |||
768 | #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f | 769 | #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f |
769 | #define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532 | 770 | #define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532 |
770 | #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534 | 771 | #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534 |
771 | #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING 0xc539 | 772 | #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED 0xc539 |
773 | #define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a | ||
772 | #define USB_DEVICE_ID_SPACETRAVELLER 0xc623 | 774 | #define USB_DEVICE_ID_SPACETRAVELLER 0xc623 |
773 | #define USB_DEVICE_ID_SPACENAVIGATOR 0xc626 | 775 | #define USB_DEVICE_ID_SPACENAVIGATOR 0xc626 |
774 | #define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704 | 776 | #define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704 |
@@ -989,6 +991,7 @@ | |||
989 | #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 | 991 | #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 |
990 | #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa | 992 | #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa |
991 | #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 | 993 | #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 |
994 | #define USB_DEVICE_ID_SAITEK_X52 0x075c | ||
992 | 995 | ||
993 | #define USB_VENDOR_ID_SAMSUNG 0x0419 | 996 | #define USB_VENDOR_ID_SAMSUNG 0x0419 |
994 | #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 | 997 | #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 |
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 6196217a7d93..cc47f948c1d0 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c | |||
@@ -1125,7 +1125,7 @@ static int logi_dj_recv_query_hidpp_devices(struct dj_receiver_dev *djrcv_dev) | |||
1125 | HID_REQ_SET_REPORT); | 1125 | HID_REQ_SET_REPORT); |
1126 | 1126 | ||
1127 | kfree(hidpp_report); | 1127 | kfree(hidpp_report); |
1128 | return retval; | 1128 | return (retval < 0) ? retval : 0; |
1129 | } | 1129 | } |
1130 | 1130 | ||
1131 | static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) | 1131 | static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) |
@@ -1832,13 +1832,17 @@ static const struct hid_device_id logi_dj_receivers[] = { | |||
1832 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, | 1832 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, |
1833 | USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2), | 1833 | USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2), |
1834 | .driver_data = recvr_type_hidpp}, | 1834 | .driver_data = recvr_type_hidpp}, |
1835 | { /* Logitech gaming receiver (0xc539) */ | 1835 | { /* Logitech lightspeed receiver (0xc539) */ |
1836 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, | 1836 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, |
1837 | USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING), | 1837 | USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED), |
1838 | .driver_data = recvr_type_gaming_hidpp}, | 1838 | .driver_data = recvr_type_gaming_hidpp}, |
1839 | { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */ | 1839 | { /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */ |
1840 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER), | 1840 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER), |
1841 | .driver_data = recvr_type_27mhz}, | 1841 | .driver_data = recvr_type_27mhz}, |
1842 | { /* Logitech powerplay receiver (0xc53a) */ | ||
1843 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, | ||
1844 | USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY), | ||
1845 | .driver_data = recvr_type_gaming_hidpp}, | ||
1842 | { /* Logitech 27 MHz HID++ 1.0 receiver (0xc517) */ | 1846 | { /* Logitech 27 MHz HID++ 1.0 receiver (0xc517) */ |
1843 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, | 1847 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, |
1844 | USB_DEVICE_ID_S510_RECEIVER_2), | 1848 | USB_DEVICE_ID_S510_RECEIVER_2), |
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index e3b6245bf4b2..21268c9fa71a 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c | |||
@@ -3749,15 +3749,45 @@ static const struct hid_device_id hidpp_devices[] = { | |||
3749 | 3749 | ||
3750 | { L27MHZ_DEVICE(HID_ANY_ID) }, | 3750 | { L27MHZ_DEVICE(HID_ANY_ID) }, |
3751 | 3751 | ||
3752 | { /* Logitech G403 Gaming Mouse over USB */ | 3752 | { /* Logitech G203/Prodigy Gaming Mouse */ |
3753 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) }, | ||
3754 | { /* Logitech G302 Gaming Mouse */ | ||
3755 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) }, | ||
3756 | { /* Logitech G303 Gaming Mouse */ | ||
3757 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) }, | ||
3758 | { /* Logitech G400 Gaming Mouse */ | ||
3759 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) }, | ||
3760 | { /* Logitech G403 Wireless Gaming Mouse over USB */ | ||
3753 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) }, | 3761 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) }, |
3762 | { /* Logitech G403 Gaming Mouse */ | ||
3763 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) }, | ||
3764 | { /* Logitech G403 Hero Gaming Mouse over USB */ | ||
3765 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) }, | ||
3766 | { /* Logitech G502 Proteus Core Gaming Mouse */ | ||
3767 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) }, | ||
3768 | { /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */ | ||
3769 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) }, | ||
3770 | { /* Logitech G502 Hero Gaming Mouse over USB */ | ||
3771 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) }, | ||
3754 | { /* Logitech G700 Gaming Mouse over USB */ | 3772 | { /* Logitech G700 Gaming Mouse over USB */ |
3755 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) }, | 3773 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) }, |
3774 | { /* Logitech G700s Gaming Mouse over USB */ | ||
3775 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) }, | ||
3776 | { /* Logitech G703 Gaming Mouse over USB */ | ||
3777 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) }, | ||
3778 | { /* Logitech G703 Hero Gaming Mouse over USB */ | ||
3779 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC090) }, | ||
3756 | { /* Logitech G900 Gaming Mouse over USB */ | 3780 | { /* Logitech G900 Gaming Mouse over USB */ |
3757 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) }, | 3781 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) }, |
3782 | { /* Logitech G903 Gaming Mouse over USB */ | ||
3783 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) }, | ||
3784 | { /* Logitech G903 Hero Gaming Mouse over USB */ | ||
3785 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) }, | ||
3758 | { /* Logitech G920 Wheel over USB */ | 3786 | { /* Logitech G920 Wheel over USB */ |
3759 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL), | 3787 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL), |
3760 | .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS}, | 3788 | .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS}, |
3789 | { /* Logitech G Pro Gaming Mouse over USB */ | ||
3790 | HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) }, | ||
3761 | 3791 | ||
3762 | { /* MX5000 keyboard over Bluetooth */ | 3792 | { /* MX5000 keyboard over Bluetooth */ |
3763 | HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305), | 3793 | HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305), |
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 185a577c46f6..166f41f3173b 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c | |||
@@ -92,6 +92,7 @@ static const struct hid_device_id hid_quirks[] = { | |||
92 | { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, | 92 | { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, |
93 | { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, | 93 | { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, |
94 | { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL }, | 94 | { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL }, |
95 | { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL }, | ||
95 | { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT }, | 96 | { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT }, |
96 | { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT }, | 97 | { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT }, |
97 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT }, | 98 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT }, |
@@ -141,6 +142,7 @@ static const struct hid_device_id hid_quirks[] = { | |||
141 | { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, | 142 | { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, |
142 | { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, | 143 | { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, |
143 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, | 144 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, |
145 | { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, | ||
144 | { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, | 146 | { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, |
145 | { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, | 147 | { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, |
146 | { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET }, | 148 | { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET }, |
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 93942063b51b..49dd2d905c7f 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c | |||
@@ -585,10 +585,14 @@ static void sony_set_leds(struct sony_sc *sc); | |||
585 | static inline void sony_schedule_work(struct sony_sc *sc, | 585 | static inline void sony_schedule_work(struct sony_sc *sc, |
586 | enum sony_worker which) | 586 | enum sony_worker which) |
587 | { | 587 | { |
588 | unsigned long flags; | ||
589 | |||
588 | switch (which) { | 590 | switch (which) { |
589 | case SONY_WORKER_STATE: | 591 | case SONY_WORKER_STATE: |
590 | if (!sc->defer_initialization) | 592 | spin_lock_irqsave(&sc->lock, flags); |
593 | if (!sc->defer_initialization && sc->state_worker_initialized) | ||
591 | schedule_work(&sc->state_worker); | 594 | schedule_work(&sc->state_worker); |
595 | spin_unlock_irqrestore(&sc->lock, flags); | ||
592 | break; | 596 | break; |
593 | case SONY_WORKER_HOTPLUG: | 597 | case SONY_WORKER_HOTPLUG: |
594 | if (sc->hotplug_worker_initialized) | 598 | if (sc->hotplug_worker_initialized) |
@@ -2558,13 +2562,18 @@ static inline void sony_init_output_report(struct sony_sc *sc, | |||
2558 | 2562 | ||
2559 | static inline void sony_cancel_work_sync(struct sony_sc *sc) | 2563 | static inline void sony_cancel_work_sync(struct sony_sc *sc) |
2560 | { | 2564 | { |
2565 | unsigned long flags; | ||
2566 | |||
2561 | if (sc->hotplug_worker_initialized) | 2567 | if (sc->hotplug_worker_initialized) |
2562 | cancel_work_sync(&sc->hotplug_worker); | 2568 | cancel_work_sync(&sc->hotplug_worker); |
2563 | if (sc->state_worker_initialized) | 2569 | if (sc->state_worker_initialized) { |
2570 | spin_lock_irqsave(&sc->lock, flags); | ||
2571 | sc->state_worker_initialized = 0; | ||
2572 | spin_unlock_irqrestore(&sc->lock, flags); | ||
2564 | cancel_work_sync(&sc->state_worker); | 2573 | cancel_work_sync(&sc->state_worker); |
2574 | } | ||
2565 | } | 2575 | } |
2566 | 2576 | ||
2567 | |||
2568 | static int sony_input_configured(struct hid_device *hdev, | 2577 | static int sony_input_configured(struct hid_device *hdev, |
2569 | struct hid_input *hidinput) | 2578 | struct hid_input *hidinput) |
2570 | { | 2579 | { |
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c index e12f2588ddeb..bdfc5ff3b2c5 100644 --- a/drivers/hid/hid-tmff.c +++ b/drivers/hid/hid-tmff.c | |||
@@ -22,6 +22,8 @@ | |||
22 | 22 | ||
23 | #include "hid-ids.h" | 23 | #include "hid-ids.h" |
24 | 24 | ||
25 | #define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320 | ||
26 | |||
25 | static const signed short ff_rumble[] = { | 27 | static const signed short ff_rumble[] = { |
26 | FF_RUMBLE, | 28 | FF_RUMBLE, |
27 | -1 | 29 | -1 |
@@ -76,6 +78,7 @@ static int tmff_play(struct input_dev *dev, void *data, | |||
76 | struct hid_field *ff_field = tmff->ff_field; | 78 | struct hid_field *ff_field = tmff->ff_field; |
77 | int x, y; | 79 | int x, y; |
78 | int left, right; /* Rumbling */ | 80 | int left, right; /* Rumbling */ |
81 | int motor_swap; | ||
79 | 82 | ||
80 | switch (effect->type) { | 83 | switch (effect->type) { |
81 | case FF_CONSTANT: | 84 | case FF_CONSTANT: |
@@ -100,6 +103,13 @@ static int tmff_play(struct input_dev *dev, void *data, | |||
100 | ff_field->logical_minimum, | 103 | ff_field->logical_minimum, |
101 | ff_field->logical_maximum); | 104 | ff_field->logical_maximum); |
102 | 105 | ||
106 | /* 2-in-1 strong motor is left */ | ||
107 | if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) { | ||
108 | motor_swap = left; | ||
109 | left = right; | ||
110 | right = motor_swap; | ||
111 | } | ||
112 | |||
103 | dbg_hid("(left,right)=(%08x, %08x)\n", left, right); | 113 | dbg_hid("(left,right)=(%08x, %08x)\n", left, right); |
104 | ff_field->value[0] = left; | 114 | ff_field->value[0] = left; |
105 | ff_field->value[1] = right; | 115 | ff_field->value[1] = right; |
@@ -226,6 +236,8 @@ static const struct hid_device_id tm_devices[] = { | |||
226 | .driver_data = (unsigned long)ff_rumble }, | 236 | .driver_data = (unsigned long)ff_rumble }, |
227 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */ | 237 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */ |
228 | .driver_data = (unsigned long)ff_rumble }, | 238 | .driver_data = (unsigned long)ff_rumble }, |
239 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */ | ||
240 | .driver_data = (unsigned long)ff_rumble }, | ||
229 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */ | 241 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */ |
230 | .driver_data = (unsigned long)ff_rumble }, | 242 | .driver_data = (unsigned long)ff_rumble }, |
231 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */ | 243 | { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */ |
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 55b72573066b..4e11cc6fc34b 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c | |||
@@ -284,6 +284,14 @@ static int hiddev_open(struct inode *inode, struct file *file) | |||
284 | spin_unlock_irq(&list->hiddev->list_lock); | 284 | spin_unlock_irq(&list->hiddev->list_lock); |
285 | 285 | ||
286 | mutex_lock(&hiddev->existancelock); | 286 | mutex_lock(&hiddev->existancelock); |
287 | /* | ||
288 | * recheck exist with existance lock held to | ||
289 | * avoid opening a disconnected device | ||
290 | */ | ||
291 | if (!list->hiddev->exist) { | ||
292 | res = -ENODEV; | ||
293 | goto bail_unlock; | ||
294 | } | ||
287 | if (!list->hiddev->open++) | 295 | if (!list->hiddev->open++) |
288 | if (list->hiddev->exist) { | 296 | if (list->hiddev->exist) { |
289 | struct hid_device *hid = hiddev->hid; | 297 | struct hid_device *hid = hiddev->hid; |
@@ -300,6 +308,10 @@ bail_normal_power: | |||
300 | hid_hw_power(hid, PM_HINT_NORMAL); | 308 | hid_hw_power(hid, PM_HINT_NORMAL); |
301 | bail_unlock: | 309 | bail_unlock: |
302 | mutex_unlock(&hiddev->existancelock); | 310 | mutex_unlock(&hiddev->existancelock); |
311 | |||
312 | spin_lock_irq(&list->hiddev->list_lock); | ||
313 | list_del(&list->node); | ||
314 | spin_unlock_irq(&list->hiddev->list_lock); | ||
303 | bail: | 315 | bail: |
304 | file->private_data = NULL; | 316 | file->private_data = NULL; |
305 | vfree(list); | 317 | vfree(list); |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 8fc36a28081b..7a8ddc999a8e 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
@@ -533,14 +533,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom) | |||
533 | */ | 533 | */ |
534 | buttons = (data[4] << 1) | (data[3] & 0x01); | 534 | buttons = (data[4] << 1) | (data[3] & 0x01); |
535 | } else if (features->type == CINTIQ_COMPANION_2) { | 535 | } else if (features->type == CINTIQ_COMPANION_2) { |
536 | /* d-pad right -> data[4] & 0x10 | 536 | /* d-pad right -> data[2] & 0x10 |
537 | * d-pad up -> data[4] & 0x20 | 537 | * d-pad up -> data[2] & 0x20 |
538 | * d-pad left -> data[4] & 0x40 | 538 | * d-pad left -> data[2] & 0x40 |
539 | * d-pad down -> data[4] & 0x80 | 539 | * d-pad down -> data[2] & 0x80 |
540 | * d-pad center -> data[3] & 0x01 | 540 | * d-pad center -> data[1] & 0x01 |
541 | */ | 541 | */ |
542 | buttons = ((data[2] >> 4) << 7) | | 542 | buttons = ((data[2] >> 4) << 7) | |
543 | ((data[1] & 0x04) << 6) | | 543 | ((data[1] & 0x04) << 4) | |
544 | ((data[2] & 0x0F) << 2) | | 544 | ((data[2] & 0x0F) << 2) | |
545 | (data[1] & 0x03); | 545 | (data[1] & 0x03); |
546 | } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) { | 546 | } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) { |
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index e7dff5febe16..d42bc0883a32 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
@@ -852,7 +852,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 }; | |||
852 | static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 }; | 852 | static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 }; |
853 | static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 }; | 853 | static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 }; |
854 | static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a }; | 854 | static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a }; |
855 | static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c }; | 855 | static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b }; |
856 | static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c }; | 856 | static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c }; |
857 | static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d }; | 857 | static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d }; |
858 | 858 | ||
@@ -3764,6 +3764,7 @@ static int nct6775_probe(struct platform_device *pdev) | |||
3764 | data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME; | 3764 | data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME; |
3765 | data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME; | 3765 | data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME; |
3766 | data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME; | 3766 | data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME; |
3767 | data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H; | ||
3767 | data->REG_PWM[0] = NCT6106_REG_PWM; | 3768 | data->REG_PWM[0] = NCT6106_REG_PWM; |
3768 | data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT; | 3769 | data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT; |
3769 | data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT; | 3770 | data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT; |
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index a7d2b16dd702..30e18eb60da7 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c | |||
@@ -408,8 +408,10 @@ static ssize_t occ_show_power_1(struct device *dev, | |||
408 | 408 | ||
409 | static u64 occ_get_powr_avg(u64 *accum, u32 *samples) | 409 | static u64 occ_get_powr_avg(u64 *accum, u32 *samples) |
410 | { | 410 | { |
411 | return div64_u64(get_unaligned_be64(accum) * 1000000ULL, | 411 | u64 divisor = get_unaligned_be32(samples); |
412 | get_unaligned_be32(samples)); | 412 | |
413 | return (divisor == 0) ? 0 : | ||
414 | div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor); | ||
413 | } | 415 | } |
414 | 416 | ||
415 | static ssize_t occ_show_power_2(struct device *dev, | 417 | static ssize_t occ_show_power_2(struct device *dev, |
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c index 8d55cdd69ff4..435c7d7377a3 100644 --- a/drivers/i2c/busses/i2c-at91-core.c +++ b/drivers/i2c/busses/i2c-at91-core.c | |||
@@ -142,7 +142,7 @@ static struct at91_twi_pdata sama5d4_config = { | |||
142 | 142 | ||
143 | static struct at91_twi_pdata sama5d2_config = { | 143 | static struct at91_twi_pdata sama5d2_config = { |
144 | .clk_max_div = 7, | 144 | .clk_max_div = 7, |
145 | .clk_offset = 4, | 145 | .clk_offset = 3, |
146 | .has_unre_flag = true, | 146 | .has_unre_flag = true, |
147 | .has_alt_cmd = true, | 147 | .has_alt_cmd = true, |
148 | .has_hold_field = true, | 148 | .has_hold_field = true, |
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c index e87232f2e708..a3fcc35ffd3b 100644 --- a/drivers/i2c/busses/i2c-at91-master.c +++ b/drivers/i2c/busses/i2c-at91-master.c | |||
@@ -122,9 +122,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev) | |||
122 | writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR); | 122 | writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR); |
123 | 123 | ||
124 | /* send stop when last byte has been written */ | 124 | /* send stop when last byte has been written */ |
125 | if (--dev->buf_len == 0) | 125 | if (--dev->buf_len == 0) { |
126 | if (!dev->use_alt_cmd) | 126 | if (!dev->use_alt_cmd) |
127 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); | 127 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); |
128 | at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY); | ||
129 | } | ||
128 | 130 | ||
129 | dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len); | 131 | dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len); |
130 | 132 | ||
@@ -542,9 +544,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) | |||
542 | } else { | 544 | } else { |
543 | at91_twi_write_next_byte(dev); | 545 | at91_twi_write_next_byte(dev); |
544 | at91_twi_write(dev, AT91_TWI_IER, | 546 | at91_twi_write(dev, AT91_TWI_IER, |
545 | AT91_TWI_TXCOMP | | 547 | AT91_TWI_TXCOMP | AT91_TWI_NACK | |
546 | AT91_TWI_NACK | | 548 | (dev->buf_len ? AT91_TWI_TXRDY : 0)); |
547 | AT91_TWI_TXRDY); | ||
548 | } | 549 | } |
549 | } | 550 | } |
550 | 551 | ||
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 2c7f145a036e..d7fd76baec92 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c | |||
@@ -392,16 +392,18 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c, | |||
392 | static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c) | 392 | static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c) |
393 | { | 393 | { |
394 | struct i2c_msg *msg = iproc_i2c->msg; | 394 | struct i2c_msg *msg = iproc_i2c->msg; |
395 | uint32_t val; | ||
395 | 396 | ||
396 | /* Read valid data from RX FIFO */ | 397 | /* Read valid data from RX FIFO */ |
397 | while (iproc_i2c->rx_bytes < msg->len) { | 398 | while (iproc_i2c->rx_bytes < msg->len) { |
398 | if (!((iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET) >> M_FIFO_RX_CNT_SHIFT) | 399 | val = iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET); |
399 | & M_FIFO_RX_CNT_MASK)) | 400 | |
401 | /* rx fifo empty */ | ||
402 | if (!((val >> M_RX_STATUS_SHIFT) & M_RX_STATUS_MASK)) | ||
400 | break; | 403 | break; |
401 | 404 | ||
402 | msg->buf[iproc_i2c->rx_bytes] = | 405 | msg->buf[iproc_i2c->rx_bytes] = |
403 | (iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET) >> | 406 | (val >> M_RX_DATA_SHIFT) & M_RX_DATA_MASK; |
404 | M_RX_DATA_SHIFT) & M_RX_DATA_MASK; | ||
405 | iproc_i2c->rx_bytes++; | 407 | iproc_i2c->rx_bytes++; |
406 | } | 408 | } |
407 | } | 409 | } |
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c index cfc76b5de726..5a1235fd86bb 100644 --- a/drivers/i2c/busses/i2c-nvidia-gpu.c +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c | |||
@@ -364,7 +364,7 @@ static void gpu_i2c_remove(struct pci_dev *pdev) | |||
364 | /* | 364 | /* |
365 | * We need gpu_i2c_suspend() even if it is stub, for runtime pm to work | 365 | * We need gpu_i2c_suspend() even if it is stub, for runtime pm to work |
366 | * correctly. Without it, lspci shows runtime pm status as "D0" for the card. | 366 | * correctly. Without it, lspci shows runtime pm status as "D0" for the card. |
367 | * Documentation/power/pci.txt also insists for driver to provide this. | 367 | * Documentation/power/pci.rst also insists for driver to provide this. |
368 | */ | 368 | */ |
369 | static __maybe_unused int gpu_i2c_suspend(struct device *dev) | 369 | static __maybe_unused int gpu_i2c_suspend(struct device *dev) |
370 | { | 370 | { |
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index d97fb857b0ea..c98ef4c4a0c9 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c | |||
@@ -435,6 +435,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat) | |||
435 | * fall through to the write state, as we will need to | 435 | * fall through to the write state, as we will need to |
436 | * send a byte as well | 436 | * send a byte as well |
437 | */ | 437 | */ |
438 | /* Fall through */ | ||
438 | 439 | ||
439 | case STATE_WRITE: | 440 | case STATE_WRITE: |
440 | /* | 441 | /* |
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 888d89ce81df..beee7b7e0d9a 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h | |||
@@ -302,7 +302,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, | |||
302 | struct ib_udata *udata, | 302 | struct ib_udata *udata, |
303 | struct ib_uobject *uobj) | 303 | struct ib_uobject *uobj) |
304 | { | 304 | { |
305 | enum ib_qp_type qp_type = attr->qp_type; | ||
305 | struct ib_qp *qp; | 306 | struct ib_qp *qp; |
307 | bool is_xrc; | ||
306 | 308 | ||
307 | if (!dev->ops.create_qp) | 309 | if (!dev->ops.create_qp) |
308 | return ERR_PTR(-EOPNOTSUPP); | 310 | return ERR_PTR(-EOPNOTSUPP); |
@@ -320,7 +322,8 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, | |||
320 | * and more importantly they are created internaly by driver, | 322 | * and more importantly they are created internaly by driver, |
321 | * see mlx5 create_dev_resources() as an example. | 323 | * see mlx5 create_dev_resources() as an example. |
322 | */ | 324 | */ |
323 | if (attr->qp_type < IB_QPT_XRC_INI) { | 325 | is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT; |
326 | if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) { | ||
324 | qp->res.type = RDMA_RESTRACK_QP; | 327 | qp->res.type = RDMA_RESTRACK_QP; |
325 | if (uobj) | 328 | if (uobj) |
326 | rdma_restrack_uadd(&qp->res); | 329 | rdma_restrack_uadd(&qp->res); |
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index 01faef7bc061..45d5164e9574 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c | |||
@@ -393,6 +393,9 @@ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index) | |||
393 | u64 sum; | 393 | u64 sum; |
394 | 394 | ||
395 | port_counter = &dev->port_data[port].port_counter; | 395 | port_counter = &dev->port_data[port].port_counter; |
396 | if (!port_counter->hstats) | ||
397 | return 0; | ||
398 | |||
396 | sum = get_running_counters_hwstat_sum(dev, port, index); | 399 | sum = get_running_counters_hwstat_sum(dev, port, index); |
397 | sum += port_counter->hstats->value[index]; | 400 | sum += port_counter->hstats->value[index]; |
398 | 401 | ||
@@ -594,7 +597,7 @@ void rdma_counter_init(struct ib_device *dev) | |||
594 | struct rdma_port_counter *port_counter; | 597 | struct rdma_port_counter *port_counter; |
595 | u32 port; | 598 | u32 port; |
596 | 599 | ||
597 | if (!dev->ops.alloc_hw_stats || !dev->port_data) | 600 | if (!dev->port_data) |
598 | return; | 601 | return; |
599 | 602 | ||
600 | rdma_for_each_port(dev, port) { | 603 | rdma_for_each_port(dev, port) { |
@@ -602,6 +605,9 @@ void rdma_counter_init(struct ib_device *dev) | |||
602 | port_counter->mode.mode = RDMA_COUNTER_MODE_NONE; | 605 | port_counter->mode.mode = RDMA_COUNTER_MODE_NONE; |
603 | mutex_init(&port_counter->lock); | 606 | mutex_init(&port_counter->lock); |
604 | 607 | ||
608 | if (!dev->ops.alloc_hw_stats) | ||
609 | continue; | ||
610 | |||
605 | port_counter->hstats = dev->ops.alloc_hw_stats(dev, port); | 611 | port_counter->hstats = dev->ops.alloc_hw_stats(dev, port); |
606 | if (!port_counter->hstats) | 612 | if (!port_counter->hstats) |
607 | goto fail; | 613 | goto fail; |
@@ -624,9 +630,6 @@ void rdma_counter_release(struct ib_device *dev) | |||
624 | struct rdma_port_counter *port_counter; | 630 | struct rdma_port_counter *port_counter; |
625 | u32 port; | 631 | u32 port; |
626 | 632 | ||
627 | if (!dev->ops.alloc_hw_stats) | ||
628 | return; | ||
629 | |||
630 | rdma_for_each_port(dev, port) { | 633 | rdma_for_each_port(dev, port) { |
631 | port_counter = &dev->port_data[port].port_counter; | 634 | port_counter = &dev->port_data[port].port_counter; |
632 | kfree(port_counter->hstats); | 635 | kfree(port_counter->hstats); |
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 9773145dee09..ea8661a00651 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
@@ -94,11 +94,17 @@ static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC); | |||
94 | static DECLARE_RWSEM(devices_rwsem); | 94 | static DECLARE_RWSEM(devices_rwsem); |
95 | #define DEVICE_REGISTERED XA_MARK_1 | 95 | #define DEVICE_REGISTERED XA_MARK_1 |
96 | 96 | ||
97 | static LIST_HEAD(client_list); | 97 | static u32 highest_client_id; |
98 | #define CLIENT_REGISTERED XA_MARK_1 | 98 | #define CLIENT_REGISTERED XA_MARK_1 |
99 | static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC); | 99 | static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC); |
100 | static DECLARE_RWSEM(clients_rwsem); | 100 | static DECLARE_RWSEM(clients_rwsem); |
101 | 101 | ||
102 | static void ib_client_put(struct ib_client *client) | ||
103 | { | ||
104 | if (refcount_dec_and_test(&client->uses)) | ||
105 | complete(&client->uses_zero); | ||
106 | } | ||
107 | |||
102 | /* | 108 | /* |
103 | * If client_data is registered then the corresponding client must also still | 109 | * If client_data is registered then the corresponding client must also still |
104 | * be registered. | 110 | * be registered. |
@@ -661,6 +667,14 @@ static int add_client_context(struct ib_device *device, | |||
661 | 667 | ||
662 | down_write(&device->client_data_rwsem); | 668 | down_write(&device->client_data_rwsem); |
663 | /* | 669 | /* |
670 | * So long as the client is registered hold both the client and device | ||
671 | * unregistration locks. | ||
672 | */ | ||
673 | if (!refcount_inc_not_zero(&client->uses)) | ||
674 | goto out_unlock; | ||
675 | refcount_inc(&device->refcount); | ||
676 | |||
677 | /* | ||
664 | * Another caller to add_client_context got here first and has already | 678 | * Another caller to add_client_context got here first and has already |
665 | * completely initialized context. | 679 | * completely initialized context. |
666 | */ | 680 | */ |
@@ -683,6 +697,9 @@ static int add_client_context(struct ib_device *device, | |||
683 | return 0; | 697 | return 0; |
684 | 698 | ||
685 | out: | 699 | out: |
700 | ib_device_put(device); | ||
701 | ib_client_put(client); | ||
702 | out_unlock: | ||
686 | up_write(&device->client_data_rwsem); | 703 | up_write(&device->client_data_rwsem); |
687 | return ret; | 704 | return ret; |
688 | } | 705 | } |
@@ -702,7 +719,7 @@ static void remove_client_context(struct ib_device *device, | |||
702 | client_data = xa_load(&device->client_data, client_id); | 719 | client_data = xa_load(&device->client_data, client_id); |
703 | xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED); | 720 | xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED); |
704 | client = xa_load(&clients, client_id); | 721 | client = xa_load(&clients, client_id); |
705 | downgrade_write(&device->client_data_rwsem); | 722 | up_write(&device->client_data_rwsem); |
706 | 723 | ||
707 | /* | 724 | /* |
708 | * Notice we cannot be holding any exclusive locks when calling the | 725 | * Notice we cannot be holding any exclusive locks when calling the |
@@ -712,17 +729,13 @@ static void remove_client_context(struct ib_device *device, | |||
712 | * | 729 | * |
713 | * For this reason clients and drivers should not call the | 730 | * For this reason clients and drivers should not call the |
714 | * unregistration functions will holdling any locks. | 731 | * unregistration functions will holdling any locks. |
715 | * | ||
716 | * It tempting to drop the client_data_rwsem too, but this is required | ||
717 | * to ensure that unregister_client does not return until all clients | ||
718 | * are completely unregistered, which is required to avoid module | ||
719 | * unloading races. | ||
720 | */ | 732 | */ |
721 | if (client->remove) | 733 | if (client->remove) |
722 | client->remove(device, client_data); | 734 | client->remove(device, client_data); |
723 | 735 | ||
724 | xa_erase(&device->client_data, client_id); | 736 | xa_erase(&device->client_data, client_id); |
725 | up_read(&device->client_data_rwsem); | 737 | ib_device_put(device); |
738 | ib_client_put(client); | ||
726 | } | 739 | } |
727 | 740 | ||
728 | static int alloc_port_data(struct ib_device *device) | 741 | static int alloc_port_data(struct ib_device *device) |
@@ -1224,7 +1237,7 @@ static int setup_device(struct ib_device *device) | |||
1224 | 1237 | ||
1225 | static void disable_device(struct ib_device *device) | 1238 | static void disable_device(struct ib_device *device) |
1226 | { | 1239 | { |
1227 | struct ib_client *client; | 1240 | u32 cid; |
1228 | 1241 | ||
1229 | WARN_ON(!refcount_read(&device->refcount)); | 1242 | WARN_ON(!refcount_read(&device->refcount)); |
1230 | 1243 | ||
@@ -1232,10 +1245,19 @@ static void disable_device(struct ib_device *device) | |||
1232 | xa_clear_mark(&devices, device->index, DEVICE_REGISTERED); | 1245 | xa_clear_mark(&devices, device->index, DEVICE_REGISTERED); |
1233 | up_write(&devices_rwsem); | 1246 | up_write(&devices_rwsem); |
1234 | 1247 | ||
1248 | /* | ||
1249 | * Remove clients in LIFO order, see assign_client_id. This could be | ||
1250 | * more efficient if xarray learns to reverse iterate. Since no new | ||
1251 | * clients can be added to this ib_device past this point we only need | ||
1252 | * the maximum possible client_id value here. | ||
1253 | */ | ||
1235 | down_read(&clients_rwsem); | 1254 | down_read(&clients_rwsem); |
1236 | list_for_each_entry_reverse(client, &client_list, list) | 1255 | cid = highest_client_id; |
1237 | remove_client_context(device, client->client_id); | ||
1238 | up_read(&clients_rwsem); | 1256 | up_read(&clients_rwsem); |
1257 | while (cid) { | ||
1258 | cid--; | ||
1259 | remove_client_context(device, cid); | ||
1260 | } | ||
1239 | 1261 | ||
1240 | /* Pairs with refcount_set in enable_device */ | 1262 | /* Pairs with refcount_set in enable_device */ |
1241 | ib_device_put(device); | 1263 | ib_device_put(device); |
@@ -1662,30 +1684,31 @@ static int assign_client_id(struct ib_client *client) | |||
1662 | /* | 1684 | /* |
1663 | * The add/remove callbacks must be called in FIFO/LIFO order. To | 1685 | * The add/remove callbacks must be called in FIFO/LIFO order. To |
1664 | * achieve this we assign client_ids so they are sorted in | 1686 | * achieve this we assign client_ids so they are sorted in |
1665 | * registration order, and retain a linked list we can reverse iterate | 1687 | * registration order. |
1666 | * to get the LIFO order. The extra linked list can go away if xarray | ||
1667 | * learns to reverse iterate. | ||
1668 | */ | 1688 | */ |
1669 | if (list_empty(&client_list)) { | 1689 | client->client_id = highest_client_id; |
1670 | client->client_id = 0; | ||
1671 | } else { | ||
1672 | struct ib_client *last; | ||
1673 | |||
1674 | last = list_last_entry(&client_list, struct ib_client, list); | ||
1675 | client->client_id = last->client_id + 1; | ||
1676 | } | ||
1677 | ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL); | 1690 | ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL); |
1678 | if (ret) | 1691 | if (ret) |
1679 | goto out; | 1692 | goto out; |
1680 | 1693 | ||
1694 | highest_client_id++; | ||
1681 | xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED); | 1695 | xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED); |
1682 | list_add_tail(&client->list, &client_list); | ||
1683 | 1696 | ||
1684 | out: | 1697 | out: |
1685 | up_write(&clients_rwsem); | 1698 | up_write(&clients_rwsem); |
1686 | return ret; | 1699 | return ret; |
1687 | } | 1700 | } |
1688 | 1701 | ||
1702 | static void remove_client_id(struct ib_client *client) | ||
1703 | { | ||
1704 | down_write(&clients_rwsem); | ||
1705 | xa_erase(&clients, client->client_id); | ||
1706 | for (; highest_client_id; highest_client_id--) | ||
1707 | if (xa_load(&clients, highest_client_id - 1)) | ||
1708 | break; | ||
1709 | up_write(&clients_rwsem); | ||
1710 | } | ||
1711 | |||
1689 | /** | 1712 | /** |
1690 | * ib_register_client - Register an IB client | 1713 | * ib_register_client - Register an IB client |
1691 | * @client:Client to register | 1714 | * @client:Client to register |
@@ -1705,6 +1728,8 @@ int ib_register_client(struct ib_client *client) | |||
1705 | unsigned long index; | 1728 | unsigned long index; |
1706 | int ret; | 1729 | int ret; |
1707 | 1730 | ||
1731 | refcount_set(&client->uses, 1); | ||
1732 | init_completion(&client->uses_zero); | ||
1708 | ret = assign_client_id(client); | 1733 | ret = assign_client_id(client); |
1709 | if (ret) | 1734 | if (ret) |
1710 | return ret; | 1735 | return ret; |
@@ -1740,21 +1765,30 @@ void ib_unregister_client(struct ib_client *client) | |||
1740 | unsigned long index; | 1765 | unsigned long index; |
1741 | 1766 | ||
1742 | down_write(&clients_rwsem); | 1767 | down_write(&clients_rwsem); |
1768 | ib_client_put(client); | ||
1743 | xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED); | 1769 | xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED); |
1744 | up_write(&clients_rwsem); | 1770 | up_write(&clients_rwsem); |
1745 | /* | 1771 | |
1746 | * Every device still known must be serialized to make sure we are | 1772 | /* We do not want to have locks while calling client->remove() */ |
1747 | * done with the client callbacks before we return. | 1773 | rcu_read_lock(); |
1748 | */ | 1774 | xa_for_each (&devices, index, device) { |
1749 | down_read(&devices_rwsem); | 1775 | if (!ib_device_try_get(device)) |
1750 | xa_for_each (&devices, index, device) | 1776 | continue; |
1777 | rcu_read_unlock(); | ||
1778 | |||
1751 | remove_client_context(device, client->client_id); | 1779 | remove_client_context(device, client->client_id); |
1752 | up_read(&devices_rwsem); | ||
1753 | 1780 | ||
1754 | down_write(&clients_rwsem); | 1781 | ib_device_put(device); |
1755 | list_del(&client->list); | 1782 | rcu_read_lock(); |
1756 | xa_erase(&clients, client->client_id); | 1783 | } |
1757 | up_write(&clients_rwsem); | 1784 | rcu_read_unlock(); |
1785 | |||
1786 | /* | ||
1787 | * remove_client_context() is not a fence, it can return even though a | ||
1788 | * removal is ongoing. Wait until all removals are completed. | ||
1789 | */ | ||
1790 | wait_for_completion(&client->uses_zero); | ||
1791 | remove_client_id(client); | ||
1758 | } | 1792 | } |
1759 | EXPORT_SYMBOL(ib_unregister_client); | 1793 | EXPORT_SYMBOL(ib_unregister_client); |
1760 | 1794 | ||
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index cc99479b2c09..9947d16edef2 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -3224,18 +3224,18 @@ static int ib_mad_port_open(struct ib_device *device, | |||
3224 | if (has_smi) | 3224 | if (has_smi) |
3225 | cq_size *= 2; | 3225 | cq_size *= 2; |
3226 | 3226 | ||
3227 | port_priv->pd = ib_alloc_pd(device, 0); | ||
3228 | if (IS_ERR(port_priv->pd)) { | ||
3229 | dev_err(&device->dev, "Couldn't create ib_mad PD\n"); | ||
3230 | ret = PTR_ERR(port_priv->pd); | ||
3231 | goto error3; | ||
3232 | } | ||
3233 | |||
3227 | port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, | 3234 | port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, |
3228 | IB_POLL_UNBOUND_WORKQUEUE); | 3235 | IB_POLL_UNBOUND_WORKQUEUE); |
3229 | if (IS_ERR(port_priv->cq)) { | 3236 | if (IS_ERR(port_priv->cq)) { |
3230 | dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); | 3237 | dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); |
3231 | ret = PTR_ERR(port_priv->cq); | 3238 | ret = PTR_ERR(port_priv->cq); |
3232 | goto error3; | ||
3233 | } | ||
3234 | |||
3235 | port_priv->pd = ib_alloc_pd(device, 0); | ||
3236 | if (IS_ERR(port_priv->pd)) { | ||
3237 | dev_err(&device->dev, "Couldn't create ib_mad PD\n"); | ||
3238 | ret = PTR_ERR(port_priv->pd); | ||
3239 | goto error4; | 3239 | goto error4; |
3240 | } | 3240 | } |
3241 | 3241 | ||
@@ -3278,11 +3278,11 @@ error8: | |||
3278 | error7: | 3278 | error7: |
3279 | destroy_mad_qp(&port_priv->qp_info[0]); | 3279 | destroy_mad_qp(&port_priv->qp_info[0]); |
3280 | error6: | 3280 | error6: |
3281 | ib_dealloc_pd(port_priv->pd); | ||
3282 | error4: | ||
3283 | ib_free_cq(port_priv->cq); | 3281 | ib_free_cq(port_priv->cq); |
3284 | cleanup_recv_queue(&port_priv->qp_info[1]); | 3282 | cleanup_recv_queue(&port_priv->qp_info[1]); |
3285 | cleanup_recv_queue(&port_priv->qp_info[0]); | 3283 | cleanup_recv_queue(&port_priv->qp_info[0]); |
3284 | error4: | ||
3285 | ib_dealloc_pd(port_priv->pd); | ||
3286 | error3: | 3286 | error3: |
3287 | kfree(port_priv); | 3287 | kfree(port_priv); |
3288 | 3288 | ||
@@ -3312,8 +3312,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num) | |||
3312 | destroy_workqueue(port_priv->wq); | 3312 | destroy_workqueue(port_priv->wq); |
3313 | destroy_mad_qp(&port_priv->qp_info[1]); | 3313 | destroy_mad_qp(&port_priv->qp_info[1]); |
3314 | destroy_mad_qp(&port_priv->qp_info[0]); | 3314 | destroy_mad_qp(&port_priv->qp_info[0]); |
3315 | ib_dealloc_pd(port_priv->pd); | ||
3316 | ib_free_cq(port_priv->cq); | 3315 | ib_free_cq(port_priv->cq); |
3316 | ib_dealloc_pd(port_priv->pd); | ||
3317 | cleanup_recv_queue(&port_priv->qp_info[1]); | 3317 | cleanup_recv_queue(&port_priv->qp_info[1]); |
3318 | cleanup_recv_queue(&port_priv->qp_info[0]); | 3318 | cleanup_recv_queue(&port_priv->qp_info[0]); |
3319 | /* XXX: Handle deallocation of MAD registration tables */ | 3319 | /* XXX: Handle deallocation of MAD registration tables */ |
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 9f8a48016b41..ffdeaf6e0b68 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/sched.h> | 49 | #include <linux/sched.h> |
50 | #include <linux/semaphore.h> | 50 | #include <linux/semaphore.h> |
51 | #include <linux/slab.h> | 51 | #include <linux/slab.h> |
52 | #include <linux/nospec.h> | ||
52 | 53 | ||
53 | #include <linux/uaccess.h> | 54 | #include <linux/uaccess.h> |
54 | 55 | ||
@@ -884,11 +885,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) | |||
884 | 885 | ||
885 | if (get_user(id, arg)) | 886 | if (get_user(id, arg)) |
886 | return -EFAULT; | 887 | return -EFAULT; |
888 | if (id >= IB_UMAD_MAX_AGENTS) | ||
889 | return -EINVAL; | ||
887 | 890 | ||
888 | mutex_lock(&file->port->file_mutex); | 891 | mutex_lock(&file->port->file_mutex); |
889 | mutex_lock(&file->mutex); | 892 | mutex_lock(&file->mutex); |
890 | 893 | ||
891 | if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) { | 894 | id = array_index_nospec(id, IB_UMAD_MAX_AGENTS); |
895 | if (!__get_agent(file, id)) { | ||
892 | ret = -EINVAL; | 896 | ret = -EINVAL; |
893 | goto out; | 897 | goto out; |
894 | } | 898 | } |
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index a91653aabf38..098ab883733e 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c | |||
@@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) | |||
308 | struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); | 308 | struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev); |
309 | struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; | 309 | struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; |
310 | struct bnxt_qplib_gid *gid_to_del; | 310 | struct bnxt_qplib_gid *gid_to_del; |
311 | u16 vlan_id = 0xFFFF; | ||
311 | 312 | ||
312 | /* Delete the entry from the hardware */ | 313 | /* Delete the entry from the hardware */ |
313 | ctx = *context; | 314 | ctx = *context; |
@@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) | |||
317 | if (sgid_tbl && sgid_tbl->active) { | 318 | if (sgid_tbl && sgid_tbl->active) { |
318 | if (ctx->idx >= sgid_tbl->max) | 319 | if (ctx->idx >= sgid_tbl->max) |
319 | return -EINVAL; | 320 | return -EINVAL; |
320 | gid_to_del = &sgid_tbl->tbl[ctx->idx]; | 321 | gid_to_del = &sgid_tbl->tbl[ctx->idx].gid; |
322 | vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id; | ||
321 | /* DEL_GID is called in WQ context(netdevice_event_work_handler) | 323 | /* DEL_GID is called in WQ context(netdevice_event_work_handler) |
322 | * or via the ib_unregister_device path. In the former case QP1 | 324 | * or via the ib_unregister_device path. In the former case QP1 |
323 | * may not be destroyed yet, in which case just return as FW | 325 | * may not be destroyed yet, in which case just return as FW |
@@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) | |||
335 | } | 337 | } |
336 | ctx->refcnt--; | 338 | ctx->refcnt--; |
337 | if (!ctx->refcnt) { | 339 | if (!ctx->refcnt) { |
338 | rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true); | 340 | rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, |
341 | vlan_id, true); | ||
339 | if (rc) { | 342 | if (rc) { |
340 | dev_err(rdev_to_dev(rdev), | 343 | dev_err(rdev_to_dev(rdev), |
341 | "Failed to remove GID: %#x", rc); | 344 | "Failed to remove GID: %#x", rc); |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 37928b1111df..bdbde8e22420 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c | |||
@@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res, | |||
488 | struct bnxt_qplib_sgid_tbl *sgid_tbl, | 488 | struct bnxt_qplib_sgid_tbl *sgid_tbl, |
489 | u16 max) | 489 | u16 max) |
490 | { | 490 | { |
491 | sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL); | 491 | sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL); |
492 | if (!sgid_tbl->tbl) | 492 | if (!sgid_tbl->tbl) |
493 | return -ENOMEM; | 493 | return -ENOMEM; |
494 | 494 | ||
@@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res, | |||
526 | for (i = 0; i < sgid_tbl->max; i++) { | 526 | for (i = 0; i < sgid_tbl->max; i++) { |
527 | if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero, | 527 | if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero, |
528 | sizeof(bnxt_qplib_gid_zero))) | 528 | sizeof(bnxt_qplib_gid_zero))) |
529 | bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true); | 529 | bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid, |
530 | sgid_tbl->tbl[i].vlan_id, true); | ||
530 | } | 531 | } |
531 | memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max); | 532 | memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max); |
532 | memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); | 533 | memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); |
533 | memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max); | 534 | memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max); |
534 | sgid_tbl->active = 0; | 535 | sgid_tbl->active = 0; |
@@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res, | |||
537 | static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl, | 538 | static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl, |
538 | struct net_device *netdev) | 539 | struct net_device *netdev) |
539 | { | 540 | { |
540 | memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max); | 541 | u32 i; |
542 | |||
543 | for (i = 0; i < sgid_tbl->max; i++) | ||
544 | sgid_tbl->tbl[i].vlan_id = 0xffff; | ||
545 | |||
541 | memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); | 546 | memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); |
542 | } | 547 | } |
543 | 548 | ||
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index 30c42c92fac7..fbda11a7ab1a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h | |||
@@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl { | |||
111 | }; | 111 | }; |
112 | 112 | ||
113 | struct bnxt_qplib_sgid_tbl { | 113 | struct bnxt_qplib_sgid_tbl { |
114 | struct bnxt_qplib_gid *tbl; | 114 | struct bnxt_qplib_gid_info *tbl; |
115 | u16 *hw_id; | 115 | u16 *hw_id; |
116 | u16 max; | 116 | u16 max; |
117 | u16 active; | 117 | u16 active; |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 48793d3512ac..40296b97d21e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c | |||
@@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res, | |||
213 | index, sgid_tbl->max); | 213 | index, sgid_tbl->max); |
214 | return -EINVAL; | 214 | return -EINVAL; |
215 | } | 215 | } |
216 | memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid)); | 216 | memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid)); |
217 | return 0; | 217 | return 0; |
218 | } | 218 | } |
219 | 219 | ||
220 | int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | 220 | int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, |
221 | struct bnxt_qplib_gid *gid, bool update) | 221 | struct bnxt_qplib_gid *gid, u16 vlan_id, bool update) |
222 | { | 222 | { |
223 | struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl, | 223 | struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl, |
224 | struct bnxt_qplib_res, | 224 | struct bnxt_qplib_res, |
@@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
236 | return -ENOMEM; | 236 | return -ENOMEM; |
237 | } | 237 | } |
238 | for (index = 0; index < sgid_tbl->max; index++) { | 238 | for (index = 0; index < sgid_tbl->max; index++) { |
239 | if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid))) | 239 | if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) && |
240 | vlan_id == sgid_tbl->tbl[index].vlan_id) | ||
240 | break; | 241 | break; |
241 | } | 242 | } |
242 | if (index == sgid_tbl->max) { | 243 | if (index == sgid_tbl->max) { |
@@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
262 | if (rc) | 263 | if (rc) |
263 | return rc; | 264 | return rc; |
264 | } | 265 | } |
265 | memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, | 266 | memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero, |
266 | sizeof(bnxt_qplib_gid_zero)); | 267 | sizeof(bnxt_qplib_gid_zero)); |
268 | sgid_tbl->tbl[index].vlan_id = 0xFFFF; | ||
267 | sgid_tbl->vlan[index] = 0; | 269 | sgid_tbl->vlan[index] = 0; |
268 | sgid_tbl->active--; | 270 | sgid_tbl->active--; |
269 | dev_dbg(&res->pdev->dev, | 271 | dev_dbg(&res->pdev->dev, |
@@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
296 | } | 298 | } |
297 | free_idx = sgid_tbl->max; | 299 | free_idx = sgid_tbl->max; |
298 | for (i = 0; i < sgid_tbl->max; i++) { | 300 | for (i = 0; i < sgid_tbl->max; i++) { |
299 | if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) { | 301 | if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) && |
302 | sgid_tbl->tbl[i].vlan_id == vlan_id) { | ||
300 | dev_dbg(&res->pdev->dev, | 303 | dev_dbg(&res->pdev->dev, |
301 | "SGID entry already exist in entry %d!\n", i); | 304 | "SGID entry already exist in entry %d!\n", i); |
302 | *index = i; | 305 | *index = i; |
@@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | |||
351 | } | 354 | } |
352 | /* Add GID to the sgid_tbl */ | 355 | /* Add GID to the sgid_tbl */ |
353 | memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); | 356 | memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); |
357 | sgid_tbl->tbl[free_idx].vlan_id = vlan_id; | ||
354 | sgid_tbl->active++; | 358 | sgid_tbl->active++; |
355 | if (vlan_id != 0xFFFF) | 359 | if (vlan_id != 0xFFFF) |
356 | sgid_tbl->vlan[free_idx] = 1; | 360 | sgid_tbl->vlan[free_idx] = 1; |
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 0ec3b12b0bcd..13d9432d5ce2 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h | |||
@@ -84,6 +84,11 @@ struct bnxt_qplib_gid { | |||
84 | u8 data[16]; | 84 | u8 data[16]; |
85 | }; | 85 | }; |
86 | 86 | ||
87 | struct bnxt_qplib_gid_info { | ||
88 | struct bnxt_qplib_gid gid; | ||
89 | u16 vlan_id; | ||
90 | }; | ||
91 | |||
87 | struct bnxt_qplib_ah { | 92 | struct bnxt_qplib_ah { |
88 | struct bnxt_qplib_gid dgid; | 93 | struct bnxt_qplib_gid dgid; |
89 | struct bnxt_qplib_pd *pd; | 94 | struct bnxt_qplib_pd *pd; |
@@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res, | |||
221 | struct bnxt_qplib_sgid_tbl *sgid_tbl, int index, | 226 | struct bnxt_qplib_sgid_tbl *sgid_tbl, int index, |
222 | struct bnxt_qplib_gid *gid); | 227 | struct bnxt_qplib_gid *gid); |
223 | int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | 228 | int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, |
224 | struct bnxt_qplib_gid *gid, bool update); | 229 | struct bnxt_qplib_gid *gid, u16 vlan_id, bool update); |
225 | int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, | 230 | int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, |
226 | struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id, | 231 | struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id, |
227 | bool update, u32 *index); | 232 | bool update, u32 *index); |
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index d5b643a1d9fd..67052dc3100c 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
@@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd) | |||
14452 | clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); | 14452 | clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK); |
14453 | } | 14453 | } |
14454 | 14454 | ||
14455 | static void init_rxe(struct hfi1_devdata *dd) | 14455 | static int init_rxe(struct hfi1_devdata *dd) |
14456 | { | 14456 | { |
14457 | struct rsm_map_table *rmt; | 14457 | struct rsm_map_table *rmt; |
14458 | u64 val; | 14458 | u64 val; |
@@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd) | |||
14461 | write_csr(dd, RCV_ERR_MASK, ~0ull); | 14461 | write_csr(dd, RCV_ERR_MASK, ~0ull); |
14462 | 14462 | ||
14463 | rmt = alloc_rsm_map_table(dd); | 14463 | rmt = alloc_rsm_map_table(dd); |
14464 | if (!rmt) | ||
14465 | return -ENOMEM; | ||
14466 | |||
14464 | /* set up QOS, including the QPN map table */ | 14467 | /* set up QOS, including the QPN map table */ |
14465 | init_qos(dd, rmt); | 14468 | init_qos(dd, rmt); |
14466 | init_fecn_handling(dd, rmt); | 14469 | init_fecn_handling(dd, rmt); |
@@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd) | |||
14487 | val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) << | 14490 | val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) << |
14488 | RCV_BYPASS_HDR_SIZE_SHIFT); | 14491 | RCV_BYPASS_HDR_SIZE_SHIFT); |
14489 | write_csr(dd, RCV_BYPASS, val); | 14492 | write_csr(dd, RCV_BYPASS, val); |
14493 | return 0; | ||
14490 | } | 14494 | } |
14491 | 14495 | ||
14492 | static void init_other(struct hfi1_devdata *dd) | 14496 | static void init_other(struct hfi1_devdata *dd) |
@@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd) | |||
15024 | goto bail_cleanup; | 15028 | goto bail_cleanup; |
15025 | 15029 | ||
15026 | /* set initial RXE CSRs */ | 15030 | /* set initial RXE CSRs */ |
15027 | init_rxe(dd); | 15031 | ret = init_rxe(dd); |
15032 | if (ret) | ||
15033 | goto bail_cleanup; | ||
15034 | |||
15028 | /* set initial TXE CSRs */ | 15035 | /* set initial TXE CSRs */ |
15029 | init_txe(dd); | 15036 | init_txe(dd); |
15030 | /* set initial non-RXE, non-TXE CSRs */ | 15037 | /* set initial non-RXE, non-TXE CSRs */ |
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 0477c14633ab..024a7c2b6124 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c | |||
@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah) | |||
1835 | cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) | 1835 | cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) |
1836 | break; | 1836 | break; |
1837 | trdma_clean_swqe(qp, wqe); | 1837 | trdma_clean_swqe(qp, wqe); |
1838 | rvt_qp_wqe_unreserve(qp, wqe); | ||
1839 | trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); | 1838 | trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); |
1840 | rvt_qp_complete_swqe(qp, | 1839 | rvt_qp_complete_swqe(qp, |
1841 | wqe, | 1840 | wqe, |
@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, | |||
1882 | if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || | 1881 | if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || |
1883 | cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { | 1882 | cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { |
1884 | trdma_clean_swqe(qp, wqe); | 1883 | trdma_clean_swqe(qp, wqe); |
1885 | rvt_qp_wqe_unreserve(qp, wqe); | ||
1886 | trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); | 1884 | trace_hfi1_qp_send_completion(qp, wqe, qp->s_last); |
1887 | rvt_qp_complete_swqe(qp, | 1885 | rvt_qp_complete_swqe(qp, |
1888 | wqe, | 1886 | wqe, |
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 92acccaaaa86..996fc298207e 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c | |||
@@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req, | |||
1620 | flows[i].req = req; | 1620 | flows[i].req = req; |
1621 | flows[i].npagesets = 0; | 1621 | flows[i].npagesets = 0; |
1622 | flows[i].pagesets[0].mapped = 0; | 1622 | flows[i].pagesets[0].mapped = 0; |
1623 | flows[i].resync_npkts = 0; | ||
1623 | } | 1624 | } |
1624 | req->flows = flows; | 1625 | req->flows = flows; |
1625 | return 0; | 1626 | return 0; |
@@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req, | |||
1673 | return NULL; | 1674 | return NULL; |
1674 | } | 1675 | } |
1675 | 1676 | ||
1676 | static struct tid_rdma_flow * | ||
1677 | __find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail, | ||
1678 | u32 psn, u16 *fidx) | ||
1679 | { | ||
1680 | for ( ; CIRC_CNT(head, tail, MAX_FLOWS); | ||
1681 | tail = CIRC_NEXT(tail, MAX_FLOWS)) { | ||
1682 | struct tid_rdma_flow *flow = &req->flows[tail]; | ||
1683 | u32 spsn, lpsn; | ||
1684 | |||
1685 | spsn = full_flow_psn(flow, flow->flow_state.spsn); | ||
1686 | lpsn = full_flow_psn(flow, flow->flow_state.lpsn); | ||
1687 | |||
1688 | if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) { | ||
1689 | if (fidx) | ||
1690 | *fidx = tail; | ||
1691 | return flow; | ||
1692 | } | ||
1693 | } | ||
1694 | return NULL; | ||
1695 | } | ||
1696 | |||
1697 | static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req, | ||
1698 | u32 psn, u16 *fidx) | ||
1699 | { | ||
1700 | return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn, | ||
1701 | fidx); | ||
1702 | } | ||
1703 | |||
1704 | /* TID RDMA READ functions */ | 1677 | /* TID RDMA READ functions */ |
1705 | u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe, | 1678 | u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe, |
1706 | struct ib_other_headers *ohdr, u32 *bth1, | 1679 | struct ib_other_headers *ohdr, u32 *bth1, |
@@ -2788,19 +2761,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, | |||
2788 | * to prevent continuous Flow Sequence errors for any | 2761 | * to prevent continuous Flow Sequence errors for any |
2789 | * packets that could be still in the fabric. | 2762 | * packets that could be still in the fabric. |
2790 | */ | 2763 | */ |
2791 | flow = find_flow(req, psn, NULL); | 2764 | flow = &req->flows[req->clear_tail]; |
2792 | if (!flow) { | ||
2793 | /* | ||
2794 | * We can't find the IB PSN matching the | ||
2795 | * received KDETH PSN. The only thing we can | ||
2796 | * do at this point is report the error to | ||
2797 | * the QP. | ||
2798 | */ | ||
2799 | hfi1_kern_read_tid_flow_free(qp); | ||
2800 | spin_unlock(&qp->s_lock); | ||
2801 | rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | ||
2802 | return ret; | ||
2803 | } | ||
2804 | if (priv->s_flags & HFI1_R_TID_SW_PSN) { | 2765 | if (priv->s_flags & HFI1_R_TID_SW_PSN) { |
2805 | diff = cmp_psn(psn, | 2766 | diff = cmp_psn(psn, |
2806 | flow->flow_state.r_next_psn); | 2767 | flow->flow_state.r_next_psn); |
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index c4b243f50c76..646f61545ed6 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include <linux/mm.h> | 54 | #include <linux/mm.h> |
55 | #include <linux/vmalloc.h> | 55 | #include <linux/vmalloc.h> |
56 | #include <rdma/opa_addr.h> | 56 | #include <rdma/opa_addr.h> |
57 | #include <linux/nospec.h> | ||
57 | 58 | ||
58 | #include "hfi.h" | 59 | #include "hfi.h" |
59 | #include "common.h" | 60 | #include "common.h" |
@@ -1536,6 +1537,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) | |||
1536 | sl = rdma_ah_get_sl(ah_attr); | 1537 | sl = rdma_ah_get_sl(ah_attr); |
1537 | if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) | 1538 | if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) |
1538 | return -EINVAL; | 1539 | return -EINVAL; |
1540 | sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc)); | ||
1539 | 1541 | ||
1540 | sc5 = ibp->sl_to_sc[sl]; | 1542 | sc5 = ibp->sl_to_sc[sl]; |
1541 | if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) | 1543 | if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) |
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig index 8bf847bcd8d3..54782197c717 100644 --- a/drivers/infiniband/hw/hns/Kconfig +++ b/drivers/infiniband/hw/hns/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0-only | 1 | # SPDX-License-Identifier: GPL-2.0-only |
2 | config INFINIBAND_HNS | 2 | config INFINIBAND_HNS |
3 | tristate "HNS RoCE Driver" | 3 | bool "HNS RoCE Driver" |
4 | depends on NET_VENDOR_HISILICON | 4 | depends on NET_VENDOR_HISILICON |
5 | depends on ARM64 || (COMPILE_TEST && 64BIT) | 5 | depends on ARM64 || (COMPILE_TEST && 64BIT) |
6 | ---help--- | 6 | ---help--- |
@@ -11,7 +11,7 @@ config INFINIBAND_HNS | |||
11 | To compile HIP06 or HIP08 driver as module, choose M here. | 11 | To compile HIP06 or HIP08 driver as module, choose M here. |
12 | 12 | ||
13 | config INFINIBAND_HNS_HIP06 | 13 | config INFINIBAND_HNS_HIP06 |
14 | bool "Hisilicon Hip06 Family RoCE support" | 14 | tristate "Hisilicon Hip06 Family RoCE support" |
15 | depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET | 15 | depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET |
16 | ---help--- | 16 | ---help--- |
17 | RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and | 17 | RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and |
@@ -21,7 +21,7 @@ config INFINIBAND_HNS_HIP06 | |||
21 | module will be called hns-roce-hw-v1 | 21 | module will be called hns-roce-hw-v1 |
22 | 22 | ||
23 | config INFINIBAND_HNS_HIP08 | 23 | config INFINIBAND_HNS_HIP08 |
24 | bool "Hisilicon Hip08 Family RoCE support" | 24 | tristate "Hisilicon Hip08 Family RoCE support" |
25 | depends on INFINIBAND_HNS && PCI && HNS3 | 25 | depends on INFINIBAND_HNS && PCI && HNS3 |
26 | ---help--- | 26 | ---help--- |
27 | RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC. | 27 | RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC. |
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile index e105945b94a1..449a2d81319d 100644 --- a/drivers/infiniband/hw/hns/Makefile +++ b/drivers/infiniband/hw/hns/Makefile | |||
@@ -9,12 +9,8 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \ | |||
9 | hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ | 9 | hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ |
10 | hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o | 10 | hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o |
11 | 11 | ||
12 | ifdef CONFIG_INFINIBAND_HNS_HIP06 | ||
13 | hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs) | 12 | hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs) |
14 | obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o | 13 | obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o |
15 | endif | ||
16 | 14 | ||
17 | ifdef CONFIG_INFINIBAND_HNS_HIP08 | ||
18 | hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs) | 15 | hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs) |
19 | obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o | 16 | obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o |
20 | endif | ||
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c index 627aa46ef683..c00714c2f16a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_db.c +++ b/drivers/infiniband/hw/hns/hns_roce_db.c | |||
@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, | |||
12 | struct ib_udata *udata, unsigned long virt, | 12 | struct ib_udata *udata, unsigned long virt, |
13 | struct hns_roce_db *db) | 13 | struct hns_roce_db *db) |
14 | { | 14 | { |
15 | unsigned long page_addr = virt & PAGE_MASK; | ||
15 | struct hns_roce_user_db_page *page; | 16 | struct hns_roce_user_db_page *page; |
17 | unsigned int offset; | ||
16 | int ret = 0; | 18 | int ret = 0; |
17 | 19 | ||
18 | mutex_lock(&context->page_mutex); | 20 | mutex_lock(&context->page_mutex); |
19 | 21 | ||
20 | list_for_each_entry(page, &context->page_list, list) | 22 | list_for_each_entry(page, &context->page_list, list) |
21 | if (page->user_virt == (virt & PAGE_MASK)) | 23 | if (page->user_virt == page_addr) |
22 | goto found; | 24 | goto found; |
23 | 25 | ||
24 | page = kmalloc(sizeof(*page), GFP_KERNEL); | 26 | page = kmalloc(sizeof(*page), GFP_KERNEL); |
@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, | |||
28 | } | 30 | } |
29 | 31 | ||
30 | refcount_set(&page->refcount, 1); | 32 | refcount_set(&page->refcount, 1); |
31 | page->user_virt = (virt & PAGE_MASK); | 33 | page->user_virt = page_addr; |
32 | page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0); | 34 | page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0); |
33 | if (IS_ERR(page->umem)) { | 35 | if (IS_ERR(page->umem)) { |
34 | ret = PTR_ERR(page->umem); | 36 | ret = PTR_ERR(page->umem); |
35 | kfree(page); | 37 | kfree(page); |
@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, | |||
39 | list_add(&page->list, &context->page_list); | 41 | list_add(&page->list, &context->page_list); |
40 | 42 | ||
41 | found: | 43 | found: |
42 | db->dma = sg_dma_address(page->umem->sg_head.sgl) + | 44 | offset = virt - page_addr; |
43 | (virt & ~PAGE_MASK); | 45 | db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset; |
44 | page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK; | 46 | db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset; |
45 | db->virt_addr = sg_virt(page->umem->sg_head.sgl); | ||
46 | db->u.user_page = page; | 47 | db->u.user_page = page; |
47 | refcount_inc(&page->refcount); | 48 | refcount_inc(&page->refcount); |
48 | 49 | ||
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 81e6dedb1e02..c07e387a07a3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c | |||
@@ -750,8 +750,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) | |||
750 | atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0); | 750 | atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0); |
751 | 751 | ||
752 | pd = rdma_zalloc_drv_obj(ibdev, ib_pd); | 752 | pd = rdma_zalloc_drv_obj(ibdev, ib_pd); |
753 | if (!pd) | 753 | if (!pd) { |
754 | ret = -ENOMEM; | ||
754 | goto alloc_mem_failed; | 755 | goto alloc_mem_failed; |
756 | } | ||
755 | 757 | ||
756 | pd->device = ibdev; | 758 | pd->device = ibdev; |
757 | ret = hns_roce_alloc_pd(pd, NULL); | 759 | ret = hns_roce_alloc_pd(pd, NULL); |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index c2a5780cb394..e12a4404096b 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -5802,13 +5802,12 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, | |||
5802 | return; | 5802 | return; |
5803 | } | 5803 | } |
5804 | 5804 | ||
5805 | if (mpi->mdev_events.notifier_call) | ||
5806 | mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); | ||
5807 | mpi->mdev_events.notifier_call = NULL; | ||
5808 | |||
5809 | mpi->ibdev = NULL; | 5805 | mpi->ibdev = NULL; |
5810 | 5806 | ||
5811 | spin_unlock(&port->mp.mpi_lock); | 5807 | spin_unlock(&port->mp.mpi_lock); |
5808 | if (mpi->mdev_events.notifier_call) | ||
5809 | mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); | ||
5810 | mpi->mdev_events.notifier_call = NULL; | ||
5812 | mlx5_remove_netdev_notifier(ibdev, port_num); | 5811 | mlx5_remove_netdev_notifier(ibdev, port_num); |
5813 | spin_lock(&port->mp.mpi_lock); | 5812 | spin_lock(&port->mp.mpi_lock); |
5814 | 5813 | ||
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index c482f19958b3..f6a53455bf8b 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -481,6 +481,7 @@ struct mlx5_umr_wr { | |||
481 | u64 length; | 481 | u64 length; |
482 | int access_flags; | 482 | int access_flags; |
483 | u32 mkey; | 483 | u32 mkey; |
484 | u8 ignore_free_state:1; | ||
484 | }; | 485 | }; |
485 | 486 | ||
486 | static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr) | 487 | static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr) |
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 20ece6e0b2fc..b74fad08412f 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); | |||
51 | static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); | 51 | static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); |
52 | static int mr_cache_max_order(struct mlx5_ib_dev *dev); | 52 | static int mr_cache_max_order(struct mlx5_ib_dev *dev); |
53 | static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); | 53 | static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); |
54 | static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev) | ||
55 | { | ||
56 | return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled); | ||
57 | } | ||
58 | 54 | ||
59 | static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev) | 55 | static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev) |
60 | { | 56 | { |
61 | return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); | 57 | return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); |
62 | } | 58 | } |
63 | 59 | ||
64 | static bool use_umr(struct mlx5_ib_dev *dev, int order) | ||
65 | { | ||
66 | return order <= mr_cache_max_order(dev) && | ||
67 | umr_can_modify_entity_size(dev); | ||
68 | } | ||
69 | |||
70 | static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | 60 | static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) |
71 | { | 61 | { |
72 | int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); | 62 | int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); |
@@ -545,13 +535,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | |||
545 | return; | 535 | return; |
546 | 536 | ||
547 | c = order2idx(dev, mr->order); | 537 | c = order2idx(dev, mr->order); |
548 | if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) { | 538 | WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES); |
549 | mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c); | ||
550 | return; | ||
551 | } | ||
552 | 539 | ||
553 | if (unreg_umr(dev, mr)) | 540 | if (unreg_umr(dev, mr)) { |
541 | mr->allocated_from_cache = false; | ||
542 | destroy_mkey(dev, mr); | ||
543 | ent = &cache->ent[c]; | ||
544 | if (ent->cur < ent->limit) | ||
545 | queue_work(cache->wq, &ent->work); | ||
554 | return; | 546 | return; |
547 | } | ||
555 | 548 | ||
556 | ent = &cache->ent[c]; | 549 | ent = &cache->ent[c]; |
557 | spin_lock_irq(&ent->lock); | 550 | spin_lock_irq(&ent->lock); |
@@ -1268,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
1268 | { | 1261 | { |
1269 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | 1262 | struct mlx5_ib_dev *dev = to_mdev(pd->device); |
1270 | struct mlx5_ib_mr *mr = NULL; | 1263 | struct mlx5_ib_mr *mr = NULL; |
1271 | bool populate_mtts = false; | 1264 | bool use_umr; |
1272 | struct ib_umem *umem; | 1265 | struct ib_umem *umem; |
1273 | int page_shift; | 1266 | int page_shift; |
1274 | int npages; | 1267 | int npages; |
@@ -1300,29 +1293,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
1300 | if (err < 0) | 1293 | if (err < 0) |
1301 | return ERR_PTR(err); | 1294 | return ERR_PTR(err); |
1302 | 1295 | ||
1303 | if (use_umr(dev, order)) { | 1296 | use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) && |
1297 | (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) || | ||
1298 | !MLX5_CAP_GEN(dev->mdev, atomic)); | ||
1299 | |||
1300 | if (order <= mr_cache_max_order(dev) && use_umr) { | ||
1304 | mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, | 1301 | mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, |
1305 | page_shift, order, access_flags); | 1302 | page_shift, order, access_flags); |
1306 | if (PTR_ERR(mr) == -EAGAIN) { | 1303 | if (PTR_ERR(mr) == -EAGAIN) { |
1307 | mlx5_ib_dbg(dev, "cache empty for order %d\n", order); | 1304 | mlx5_ib_dbg(dev, "cache empty for order %d\n", order); |
1308 | mr = NULL; | 1305 | mr = NULL; |
1309 | } | 1306 | } |
1310 | populate_mtts = false; | ||
1311 | } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { | 1307 | } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { |
1312 | if (access_flags & IB_ACCESS_ON_DEMAND) { | 1308 | if (access_flags & IB_ACCESS_ON_DEMAND) { |
1313 | err = -EINVAL; | 1309 | err = -EINVAL; |
1314 | pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n"); | 1310 | pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n"); |
1315 | goto error; | 1311 | goto error; |
1316 | } | 1312 | } |
1317 | populate_mtts = true; | 1313 | use_umr = false; |
1318 | } | 1314 | } |
1319 | 1315 | ||
1320 | if (!mr) { | 1316 | if (!mr) { |
1321 | if (!umr_can_modify_entity_size(dev)) | ||
1322 | populate_mtts = true; | ||
1323 | mutex_lock(&dev->slow_path_mutex); | 1317 | mutex_lock(&dev->slow_path_mutex); |
1324 | mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, | 1318 | mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, |
1325 | page_shift, access_flags, populate_mtts); | 1319 | page_shift, access_flags, !use_umr); |
1326 | mutex_unlock(&dev->slow_path_mutex); | 1320 | mutex_unlock(&dev->slow_path_mutex); |
1327 | } | 1321 | } |
1328 | 1322 | ||
@@ -1338,7 +1332,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
1338 | 1332 | ||
1339 | update_odp_mr(mr); | 1333 | update_odp_mr(mr); |
1340 | 1334 | ||
1341 | if (!populate_mtts) { | 1335 | if (use_umr) { |
1342 | int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE; | 1336 | int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE; |
1343 | 1337 | ||
1344 | if (access_flags & IB_ACCESS_ON_DEMAND) | 1338 | if (access_flags & IB_ACCESS_ON_DEMAND) |
@@ -1373,9 +1367,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | |||
1373 | return 0; | 1367 | return 0; |
1374 | 1368 | ||
1375 | umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | | 1369 | umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR | |
1376 | MLX5_IB_SEND_UMR_FAIL_IF_FREE; | 1370 | MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS; |
1377 | umrwr.wr.opcode = MLX5_IB_WR_UMR; | 1371 | umrwr.wr.opcode = MLX5_IB_WR_UMR; |
1372 | umrwr.pd = dev->umrc.pd; | ||
1378 | umrwr.mkey = mr->mmkey.key; | 1373 | umrwr.mkey = mr->mmkey.key; |
1374 | umrwr.ignore_free_state = 1; | ||
1379 | 1375 | ||
1380 | return mlx5_ib_post_send_wait(dev, &umrwr); | 1376 | return mlx5_ib_post_send_wait(dev, &umrwr); |
1381 | } | 1377 | } |
@@ -1577,10 +1573,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | |||
1577 | mr->sig = NULL; | 1573 | mr->sig = NULL; |
1578 | } | 1574 | } |
1579 | 1575 | ||
1580 | mlx5_free_priv_descs(mr); | 1576 | if (!allocated_from_cache) { |
1581 | |||
1582 | if (!allocated_from_cache) | ||
1583 | destroy_mkey(dev, mr); | 1577 | destroy_mkey(dev, mr); |
1578 | mlx5_free_priv_descs(mr); | ||
1579 | } | ||
1584 | } | 1580 | } |
1585 | 1581 | ||
1586 | static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) | 1582 | static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) |
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 5b642d81e617..81da82050d05 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c | |||
@@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start, | |||
246 | * overwrite the same MTTs. Concurrent invalidations might race us, | 246 | * overwrite the same MTTs. Concurrent invalidations might race us, |
247 | * but they will write 0s as well, so no difference in the end result. | 247 | * but they will write 0s as well, so no difference in the end result. |
248 | */ | 248 | */ |
249 | 249 | mutex_lock(&umem_odp->umem_mutex); | |
250 | for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) { | 250 | for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) { |
251 | idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; | 251 | idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; |
252 | /* | 252 | /* |
@@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start, | |||
278 | idx - blk_start_idx + 1, 0, | 278 | idx - blk_start_idx + 1, 0, |
279 | MLX5_IB_UPD_XLT_ZAP | | 279 | MLX5_IB_UPD_XLT_ZAP | |
280 | MLX5_IB_UPD_XLT_ATOMIC); | 280 | MLX5_IB_UPD_XLT_ATOMIC); |
281 | mutex_unlock(&umem_odp->umem_mutex); | ||
281 | /* | 282 | /* |
282 | * We are now sure that the device will not access the | 283 | * We are now sure that the device will not access the |
283 | * memory. We can safely unmap it, and mark it as dirty if | 284 | * memory. We can safely unmap it, and mark it as dirty if |
@@ -1771,7 +1772,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work) | |||
1771 | 1772 | ||
1772 | num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list, | 1773 | num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list, |
1773 | w->num_sge, 0); | 1774 | w->num_sge, 0); |
1774 | kfree(w); | 1775 | kvfree(w); |
1775 | } | 1776 | } |
1776 | 1777 | ||
1777 | int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, | 1778 | int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, |
@@ -1813,7 +1814,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, | |||
1813 | if (valid_req) | 1814 | if (valid_req) |
1814 | queue_work(system_unbound_wq, &work->work); | 1815 | queue_work(system_unbound_wq, &work->work); |
1815 | else | 1816 | else |
1816 | kfree(work); | 1817 | kvfree(work); |
1817 | 1818 | ||
1818 | srcu_read_unlock(&dev->mr_srcu, srcu_key); | 1819 | srcu_read_unlock(&dev->mr_srcu, srcu_key); |
1819 | 1820 | ||
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 2a97619ed603..379328b2598f 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
1713 | } | 1713 | } |
1714 | 1714 | ||
1715 | MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); | 1715 | MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); |
1716 | MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); | ||
1717 | memcpy(rss_key, ucmd.rx_hash_key, len); | 1716 | memcpy(rss_key, ucmd.rx_hash_key, len); |
1718 | break; | 1717 | break; |
1719 | } | 1718 | } |
@@ -4295,10 +4294,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev, | |||
4295 | 4294 | ||
4296 | memset(umr, 0, sizeof(*umr)); | 4295 | memset(umr, 0, sizeof(*umr)); |
4297 | 4296 | ||
4298 | if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) | 4297 | if (!umrwr->ignore_free_state) { |
4299 | umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */ | 4298 | if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) |
4300 | else | 4299 | /* fail if free */ |
4301 | umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */ | 4300 | umr->flags = MLX5_UMR_CHECK_FREE; |
4301 | else | ||
4302 | /* fail if not free */ | ||
4303 | umr->flags = MLX5_UMR_CHECK_NOT_FREE; | ||
4304 | } | ||
4302 | 4305 | ||
4303 | umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size)); | 4306 | umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size)); |
4304 | if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { | 4307 | if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { |
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 533157a2a3be..f97b3d65b30c 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c | |||
@@ -125,14 +125,20 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, | |||
125 | struct qedr_dev *dev = | 125 | struct qedr_dev *dev = |
126 | rdma_device_to_drv_device(device, struct qedr_dev, ibdev); | 126 | rdma_device_to_drv_device(device, struct qedr_dev, ibdev); |
127 | 127 | ||
128 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor); | 128 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver); |
129 | } | 129 | } |
130 | static DEVICE_ATTR_RO(hw_rev); | 130 | static DEVICE_ATTR_RO(hw_rev); |
131 | 131 | ||
132 | static ssize_t hca_type_show(struct device *device, | 132 | static ssize_t hca_type_show(struct device *device, |
133 | struct device_attribute *attr, char *buf) | 133 | struct device_attribute *attr, char *buf) |
134 | { | 134 | { |
135 | return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET"); | 135 | struct qedr_dev *dev = |
136 | rdma_device_to_drv_device(device, struct qedr_dev, ibdev); | ||
137 | |||
138 | return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n", | ||
139 | dev->pdev->device, | ||
140 | rdma_protocol_iwarp(&dev->ibdev, 1) ? | ||
141 | "iWARP" : "RoCE"); | ||
136 | } | 142 | } |
137 | static DEVICE_ATTR_RO(hca_type); | 143 | static DEVICE_ATTR_RO(hca_type); |
138 | 144 | ||
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index a7cde98e73e8..9ce8a1b925d2 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c | |||
@@ -220,13 +220,12 @@ static void siw_put_work(struct siw_cm_work *work) | |||
220 | static void siw_cep_set_inuse(struct siw_cep *cep) | 220 | static void siw_cep_set_inuse(struct siw_cep *cep) |
221 | { | 221 | { |
222 | unsigned long flags; | 222 | unsigned long flags; |
223 | int rv; | ||
224 | retry: | 223 | retry: |
225 | spin_lock_irqsave(&cep->lock, flags); | 224 | spin_lock_irqsave(&cep->lock, flags); |
226 | 225 | ||
227 | if (cep->in_use) { | 226 | if (cep->in_use) { |
228 | spin_unlock_irqrestore(&cep->lock, flags); | 227 | spin_unlock_irqrestore(&cep->lock, flags); |
229 | rv = wait_event_interruptible(cep->waitq, !cep->in_use); | 228 | wait_event_interruptible(cep->waitq, !cep->in_use); |
230 | if (signal_pending(current)) | 229 | if (signal_pending(current)) |
231 | flush_signals(current); | 230 | flush_signals(current); |
232 | goto retry; | 231 | goto retry; |
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index f55c4e80aea4..d0f140daf659 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c | |||
@@ -612,6 +612,7 @@ static __init int siw_init_module(void) | |||
612 | 612 | ||
613 | if (!siw_create_tx_threads()) { | 613 | if (!siw_create_tx_threads()) { |
614 | pr_info("siw: Could not start any TX thread\n"); | 614 | pr_info("siw: Could not start any TX thread\n"); |
615 | rv = -ENOMEM; | ||
615 | goto out_error; | 616 | goto out_error; |
616 | } | 617 | } |
617 | /* | 618 | /* |
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c index 11383d9f95ef..e27bd5b35b96 100644 --- a/drivers/infiniband/sw/siw/siw_qp.c +++ b/drivers/infiniband/sw/siw/siw_qp.c | |||
@@ -220,12 +220,14 @@ static int siw_qp_enable_crc(struct siw_qp *qp) | |||
220 | { | 220 | { |
221 | struct siw_rx_stream *c_rx = &qp->rx_stream; | 221 | struct siw_rx_stream *c_rx = &qp->rx_stream; |
222 | struct siw_iwarp_tx *c_tx = &qp->tx_ctx; | 222 | struct siw_iwarp_tx *c_tx = &qp->tx_ctx; |
223 | int size = crypto_shash_descsize(siw_crypto_shash) + | 223 | int size; |
224 | sizeof(struct shash_desc); | ||
225 | 224 | ||
226 | if (siw_crypto_shash == NULL) | 225 | if (siw_crypto_shash == NULL) |
227 | return -ENOENT; | 226 | return -ENOENT; |
228 | 227 | ||
228 | size = crypto_shash_descsize(siw_crypto_shash) + | ||
229 | sizeof(struct shash_desc); | ||
230 | |||
229 | c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL); | 231 | c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL); |
230 | c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL); | 232 | c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL); |
231 | if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) { | 233 | if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) { |
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index eb104c719629..4413aa67000e 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
@@ -23,6 +23,8 @@ | |||
23 | #include <linux/mem_encrypt.h> | 23 | #include <linux/mem_encrypt.h> |
24 | #include <asm/pci-direct.h> | 24 | #include <asm/pci-direct.h> |
25 | #include <asm/iommu.h> | 25 | #include <asm/iommu.h> |
26 | #include <asm/apic.h> | ||
27 | #include <asm/msidef.h> | ||
26 | #include <asm/gart.h> | 28 | #include <asm/gart.h> |
27 | #include <asm/x86_init.h> | 29 | #include <asm/x86_init.h> |
28 | #include <asm/iommu_table.h> | 30 | #include <asm/iommu_table.h> |
@@ -1920,6 +1922,90 @@ static int iommu_setup_msi(struct amd_iommu *iommu) | |||
1920 | return 0; | 1922 | return 0; |
1921 | } | 1923 | } |
1922 | 1924 | ||
1925 | #define XT_INT_DEST_MODE(x) (((x) & 0x1ULL) << 2) | ||
1926 | #define XT_INT_DEST_LO(x) (((x) & 0xFFFFFFULL) << 8) | ||
1927 | #define XT_INT_VEC(x) (((x) & 0xFFULL) << 32) | ||
1928 | #define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56) | ||
1929 | |||
1930 | /** | ||
1931 | * Setup the IntCapXT registers with interrupt routing information | ||
1932 | * based on the PCI MSI capability block registers, accessed via | ||
1933 | * MMIO MSI address low/hi and MSI data registers. | ||
1934 | */ | ||
1935 | static void iommu_update_intcapxt(struct amd_iommu *iommu) | ||
1936 | { | ||
1937 | u64 val; | ||
1938 | u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET); | ||
1939 | u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET); | ||
1940 | u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET); | ||
1941 | bool dm = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1; | ||
1942 | u32 dest = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF); | ||
1943 | |||
1944 | if (x2apic_enabled()) | ||
1945 | dest |= MSI_ADDR_EXT_DEST_ID(addr_hi); | ||
1946 | |||
1947 | val = XT_INT_VEC(data & 0xFF) | | ||
1948 | XT_INT_DEST_MODE(dm) | | ||
1949 | XT_INT_DEST_LO(dest) | | ||
1950 | XT_INT_DEST_HI(dest); | ||
1951 | |||
1952 | /** | ||
1953 | * Current IOMMU implementation uses the same IRQ for all | ||
1954 | * 3 IOMMU interrupts. | ||
1955 | */ | ||
1956 | writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET); | ||
1957 | writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET); | ||
1958 | writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET); | ||
1959 | } | ||
1960 | |||
1961 | static void _irq_notifier_notify(struct irq_affinity_notify *notify, | ||
1962 | const cpumask_t *mask) | ||
1963 | { | ||
1964 | struct amd_iommu *iommu; | ||
1965 | |||
1966 | for_each_iommu(iommu) { | ||
1967 | if (iommu->dev->irq == notify->irq) { | ||
1968 | iommu_update_intcapxt(iommu); | ||
1969 | break; | ||
1970 | } | ||
1971 | } | ||
1972 | } | ||
1973 | |||
1974 | static void _irq_notifier_release(struct kref *ref) | ||
1975 | { | ||
1976 | } | ||
1977 | |||
1978 | static int iommu_init_intcapxt(struct amd_iommu *iommu) | ||
1979 | { | ||
1980 | int ret; | ||
1981 | struct irq_affinity_notify *notify = &iommu->intcapxt_notify; | ||
1982 | |||
1983 | /** | ||
1984 | * IntCapXT requires XTSup=1, which can be inferred | ||
1985 | * from amd_iommu_xt_mode. | ||
1986 | */ | ||
1987 | if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE) | ||
1988 | return 0; | ||
1989 | |||
1990 | /** | ||
1991 | * Also, we need to setup notifier to update the IntCapXT registers | ||
1992 | * whenever the irq affinity is changed from user-space. | ||
1993 | */ | ||
1994 | notify->irq = iommu->dev->irq; | ||
1995 | notify->notify = _irq_notifier_notify, | ||
1996 | notify->release = _irq_notifier_release, | ||
1997 | ret = irq_set_affinity_notifier(iommu->dev->irq, notify); | ||
1998 | if (ret) { | ||
1999 | pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n", | ||
2000 | iommu->devid, iommu->dev->irq); | ||
2001 | return ret; | ||
2002 | } | ||
2003 | |||
2004 | iommu_update_intcapxt(iommu); | ||
2005 | iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN); | ||
2006 | return ret; | ||
2007 | } | ||
2008 | |||
1923 | static int iommu_init_msi(struct amd_iommu *iommu) | 2009 | static int iommu_init_msi(struct amd_iommu *iommu) |
1924 | { | 2010 | { |
1925 | int ret; | 2011 | int ret; |
@@ -1936,6 +2022,10 @@ static int iommu_init_msi(struct amd_iommu *iommu) | |||
1936 | return ret; | 2022 | return ret; |
1937 | 2023 | ||
1938 | enable_faults: | 2024 | enable_faults: |
2025 | ret = iommu_init_intcapxt(iommu); | ||
2026 | if (ret) | ||
2027 | return ret; | ||
2028 | |||
1939 | iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); | 2029 | iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); |
1940 | 2030 | ||
1941 | if (iommu->ppr_log != NULL) | 2031 | if (iommu->ppr_log != NULL) |
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 52c35d557fad..64edd5a9694c 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h | |||
@@ -60,6 +60,12 @@ | |||
60 | #define MMIO_PPR_LOG_OFFSET 0x0038 | 60 | #define MMIO_PPR_LOG_OFFSET 0x0038 |
61 | #define MMIO_GA_LOG_BASE_OFFSET 0x00e0 | 61 | #define MMIO_GA_LOG_BASE_OFFSET 0x00e0 |
62 | #define MMIO_GA_LOG_TAIL_OFFSET 0x00e8 | 62 | #define MMIO_GA_LOG_TAIL_OFFSET 0x00e8 |
63 | #define MMIO_MSI_ADDR_LO_OFFSET 0x015C | ||
64 | #define MMIO_MSI_ADDR_HI_OFFSET 0x0160 | ||
65 | #define MMIO_MSI_DATA_OFFSET 0x0164 | ||
66 | #define MMIO_INTCAPXT_EVT_OFFSET 0x0170 | ||
67 | #define MMIO_INTCAPXT_PPR_OFFSET 0x0178 | ||
68 | #define MMIO_INTCAPXT_GALOG_OFFSET 0x0180 | ||
63 | #define MMIO_CMD_HEAD_OFFSET 0x2000 | 69 | #define MMIO_CMD_HEAD_OFFSET 0x2000 |
64 | #define MMIO_CMD_TAIL_OFFSET 0x2008 | 70 | #define MMIO_CMD_TAIL_OFFSET 0x2008 |
65 | #define MMIO_EVT_HEAD_OFFSET 0x2010 | 71 | #define MMIO_EVT_HEAD_OFFSET 0x2010 |
@@ -150,6 +156,7 @@ | |||
150 | #define CONTROL_GALOG_EN 0x1CULL | 156 | #define CONTROL_GALOG_EN 0x1CULL |
151 | #define CONTROL_GAINT_EN 0x1DULL | 157 | #define CONTROL_GAINT_EN 0x1DULL |
152 | #define CONTROL_XT_EN 0x32ULL | 158 | #define CONTROL_XT_EN 0x32ULL |
159 | #define CONTROL_INTCAPXT_EN 0x33ULL | ||
153 | 160 | ||
154 | #define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT) | 161 | #define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT) |
155 | #define CTRL_INV_TO_NONE 0 | 162 | #define CTRL_INV_TO_NONE 0 |
@@ -592,6 +599,8 @@ struct amd_iommu { | |||
592 | /* DebugFS Info */ | 599 | /* DebugFS Info */ |
593 | struct dentry *debugfs; | 600 | struct dentry *debugfs; |
594 | #endif | 601 | #endif |
602 | /* IRQ notifier for IntCapXT interrupt */ | ||
603 | struct irq_affinity_notify intcapxt_notify; | ||
595 | }; | 604 | }; |
596 | 605 | ||
597 | static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) | 606 | static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) |
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c index 73a552914455..2b25d9c59336 100644 --- a/drivers/iommu/intel-iommu-debugfs.c +++ b/drivers/iommu/intel-iommu-debugfs.c | |||
@@ -162,9 +162,9 @@ static inline void print_tbl_walk(struct seq_file *m) | |||
162 | (u64)0, (u64)0, (u64)0); | 162 | (u64)0, (u64)0, (u64)0); |
163 | else | 163 | else |
164 | seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", | 164 | seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", |
165 | tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[0], | 165 | tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2], |
166 | tbl_wlk->pasid_tbl_entry->val[1], | 166 | tbl_wlk->pasid_tbl_entry->val[1], |
167 | tbl_wlk->pasid_tbl_entry->val[2]); | 167 | tbl_wlk->pasid_tbl_entry->val[0]); |
168 | } | 168 | } |
169 | 169 | ||
170 | static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry, | 170 | static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry, |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index ac4172c02244..bdaed2da8a55 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -339,8 +339,6 @@ static void domain_exit(struct dmar_domain *domain); | |||
339 | static void domain_remove_dev_info(struct dmar_domain *domain); | 339 | static void domain_remove_dev_info(struct dmar_domain *domain); |
340 | static void dmar_remove_one_dev_info(struct device *dev); | 340 | static void dmar_remove_one_dev_info(struct device *dev); |
341 | static void __dmar_remove_one_dev_info(struct device_domain_info *info); | 341 | static void __dmar_remove_one_dev_info(struct device_domain_info *info); |
342 | static void domain_context_clear(struct intel_iommu *iommu, | ||
343 | struct device *dev); | ||
344 | static int domain_detach_iommu(struct dmar_domain *domain, | 342 | static int domain_detach_iommu(struct dmar_domain *domain, |
345 | struct intel_iommu *iommu); | 343 | struct intel_iommu *iommu); |
346 | static bool device_is_rmrr_locked(struct device *dev); | 344 | static bool device_is_rmrr_locked(struct device *dev); |
@@ -1833,9 +1831,65 @@ static inline int guestwidth_to_adjustwidth(int gaw) | |||
1833 | return agaw; | 1831 | return agaw; |
1834 | } | 1832 | } |
1835 | 1833 | ||
1834 | static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, | ||
1835 | int guest_width) | ||
1836 | { | ||
1837 | int adjust_width, agaw; | ||
1838 | unsigned long sagaw; | ||
1839 | int err; | ||
1840 | |||
1841 | init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); | ||
1842 | |||
1843 | err = init_iova_flush_queue(&domain->iovad, | ||
1844 | iommu_flush_iova, iova_entry_free); | ||
1845 | if (err) | ||
1846 | return err; | ||
1847 | |||
1848 | domain_reserve_special_ranges(domain); | ||
1849 | |||
1850 | /* calculate AGAW */ | ||
1851 | if (guest_width > cap_mgaw(iommu->cap)) | ||
1852 | guest_width = cap_mgaw(iommu->cap); | ||
1853 | domain->gaw = guest_width; | ||
1854 | adjust_width = guestwidth_to_adjustwidth(guest_width); | ||
1855 | agaw = width_to_agaw(adjust_width); | ||
1856 | sagaw = cap_sagaw(iommu->cap); | ||
1857 | if (!test_bit(agaw, &sagaw)) { | ||
1858 | /* hardware doesn't support it, choose a bigger one */ | ||
1859 | pr_debug("Hardware doesn't support agaw %d\n", agaw); | ||
1860 | agaw = find_next_bit(&sagaw, 5, agaw); | ||
1861 | if (agaw >= 5) | ||
1862 | return -ENODEV; | ||
1863 | } | ||
1864 | domain->agaw = agaw; | ||
1865 | |||
1866 | if (ecap_coherent(iommu->ecap)) | ||
1867 | domain->iommu_coherency = 1; | ||
1868 | else | ||
1869 | domain->iommu_coherency = 0; | ||
1870 | |||
1871 | if (ecap_sc_support(iommu->ecap)) | ||
1872 | domain->iommu_snooping = 1; | ||
1873 | else | ||
1874 | domain->iommu_snooping = 0; | ||
1875 | |||
1876 | if (intel_iommu_superpage) | ||
1877 | domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); | ||
1878 | else | ||
1879 | domain->iommu_superpage = 0; | ||
1880 | |||
1881 | domain->nid = iommu->node; | ||
1882 | |||
1883 | /* always allocate the top pgd */ | ||
1884 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); | ||
1885 | if (!domain->pgd) | ||
1886 | return -ENOMEM; | ||
1887 | __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); | ||
1888 | return 0; | ||
1889 | } | ||
1890 | |||
1836 | static void domain_exit(struct dmar_domain *domain) | 1891 | static void domain_exit(struct dmar_domain *domain) |
1837 | { | 1892 | { |
1838 | struct page *freelist; | ||
1839 | 1893 | ||
1840 | /* Remove associated devices and clear attached or cached domains */ | 1894 | /* Remove associated devices and clear attached or cached domains */ |
1841 | domain_remove_dev_info(domain); | 1895 | domain_remove_dev_info(domain); |
@@ -1843,9 +1897,12 @@ static void domain_exit(struct dmar_domain *domain) | |||
1843 | /* destroy iovas */ | 1897 | /* destroy iovas */ |
1844 | put_iova_domain(&domain->iovad); | 1898 | put_iova_domain(&domain->iovad); |
1845 | 1899 | ||
1846 | freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); | 1900 | if (domain->pgd) { |
1901 | struct page *freelist; | ||
1847 | 1902 | ||
1848 | dma_free_pagelist(freelist); | 1903 | freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); |
1904 | dma_free_pagelist(freelist); | ||
1905 | } | ||
1849 | 1906 | ||
1850 | free_domain_mem(domain); | 1907 | free_domain_mem(domain); |
1851 | } | 1908 | } |
@@ -2048,26 +2105,9 @@ out_unlock: | |||
2048 | return ret; | 2105 | return ret; |
2049 | } | 2106 | } |
2050 | 2107 | ||
2051 | struct domain_context_mapping_data { | ||
2052 | struct dmar_domain *domain; | ||
2053 | struct intel_iommu *iommu; | ||
2054 | struct pasid_table *table; | ||
2055 | }; | ||
2056 | |||
2057 | static int domain_context_mapping_cb(struct pci_dev *pdev, | ||
2058 | u16 alias, void *opaque) | ||
2059 | { | ||
2060 | struct domain_context_mapping_data *data = opaque; | ||
2061 | |||
2062 | return domain_context_mapping_one(data->domain, data->iommu, | ||
2063 | data->table, PCI_BUS_NUM(alias), | ||
2064 | alias & 0xff); | ||
2065 | } | ||
2066 | |||
2067 | static int | 2108 | static int |
2068 | domain_context_mapping(struct dmar_domain *domain, struct device *dev) | 2109 | domain_context_mapping(struct dmar_domain *domain, struct device *dev) |
2069 | { | 2110 | { |
2070 | struct domain_context_mapping_data data; | ||
2071 | struct pasid_table *table; | 2111 | struct pasid_table *table; |
2072 | struct intel_iommu *iommu; | 2112 | struct intel_iommu *iommu; |
2073 | u8 bus, devfn; | 2113 | u8 bus, devfn; |
@@ -2077,17 +2117,7 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev) | |||
2077 | return -ENODEV; | 2117 | return -ENODEV; |
2078 | 2118 | ||
2079 | table = intel_pasid_get_table(dev); | 2119 | table = intel_pasid_get_table(dev); |
2080 | 2120 | return domain_context_mapping_one(domain, iommu, table, bus, devfn); | |
2081 | if (!dev_is_pci(dev)) | ||
2082 | return domain_context_mapping_one(domain, iommu, table, | ||
2083 | bus, devfn); | ||
2084 | |||
2085 | data.domain = domain; | ||
2086 | data.iommu = iommu; | ||
2087 | data.table = table; | ||
2088 | |||
2089 | return pci_for_each_dma_alias(to_pci_dev(dev), | ||
2090 | &domain_context_mapping_cb, &data); | ||
2091 | } | 2121 | } |
2092 | 2122 | ||
2093 | static int domain_context_mapped_cb(struct pci_dev *pdev, | 2123 | static int domain_context_mapped_cb(struct pci_dev *pdev, |
@@ -2513,31 +2543,6 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque) | |||
2513 | return 0; | 2543 | return 0; |
2514 | } | 2544 | } |
2515 | 2545 | ||
2516 | static int domain_init(struct dmar_domain *domain, int guest_width) | ||
2517 | { | ||
2518 | int adjust_width; | ||
2519 | |||
2520 | init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); | ||
2521 | domain_reserve_special_ranges(domain); | ||
2522 | |||
2523 | /* calculate AGAW */ | ||
2524 | domain->gaw = guest_width; | ||
2525 | adjust_width = guestwidth_to_adjustwidth(guest_width); | ||
2526 | domain->agaw = width_to_agaw(adjust_width); | ||
2527 | |||
2528 | domain->iommu_coherency = 0; | ||
2529 | domain->iommu_snooping = 0; | ||
2530 | domain->iommu_superpage = 0; | ||
2531 | domain->max_addr = 0; | ||
2532 | |||
2533 | /* always allocate the top pgd */ | ||
2534 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); | ||
2535 | if (!domain->pgd) | ||
2536 | return -ENOMEM; | ||
2537 | domain_flush_cache(domain, domain->pgd, PAGE_SIZE); | ||
2538 | return 0; | ||
2539 | } | ||
2540 | |||
2541 | static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) | 2546 | static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) |
2542 | { | 2547 | { |
2543 | struct device_domain_info *info; | 2548 | struct device_domain_info *info; |
@@ -2575,19 +2580,11 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) | |||
2575 | domain = alloc_domain(0); | 2580 | domain = alloc_domain(0); |
2576 | if (!domain) | 2581 | if (!domain) |
2577 | return NULL; | 2582 | return NULL; |
2578 | 2583 | if (domain_init(domain, iommu, gaw)) { | |
2579 | if (domain_init(domain, gaw)) { | ||
2580 | domain_exit(domain); | 2584 | domain_exit(domain); |
2581 | return NULL; | 2585 | return NULL; |
2582 | } | 2586 | } |
2583 | 2587 | ||
2584 | if (init_iova_flush_queue(&domain->iovad, | ||
2585 | iommu_flush_iova, | ||
2586 | iova_entry_free)) { | ||
2587 | pr_warn("iova flush queue initialization failed\n"); | ||
2588 | intel_iommu_strict = 1; | ||
2589 | } | ||
2590 | |||
2591 | out: | 2588 | out: |
2592 | return domain; | 2589 | return domain; |
2593 | } | 2590 | } |
@@ -2692,6 +2689,8 @@ static int domain_prepare_identity_map(struct device *dev, | |||
2692 | return iommu_domain_identity_map(domain, start, end); | 2689 | return iommu_domain_identity_map(domain, start, end); |
2693 | } | 2690 | } |
2694 | 2691 | ||
2692 | static int md_domain_init(struct dmar_domain *domain, int guest_width); | ||
2693 | |||
2695 | static int __init si_domain_init(int hw) | 2694 | static int __init si_domain_init(int hw) |
2696 | { | 2695 | { |
2697 | struct dmar_rmrr_unit *rmrr; | 2696 | struct dmar_rmrr_unit *rmrr; |
@@ -2702,7 +2701,7 @@ static int __init si_domain_init(int hw) | |||
2702 | if (!si_domain) | 2701 | if (!si_domain) |
2703 | return -EFAULT; | 2702 | return -EFAULT; |
2704 | 2703 | ||
2705 | if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | 2704 | if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
2706 | domain_exit(si_domain); | 2705 | domain_exit(si_domain); |
2707 | return -EFAULT; | 2706 | return -EFAULT; |
2708 | } | 2707 | } |
@@ -3564,7 +3563,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) | |||
3564 | 3563 | ||
3565 | freelist = domain_unmap(domain, start_pfn, last_pfn); | 3564 | freelist = domain_unmap(domain, start_pfn, last_pfn); |
3566 | 3565 | ||
3567 | if (intel_iommu_strict || (pdev && pdev->untrusted)) { | 3566 | if (intel_iommu_strict || (pdev && pdev->untrusted) || |
3567 | !has_iova_flush_queue(&domain->iovad)) { | ||
3568 | iommu_flush_iotlb_psi(iommu, domain, start_pfn, | 3568 | iommu_flush_iotlb_psi(iommu, domain, start_pfn, |
3569 | nrpages, !freelist, 0); | 3569 | nrpages, !freelist, 0); |
3570 | /* free iova */ | 3570 | /* free iova */ |
@@ -4758,28 +4758,6 @@ out_free_dmar: | |||
4758 | return ret; | 4758 | return ret; |
4759 | } | 4759 | } |
4760 | 4760 | ||
4761 | static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque) | ||
4762 | { | ||
4763 | struct intel_iommu *iommu = opaque; | ||
4764 | |||
4765 | domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); | ||
4766 | return 0; | ||
4767 | } | ||
4768 | |||
4769 | /* | ||
4770 | * NB - intel-iommu lacks any sort of reference counting for the users of | ||
4771 | * dependent devices. If multiple endpoints have intersecting dependent | ||
4772 | * devices, unbinding the driver from any one of them will possibly leave | ||
4773 | * the others unable to operate. | ||
4774 | */ | ||
4775 | static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) | ||
4776 | { | ||
4777 | if (!iommu || !dev || !dev_is_pci(dev)) | ||
4778 | return; | ||
4779 | |||
4780 | pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); | ||
4781 | } | ||
4782 | |||
4783 | static void __dmar_remove_one_dev_info(struct device_domain_info *info) | 4761 | static void __dmar_remove_one_dev_info(struct device_domain_info *info) |
4784 | { | 4762 | { |
4785 | struct dmar_domain *domain; | 4763 | struct dmar_domain *domain; |
@@ -4800,7 +4778,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) | |||
4800 | PASID_RID2PASID); | 4778 | PASID_RID2PASID); |
4801 | 4779 | ||
4802 | iommu_disable_dev_iotlb(info); | 4780 | iommu_disable_dev_iotlb(info); |
4803 | domain_context_clear(iommu, info->dev); | 4781 | domain_context_clear_one(iommu, info->bus, info->devfn); |
4804 | intel_pasid_free_table(info->dev); | 4782 | intel_pasid_free_table(info->dev); |
4805 | } | 4783 | } |
4806 | 4784 | ||
@@ -4829,6 +4807,31 @@ static void dmar_remove_one_dev_info(struct device *dev) | |||
4829 | spin_unlock_irqrestore(&device_domain_lock, flags); | 4807 | spin_unlock_irqrestore(&device_domain_lock, flags); |
4830 | } | 4808 | } |
4831 | 4809 | ||
4810 | static int md_domain_init(struct dmar_domain *domain, int guest_width) | ||
4811 | { | ||
4812 | int adjust_width; | ||
4813 | |||
4814 | init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); | ||
4815 | domain_reserve_special_ranges(domain); | ||
4816 | |||
4817 | /* calculate AGAW */ | ||
4818 | domain->gaw = guest_width; | ||
4819 | adjust_width = guestwidth_to_adjustwidth(guest_width); | ||
4820 | domain->agaw = width_to_agaw(adjust_width); | ||
4821 | |||
4822 | domain->iommu_coherency = 0; | ||
4823 | domain->iommu_snooping = 0; | ||
4824 | domain->iommu_superpage = 0; | ||
4825 | domain->max_addr = 0; | ||
4826 | |||
4827 | /* always allocate the top pgd */ | ||
4828 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); | ||
4829 | if (!domain->pgd) | ||
4830 | return -ENOMEM; | ||
4831 | domain_flush_cache(domain, domain->pgd, PAGE_SIZE); | ||
4832 | return 0; | ||
4833 | } | ||
4834 | |||
4832 | static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) | 4835 | static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) |
4833 | { | 4836 | { |
4834 | struct dmar_domain *dmar_domain; | 4837 | struct dmar_domain *dmar_domain; |
@@ -4843,7 +4846,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) | |||
4843 | pr_err("Can't allocate dmar_domain\n"); | 4846 | pr_err("Can't allocate dmar_domain\n"); |
4844 | return NULL; | 4847 | return NULL; |
4845 | } | 4848 | } |
4846 | if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | 4849 | if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
4847 | pr_err("Domain initialization failed\n"); | 4850 | pr_err("Domain initialization failed\n"); |
4848 | domain_exit(dmar_domain); | 4851 | domain_exit(dmar_domain); |
4849 | return NULL; | 4852 | return NULL; |
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index d499b2621239..3e1a8a675572 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c | |||
@@ -54,9 +54,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, | |||
54 | } | 54 | } |
55 | EXPORT_SYMBOL_GPL(init_iova_domain); | 55 | EXPORT_SYMBOL_GPL(init_iova_domain); |
56 | 56 | ||
57 | bool has_iova_flush_queue(struct iova_domain *iovad) | ||
58 | { | ||
59 | return !!iovad->fq; | ||
60 | } | ||
61 | |||
57 | static void free_iova_flush_queue(struct iova_domain *iovad) | 62 | static void free_iova_flush_queue(struct iova_domain *iovad) |
58 | { | 63 | { |
59 | if (!iovad->fq) | 64 | if (!has_iova_flush_queue(iovad)) |
60 | return; | 65 | return; |
61 | 66 | ||
62 | if (timer_pending(&iovad->fq_timer)) | 67 | if (timer_pending(&iovad->fq_timer)) |
@@ -74,13 +79,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad) | |||
74 | int init_iova_flush_queue(struct iova_domain *iovad, | 79 | int init_iova_flush_queue(struct iova_domain *iovad, |
75 | iova_flush_cb flush_cb, iova_entry_dtor entry_dtor) | 80 | iova_flush_cb flush_cb, iova_entry_dtor entry_dtor) |
76 | { | 81 | { |
82 | struct iova_fq __percpu *queue; | ||
77 | int cpu; | 83 | int cpu; |
78 | 84 | ||
79 | atomic64_set(&iovad->fq_flush_start_cnt, 0); | 85 | atomic64_set(&iovad->fq_flush_start_cnt, 0); |
80 | atomic64_set(&iovad->fq_flush_finish_cnt, 0); | 86 | atomic64_set(&iovad->fq_flush_finish_cnt, 0); |
81 | 87 | ||
82 | iovad->fq = alloc_percpu(struct iova_fq); | 88 | queue = alloc_percpu(struct iova_fq); |
83 | if (!iovad->fq) | 89 | if (!queue) |
84 | return -ENOMEM; | 90 | return -ENOMEM; |
85 | 91 | ||
86 | iovad->flush_cb = flush_cb; | 92 | iovad->flush_cb = flush_cb; |
@@ -89,13 +95,17 @@ int init_iova_flush_queue(struct iova_domain *iovad, | |||
89 | for_each_possible_cpu(cpu) { | 95 | for_each_possible_cpu(cpu) { |
90 | struct iova_fq *fq; | 96 | struct iova_fq *fq; |
91 | 97 | ||
92 | fq = per_cpu_ptr(iovad->fq, cpu); | 98 | fq = per_cpu_ptr(queue, cpu); |
93 | fq->head = 0; | 99 | fq->head = 0; |
94 | fq->tail = 0; | 100 | fq->tail = 0; |
95 | 101 | ||
96 | spin_lock_init(&fq->lock); | 102 | spin_lock_init(&fq->lock); |
97 | } | 103 | } |
98 | 104 | ||
105 | smp_wmb(); | ||
106 | |||
107 | iovad->fq = queue; | ||
108 | |||
99 | timer_setup(&iovad->fq_timer, fq_flush_timeout, 0); | 109 | timer_setup(&iovad->fq_timer, fq_flush_timeout, 0); |
100 | atomic_set(&iovad->fq_timer_on, 0); | 110 | atomic_set(&iovad->fq_timer_on, 0); |
101 | 111 | ||
@@ -127,8 +137,9 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) | |||
127 | struct iova *cached_iova; | 137 | struct iova *cached_iova; |
128 | 138 | ||
129 | cached_iova = rb_entry(iovad->cached32_node, struct iova, node); | 139 | cached_iova = rb_entry(iovad->cached32_node, struct iova, node); |
130 | if (free->pfn_hi < iovad->dma_32bit_pfn && | 140 | if (free == cached_iova || |
131 | free->pfn_lo >= cached_iova->pfn_lo) { | 141 | (free->pfn_hi < iovad->dma_32bit_pfn && |
142 | free->pfn_lo >= cached_iova->pfn_lo)) { | ||
132 | iovad->cached32_node = rb_next(&free->node); | 143 | iovad->cached32_node = rb_next(&free->node); |
133 | iovad->max32_alloc_size = iovad->dma_32bit_pfn; | 144 | iovad->max32_alloc_size = iovad->dma_32bit_pfn; |
134 | } | 145 | } |
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 433f4d2ee956..80a740df0737 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c | |||
@@ -2,7 +2,7 @@ | |||
2 | /* | 2 | /* |
3 | * Virtio driver for the paravirtualized IOMMU | 3 | * Virtio driver for the paravirtualized IOMMU |
4 | * | 4 | * |
5 | * Copyright (C) 2018 Arm Limited | 5 | * Copyright (C) 2019 Arm Limited |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
@@ -47,7 +47,10 @@ struct viommu_dev { | |||
47 | /* Device configuration */ | 47 | /* Device configuration */ |
48 | struct iommu_domain_geometry geometry; | 48 | struct iommu_domain_geometry geometry; |
49 | u64 pgsize_bitmap; | 49 | u64 pgsize_bitmap; |
50 | u8 domain_bits; | 50 | u32 first_domain; |
51 | u32 last_domain; | ||
52 | /* Supported MAP flags */ | ||
53 | u32 map_flags; | ||
51 | u32 probe_size; | 54 | u32 probe_size; |
52 | }; | 55 | }; |
53 | 56 | ||
@@ -62,6 +65,7 @@ struct viommu_domain { | |||
62 | struct viommu_dev *viommu; | 65 | struct viommu_dev *viommu; |
63 | struct mutex mutex; /* protects viommu pointer */ | 66 | struct mutex mutex; /* protects viommu pointer */ |
64 | unsigned int id; | 67 | unsigned int id; |
68 | u32 map_flags; | ||
65 | 69 | ||
66 | spinlock_t mappings_lock; | 70 | spinlock_t mappings_lock; |
67 | struct rb_root_cached mappings; | 71 | struct rb_root_cached mappings; |
@@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len) | |||
113 | return -ENOENT; | 117 | return -ENOENT; |
114 | case VIRTIO_IOMMU_S_FAULT: | 118 | case VIRTIO_IOMMU_S_FAULT: |
115 | return -EFAULT; | 119 | return -EFAULT; |
120 | case VIRTIO_IOMMU_S_NOMEM: | ||
121 | return -ENOMEM; | ||
116 | case VIRTIO_IOMMU_S_IOERR: | 122 | case VIRTIO_IOMMU_S_IOERR: |
117 | case VIRTIO_IOMMU_S_DEVERR: | 123 | case VIRTIO_IOMMU_S_DEVERR: |
118 | default: | 124 | default: |
@@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu, | |||
607 | { | 613 | { |
608 | int ret; | 614 | int ret; |
609 | struct viommu_domain *vdomain = to_viommu_domain(domain); | 615 | struct viommu_domain *vdomain = to_viommu_domain(domain); |
610 | unsigned int max_domain = viommu->domain_bits > 31 ? ~0 : | ||
611 | (1U << viommu->domain_bits) - 1; | ||
612 | 616 | ||
613 | vdomain->viommu = viommu; | 617 | vdomain->viommu = viommu; |
618 | vdomain->map_flags = viommu->map_flags; | ||
614 | 619 | ||
615 | domain->pgsize_bitmap = viommu->pgsize_bitmap; | 620 | domain->pgsize_bitmap = viommu->pgsize_bitmap; |
616 | domain->geometry = viommu->geometry; | 621 | domain->geometry = viommu->geometry; |
617 | 622 | ||
618 | ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL); | 623 | ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, |
624 | viommu->last_domain, GFP_KERNEL); | ||
619 | if (ret >= 0) | 625 | if (ret >= 0) |
620 | vdomain->id = (unsigned int)ret; | 626 | vdomain->id = (unsigned int)ret; |
621 | 627 | ||
@@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, | |||
710 | phys_addr_t paddr, size_t size, int prot) | 716 | phys_addr_t paddr, size_t size, int prot) |
711 | { | 717 | { |
712 | int ret; | 718 | int ret; |
713 | int flags; | 719 | u32 flags; |
714 | struct virtio_iommu_req_map map; | 720 | struct virtio_iommu_req_map map; |
715 | struct viommu_domain *vdomain = to_viommu_domain(domain); | 721 | struct viommu_domain *vdomain = to_viommu_domain(domain); |
716 | 722 | ||
@@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, | |||
718 | (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) | | 724 | (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) | |
719 | (prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0); | 725 | (prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0); |
720 | 726 | ||
727 | if (flags & ~vdomain->map_flags) | ||
728 | return -EINVAL; | ||
729 | |||
721 | ret = viommu_add_mapping(vdomain, iova, paddr, size, flags); | 730 | ret = viommu_add_mapping(vdomain, iova, paddr, size, flags); |
722 | if (ret) | 731 | if (ret) |
723 | return ret; | 732 | return ret; |
@@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev) | |||
1027 | goto err_free_vqs; | 1036 | goto err_free_vqs; |
1028 | } | 1037 | } |
1029 | 1038 | ||
1030 | viommu->domain_bits = 32; | 1039 | viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; |
1040 | viommu->last_domain = ~0U; | ||
1031 | 1041 | ||
1032 | /* Optional features */ | 1042 | /* Optional features */ |
1033 | virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, | 1043 | virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, |
@@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev) | |||
1038 | struct virtio_iommu_config, input_range.end, | 1048 | struct virtio_iommu_config, input_range.end, |
1039 | &input_end); | 1049 | &input_end); |
1040 | 1050 | ||
1041 | virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS, | 1051 | virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, |
1042 | struct virtio_iommu_config, domain_bits, | 1052 | struct virtio_iommu_config, domain_range.start, |
1043 | &viommu->domain_bits); | 1053 | &viommu->first_domain); |
1054 | |||
1055 | virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, | ||
1056 | struct virtio_iommu_config, domain_range.end, | ||
1057 | &viommu->last_domain); | ||
1044 | 1058 | ||
1045 | virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE, | 1059 | virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE, |
1046 | struct virtio_iommu_config, probe_size, | 1060 | struct virtio_iommu_config, probe_size, |
@@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev) | |||
1052 | .force_aperture = true, | 1066 | .force_aperture = true, |
1053 | }; | 1067 | }; |
1054 | 1068 | ||
1069 | if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO)) | ||
1070 | viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO; | ||
1071 | |||
1055 | viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; | 1072 | viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; |
1056 | 1073 | ||
1057 | virtio_device_ready(vdev); | 1074 | virtio_device_ready(vdev); |
@@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev) | |||
1130 | 1147 | ||
1131 | static unsigned int features[] = { | 1148 | static unsigned int features[] = { |
1132 | VIRTIO_IOMMU_F_MAP_UNMAP, | 1149 | VIRTIO_IOMMU_F_MAP_UNMAP, |
1133 | VIRTIO_IOMMU_F_DOMAIN_BITS, | ||
1134 | VIRTIO_IOMMU_F_INPUT_RANGE, | 1150 | VIRTIO_IOMMU_F_INPUT_RANGE, |
1151 | VIRTIO_IOMMU_F_DOMAIN_RANGE, | ||
1135 | VIRTIO_IOMMU_F_PROBE, | 1152 | VIRTIO_IOMMU_F_PROBE, |
1153 | VIRTIO_IOMMU_F_MMIO, | ||
1136 | }; | 1154 | }; |
1137 | 1155 | ||
1138 | static struct virtio_device_id id_table[] = { | 1156 | static struct virtio_device_id id_table[] = { |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 730fbe0e2a9d..1b5c3672aea2 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -3010,7 +3010,7 @@ static int its_vpe_init(struct its_vpe *vpe) | |||
3010 | 3010 | ||
3011 | if (!its_alloc_vpe_table(vpe_id)) { | 3011 | if (!its_alloc_vpe_table(vpe_id)) { |
3012 | its_vpe_id_free(vpe_id); | 3012 | its_vpe_id_free(vpe_id); |
3013 | its_free_pending_table(vpe->vpt_page); | 3013 | its_free_pending_table(vpt_page); |
3014 | return -ENOMEM; | 3014 | return -ENOMEM; |
3015 | } | 3015 | } |
3016 | 3016 | ||
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 9bca4896fa6f..96d927f0f91a 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -771,8 +771,10 @@ static void gic_cpu_sys_reg_init(void) | |||
771 | case 7: | 771 | case 7: |
772 | write_gicreg(0, ICC_AP0R3_EL1); | 772 | write_gicreg(0, ICC_AP0R3_EL1); |
773 | write_gicreg(0, ICC_AP0R2_EL1); | 773 | write_gicreg(0, ICC_AP0R2_EL1); |
774 | /* Fall through */ | ||
774 | case 6: | 775 | case 6: |
775 | write_gicreg(0, ICC_AP0R1_EL1); | 776 | write_gicreg(0, ICC_AP0R1_EL1); |
777 | /* Fall through */ | ||
776 | case 5: | 778 | case 5: |
777 | case 4: | 779 | case 4: |
778 | write_gicreg(0, ICC_AP0R0_EL1); | 780 | write_gicreg(0, ICC_AP0R0_EL1); |
@@ -786,8 +788,10 @@ static void gic_cpu_sys_reg_init(void) | |||
786 | case 7: | 788 | case 7: |
787 | write_gicreg(0, ICC_AP1R3_EL1); | 789 | write_gicreg(0, ICC_AP1R3_EL1); |
788 | write_gicreg(0, ICC_AP1R2_EL1); | 790 | write_gicreg(0, ICC_AP1R2_EL1); |
791 | /* Fall through */ | ||
789 | case 6: | 792 | case 6: |
790 | write_gicreg(0, ICC_AP1R1_EL1); | 793 | write_gicreg(0, ICC_AP1R1_EL1); |
794 | /* Fall through */ | ||
791 | case 5: | 795 | case 5: |
792 | case 4: | 796 | case 4: |
793 | write_gicreg(0, ICC_AP1R0_EL1); | 797 | write_gicreg(0, ICC_AP1R0_EL1); |
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c index bf2237ac5d09..4f74c15c4755 100644 --- a/drivers/irqchip/irq-imx-gpcv2.c +++ b/drivers/irqchip/irq-imx-gpcv2.c | |||
@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = { | |||
131 | .irq_unmask = imx_gpcv2_irq_unmask, | 131 | .irq_unmask = imx_gpcv2_irq_unmask, |
132 | .irq_set_wake = imx_gpcv2_irq_set_wake, | 132 | .irq_set_wake = imx_gpcv2_irq_set_wake, |
133 | .irq_retrigger = irq_chip_retrigger_hierarchy, | 133 | .irq_retrigger = irq_chip_retrigger_hierarchy, |
134 | .irq_set_type = irq_chip_set_type_parent, | ||
134 | #ifdef CONFIG_SMP | 135 | #ifdef CONFIG_SMP |
135 | .irq_set_affinity = irq_chip_set_affinity_parent, | 136 | .irq_set_affinity = irq_chip_set_affinity_parent, |
136 | #endif | 137 | #endif |
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 3dd28382d5f5..3f09f658e8e2 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c | |||
@@ -241,12 +241,15 @@ static int mbigen_of_create_domain(struct platform_device *pdev, | |||
241 | 241 | ||
242 | parent = platform_bus_type.dev_root; | 242 | parent = platform_bus_type.dev_root; |
243 | child = of_platform_device_create(np, NULL, parent); | 243 | child = of_platform_device_create(np, NULL, parent); |
244 | if (!child) | 244 | if (!child) { |
245 | of_node_put(np); | ||
245 | return -ENOMEM; | 246 | return -ENOMEM; |
247 | } | ||
246 | 248 | ||
247 | if (of_property_read_u32(child->dev.of_node, "num-pins", | 249 | if (of_property_read_u32(child->dev.of_node, "num-pins", |
248 | &num_pins) < 0) { | 250 | &num_pins) < 0) { |
249 | dev_err(&pdev->dev, "No num-pins property\n"); | 251 | dev_err(&pdev->dev, "No num-pins property\n"); |
252 | of_node_put(np); | ||
250 | return -EINVAL; | 253 | return -EINVAL; |
251 | } | 254 | } |
252 | 255 | ||
@@ -254,8 +257,10 @@ static int mbigen_of_create_domain(struct platform_device *pdev, | |||
254 | mbigen_write_msg, | 257 | mbigen_write_msg, |
255 | &mbigen_domain_ops, | 258 | &mbigen_domain_ops, |
256 | mgn_chip); | 259 | mgn_chip); |
257 | if (!domain) | 260 | if (!domain) { |
261 | of_node_put(np); | ||
258 | return -ENOMEM; | 262 | return -ENOMEM; |
263 | } | ||
259 | } | 264 | } |
260 | 265 | ||
261 | return 0; | 266 | return 0; |
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index 0e224232f746..008a74a1ed44 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c | |||
@@ -1394,6 +1394,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb, | |||
1394 | printk(KERN_DEBUG | 1394 | printk(KERN_DEBUG |
1395 | "%s: %s: alloc urb for fifo %i failed", | 1395 | "%s: %s: alloc urb for fifo %i failed", |
1396 | hw->name, __func__, fifo->fifonum); | 1396 | hw->name, __func__, fifo->fifonum); |
1397 | continue; | ||
1397 | } | 1398 | } |
1398 | fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo; | 1399 | fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo; |
1399 | fifo->iso[i].indx = i; | 1400 | fifo->iso[i].indx = i; |
@@ -1692,13 +1693,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel) | |||
1692 | static int | 1693 | static int |
1693 | setup_hfcsusb(struct hfcsusb *hw) | 1694 | setup_hfcsusb(struct hfcsusb *hw) |
1694 | { | 1695 | { |
1696 | void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL); | ||
1695 | u_char b; | 1697 | u_char b; |
1698 | int ret; | ||
1696 | 1699 | ||
1697 | if (debug & DBG_HFC_CALL_TRACE) | 1700 | if (debug & DBG_HFC_CALL_TRACE) |
1698 | printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); | 1701 | printk(KERN_DEBUG "%s: %s\n", hw->name, __func__); |
1699 | 1702 | ||
1703 | if (!dmabuf) | ||
1704 | return -ENOMEM; | ||
1705 | |||
1706 | ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf); | ||
1707 | |||
1708 | memcpy(&b, dmabuf, sizeof(u_char)); | ||
1709 | kfree(dmabuf); | ||
1710 | |||
1700 | /* check the chip id */ | 1711 | /* check the chip id */ |
1701 | if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) { | 1712 | if (ret != 1) { |
1702 | printk(KERN_DEBUG "%s: %s: cannot read chip id\n", | 1713 | printk(KERN_DEBUG "%s: %s: cannot read chip id\n", |
1703 | hw->name, __func__); | 1714 | hw->name, __func__); |
1704 | return 1; | 1715 | return 1; |
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 276065c888bc..23f1f41c8602 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c | |||
@@ -852,6 +852,7 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd) | |||
852 | break; | 852 | break; |
853 | case SMU_I2C_TRANSFER_COMBINED: | 853 | case SMU_I2C_TRANSFER_COMBINED: |
854 | cmd->info.devaddr &= 0xfe; | 854 | cmd->info.devaddr &= 0xfe; |
855 | /* fall through */ | ||
855 | case SMU_I2C_TRANSFER_STDSUB: | 856 | case SMU_I2C_TRANSFER_STDSUB: |
856 | if (cmd->info.sublen > 3) | 857 | if (cmd->info.sublen > 3) |
857 | return -EINVAL; | 858 | return -EINVAL; |
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 26e374fbf57c..20ed838e9413 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
@@ -931,6 +931,9 @@ int bch_cached_dev_run(struct cached_dev *dc) | |||
931 | if (dc->io_disable) { | 931 | if (dc->io_disable) { |
932 | pr_err("I/O disabled on cached dev %s", | 932 | pr_err("I/O disabled on cached dev %s", |
933 | dc->backing_dev_name); | 933 | dc->backing_dev_name); |
934 | kfree(env[1]); | ||
935 | kfree(env[2]); | ||
936 | kfree(buf); | ||
934 | return -EIO; | 937 | return -EIO; |
935 | } | 938 | } |
936 | 939 | ||
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index caaee8032afe..7b6c3ee9e755 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -882,23 +882,23 @@ EXPORT_SYMBOL_GPL(dm_table_set_type); | |||
882 | 882 | ||
883 | /* validate the dax capability of the target device span */ | 883 | /* validate the dax capability of the target device span */ |
884 | int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, | 884 | int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, |
885 | sector_t start, sector_t len, void *data) | 885 | sector_t start, sector_t len, void *data) |
886 | { | 886 | { |
887 | int blocksize = *(int *) data; | 887 | int blocksize = *(int *) data; |
888 | 888 | ||
889 | return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize, | 889 | return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize, |
890 | start, len); | 890 | start, len); |
891 | } | 891 | } |
892 | 892 | ||
893 | /* Check devices support synchronous DAX */ | 893 | /* Check devices support synchronous DAX */ |
894 | static int device_synchronous(struct dm_target *ti, struct dm_dev *dev, | 894 | static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev, |
895 | sector_t start, sector_t len, void *data) | 895 | sector_t start, sector_t len, void *data) |
896 | { | 896 | { |
897 | return dax_synchronous(dev->dax_dev); | 897 | return dev->dax_dev && dax_synchronous(dev->dax_dev); |
898 | } | 898 | } |
899 | 899 | ||
900 | bool dm_table_supports_dax(struct dm_table *t, | 900 | bool dm_table_supports_dax(struct dm_table *t, |
901 | iterate_devices_callout_fn iterate_fn, int *blocksize) | 901 | iterate_devices_callout_fn iterate_fn, int *blocksize) |
902 | { | 902 | { |
903 | struct dm_target *ti; | 903 | struct dm_target *ti; |
904 | unsigned i; | 904 | unsigned i; |
@@ -911,7 +911,7 @@ bool dm_table_supports_dax(struct dm_table *t, | |||
911 | return false; | 911 | return false; |
912 | 912 | ||
913 | if (!ti->type->iterate_devices || | 913 | if (!ti->type->iterate_devices || |
914 | !ti->type->iterate_devices(ti, iterate_fn, blocksize)) | 914 | !ti->type->iterate_devices(ti, iterate_fn, blocksize)) |
915 | return false; | 915 | return false; |
916 | } | 916 | } |
917 | 917 | ||
@@ -1921,7 +1921,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | |||
1921 | 1921 | ||
1922 | if (dm_table_supports_dax(t, device_supports_dax, &page_size)) { | 1922 | if (dm_table_supports_dax(t, device_supports_dax, &page_size)) { |
1923 | blk_queue_flag_set(QUEUE_FLAG_DAX, q); | 1923 | blk_queue_flag_set(QUEUE_FLAG_DAX, q); |
1924 | if (dm_table_supports_dax(t, device_synchronous, NULL)) | 1924 | if (dm_table_supports_dax(t, device_dax_synchronous, NULL)) |
1925 | set_dax_synchronous(t->md->dax_dev); | 1925 | set_dax_synchronous(t->md->dax_dev); |
1926 | } | 1926 | } |
1927 | else | 1927 | else |
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig index f88094719552..f2abe27010ef 100644 --- a/drivers/misc/eeprom/Kconfig +++ b/drivers/misc/eeprom/Kconfig | |||
@@ -5,6 +5,7 @@ config EEPROM_AT24 | |||
5 | tristate "I2C EEPROMs / RAMs / ROMs from most vendors" | 5 | tristate "I2C EEPROMs / RAMs / ROMs from most vendors" |
6 | depends on I2C && SYSFS | 6 | depends on I2C && SYSFS |
7 | select NVMEM | 7 | select NVMEM |
8 | select NVMEM_SYSFS | ||
8 | select REGMAP_I2C | 9 | select REGMAP_I2C |
9 | help | 10 | help |
10 | Enable this driver to get read/write support to most I2C EEPROMs | 11 | Enable this driver to get read/write support to most I2C EEPROMs |
@@ -34,6 +35,7 @@ config EEPROM_AT25 | |||
34 | tristate "SPI EEPROMs from most vendors" | 35 | tristate "SPI EEPROMs from most vendors" |
35 | depends on SPI && SYSFS | 36 | depends on SPI && SYSFS |
36 | select NVMEM | 37 | select NVMEM |
38 | select NVMEM_SYSFS | ||
37 | help | 39 | help |
38 | Enable this driver to get read/write support to most SPI EEPROMs, | 40 | Enable this driver to get read/write support to most SPI EEPROMs, |
39 | after you configure the board init code to know about each eeprom | 41 | after you configure the board init code to know about each eeprom |
@@ -80,6 +82,7 @@ config EEPROM_93XX46 | |||
80 | depends on SPI && SYSFS | 82 | depends on SPI && SYSFS |
81 | select REGMAP | 83 | select REGMAP |
82 | select NVMEM | 84 | select NVMEM |
85 | select NVMEM_SYSFS | ||
83 | help | 86 | help |
84 | Driver for the microwire EEPROM chipsets 93xx46x. The driver | 87 | Driver for the microwire EEPROM chipsets 93xx46x. The driver |
85 | supports both read and write commands and also the command to | 88 | supports both read and write commands and also the command to |
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 35bf2477693d..518945b2f737 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c | |||
@@ -685,7 +685,7 @@ static int at24_probe(struct i2c_client *client) | |||
685 | nvmem_config.name = dev_name(dev); | 685 | nvmem_config.name = dev_name(dev); |
686 | nvmem_config.dev = dev; | 686 | nvmem_config.dev = dev; |
687 | nvmem_config.read_only = !writable; | 687 | nvmem_config.read_only = !writable; |
688 | nvmem_config.root_only = true; | 688 | nvmem_config.root_only = !(flags & AT24_FLAG_IRUGO); |
689 | nvmem_config.owner = THIS_MODULE; | 689 | nvmem_config.owner = THIS_MODULE; |
690 | nvmem_config.compat = true; | 690 | nvmem_config.compat = true; |
691 | nvmem_config.base_dev = dev; | 691 | nvmem_config.base_dev = dev; |
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 75294ec65257..1a2c062a57d4 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c | |||
@@ -695,8 +695,8 @@ static int goya_sw_init(struct hl_device *hdev) | |||
695 | goto free_dma_pool; | 695 | goto free_dma_pool; |
696 | } | 696 | } |
697 | 697 | ||
698 | dev_dbg(hdev->dev, "cpu accessible memory at bus address 0x%llx\n", | 698 | dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n", |
699 | hdev->cpu_accessible_dma_address); | 699 | &hdev->cpu_accessible_dma_address); |
700 | 700 | ||
701 | hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1); | 701 | hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1); |
702 | if (!hdev->cpu_accessible_dma_pool) { | 702 | if (!hdev->cpu_accessible_dma_pool) { |
@@ -4449,7 +4449,6 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) | |||
4449 | case GOYA_ASYNC_EVENT_ID_AXI_ECC: | 4449 | case GOYA_ASYNC_EVENT_ID_AXI_ECC: |
4450 | case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC: | 4450 | case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC: |
4451 | case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET: | 4451 | case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET: |
4452 | case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT: | ||
4453 | goya_print_irq_info(hdev, event_type, false); | 4452 | goya_print_irq_info(hdev, event_type, false); |
4454 | hl_device_reset(hdev, true, false); | 4453 | hl_device_reset(hdev, true, false); |
4455 | break; | 4454 | break; |
@@ -4485,6 +4484,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) | |||
4485 | goya_unmask_irq(hdev, event_type); | 4484 | goya_unmask_irq(hdev, event_type); |
4486 | break; | 4485 | break; |
4487 | 4486 | ||
4487 | case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT: | ||
4488 | case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU: | 4488 | case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU: |
4489 | case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU: | 4489 | case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU: |
4490 | case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU: | 4490 | case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU: |
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index d74b182e19f3..6c0173772162 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h | |||
@@ -81,6 +81,9 @@ | |||
81 | 81 | ||
82 | #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ | 82 | #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ |
83 | 83 | ||
84 | #define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */ | ||
85 | #define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */ | ||
86 | |||
84 | /* | 87 | /* |
85 | * MEI HW Section | 88 | * MEI HW Section |
86 | */ | 89 | */ |
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 7a2b3545a7f9..57cb68f5cc64 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
@@ -98,6 +98,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = { | |||
98 | 98 | ||
99 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, | 99 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, |
100 | 100 | ||
101 | {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)}, | ||
102 | {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)}, | ||
103 | |||
101 | /* required last entry */ | 104 | /* required last entry */ |
102 | {0, } | 105 | {0, } |
103 | }; | 106 | }; |
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index e327f80ebe70..7102e2ebc614 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/kthread.h> | 10 | #include <linux/kthread.h> |
11 | #include <linux/scatterlist.h> | 11 | #include <linux/scatterlist.h> |
12 | #include <linux/dma-mapping.h> | 12 | #include <linux/dma-mapping.h> |
13 | #include <linux/backing-dev.h> | ||
13 | 14 | ||
14 | #include <linux/mmc/card.h> | 15 | #include <linux/mmc/card.h> |
15 | #include <linux/mmc/host.h> | 16 | #include <linux/mmc/host.h> |
@@ -427,6 +428,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) | |||
427 | goto free_tag_set; | 428 | goto free_tag_set; |
428 | } | 429 | } |
429 | 430 | ||
431 | if (mmc_host_is_spi(host) && host->use_spi_crc) | ||
432 | mq->queue->backing_dev_info->capabilities |= | ||
433 | BDI_CAP_STABLE_WRITES; | ||
434 | |||
430 | mq->queue->queuedata = mq; | 435 | mq->queue->queuedata = mq; |
431 | blk_queue_rq_timeout(mq->queue, 60 * HZ); | 436 | blk_queue_rq_timeout(mq->queue, 60 * HZ); |
432 | 437 | ||
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index faaaf52a46d2..eea52e2c5a0c 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -2012,8 +2012,7 @@ static void dw_mci_tasklet_func(unsigned long priv) | |||
2012 | * delayed. Allowing the transfer to take place | 2012 | * delayed. Allowing the transfer to take place |
2013 | * avoids races and keeps things simple. | 2013 | * avoids races and keeps things simple. |
2014 | */ | 2014 | */ |
2015 | if ((err != -ETIMEDOUT) && | 2015 | if (err != -ETIMEDOUT) { |
2016 | (cmd->opcode == MMC_SEND_TUNING_BLOCK)) { | ||
2017 | state = STATE_SENDING_DATA; | 2016 | state = STATE_SENDING_DATA; |
2018 | continue; | 2017 | continue; |
2019 | } | 2018 | } |
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 2d736e416775..ba9a63db73da 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c | |||
@@ -73,7 +73,7 @@ | |||
73 | #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6) | 73 | #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6) |
74 | #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8) | 74 | #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8) |
75 | #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9) | 75 | #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9) |
76 | #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13) | 76 | #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10) |
77 | #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15) | 77 | #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15) |
78 | #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30) | 78 | #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30) |
79 | #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31) | 79 | #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31) |
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index 6ee340a3fb3a..603a5d9f045a 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c | |||
@@ -624,6 +624,7 @@ err_cleanup_host: | |||
624 | sdhci_cleanup_host(host); | 624 | sdhci_cleanup_host(host); |
625 | 625 | ||
626 | pm_runtime_disable: | 626 | pm_runtime_disable: |
627 | pm_runtime_put_noidle(&pdev->dev); | ||
627 | pm_runtime_disable(&pdev->dev); | 628 | pm_runtime_disable(&pdev->dev); |
628 | pm_runtime_set_suspended(&pdev->dev); | 629 | pm_runtime_set_suspended(&pdev->dev); |
629 | 630 | ||
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig index cff6bbd226f5..b4e3caf7d799 100644 --- a/drivers/mtd/hyperbus/Kconfig +++ b/drivers/mtd/hyperbus/Kconfig | |||
@@ -14,8 +14,9 @@ if MTD_HYPERBUS | |||
14 | 14 | ||
15 | config HBMC_AM654 | 15 | config HBMC_AM654 |
16 | tristate "HyperBus controller driver for AM65x SoC" | 16 | tristate "HyperBus controller driver for AM65x SoC" |
17 | depends on ARM64 || COMPILE_TEST | ||
17 | select MULTIPLEXER | 18 | select MULTIPLEXER |
18 | select MUX_MMIO | 19 | imply MUX_MMIO |
19 | help | 20 | help |
20 | This is the driver for HyperBus controller on TI's AM65x and | 21 | This is the driver for HyperBus controller on TI's AM65x and |
21 | other SoCs | 22 | other SoCs |
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index a1f8fe1abb10..e082d632fb74 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c | |||
@@ -3259,6 +3259,7 @@ static void onenand_check_features(struct mtd_info *mtd) | |||
3259 | switch (density) { | 3259 | switch (density) { |
3260 | case ONENAND_DEVICE_DENSITY_8Gb: | 3260 | case ONENAND_DEVICE_DENSITY_8Gb: |
3261 | this->options |= ONENAND_HAS_NOP_1; | 3261 | this->options |= ONENAND_HAS_NOP_1; |
3262 | /* fall through */ | ||
3262 | case ONENAND_DEVICE_DENSITY_4Gb: | 3263 | case ONENAND_DEVICE_DENSITY_4Gb: |
3263 | if (ONENAND_IS_DDP(this)) | 3264 | if (ONENAND_IS_DDP(this)) |
3264 | this->options |= ONENAND_HAS_2PLANE; | 3265 | this->options |= ONENAND_HAS_2PLANE; |
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 1622d3145587..8ca9fad6e6ad 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c | |||
@@ -390,6 +390,14 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip) | |||
390 | (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2) | 390 | (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2) |
391 | return MICRON_ON_DIE_UNSUPPORTED; | 391 | return MICRON_ON_DIE_UNSUPPORTED; |
392 | 392 | ||
393 | /* | ||
394 | * It seems that there are devices which do not support ECC officially. | ||
395 | * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices supports | ||
396 | * enabling the ECC feature but don't reflect that to the READ_ID table. | ||
397 | * So we have to guarantee that we disable the ECC feature directly | ||
398 | * after we did the READ_ID table command. Later we can evaluate the | ||
399 | * ECC_ENABLE support. | ||
400 | */ | ||
393 | ret = micron_nand_on_die_ecc_setup(chip, true); | 401 | ret = micron_nand_on_die_ecc_setup(chip, true); |
394 | if (ret) | 402 | if (ret) |
395 | return MICRON_ON_DIE_UNSUPPORTED; | 403 | return MICRON_ON_DIE_UNSUPPORTED; |
@@ -398,13 +406,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip) | |||
398 | if (ret) | 406 | if (ret) |
399 | return MICRON_ON_DIE_UNSUPPORTED; | 407 | return MICRON_ON_DIE_UNSUPPORTED; |
400 | 408 | ||
401 | if (!(id[4] & MICRON_ID_ECC_ENABLED)) | ||
402 | return MICRON_ON_DIE_UNSUPPORTED; | ||
403 | |||
404 | ret = micron_nand_on_die_ecc_setup(chip, false); | 409 | ret = micron_nand_on_die_ecc_setup(chip, false); |
405 | if (ret) | 410 | if (ret) |
406 | return MICRON_ON_DIE_UNSUPPORTED; | 411 | return MICRON_ON_DIE_UNSUPPORTED; |
407 | 412 | ||
413 | if (!(id[4] & MICRON_ID_ECC_ENABLED)) | ||
414 | return MICRON_ON_DIE_UNSUPPORTED; | ||
415 | |||
408 | ret = nand_readid_op(chip, 0, id, sizeof(id)); | 416 | ret = nand_readid_op(chip, 0, id, sizeof(id)); |
409 | if (ret) | 417 | if (ret) |
410 | return MICRON_ON_DIE_UNSUPPORTED; | 418 | return MICRON_ON_DIE_UNSUPPORTED; |
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 11c5bad95226..14a5fb378145 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c | |||
@@ -363,10 +363,13 @@ static int __init arcrimi_setup(char *s) | |||
363 | switch (ints[0]) { | 363 | switch (ints[0]) { |
364 | default: /* ERROR */ | 364 | default: /* ERROR */ |
365 | pr_err("Too many arguments\n"); | 365 | pr_err("Too many arguments\n"); |
366 | /* Fall through */ | ||
366 | case 3: /* Node ID */ | 367 | case 3: /* Node ID */ |
367 | node = ints[3]; | 368 | node = ints[3]; |
369 | /* Fall through */ | ||
368 | case 2: /* IRQ */ | 370 | case 2: /* IRQ */ |
369 | irq = ints[2]; | 371 | irq = ints[2]; |
372 | /* Fall through */ | ||
370 | case 1: /* IO address */ | 373 | case 1: /* IO address */ |
371 | io = ints[1]; | 374 | io = ints[1]; |
372 | } | 375 | } |
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index 28510e33924f..cd27fdc1059b 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c | |||
@@ -197,16 +197,22 @@ static int __init com20020isa_setup(char *s) | |||
197 | switch (ints[0]) { | 197 | switch (ints[0]) { |
198 | default: /* ERROR */ | 198 | default: /* ERROR */ |
199 | pr_info("Too many arguments\n"); | 199 | pr_info("Too many arguments\n"); |
200 | /* Fall through */ | ||
200 | case 6: /* Timeout */ | 201 | case 6: /* Timeout */ |
201 | timeout = ints[6]; | 202 | timeout = ints[6]; |
203 | /* Fall through */ | ||
202 | case 5: /* CKP value */ | 204 | case 5: /* CKP value */ |
203 | clockp = ints[5]; | 205 | clockp = ints[5]; |
206 | /* Fall through */ | ||
204 | case 4: /* Backplane flag */ | 207 | case 4: /* Backplane flag */ |
205 | backplane = ints[4]; | 208 | backplane = ints[4]; |
209 | /* Fall through */ | ||
206 | case 3: /* Node ID */ | 210 | case 3: /* Node ID */ |
207 | node = ints[3]; | 211 | node = ints[3]; |
212 | /* Fall through */ | ||
208 | case 2: /* IRQ */ | 213 | case 2: /* IRQ */ |
209 | irq = ints[2]; | 214 | irq = ints[2]; |
215 | /* Fall through */ | ||
210 | case 1: /* IO address */ | 216 | case 1: /* IO address */ |
211 | io = ints[1]; | 217 | io = ints[1]; |
212 | } | 218 | } |
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index 2c546013a980..186bbf87bc84 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c | |||
@@ -363,8 +363,10 @@ static int __init com90io_setup(char *s) | |||
363 | switch (ints[0]) { | 363 | switch (ints[0]) { |
364 | default: /* ERROR */ | 364 | default: /* ERROR */ |
365 | pr_err("Too many arguments\n"); | 365 | pr_err("Too many arguments\n"); |
366 | /* Fall through */ | ||
366 | case 2: /* IRQ */ | 367 | case 2: /* IRQ */ |
367 | irq = ints[2]; | 368 | irq = ints[2]; |
369 | /* Fall through */ | ||
368 | case 1: /* IO address */ | 370 | case 1: /* IO address */ |
369 | io = ints[1]; | 371 | io = ints[1]; |
370 | } | 372 | } |
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index ca4a57c30bf8..bd75d06ad7df 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c | |||
@@ -693,10 +693,13 @@ static int __init com90xx_setup(char *s) | |||
693 | switch (ints[0]) { | 693 | switch (ints[0]) { |
694 | default: /* ERROR */ | 694 | default: /* ERROR */ |
695 | pr_err("Too many arguments\n"); | 695 | pr_err("Too many arguments\n"); |
696 | /* Fall through */ | ||
696 | case 3: /* Mem address */ | 697 | case 3: /* Mem address */ |
697 | shmem = ints[3]; | 698 | shmem = ints[3]; |
699 | /* Fall through */ | ||
698 | case 2: /* IRQ */ | 700 | case 2: /* IRQ */ |
699 | irq = ints[2]; | 701 | irq = ints[2]; |
702 | /* Fall through */ | ||
700 | case 1: /* IO address */ | 703 | case 1: /* IO address */ |
701 | io = ints[1]; | 704 | io = ints[1]; |
702 | } | 705 | } |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 9b7016abca2f..02fd7822c14a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -2196,6 +2196,15 @@ static void bond_miimon_commit(struct bonding *bond) | |||
2196 | bond_for_each_slave(bond, slave, iter) { | 2196 | bond_for_each_slave(bond, slave, iter) { |
2197 | switch (slave->new_link) { | 2197 | switch (slave->new_link) { |
2198 | case BOND_LINK_NOCHANGE: | 2198 | case BOND_LINK_NOCHANGE: |
2199 | /* For 802.3ad mode, check current slave speed and | ||
2200 | * duplex again in case its port was disabled after | ||
2201 | * invalid speed/duplex reporting but recovered before | ||
2202 | * link monitoring could make a decision on the actual | ||
2203 | * link status | ||
2204 | */ | ||
2205 | if (BOND_MODE(bond) == BOND_MODE_8023AD && | ||
2206 | slave->link == BOND_LINK_UP) | ||
2207 | bond_3ad_adapter_speed_duplex_changed(slave); | ||
2199 | continue; | 2208 | continue; |
2200 | 2209 | ||
2201 | case BOND_LINK_UP: | 2210 | case BOND_LINK_UP: |
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index b6b93a2d93a5..483d270664cc 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c | |||
@@ -1249,6 +1249,8 @@ int register_candev(struct net_device *dev) | |||
1249 | return -EINVAL; | 1249 | return -EINVAL; |
1250 | 1250 | ||
1251 | dev->rtnl_link_ops = &can_link_ops; | 1251 | dev->rtnl_link_ops = &can_link_ops; |
1252 | netif_carrier_off(dev); | ||
1253 | |||
1252 | return register_netdev(dev); | 1254 | return register_netdev(dev); |
1253 | } | 1255 | } |
1254 | EXPORT_SYMBOL_GPL(register_candev); | 1256 | EXPORT_SYMBOL_GPL(register_candev); |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 09d8e623dcf6..dc5695dffc2e 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
@@ -404,9 +404,10 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable) | |||
404 | priv->write(reg_mcr, ®s->mcr); | 404 | priv->write(reg_mcr, ®s->mcr); |
405 | } | 405 | } |
406 | 406 | ||
407 | static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv) | 407 | static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) |
408 | { | 408 | { |
409 | struct flexcan_regs __iomem *regs = priv->regs; | 409 | struct flexcan_regs __iomem *regs = priv->regs; |
410 | unsigned int ackval; | ||
410 | u32 reg_mcr; | 411 | u32 reg_mcr; |
411 | 412 | ||
412 | reg_mcr = priv->read(®s->mcr); | 413 | reg_mcr = priv->read(®s->mcr); |
@@ -416,20 +417,37 @@ static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv) | |||
416 | /* enable stop request */ | 417 | /* enable stop request */ |
417 | regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, | 418 | regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, |
418 | 1 << priv->stm.req_bit, 1 << priv->stm.req_bit); | 419 | 1 << priv->stm.req_bit, 1 << priv->stm.req_bit); |
420 | |||
421 | /* get stop acknowledgment */ | ||
422 | if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr, | ||
423 | ackval, ackval & (1 << priv->stm.ack_bit), | ||
424 | 0, FLEXCAN_TIMEOUT_US)) | ||
425 | return -ETIMEDOUT; | ||
426 | |||
427 | return 0; | ||
419 | } | 428 | } |
420 | 429 | ||
421 | static inline void flexcan_exit_stop_mode(struct flexcan_priv *priv) | 430 | static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv) |
422 | { | 431 | { |
423 | struct flexcan_regs __iomem *regs = priv->regs; | 432 | struct flexcan_regs __iomem *regs = priv->regs; |
433 | unsigned int ackval; | ||
424 | u32 reg_mcr; | 434 | u32 reg_mcr; |
425 | 435 | ||
426 | /* remove stop request */ | 436 | /* remove stop request */ |
427 | regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, | 437 | regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, |
428 | 1 << priv->stm.req_bit, 0); | 438 | 1 << priv->stm.req_bit, 0); |
429 | 439 | ||
440 | /* get stop acknowledgment */ | ||
441 | if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr, | ||
442 | ackval, !(ackval & (1 << priv->stm.ack_bit)), | ||
443 | 0, FLEXCAN_TIMEOUT_US)) | ||
444 | return -ETIMEDOUT; | ||
445 | |||
430 | reg_mcr = priv->read(®s->mcr); | 446 | reg_mcr = priv->read(®s->mcr); |
431 | reg_mcr &= ~FLEXCAN_MCR_SLF_WAK; | 447 | reg_mcr &= ~FLEXCAN_MCR_SLF_WAK; |
432 | priv->write(reg_mcr, ®s->mcr); | 448 | priv->write(reg_mcr, ®s->mcr); |
449 | |||
450 | return 0; | ||
433 | } | 451 | } |
434 | 452 | ||
435 | static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) | 453 | static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) |
@@ -1455,10 +1473,10 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev) | |||
1455 | 1473 | ||
1456 | priv = netdev_priv(dev); | 1474 | priv = netdev_priv(dev); |
1457 | priv->stm.gpr = syscon_node_to_regmap(gpr_np); | 1475 | priv->stm.gpr = syscon_node_to_regmap(gpr_np); |
1458 | of_node_put(gpr_np); | ||
1459 | if (IS_ERR(priv->stm.gpr)) { | 1476 | if (IS_ERR(priv->stm.gpr)) { |
1460 | dev_dbg(&pdev->dev, "could not find gpr regmap\n"); | 1477 | dev_dbg(&pdev->dev, "could not find gpr regmap\n"); |
1461 | return PTR_ERR(priv->stm.gpr); | 1478 | ret = PTR_ERR(priv->stm.gpr); |
1479 | goto out_put_node; | ||
1462 | } | 1480 | } |
1463 | 1481 | ||
1464 | priv->stm.req_gpr = out_val[1]; | 1482 | priv->stm.req_gpr = out_val[1]; |
@@ -1477,6 +1495,10 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev) | |||
1477 | device_set_wakeup_enable(&pdev->dev, true); | 1495 | device_set_wakeup_enable(&pdev->dev, true); |
1478 | 1496 | ||
1479 | return 0; | 1497 | return 0; |
1498 | |||
1499 | out_put_node: | ||
1500 | of_node_put(gpr_np); | ||
1501 | return ret; | ||
1480 | } | 1502 | } |
1481 | 1503 | ||
1482 | static const struct of_device_id flexcan_of_match[] = { | 1504 | static const struct of_device_id flexcan_of_match[] = { |
@@ -1644,7 +1666,9 @@ static int __maybe_unused flexcan_suspend(struct device *device) | |||
1644 | */ | 1666 | */ |
1645 | if (device_may_wakeup(device)) { | 1667 | if (device_may_wakeup(device)) { |
1646 | enable_irq_wake(dev->irq); | 1668 | enable_irq_wake(dev->irq); |
1647 | flexcan_enter_stop_mode(priv); | 1669 | err = flexcan_enter_stop_mode(priv); |
1670 | if (err) | ||
1671 | return err; | ||
1648 | } else { | 1672 | } else { |
1649 | err = flexcan_chip_disable(priv); | 1673 | err = flexcan_chip_disable(priv); |
1650 | if (err) | 1674 | if (err) |
@@ -1717,10 +1741,13 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device) | |||
1717 | { | 1741 | { |
1718 | struct net_device *dev = dev_get_drvdata(device); | 1742 | struct net_device *dev = dev_get_drvdata(device); |
1719 | struct flexcan_priv *priv = netdev_priv(dev); | 1743 | struct flexcan_priv *priv = netdev_priv(dev); |
1744 | int err; | ||
1720 | 1745 | ||
1721 | if (netif_running(dev) && device_may_wakeup(device)) { | 1746 | if (netif_running(dev) && device_may_wakeup(device)) { |
1722 | flexcan_enable_wakeup_irq(priv, false); | 1747 | flexcan_enable_wakeup_irq(priv, false); |
1723 | flexcan_exit_stop_mode(priv); | 1748 | err = flexcan_exit_stop_mode(priv); |
1749 | if (err) | ||
1750 | return err; | ||
1724 | } | 1751 | } |
1725 | 1752 | ||
1726 | return 0; | 1753 | return 0; |
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 51eecc7cdcdd..edaa1ca972c1 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c | |||
@@ -1508,10 +1508,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota) | |||
1508 | 1508 | ||
1509 | /* All packets processed */ | 1509 | /* All packets processed */ |
1510 | if (num_pkts < quota) { | 1510 | if (num_pkts < quota) { |
1511 | napi_complete_done(napi, num_pkts); | 1511 | if (napi_complete_done(napi, num_pkts)) { |
1512 | /* Enable Rx FIFO interrupts */ | 1512 | /* Enable Rx FIFO interrupts */ |
1513 | rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), | 1513 | rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), |
1514 | RCANFD_RFCC_RFIE); | 1514 | RCANFD_RFCC_RFIE); |
1515 | } | ||
1515 | } | 1516 | } |
1516 | return num_pkts; | 1517 | return num_pkts; |
1517 | } | 1518 | } |
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c index 185c7f7d38a4..5e0d5e8101c8 100644 --- a/drivers/net/can/sja1000/peak_pcmcia.c +++ b/drivers/net/can/sja1000/peak_pcmcia.c | |||
@@ -479,7 +479,7 @@ static void pcan_free_channels(struct pcan_pccard *card) | |||
479 | if (!netdev) | 479 | if (!netdev) |
480 | continue; | 480 | continue; |
481 | 481 | ||
482 | strncpy(name, netdev->name, IFNAMSIZ); | 482 | strlcpy(name, netdev->name, IFNAMSIZ); |
483 | 483 | ||
484 | unregister_sja1000dev(netdev); | 484 | unregister_sja1000dev(netdev); |
485 | 485 | ||
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 234cf1042df6..12358f06d194 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c | |||
@@ -664,17 +664,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable) | |||
664 | return regulator_disable(reg); | 664 | return regulator_disable(reg); |
665 | } | 665 | } |
666 | 666 | ||
667 | static void mcp251x_open_clean(struct net_device *net) | ||
668 | { | ||
669 | struct mcp251x_priv *priv = netdev_priv(net); | ||
670 | struct spi_device *spi = priv->spi; | ||
671 | |||
672 | free_irq(spi->irq, priv); | ||
673 | mcp251x_hw_sleep(spi); | ||
674 | mcp251x_power_enable(priv->transceiver, 0); | ||
675 | close_candev(net); | ||
676 | } | ||
677 | |||
678 | static int mcp251x_stop(struct net_device *net) | 667 | static int mcp251x_stop(struct net_device *net) |
679 | { | 668 | { |
680 | struct mcp251x_priv *priv = netdev_priv(net); | 669 | struct mcp251x_priv *priv = netdev_priv(net); |
@@ -941,37 +930,43 @@ static int mcp251x_open(struct net_device *net) | |||
941 | flags | IRQF_ONESHOT, DEVICE_NAME, priv); | 930 | flags | IRQF_ONESHOT, DEVICE_NAME, priv); |
942 | if (ret) { | 931 | if (ret) { |
943 | dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); | 932 | dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); |
944 | mcp251x_power_enable(priv->transceiver, 0); | 933 | goto out_close; |
945 | close_candev(net); | ||
946 | goto open_unlock; | ||
947 | } | 934 | } |
948 | 935 | ||
949 | priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, | 936 | priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, |
950 | 0); | 937 | 0); |
938 | if (!priv->wq) { | ||
939 | ret = -ENOMEM; | ||
940 | goto out_clean; | ||
941 | } | ||
951 | INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); | 942 | INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); |
952 | INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); | 943 | INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); |
953 | 944 | ||
954 | ret = mcp251x_hw_reset(spi); | 945 | ret = mcp251x_hw_reset(spi); |
955 | if (ret) { | 946 | if (ret) |
956 | mcp251x_open_clean(net); | 947 | goto out_free_wq; |
957 | goto open_unlock; | ||
958 | } | ||
959 | ret = mcp251x_setup(net, spi); | 948 | ret = mcp251x_setup(net, spi); |
960 | if (ret) { | 949 | if (ret) |
961 | mcp251x_open_clean(net); | 950 | goto out_free_wq; |
962 | goto open_unlock; | ||
963 | } | ||
964 | ret = mcp251x_set_normal_mode(spi); | 951 | ret = mcp251x_set_normal_mode(spi); |
965 | if (ret) { | 952 | if (ret) |
966 | mcp251x_open_clean(net); | 953 | goto out_free_wq; |
967 | goto open_unlock; | ||
968 | } | ||
969 | 954 | ||
970 | can_led_event(net, CAN_LED_EVENT_OPEN); | 955 | can_led_event(net, CAN_LED_EVENT_OPEN); |
971 | 956 | ||
972 | netif_wake_queue(net); | 957 | netif_wake_queue(net); |
958 | mutex_unlock(&priv->mcp_lock); | ||
973 | 959 | ||
974 | open_unlock: | 960 | return 0; |
961 | |||
962 | out_free_wq: | ||
963 | destroy_workqueue(priv->wq); | ||
964 | out_clean: | ||
965 | free_irq(spi->irq, priv); | ||
966 | mcp251x_hw_sleep(spi); | ||
967 | out_close: | ||
968 | mcp251x_power_enable(priv->transceiver, 0); | ||
969 | close_candev(net); | ||
975 | mutex_unlock(&priv->mcp_lock); | 970 | mutex_unlock(&priv->mcp_lock); |
976 | return ret; | 971 | return ret; |
977 | } | 972 | } |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 458154c9b482..65dce642b86b 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c | |||
@@ -568,16 +568,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev) | |||
568 | dev->state &= ~PCAN_USB_STATE_STARTED; | 568 | dev->state &= ~PCAN_USB_STATE_STARTED; |
569 | netif_stop_queue(netdev); | 569 | netif_stop_queue(netdev); |
570 | 570 | ||
571 | close_candev(netdev); | ||
572 | |||
573 | dev->can.state = CAN_STATE_STOPPED; | ||
574 | |||
571 | /* unlink all pending urbs and free used memory */ | 575 | /* unlink all pending urbs and free used memory */ |
572 | peak_usb_unlink_all_urbs(dev); | 576 | peak_usb_unlink_all_urbs(dev); |
573 | 577 | ||
574 | if (dev->adapter->dev_stop) | 578 | if (dev->adapter->dev_stop) |
575 | dev->adapter->dev_stop(dev); | 579 | dev->adapter->dev_stop(dev); |
576 | 580 | ||
577 | close_candev(netdev); | ||
578 | |||
579 | dev->can.state = CAN_STATE_STOPPED; | ||
580 | |||
581 | /* can set bus off now */ | 581 | /* can set bus off now */ |
582 | if (dev->adapter->dev_set_bus) { | 582 | if (dev->adapter->dev_set_bus) { |
583 | int err = dev->adapter->dev_set_bus(dev, 0); | 583 | int err = dev->adapter->dev_set_bus(dev, 0); |
@@ -855,7 +855,7 @@ static void peak_usb_disconnect(struct usb_interface *intf) | |||
855 | 855 | ||
856 | dev_prev_siblings = dev->prev_siblings; | 856 | dev_prev_siblings = dev->prev_siblings; |
857 | dev->state &= ~PCAN_USB_STATE_CONNECTED; | 857 | dev->state &= ~PCAN_USB_STATE_CONNECTED; |
858 | strncpy(name, netdev->name, IFNAMSIZ); | 858 | strlcpy(name, netdev->name, IFNAMSIZ); |
859 | 859 | ||
860 | unregister_netdev(netdev); | 860 | unregister_netdev(netdev); |
861 | 861 | ||
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 34761c3a6286..47cc1ff5b88e 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c | |||
@@ -841,7 +841,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) | |||
841 | goto err_out; | 841 | goto err_out; |
842 | 842 | ||
843 | /* allocate command buffer once for all for the interface */ | 843 | /* allocate command buffer once for all for the interface */ |
844 | pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE, | 844 | pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE, |
845 | GFP_KERNEL); | 845 | GFP_KERNEL); |
846 | if (!pdev->cmd_buffer_addr) | 846 | if (!pdev->cmd_buffer_addr) |
847 | goto err_out_1; | 847 | goto err_out_1; |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 178bb7cff0c1..53cb2f72bdd0 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c | |||
@@ -494,7 +494,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded) | |||
494 | u8 *buffer; | 494 | u8 *buffer; |
495 | int err; | 495 | int err; |
496 | 496 | ||
497 | buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL); | 497 | buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL); |
498 | if (!buffer) | 498 | if (!buffer) |
499 | return -ENOMEM; | 499 | return -ENOMEM; |
500 | 500 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 467e61ced82c..d3804ffd3d2a 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/platform_data/mv88e6xxx.h> | 27 | #include <linux/platform_data/mv88e6xxx.h> |
28 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
29 | #include <linux/gpio/consumer.h> | 29 | #include <linux/gpio/consumer.h> |
30 | #include <linux/phy.h> | ||
31 | #include <linux/phylink.h> | 30 | #include <linux/phylink.h> |
32 | #include <net/dsa.h> | 31 | #include <net/dsa.h> |
33 | 32 | ||
@@ -430,7 +429,7 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link, | |||
430 | return 0; | 429 | return 0; |
431 | 430 | ||
432 | /* Port's MAC control must not be changed unless the link is down */ | 431 | /* Port's MAC control must not be changed unless the link is down */ |
433 | err = chip->info->ops->port_set_link(chip, port, 0); | 432 | err = chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN); |
434 | if (err) | 433 | if (err) |
435 | return err; | 434 | return err; |
436 | 435 | ||
@@ -482,30 +481,6 @@ static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port) | |||
482 | return port < chip->info->num_internal_phys; | 481 | return port < chip->info->num_internal_phys; |
483 | } | 482 | } |
484 | 483 | ||
485 | /* We expect the switch to perform auto negotiation if there is a real | ||
486 | * phy. However, in the case of a fixed link phy, we force the port | ||
487 | * settings from the fixed link settings. | ||
488 | */ | ||
489 | static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, | ||
490 | struct phy_device *phydev) | ||
491 | { | ||
492 | struct mv88e6xxx_chip *chip = ds->priv; | ||
493 | int err; | ||
494 | |||
495 | if (!phy_is_pseudo_fixed_link(phydev) && | ||
496 | mv88e6xxx_phy_is_internal(ds, port)) | ||
497 | return; | ||
498 | |||
499 | mv88e6xxx_reg_lock(chip); | ||
500 | err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed, | ||
501 | phydev->duplex, phydev->pause, | ||
502 | phydev->interface); | ||
503 | mv88e6xxx_reg_unlock(chip); | ||
504 | |||
505 | if (err && err != -EOPNOTSUPP) | ||
506 | dev_err(ds->dev, "p%d: failed to configure MAC\n", port); | ||
507 | } | ||
508 | |||
509 | static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port, | 484 | static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port, |
510 | unsigned long *mask, | 485 | unsigned long *mask, |
511 | struct phylink_link_state *state) | 486 | struct phylink_link_state *state) |
@@ -2747,6 +2722,7 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip, | |||
2747 | err = mv88e6xxx_mdio_register(chip, child, true); | 2722 | err = mv88e6xxx_mdio_register(chip, child, true); |
2748 | if (err) { | 2723 | if (err) { |
2749 | mv88e6xxx_mdios_unregister(chip); | 2724 | mv88e6xxx_mdios_unregister(chip); |
2725 | of_node_put(child); | ||
2750 | return err; | 2726 | return err; |
2751 | } | 2727 | } |
2752 | } | 2728 | } |
@@ -4720,7 +4696,6 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port, | |||
4720 | static const struct dsa_switch_ops mv88e6xxx_switch_ops = { | 4696 | static const struct dsa_switch_ops mv88e6xxx_switch_ops = { |
4721 | .get_tag_protocol = mv88e6xxx_get_tag_protocol, | 4697 | .get_tag_protocol = mv88e6xxx_get_tag_protocol, |
4722 | .setup = mv88e6xxx_setup, | 4698 | .setup = mv88e6xxx_setup, |
4723 | .adjust_link = mv88e6xxx_adjust_link, | ||
4724 | .phylink_validate = mv88e6xxx_validate, | 4699 | .phylink_validate = mv88e6xxx_validate, |
4725 | .phylink_mac_link_state = mv88e6xxx_link_state, | 4700 | .phylink_mac_link_state = mv88e6xxx_link_state, |
4726 | .phylink_mac_config = mv88e6xxx_mac_config, | 4701 | .phylink_mac_config = mv88e6xxx_mac_config, |
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 232e8cc96f6d..16f15c93a102 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c | |||
@@ -2,7 +2,7 @@ | |||
2 | /* | 2 | /* |
3 | * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name> | 3 | * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name> |
4 | * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org> | 4 | * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org> |
5 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | 5 | * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved. |
6 | * Copyright (c) 2016 John Crispin <john@phrozen.org> | 6 | * Copyright (c) 2016 John Crispin <john@phrozen.org> |
7 | */ | 7 | */ |
8 | 8 | ||
@@ -583,8 +583,11 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv) | |||
583 | 583 | ||
584 | for_each_available_child_of_node(ports, port) { | 584 | for_each_available_child_of_node(ports, port) { |
585 | err = of_property_read_u32(port, "reg", ®); | 585 | err = of_property_read_u32(port, "reg", ®); |
586 | if (err) | 586 | if (err) { |
587 | of_node_put(port); | ||
588 | of_node_put(ports); | ||
587 | return err; | 589 | return err; |
590 | } | ||
588 | 591 | ||
589 | if (!dsa_is_user_port(priv->ds, reg)) | 592 | if (!dsa_is_user_port(priv->ds, reg)) |
590 | continue; | 593 | continue; |
@@ -595,6 +598,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv) | |||
595 | internal_mdio_mask |= BIT(reg); | 598 | internal_mdio_mask |= BIT(reg); |
596 | } | 599 | } |
597 | 600 | ||
601 | of_node_put(ports); | ||
598 | if (!external_mdio_mask && !internal_mdio_mask) { | 602 | if (!external_mdio_mask && !internal_mdio_mask) { |
599 | dev_err(priv->dev, "no PHYs are defined.\n"); | 603 | dev_err(priv->dev, "no PHYs are defined.\n"); |
600 | return -EINVAL; | 604 | return -EINVAL; |
@@ -935,6 +939,8 @@ qca8k_port_enable(struct dsa_switch *ds, int port, | |||
935 | qca8k_port_set_status(priv, port, 1); | 939 | qca8k_port_set_status(priv, port, 1); |
936 | priv->port_sts[port].enabled = 1; | 940 | priv->port_sts[port].enabled = 1; |
937 | 941 | ||
942 | phy_support_asym_pause(phy); | ||
943 | |||
938 | return 0; | 944 | return 0; |
939 | } | 945 | } |
940 | 946 | ||
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c index 6bfb1696a6f2..9988c9d18567 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c | |||
@@ -277,6 +277,18 @@ sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, | |||
277 | SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op); | 277 | SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op); |
278 | } | 278 | } |
279 | 279 | ||
280 | static size_t sja1105et_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr, | ||
281 | enum packing_op op) | ||
282 | { | ||
283 | struct sja1105_l2_lookup_entry *entry = entry_ptr; | ||
284 | u8 *cmd = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY; | ||
285 | const int size = SJA1105_SIZE_DYN_CMD; | ||
286 | |||
287 | sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op); | ||
288 | |||
289 | return sja1105et_l2_lookup_entry_packing(buf, entry_ptr, op); | ||
290 | } | ||
291 | |||
280 | static void | 292 | static void |
281 | sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, | 293 | sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd, |
282 | enum packing_op op) | 294 | enum packing_op op) |
@@ -477,7 +489,7 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr, | |||
477 | /* SJA1105E/T: First generation */ | 489 | /* SJA1105E/T: First generation */ |
478 | struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { | 490 | struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { |
479 | [BLK_IDX_L2_LOOKUP] = { | 491 | [BLK_IDX_L2_LOOKUP] = { |
480 | .entry_packing = sja1105et_l2_lookup_entry_packing, | 492 | .entry_packing = sja1105et_dyn_l2_lookup_entry_packing, |
481 | .cmd_packing = sja1105et_l2_lookup_cmd_packing, | 493 | .cmd_packing = sja1105et_l2_lookup_cmd_packing, |
482 | .access = (OP_READ | OP_WRITE | OP_DEL), | 494 | .access = (OP_READ | OP_WRITE | OP_DEL), |
483 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT, | 495 | .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT, |
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 32bf3a7cc3b6..d073baffc20b 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c | |||
@@ -218,7 +218,7 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv) | |||
218 | /* This selects between Independent VLAN Learning (IVL) and | 218 | /* This selects between Independent VLAN Learning (IVL) and |
219 | * Shared VLAN Learning (SVL) | 219 | * Shared VLAN Learning (SVL) |
220 | */ | 220 | */ |
221 | .shared_learn = false, | 221 | .shared_learn = true, |
222 | /* Don't discard management traffic based on ENFPORT - | 222 | /* Don't discard management traffic based on ENFPORT - |
223 | * we don't perform SMAC port enforcement anyway, so | 223 | * we don't perform SMAC port enforcement anyway, so |
224 | * what we are setting here doesn't matter. | 224 | * what we are setting here doesn't matter. |
@@ -625,6 +625,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, | |||
625 | if (of_property_read_u32(child, "reg", &index) < 0) { | 625 | if (of_property_read_u32(child, "reg", &index) < 0) { |
626 | dev_err(dev, "Port number not defined in device tree " | 626 | dev_err(dev, "Port number not defined in device tree " |
627 | "(property \"reg\")\n"); | 627 | "(property \"reg\")\n"); |
628 | of_node_put(child); | ||
628 | return -ENODEV; | 629 | return -ENODEV; |
629 | } | 630 | } |
630 | 631 | ||
@@ -634,6 +635,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, | |||
634 | dev_err(dev, "Failed to read phy-mode or " | 635 | dev_err(dev, "Failed to read phy-mode or " |
635 | "phy-interface-type property for port %d\n", | 636 | "phy-interface-type property for port %d\n", |
636 | index); | 637 | index); |
638 | of_node_put(child); | ||
637 | return -ENODEV; | 639 | return -ENODEV; |
638 | } | 640 | } |
639 | ports[index].phy_mode = phy_mode; | 641 | ports[index].phy_mode = phy_mode; |
@@ -643,6 +645,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv, | |||
643 | if (!of_phy_is_fixed_link(child)) { | 645 | if (!of_phy_is_fixed_link(child)) { |
644 | dev_err(dev, "phy-handle or fixed-link " | 646 | dev_err(dev, "phy-handle or fixed-link " |
645 | "properties missing!\n"); | 647 | "properties missing!\n"); |
648 | of_node_put(child); | ||
646 | return -ENODEV; | 649 | return -ENODEV; |
647 | } | 650 | } |
648 | /* phy-handle is missing, but fixed-link isn't. | 651 | /* phy-handle is missing, but fixed-link isn't. |
@@ -1089,8 +1092,13 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port, | |||
1089 | l2_lookup.vlanid = vid; | 1092 | l2_lookup.vlanid = vid; |
1090 | l2_lookup.iotag = SJA1105_S_TAG; | 1093 | l2_lookup.iotag = SJA1105_S_TAG; |
1091 | l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); | 1094 | l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); |
1092 | l2_lookup.mask_vlanid = VLAN_VID_MASK; | 1095 | if (dsa_port_is_vlan_filtering(&ds->ports[port])) { |
1093 | l2_lookup.mask_iotag = BIT(0); | 1096 | l2_lookup.mask_vlanid = VLAN_VID_MASK; |
1097 | l2_lookup.mask_iotag = BIT(0); | ||
1098 | } else { | ||
1099 | l2_lookup.mask_vlanid = 0; | ||
1100 | l2_lookup.mask_iotag = 0; | ||
1101 | } | ||
1094 | l2_lookup.destports = BIT(port); | 1102 | l2_lookup.destports = BIT(port); |
1095 | 1103 | ||
1096 | rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, | 1104 | rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, |
@@ -1147,8 +1155,13 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port, | |||
1147 | l2_lookup.vlanid = vid; | 1155 | l2_lookup.vlanid = vid; |
1148 | l2_lookup.iotag = SJA1105_S_TAG; | 1156 | l2_lookup.iotag = SJA1105_S_TAG; |
1149 | l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); | 1157 | l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0); |
1150 | l2_lookup.mask_vlanid = VLAN_VID_MASK; | 1158 | if (dsa_port_is_vlan_filtering(&ds->ports[port])) { |
1151 | l2_lookup.mask_iotag = BIT(0); | 1159 | l2_lookup.mask_vlanid = VLAN_VID_MASK; |
1160 | l2_lookup.mask_iotag = BIT(0); | ||
1161 | } else { | ||
1162 | l2_lookup.mask_vlanid = 0; | ||
1163 | l2_lookup.mask_iotag = 0; | ||
1164 | } | ||
1152 | l2_lookup.destports = BIT(port); | 1165 | l2_lookup.destports = BIT(port); |
1153 | 1166 | ||
1154 | rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, | 1167 | rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, |
@@ -1178,60 +1191,31 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port, | |||
1178 | const unsigned char *addr, u16 vid) | 1191 | const unsigned char *addr, u16 vid) |
1179 | { | 1192 | { |
1180 | struct sja1105_private *priv = ds->priv; | 1193 | struct sja1105_private *priv = ds->priv; |
1181 | u16 rx_vid, tx_vid; | ||
1182 | int rc, i; | ||
1183 | 1194 | ||
1184 | if (dsa_port_is_vlan_filtering(&ds->ports[port])) | 1195 | /* dsa_8021q is in effect when the bridge's vlan_filtering isn't, |
1185 | return priv->info->fdb_add_cmd(ds, port, addr, vid); | 1196 | * so the switch still does some VLAN processing internally. |
1186 | 1197 | * But Shared VLAN Learning (SVL) is also active, and it will take | |
1187 | /* Since we make use of VLANs even when the bridge core doesn't tell us | 1198 | * care of autonomous forwarding between the unique pvid's of each |
1188 | * to, translate these FDB entries into the correct dsa_8021q ones. | 1199 | * port. Here we just make sure that users can't add duplicate FDB |
1189 | * The basic idea (also repeats for removal below) is: | 1200 | * entries when in this mode - the actual VID doesn't matter except |
1190 | * - Each of the other front-panel ports needs to be able to forward a | 1201 | * for what gets printed in 'bridge fdb show'. In the case of zero, |
1191 | * pvid-tagged (aka tagged with their rx_vid) frame that matches this | 1202 | * no VID gets printed at all. |
1192 | * DMAC. | ||
1193 | * - The CPU port (aka the tx_vid of this port) needs to be able to | ||
1194 | * send a frame matching this DMAC to the specified port. | ||
1195 | * For a better picture see net/dsa/tag_8021q.c. | ||
1196 | */ | 1203 | */ |
1197 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | 1204 | if (!dsa_port_is_vlan_filtering(&ds->ports[port])) |
1198 | if (i == port) | 1205 | vid = 0; |
1199 | continue; | ||
1200 | if (i == dsa_upstream_port(priv->ds, port)) | ||
1201 | continue; | ||
1202 | 1206 | ||
1203 | rx_vid = dsa_8021q_rx_vid(ds, i); | 1207 | return priv->info->fdb_add_cmd(ds, port, addr, vid); |
1204 | rc = priv->info->fdb_add_cmd(ds, port, addr, rx_vid); | ||
1205 | if (rc < 0) | ||
1206 | return rc; | ||
1207 | } | ||
1208 | tx_vid = dsa_8021q_tx_vid(ds, port); | ||
1209 | return priv->info->fdb_add_cmd(ds, port, addr, tx_vid); | ||
1210 | } | 1208 | } |
1211 | 1209 | ||
1212 | static int sja1105_fdb_del(struct dsa_switch *ds, int port, | 1210 | static int sja1105_fdb_del(struct dsa_switch *ds, int port, |
1213 | const unsigned char *addr, u16 vid) | 1211 | const unsigned char *addr, u16 vid) |
1214 | { | 1212 | { |
1215 | struct sja1105_private *priv = ds->priv; | 1213 | struct sja1105_private *priv = ds->priv; |
1216 | u16 rx_vid, tx_vid; | ||
1217 | int rc, i; | ||
1218 | |||
1219 | if (dsa_port_is_vlan_filtering(&ds->ports[port])) | ||
1220 | return priv->info->fdb_del_cmd(ds, port, addr, vid); | ||
1221 | 1214 | ||
1222 | for (i = 0; i < SJA1105_NUM_PORTS; i++) { | 1215 | if (!dsa_port_is_vlan_filtering(&ds->ports[port])) |
1223 | if (i == port) | 1216 | vid = 0; |
1224 | continue; | ||
1225 | if (i == dsa_upstream_port(priv->ds, port)) | ||
1226 | continue; | ||
1227 | 1217 | ||
1228 | rx_vid = dsa_8021q_rx_vid(ds, i); | 1218 | return priv->info->fdb_del_cmd(ds, port, addr, vid); |
1229 | rc = priv->info->fdb_del_cmd(ds, port, addr, rx_vid); | ||
1230 | if (rc < 0) | ||
1231 | return rc; | ||
1232 | } | ||
1233 | tx_vid = dsa_8021q_tx_vid(ds, port); | ||
1234 | return priv->info->fdb_del_cmd(ds, port, addr, tx_vid); | ||
1235 | } | 1219 | } |
1236 | 1220 | ||
1237 | static int sja1105_fdb_dump(struct dsa_switch *ds, int port, | 1221 | static int sja1105_fdb_dump(struct dsa_switch *ds, int port, |
@@ -1270,39 +1254,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, | |||
1270 | continue; | 1254 | continue; |
1271 | u64_to_ether_addr(l2_lookup.macaddr, macaddr); | 1255 | u64_to_ether_addr(l2_lookup.macaddr, macaddr); |
1272 | 1256 | ||
1273 | /* On SJA1105 E/T, the switch doesn't implement the LOCKEDS | 1257 | /* We need to hide the dsa_8021q VLANs from the user. */ |
1274 | * bit, so it doesn't tell us whether a FDB entry is static | 1258 | if (!dsa_port_is_vlan_filtering(&ds->ports[port])) |
1275 | * or not. | 1259 | l2_lookup.vlanid = 0; |
1276 | * But, of course, we can find out - we're the ones who added | ||
1277 | * it in the first place. | ||
1278 | */ | ||
1279 | if (priv->info->device_id == SJA1105E_DEVICE_ID || | ||
1280 | priv->info->device_id == SJA1105T_DEVICE_ID) { | ||
1281 | int match; | ||
1282 | |||
1283 | match = sja1105_find_static_fdb_entry(priv, port, | ||
1284 | &l2_lookup); | ||
1285 | l2_lookup.lockeds = (match >= 0); | ||
1286 | } | ||
1287 | |||
1288 | /* We need to hide the dsa_8021q VLANs from the user. This | ||
1289 | * basically means hiding the duplicates and only showing | ||
1290 | * the pvid that is supposed to be active in standalone and | ||
1291 | * non-vlan_filtering modes (aka 1). | ||
1292 | * - For statically added FDB entries (bridge fdb add), we | ||
1293 | * can convert the TX VID (coming from the CPU port) into the | ||
1294 | * pvid and ignore the RX VIDs of the other ports. | ||
1295 | * - For dynamically learned FDB entries, a single entry with | ||
1296 | * no duplicates is learned - that which has the real port's | ||
1297 | * pvid, aka RX VID. | ||
1298 | */ | ||
1299 | if (!dsa_port_is_vlan_filtering(&ds->ports[port])) { | ||
1300 | if (l2_lookup.vlanid == tx_vid || | ||
1301 | l2_lookup.vlanid == rx_vid) | ||
1302 | l2_lookup.vlanid = 1; | ||
1303 | else | ||
1304 | continue; | ||
1305 | } | ||
1306 | cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); | 1260 | cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); |
1307 | } | 1261 | } |
1308 | return 0; | 1262 | return 0; |
@@ -1594,6 +1548,7 @@ static int sja1105_vlan_prepare(struct dsa_switch *ds, int port, | |||
1594 | */ | 1548 | */ |
1595 | static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) | 1549 | static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) |
1596 | { | 1550 | { |
1551 | struct sja1105_l2_lookup_params_entry *l2_lookup_params; | ||
1597 | struct sja1105_general_params_entry *general_params; | 1552 | struct sja1105_general_params_entry *general_params; |
1598 | struct sja1105_private *priv = ds->priv; | 1553 | struct sja1105_private *priv = ds->priv; |
1599 | struct sja1105_table *table; | 1554 | struct sja1105_table *table; |
@@ -1622,6 +1577,28 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled) | |||
1622 | general_params->incl_srcpt1 = enabled; | 1577 | general_params->incl_srcpt1 = enabled; |
1623 | general_params->incl_srcpt0 = enabled; | 1578 | general_params->incl_srcpt0 = enabled; |
1624 | 1579 | ||
1580 | /* VLAN filtering => independent VLAN learning. | ||
1581 | * No VLAN filtering => shared VLAN learning. | ||
1582 | * | ||
1583 | * In shared VLAN learning mode, untagged traffic still gets | ||
1584 | * pvid-tagged, and the FDB table gets populated with entries | ||
1585 | * containing the "real" (pvid or from VLAN tag) VLAN ID. | ||
1586 | * However the switch performs a masked L2 lookup in the FDB, | ||
1587 | * effectively only looking up a frame's DMAC (and not VID) for the | ||
1588 | * forwarding decision. | ||
1589 | * | ||
1590 | * This is extremely convenient for us, because in modes with | ||
1591 | * vlan_filtering=0, dsa_8021q actually installs unique pvid's into | ||
1592 | * each front panel port. This is good for identification but breaks | ||
1593 | * learning badly - the VID of the learnt FDB entry is unique, aka | ||
1594 | * no frames coming from any other port are going to have it. So | ||
1595 | * for forwarding purposes, this is as though learning was broken | ||
1596 | * (all frames get flooded). | ||
1597 | */ | ||
1598 | table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; | ||
1599 | l2_lookup_params = table->entries; | ||
1600 | l2_lookup_params->shared_learn = !enabled; | ||
1601 | |||
1625 | rc = sja1105_static_config_reload(priv); | 1602 | rc = sja1105_static_config_reload(priv); |
1626 | if (rc) | 1603 | if (rc) |
1627 | dev_err(ds->dev, "Failed to change VLAN Ethertype\n"); | 1604 | dev_err(ds->dev, "Failed to change VLAN Ethertype\n"); |
@@ -1751,6 +1728,8 @@ static void sja1105_teardown(struct dsa_switch *ds) | |||
1751 | 1728 | ||
1752 | cancel_work_sync(&priv->tagger_data.rxtstamp_work); | 1729 | cancel_work_sync(&priv->tagger_data.rxtstamp_work); |
1753 | skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue); | 1730 | skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue); |
1731 | sja1105_ptp_clock_unregister(priv); | ||
1732 | sja1105_static_config_free(&priv->static_config); | ||
1754 | } | 1733 | } |
1755 | 1734 | ||
1756 | static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, | 1735 | static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot, |
@@ -2208,9 +2187,7 @@ static int sja1105_remove(struct spi_device *spi) | |||
2208 | { | 2187 | { |
2209 | struct sja1105_private *priv = spi_get_drvdata(spi); | 2188 | struct sja1105_private *priv = spi_get_drvdata(spi); |
2210 | 2189 | ||
2211 | sja1105_ptp_clock_unregister(priv); | ||
2212 | dsa_unregister_switch(priv->ds); | 2190 | dsa_unregister_switch(priv->ds); |
2213 | sja1105_static_config_free(&priv->static_config); | ||
2214 | return 0; | 2191 | return 0; |
2215 | } | 2192 | } |
2216 | 2193 | ||
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index d19cfdf681af..d8e8dd59f3d1 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c | |||
@@ -369,16 +369,15 @@ int sja1105_ptp_clock_register(struct sja1105_private *priv) | |||
369 | .mult = SJA1105_CC_MULT, | 369 | .mult = SJA1105_CC_MULT, |
370 | }; | 370 | }; |
371 | mutex_init(&priv->ptp_lock); | 371 | mutex_init(&priv->ptp_lock); |
372 | INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check); | ||
373 | |||
374 | schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL); | ||
375 | |||
376 | priv->ptp_caps = sja1105_ptp_caps; | 372 | priv->ptp_caps = sja1105_ptp_caps; |
377 | 373 | ||
378 | priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev); | 374 | priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev); |
379 | if (IS_ERR_OR_NULL(priv->clock)) | 375 | if (IS_ERR_OR_NULL(priv->clock)) |
380 | return PTR_ERR(priv->clock); | 376 | return PTR_ERR(priv->clock); |
381 | 377 | ||
378 | INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check); | ||
379 | schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL); | ||
380 | |||
382 | return sja1105_ptp_reset(priv); | 381 | return sja1105_ptp_reset(priv); |
383 | } | 382 | } |
384 | 383 | ||
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index 2a3e2450968e..a9478577b495 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig | |||
@@ -12,8 +12,8 @@ config NET_VENDOR_8390 | |||
12 | 12 | ||
13 | Note that the answer to this question doesn't directly affect the | 13 | Note that the answer to this question doesn't directly affect the |
14 | kernel: saying N will just cause the configurator to skip all | 14 | kernel: saying N will just cause the configurator to skip all |
15 | the questions about Western Digital cards. If you say Y, you will be | 15 | the questions about National Semiconductor 8390 cards. If you say Y, |
16 | asked for your specific card in the following questions. | 16 | you will be asked for your specific card in the following questions. |
17 | 17 | ||
18 | if NET_VENDOR_8390 | 18 | if NET_VENDOR_8390 |
19 | 19 | ||
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index e43d922f043e..174344c450af 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c | |||
@@ -2362,7 +2362,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) | |||
2362 | 2362 | ||
2363 | /* Allocate memory for the TCB's (Transmit Control Block) */ | 2363 | /* Allocate memory for the TCB's (Transmit Control Block) */ |
2364 | tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), | 2364 | tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb), |
2365 | GFP_ATOMIC | GFP_DMA); | 2365 | GFP_KERNEL | GFP_DMA); |
2366 | if (!tx_ring->tcb_ring) | 2366 | if (!tx_ring->tcb_ring) |
2367 | return -ENOMEM; | 2367 | return -ENOMEM; |
2368 | 2368 | ||
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 3434730a7699..0537df06a9b5 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c | |||
@@ -860,7 +860,9 @@ static int emac_probe(struct platform_device *pdev) | |||
860 | goto out_clk_disable_unprepare; | 860 | goto out_clk_disable_unprepare; |
861 | } | 861 | } |
862 | 862 | ||
863 | db->phy_node = of_parse_phandle(np, "phy", 0); | 863 | db->phy_node = of_parse_phandle(np, "phy-handle", 0); |
864 | if (!db->phy_node) | ||
865 | db->phy_node = of_parse_phandle(np, "phy", 0); | ||
864 | if (!db->phy_node) { | 866 | if (!db->phy_node) { |
865 | dev_err(&pdev->dev, "no associated PHY\n"); | 867 | dev_err(&pdev->dev, "no associated PHY\n"); |
866 | ret = -ENODEV; | 868 | ret = -ENODEV; |
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index de4950d2022e..9f965cdfff5c 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig | |||
@@ -14,7 +14,7 @@ config NET_VENDOR_AMD | |||
14 | say Y. | 14 | say Y. |
15 | 15 | ||
16 | Note that the answer to this question does not directly affect | 16 | Note that the answer to this question does not directly affect |
17 | the kernel: saying N will just case the configurator to skip all | 17 | the kernel: saying N will just cause the configurator to skip all |
18 | the questions regarding AMD chipsets. If you say Y, you will be asked | 18 | the questions regarding AMD chipsets. If you say Y, you will be asked |
19 | for your specific chipset/driver in the following questions. | 19 | for your specific chipset/driver in the following questions. |
20 | 20 | ||
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig index fde7ae33e302..f78b9c841296 100644 --- a/drivers/net/ethernet/apple/Kconfig +++ b/drivers/net/ethernet/apple/Kconfig | |||
@@ -11,8 +11,8 @@ config NET_VENDOR_APPLE | |||
11 | If you have a network (Ethernet) card belonging to this class, say Y. | 11 | If you have a network (Ethernet) card belonging to this class, say Y. |
12 | 12 | ||
13 | Note that the answer to this question doesn't directly affect the | 13 | Note that the answer to this question doesn't directly affect the |
14 | kernel: saying N will just cause the configurator to skip all | 14 | kernel: saying N will just cause the configurator to skip all the |
15 | the questions about IBM devices. If you say Y, you will be asked for | 15 | questions about Apple devices. If you say Y, you will be asked for |
16 | your specific card in the following questions. | 16 | your specific card in the following questions. |
17 | 17 | ||
18 | if NET_VENDOR_APPLE | 18 | if NET_VENDOR_APPLE |
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 40a8717f51b1..7548247455d7 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c | |||
@@ -1141,7 +1141,7 @@ static int ag71xx_rings_init(struct ag71xx *ag) | |||
1141 | 1141 | ||
1142 | tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev, | 1142 | tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev, |
1143 | ring_size * AG71XX_DESC_SIZE, | 1143 | ring_size * AG71XX_DESC_SIZE, |
1144 | &tx->descs_dma, GFP_ATOMIC); | 1144 | &tx->descs_dma, GFP_KERNEL); |
1145 | if (!tx->descs_cpu) { | 1145 | if (!tx->descs_cpu) { |
1146 | kfree(tx->buf); | 1146 | kfree(tx->buf); |
1147 | tx->buf = NULL; | 1147 | tx->buf = NULL; |
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index e9017caf024d..e24f5d2b6afe 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig | |||
@@ -14,9 +14,9 @@ config NET_VENDOR_BROADCOM | |||
14 | say Y. | 14 | say Y. |
15 | 15 | ||
16 | Note that the answer to this question does not directly affect | 16 | Note that the answer to this question does not directly affect |
17 | the kernel: saying N will just case the configurator to skip all | 17 | the kernel: saying N will just cause the configurator to skip all |
18 | the questions regarding AMD chipsets. If you say Y, you will be asked | 18 | the questions regarding Broadcom chipsets. If you say Y, you will |
19 | for your specific chipset/driver in the following questions. | 19 | be asked for your specific chipset/driver in the following questions. |
20 | 20 | ||
21 | if NET_VENDOR_BROADCOM | 21 | if NET_VENDOR_BROADCOM |
22 | 22 | ||
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index b9c5cea8db16..9483553ce444 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -992,7 +992,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) | |||
992 | { | 992 | { |
993 | struct bcm_sysport_priv *priv = | 993 | struct bcm_sysport_priv *priv = |
994 | container_of(napi, struct bcm_sysport_priv, napi); | 994 | container_of(napi, struct bcm_sysport_priv, napi); |
995 | struct dim_sample dim_sample; | 995 | struct dim_sample dim_sample = {}; |
996 | unsigned int work_done = 0; | 996 | unsigned int work_done = 0; |
997 | 997 | ||
998 | work_done = bcm_sysport_desc_rx(priv, budget); | 998 | work_done = bcm_sysport_desc_rx(priv, budget); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e2be5a685130..e47ea92e2ae3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -1934,8 +1934,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, | |||
1934 | } | 1934 | } |
1935 | 1935 | ||
1936 | /* select a non-FCoE queue */ | 1936 | /* select a non-FCoE queue */ |
1937 | return netdev_pick_tx(dev, skb, NULL) % | 1937 | return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp)); |
1938 | (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); | ||
1939 | } | 1938 | } |
1940 | 1939 | ||
1941 | void bnx2x_set_num_queues(struct bnx2x *bp) | 1940 | void bnx2x_set_num_queues(struct bnx2x *bp) |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c23fbb34f0e9..94be97b7952c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -2267,7 +2267,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget) | |||
2267 | } | 2267 | } |
2268 | } | 2268 | } |
2269 | if (bp->flags & BNXT_FLAG_DIM) { | 2269 | if (bp->flags & BNXT_FLAG_DIM) { |
2270 | struct dim_sample dim_sample; | 2270 | struct dim_sample dim_sample = {}; |
2271 | 2271 | ||
2272 | dim_update_sample(cpr->event_ctr, | 2272 | dim_update_sample(cpr->event_ctr, |
2273 | cpr->rx_packets, | 2273 | cpr->rx_packets, |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index a2b57807453b..d3a0b614dbfa 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
@@ -1895,7 +1895,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) | |||
1895 | { | 1895 | { |
1896 | struct bcmgenet_rx_ring *ring = container_of(napi, | 1896 | struct bcmgenet_rx_ring *ring = container_of(napi, |
1897 | struct bcmgenet_rx_ring, napi); | 1897 | struct bcmgenet_rx_ring, napi); |
1898 | struct dim_sample dim_sample; | 1898 | struct dim_sample dim_sample = {}; |
1899 | unsigned int work_done; | 1899 | unsigned int work_done; |
1900 | 1900 | ||
1901 | work_done = bcmgenet_desc_rx(ring, budget); | 1901 | work_done = bcmgenet_desc_rx(ring, budget); |
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index ad22554857bf..acb016834f04 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c | |||
@@ -1381,24 +1381,18 @@ static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev, | |||
1381 | u8 *dst) | 1381 | u8 *dst) |
1382 | { | 1382 | { |
1383 | u8 mac[ETH_ALEN]; | 1383 | u8 mac[ETH_ALEN]; |
1384 | int ret; | 1384 | u8 *addr; |
1385 | 1385 | ||
1386 | ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev), | 1386 | addr = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac, ETH_ALEN); |
1387 | "mac-address", mac, ETH_ALEN); | 1387 | if (!addr) { |
1388 | if (ret) | ||
1389 | goto out; | ||
1390 | |||
1391 | if (!is_valid_ether_addr(mac)) { | ||
1392 | dev_err(dev, "MAC address invalid: %pM\n", mac); | 1388 | dev_err(dev, "MAC address invalid: %pM\n", mac); |
1393 | ret = -EINVAL; | 1389 | return -EINVAL; |
1394 | goto out; | ||
1395 | } | 1390 | } |
1396 | 1391 | ||
1397 | dev_info(dev, "MAC address set to: %pM\n", mac); | 1392 | dev_info(dev, "MAC address set to: %pM\n", mac); |
1398 | 1393 | ||
1399 | memcpy(dst, mac, ETH_ALEN); | 1394 | ether_addr_copy(dst, mac); |
1400 | out: | 1395 | return 0; |
1401 | return ret; | ||
1402 | } | 1396 | } |
1403 | 1397 | ||
1404 | /* Currently only sets the MAC address. */ | 1398 | /* Currently only sets the MAC address. */ |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 1e82b9efe447..58f89f6a040f 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | |||
@@ -3269,7 +3269,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3269 | if (!adapter->regs) { | 3269 | if (!adapter->regs) { |
3270 | dev_err(&pdev->dev, "cannot map device registers\n"); | 3270 | dev_err(&pdev->dev, "cannot map device registers\n"); |
3271 | err = -ENOMEM; | 3271 | err = -ENOMEM; |
3272 | goto out_free_adapter; | 3272 | goto out_free_adapter_nofail; |
3273 | } | 3273 | } |
3274 | 3274 | ||
3275 | adapter->pdev = pdev; | 3275 | adapter->pdev = pdev; |
@@ -3397,6 +3397,9 @@ out_free_dev: | |||
3397 | if (adapter->port[i]) | 3397 | if (adapter->port[i]) |
3398 | free_netdev(adapter->port[i]); | 3398 | free_netdev(adapter->port[i]); |
3399 | 3399 | ||
3400 | out_free_adapter_nofail: | ||
3401 | kfree_skb(adapter->nofail_skb); | ||
3402 | |||
3400 | out_free_adapter: | 3403 | out_free_adapter: |
3401 | kfree(adapter); | 3404 | kfree(adapter); |
3402 | 3405 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index ef5d61d57597..323976c811e9 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -550,7 +550,7 @@ int be_process_mcc(struct be_adapter *adapter) | |||
550 | int num = 0, status = 0; | 550 | int num = 0, status = 0; |
551 | struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; | 551 | struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; |
552 | 552 | ||
553 | spin_lock(&adapter->mcc_cq_lock); | 553 | spin_lock_bh(&adapter->mcc_cq_lock); |
554 | 554 | ||
555 | while ((compl = be_mcc_compl_get(adapter))) { | 555 | while ((compl = be_mcc_compl_get(adapter))) { |
556 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { | 556 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { |
@@ -566,7 +566,7 @@ int be_process_mcc(struct be_adapter *adapter) | |||
566 | if (num) | 566 | if (num) |
567 | be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); | 567 | be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); |
568 | 568 | ||
569 | spin_unlock(&adapter->mcc_cq_lock); | 569 | spin_unlock_bh(&adapter->mcc_cq_lock); |
570 | return status; | 570 | return status; |
571 | } | 571 | } |
572 | 572 | ||
@@ -581,9 +581,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) | |||
581 | if (be_check_error(adapter, BE_ERROR_ANY)) | 581 | if (be_check_error(adapter, BE_ERROR_ANY)) |
582 | return -EIO; | 582 | return -EIO; |
583 | 583 | ||
584 | local_bh_disable(); | ||
585 | status = be_process_mcc(adapter); | 584 | status = be_process_mcc(adapter); |
586 | local_bh_enable(); | ||
587 | 585 | ||
588 | if (atomic_read(&mcc_obj->q.used) == 0) | 586 | if (atomic_read(&mcc_obj->q.used) == 0) |
589 | break; | 587 | break; |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 1c9883019767..314e9868b861 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -5630,9 +5630,7 @@ static void be_worker(struct work_struct *work) | |||
5630 | * mcc completions | 5630 | * mcc completions |
5631 | */ | 5631 | */ |
5632 | if (!netif_running(adapter->netdev)) { | 5632 | if (!netif_running(adapter->netdev)) { |
5633 | local_bh_disable(); | ||
5634 | be_process_mcc(adapter); | 5633 | be_process_mcc(adapter); |
5635 | local_bh_enable(); | ||
5636 | goto reschedule; | 5634 | goto reschedule; |
5637 | } | 5635 | } |
5638 | 5636 | ||
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig index 9c530f75134f..c219587bd334 100644 --- a/drivers/net/ethernet/freescale/enetc/Kconfig +++ b/drivers/net/ethernet/freescale/enetc/Kconfig | |||
@@ -2,6 +2,7 @@ | |||
2 | config FSL_ENETC | 2 | config FSL_ENETC |
3 | tristate "ENETC PF driver" | 3 | tristate "ENETC PF driver" |
4 | depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST) | 4 | depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST) |
5 | select PHYLIB | ||
5 | help | 6 | help |
6 | This driver supports NXP ENETC gigabit ethernet controller PCIe | 7 | This driver supports NXP ENETC gigabit ethernet controller PCIe |
7 | physical function (PF) devices, managing ENETC Ports at a privileged | 8 | physical function (PF) devices, managing ENETC Ports at a privileged |
@@ -12,6 +13,7 @@ config FSL_ENETC | |||
12 | config FSL_ENETC_VF | 13 | config FSL_ENETC_VF |
13 | tristate "ENETC VF driver" | 14 | tristate "ENETC VF driver" |
14 | depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST) | 15 | depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST) |
16 | select PHYLIB | ||
15 | help | 17 | help |
16 | This driver supports NXP ENETC gigabit ethernet controller PCIe | 18 | This driver supports NXP ENETC gigabit ethernet controller PCIe |
17 | virtual function (VF) devices enabled by the ENETC PF driver. | 19 | virtual function (VF) devices enabled by the ENETC PF driver. |
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index e80fedb27cee..210749bf1eac 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c | |||
@@ -2439,9 +2439,6 @@ MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers"); | |||
2439 | * buffers when not using jumbo frames. | 2439 | * buffers when not using jumbo frames. |
2440 | * Must be large enough to accommodate the network MTU, but small enough | 2440 | * Must be large enough to accommodate the network MTU, but small enough |
2441 | * to avoid wasting skb memory. | 2441 | * to avoid wasting skb memory. |
2442 | * | ||
2443 | * Could be overridden once, at boot-time, via the | ||
2444 | * fm_set_max_frm() callback. | ||
2445 | */ | 2442 | */ |
2446 | static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE; | 2443 | static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE; |
2447 | module_param(fsl_fm_max_frm, int, 0); | 2444 | module_param(fsl_fm_max_frm, int, 0); |
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 92372dc43be8..ebc37e256922 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h | |||
@@ -31,9 +31,6 @@ | |||
31 | struct gve_rx_desc_queue { | 31 | struct gve_rx_desc_queue { |
32 | struct gve_rx_desc *desc_ring; /* the descriptor ring */ | 32 | struct gve_rx_desc *desc_ring; /* the descriptor ring */ |
33 | dma_addr_t bus; /* the bus for the desc_ring */ | 33 | dma_addr_t bus; /* the bus for the desc_ring */ |
34 | u32 cnt; /* free-running total number of completed packets */ | ||
35 | u32 fill_cnt; /* free-running total number of descriptors posted */ | ||
36 | u32 mask; /* masks the cnt to the size of the ring */ | ||
37 | u8 seqno; /* the next expected seqno for this desc*/ | 34 | u8 seqno; /* the next expected seqno for this desc*/ |
38 | }; | 35 | }; |
39 | 36 | ||
@@ -60,8 +57,6 @@ struct gve_rx_data_queue { | |||
60 | dma_addr_t data_bus; /* dma mapping of the slots */ | 57 | dma_addr_t data_bus; /* dma mapping of the slots */ |
61 | struct gve_rx_slot_page_info *page_info; /* page info of the buffers */ | 58 | struct gve_rx_slot_page_info *page_info; /* page info of the buffers */ |
62 | struct gve_queue_page_list *qpl; /* qpl assigned to this queue */ | 59 | struct gve_queue_page_list *qpl; /* qpl assigned to this queue */ |
63 | u32 mask; /* masks the cnt to the size of the ring */ | ||
64 | u32 cnt; /* free-running total number of completed packets */ | ||
65 | }; | 60 | }; |
66 | 61 | ||
67 | struct gve_priv; | 62 | struct gve_priv; |
@@ -73,6 +68,9 @@ struct gve_rx_ring { | |||
73 | struct gve_rx_data_queue data; | 68 | struct gve_rx_data_queue data; |
74 | u64 rbytes; /* free-running bytes received */ | 69 | u64 rbytes; /* free-running bytes received */ |
75 | u64 rpackets; /* free-running packets received */ | 70 | u64 rpackets; /* free-running packets received */ |
71 | u32 cnt; /* free-running total number of completed packets */ | ||
72 | u32 fill_cnt; /* free-running total number of descs and buffs posted */ | ||
73 | u32 mask; /* masks the cnt and fill_cnt to the size of the ring */ | ||
76 | u32 q_num; /* queue index */ | 74 | u32 q_num; /* queue index */ |
77 | u32 ntfy_id; /* notification block index */ | 75 | u32 ntfy_id; /* notification block index */ |
78 | struct gve_queue_resources *q_resources; /* head and tail pointer idx */ | 76 | struct gve_queue_resources *q_resources; /* head and tail pointer idx */ |
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 26540b856541..d8fa816f4473 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c | |||
@@ -138,8 +138,8 @@ gve_get_ethtool_stats(struct net_device *netdev, | |||
138 | for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { | 138 | for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { |
139 | struct gve_rx_ring *rx = &priv->rx[ring]; | 139 | struct gve_rx_ring *rx = &priv->rx[ring]; |
140 | 140 | ||
141 | data[i++] = rx->desc.cnt; | 141 | data[i++] = rx->cnt; |
142 | data[i++] = rx->desc.fill_cnt; | 142 | data[i++] = rx->fill_cnt; |
143 | } | 143 | } |
144 | } else { | 144 | } else { |
145 | i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS; | 145 | i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS; |
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 1914b8350da7..59564ac99d2a 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c | |||
@@ -37,7 +37,7 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx) | |||
37 | rx->data.qpl = NULL; | 37 | rx->data.qpl = NULL; |
38 | kvfree(rx->data.page_info); | 38 | kvfree(rx->data.page_info); |
39 | 39 | ||
40 | slots = rx->data.mask + 1; | 40 | slots = rx->mask + 1; |
41 | bytes = sizeof(*rx->data.data_ring) * slots; | 41 | bytes = sizeof(*rx->data.data_ring) * slots; |
42 | dma_free_coherent(dev, bytes, rx->data.data_ring, | 42 | dma_free_coherent(dev, bytes, rx->data.data_ring, |
43 | rx->data.data_bus); | 43 | rx->data.data_bus); |
@@ -64,7 +64,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx) | |||
64 | /* Allocate one page per Rx queue slot. Each page is split into two | 64 | /* Allocate one page per Rx queue slot. Each page is split into two |
65 | * packet buffers, when possible we "page flip" between the two. | 65 | * packet buffers, when possible we "page flip" between the two. |
66 | */ | 66 | */ |
67 | slots = rx->data.mask + 1; | 67 | slots = rx->mask + 1; |
68 | 68 | ||
69 | rx->data.page_info = kvzalloc(slots * | 69 | rx->data.page_info = kvzalloc(slots * |
70 | sizeof(*rx->data.page_info), GFP_KERNEL); | 70 | sizeof(*rx->data.page_info), GFP_KERNEL); |
@@ -111,7 +111,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) | |||
111 | rx->q_num = idx; | 111 | rx->q_num = idx; |
112 | 112 | ||
113 | slots = priv->rx_pages_per_qpl; | 113 | slots = priv->rx_pages_per_qpl; |
114 | rx->data.mask = slots - 1; | 114 | rx->mask = slots - 1; |
115 | 115 | ||
116 | /* alloc rx data ring */ | 116 | /* alloc rx data ring */ |
117 | bytes = sizeof(*rx->data.data_ring) * slots; | 117 | bytes = sizeof(*rx->data.data_ring) * slots; |
@@ -125,7 +125,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) | |||
125 | err = -ENOMEM; | 125 | err = -ENOMEM; |
126 | goto abort_with_slots; | 126 | goto abort_with_slots; |
127 | } | 127 | } |
128 | rx->desc.fill_cnt = filled_pages; | 128 | rx->fill_cnt = filled_pages; |
129 | /* Ensure data ring slots (packet buffers) are visible. */ | 129 | /* Ensure data ring slots (packet buffers) are visible. */ |
130 | dma_wmb(); | 130 | dma_wmb(); |
131 | 131 | ||
@@ -156,8 +156,8 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) | |||
156 | err = -ENOMEM; | 156 | err = -ENOMEM; |
157 | goto abort_with_q_resources; | 157 | goto abort_with_q_resources; |
158 | } | 158 | } |
159 | rx->desc.mask = slots - 1; | 159 | rx->mask = slots - 1; |
160 | rx->desc.cnt = 0; | 160 | rx->cnt = 0; |
161 | rx->desc.seqno = 1; | 161 | rx->desc.seqno = 1; |
162 | gve_rx_add_to_block(priv, idx); | 162 | gve_rx_add_to_block(priv, idx); |
163 | 163 | ||
@@ -213,7 +213,7 @@ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx) | |||
213 | { | 213 | { |
214 | u32 db_idx = be32_to_cpu(rx->q_resources->db_index); | 214 | u32 db_idx = be32_to_cpu(rx->q_resources->db_index); |
215 | 215 | ||
216 | iowrite32be(rx->desc.fill_cnt, &priv->db_bar2[db_idx]); | 216 | iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]); |
217 | } | 217 | } |
218 | 218 | ||
219 | static enum pkt_hash_types gve_rss_type(__be16 pkt_flags) | 219 | static enum pkt_hash_types gve_rss_type(__be16 pkt_flags) |
@@ -273,7 +273,7 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, | |||
273 | } | 273 | } |
274 | 274 | ||
275 | static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, | 275 | static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, |
276 | netdev_features_t feat) | 276 | netdev_features_t feat, u32 idx) |
277 | { | 277 | { |
278 | struct gve_rx_slot_page_info *page_info; | 278 | struct gve_rx_slot_page_info *page_info; |
279 | struct gve_priv *priv = rx->gve; | 279 | struct gve_priv *priv = rx->gve; |
@@ -282,14 +282,12 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, | |||
282 | struct sk_buff *skb; | 282 | struct sk_buff *skb; |
283 | int pagecount; | 283 | int pagecount; |
284 | u16 len; | 284 | u16 len; |
285 | u32 idx; | ||
286 | 285 | ||
287 | /* drop this packet */ | 286 | /* drop this packet */ |
288 | if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) | 287 | if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) |
289 | return true; | 288 | return true; |
290 | 289 | ||
291 | len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD; | 290 | len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD; |
292 | idx = rx->data.cnt & rx->data.mask; | ||
293 | page_info = &rx->data.page_info[idx]; | 291 | page_info = &rx->data.page_info[idx]; |
294 | 292 | ||
295 | /* gvnic can only receive into registered segments. If the buffer | 293 | /* gvnic can only receive into registered segments. If the buffer |
@@ -340,8 +338,6 @@ have_skb: | |||
340 | if (!skb) | 338 | if (!skb) |
341 | return true; | 339 | return true; |
342 | 340 | ||
343 | rx->data.cnt++; | ||
344 | |||
345 | if (likely(feat & NETIF_F_RXCSUM)) { | 341 | if (likely(feat & NETIF_F_RXCSUM)) { |
346 | /* NIC passes up the partial sum */ | 342 | /* NIC passes up the partial sum */ |
347 | if (rx_desc->csum) | 343 | if (rx_desc->csum) |
@@ -370,7 +366,7 @@ static bool gve_rx_work_pending(struct gve_rx_ring *rx) | |||
370 | __be16 flags_seq; | 366 | __be16 flags_seq; |
371 | u32 next_idx; | 367 | u32 next_idx; |
372 | 368 | ||
373 | next_idx = rx->desc.cnt & rx->desc.mask; | 369 | next_idx = rx->cnt & rx->mask; |
374 | desc = rx->desc.desc_ring + next_idx; | 370 | desc = rx->desc.desc_ring + next_idx; |
375 | 371 | ||
376 | flags_seq = desc->flags_seq; | 372 | flags_seq = desc->flags_seq; |
@@ -385,8 +381,8 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, | |||
385 | { | 381 | { |
386 | struct gve_priv *priv = rx->gve; | 382 | struct gve_priv *priv = rx->gve; |
387 | struct gve_rx_desc *desc; | 383 | struct gve_rx_desc *desc; |
388 | u32 cnt = rx->desc.cnt; | 384 | u32 cnt = rx->cnt; |
389 | u32 idx = cnt & rx->desc.mask; | 385 | u32 idx = cnt & rx->mask; |
390 | u32 work_done = 0; | 386 | u32 work_done = 0; |
391 | u64 bytes = 0; | 387 | u64 bytes = 0; |
392 | 388 | ||
@@ -401,10 +397,10 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, | |||
401 | rx->q_num, GVE_SEQNO(desc->flags_seq), | 397 | rx->q_num, GVE_SEQNO(desc->flags_seq), |
402 | rx->desc.seqno); | 398 | rx->desc.seqno); |
403 | bytes += be16_to_cpu(desc->len) - GVE_RX_PAD; | 399 | bytes += be16_to_cpu(desc->len) - GVE_RX_PAD; |
404 | if (!gve_rx(rx, desc, feat)) | 400 | if (!gve_rx(rx, desc, feat, idx)) |
405 | gve_schedule_reset(priv); | 401 | gve_schedule_reset(priv); |
406 | cnt++; | 402 | cnt++; |
407 | idx = cnt & rx->desc.mask; | 403 | idx = cnt & rx->mask; |
408 | desc = rx->desc.desc_ring + idx; | 404 | desc = rx->desc.desc_ring + idx; |
409 | rx->desc.seqno = gve_next_seqno(rx->desc.seqno); | 405 | rx->desc.seqno = gve_next_seqno(rx->desc.seqno); |
410 | work_done++; | 406 | work_done++; |
@@ -417,8 +413,8 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, | |||
417 | rx->rpackets += work_done; | 413 | rx->rpackets += work_done; |
418 | rx->rbytes += bytes; | 414 | rx->rbytes += bytes; |
419 | u64_stats_update_end(&rx->statss); | 415 | u64_stats_update_end(&rx->statss); |
420 | rx->desc.cnt = cnt; | 416 | rx->cnt = cnt; |
421 | rx->desc.fill_cnt += work_done; | 417 | rx->fill_cnt += work_done; |
422 | 418 | ||
423 | /* restock desc ring slots */ | 419 | /* restock desc ring slots */ |
424 | dma_wmb(); /* Ensure descs are visible before ringing doorbell */ | 420 | dma_wmb(); /* Ensure descs are visible before ringing doorbell */ |
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index d60452845539..c84167447abe 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c | |||
@@ -220,6 +220,7 @@ struct hip04_priv { | |||
220 | unsigned int reg_inten; | 220 | unsigned int reg_inten; |
221 | 221 | ||
222 | struct napi_struct napi; | 222 | struct napi_struct napi; |
223 | struct device *dev; | ||
223 | struct net_device *ndev; | 224 | struct net_device *ndev; |
224 | 225 | ||
225 | struct tx_desc *tx_desc; | 226 | struct tx_desc *tx_desc; |
@@ -248,7 +249,7 @@ struct hip04_priv { | |||
248 | 249 | ||
249 | static inline unsigned int tx_count(unsigned int head, unsigned int tail) | 250 | static inline unsigned int tx_count(unsigned int head, unsigned int tail) |
250 | { | 251 | { |
251 | return (head - tail) % (TX_DESC_NUM - 1); | 252 | return (head - tail) % TX_DESC_NUM; |
252 | } | 253 | } |
253 | 254 | ||
254 | static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex) | 255 | static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex) |
@@ -465,7 +466,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force) | |||
465 | } | 466 | } |
466 | 467 | ||
467 | if (priv->tx_phys[tx_tail]) { | 468 | if (priv->tx_phys[tx_tail]) { |
468 | dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail], | 469 | dma_unmap_single(priv->dev, priv->tx_phys[tx_tail], |
469 | priv->tx_skb[tx_tail]->len, | 470 | priv->tx_skb[tx_tail]->len, |
470 | DMA_TO_DEVICE); | 471 | DMA_TO_DEVICE); |
471 | priv->tx_phys[tx_tail] = 0; | 472 | priv->tx_phys[tx_tail] = 0; |
@@ -516,8 +517,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
516 | return NETDEV_TX_BUSY; | 517 | return NETDEV_TX_BUSY; |
517 | } | 518 | } |
518 | 519 | ||
519 | phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE); | 520 | phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE); |
520 | if (dma_mapping_error(&ndev->dev, phys)) { | 521 | if (dma_mapping_error(priv->dev, phys)) { |
521 | dev_kfree_skb(skb); | 522 | dev_kfree_skb(skb); |
522 | return NETDEV_TX_OK; | 523 | return NETDEV_TX_OK; |
523 | } | 524 | } |
@@ -585,6 +586,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) | |||
585 | u16 len; | 586 | u16 len; |
586 | u32 err; | 587 | u32 err; |
587 | 588 | ||
589 | /* clean up tx descriptors */ | ||
590 | tx_remaining = hip04_tx_reclaim(ndev, false); | ||
591 | |||
588 | while (cnt && !last) { | 592 | while (cnt && !last) { |
589 | buf = priv->rx_buf[priv->rx_head]; | 593 | buf = priv->rx_buf[priv->rx_head]; |
590 | skb = build_skb(buf, priv->rx_buf_size); | 594 | skb = build_skb(buf, priv->rx_buf_size); |
@@ -593,7 +597,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) | |||
593 | goto refill; | 597 | goto refill; |
594 | } | 598 | } |
595 | 599 | ||
596 | dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head], | 600 | dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head], |
597 | RX_BUF_SIZE, DMA_FROM_DEVICE); | 601 | RX_BUF_SIZE, DMA_FROM_DEVICE); |
598 | priv->rx_phys[priv->rx_head] = 0; | 602 | priv->rx_phys[priv->rx_head] = 0; |
599 | 603 | ||
@@ -622,9 +626,9 @@ refill: | |||
622 | buf = netdev_alloc_frag(priv->rx_buf_size); | 626 | buf = netdev_alloc_frag(priv->rx_buf_size); |
623 | if (!buf) | 627 | if (!buf) |
624 | goto done; | 628 | goto done; |
625 | phys = dma_map_single(&ndev->dev, buf, | 629 | phys = dma_map_single(priv->dev, buf, |
626 | RX_BUF_SIZE, DMA_FROM_DEVICE); | 630 | RX_BUF_SIZE, DMA_FROM_DEVICE); |
627 | if (dma_mapping_error(&ndev->dev, phys)) | 631 | if (dma_mapping_error(priv->dev, phys)) |
628 | goto done; | 632 | goto done; |
629 | priv->rx_buf[priv->rx_head] = buf; | 633 | priv->rx_buf[priv->rx_head] = buf; |
630 | priv->rx_phys[priv->rx_head] = phys; | 634 | priv->rx_phys[priv->rx_head] = phys; |
@@ -645,8 +649,7 @@ refill: | |||
645 | } | 649 | } |
646 | napi_complete_done(napi, rx); | 650 | napi_complete_done(napi, rx); |
647 | done: | 651 | done: |
648 | /* clean up tx descriptors and start a new timer if necessary */ | 652 | /* start a new timer if necessary */ |
649 | tx_remaining = hip04_tx_reclaim(ndev, false); | ||
650 | if (rx < budget && tx_remaining) | 653 | if (rx < budget && tx_remaining) |
651 | hip04_start_tx_timer(priv); | 654 | hip04_start_tx_timer(priv); |
652 | 655 | ||
@@ -728,9 +731,9 @@ static int hip04_mac_open(struct net_device *ndev) | |||
728 | for (i = 0; i < RX_DESC_NUM; i++) { | 731 | for (i = 0; i < RX_DESC_NUM; i++) { |
729 | dma_addr_t phys; | 732 | dma_addr_t phys; |
730 | 733 | ||
731 | phys = dma_map_single(&ndev->dev, priv->rx_buf[i], | 734 | phys = dma_map_single(priv->dev, priv->rx_buf[i], |
732 | RX_BUF_SIZE, DMA_FROM_DEVICE); | 735 | RX_BUF_SIZE, DMA_FROM_DEVICE); |
733 | if (dma_mapping_error(&ndev->dev, phys)) | 736 | if (dma_mapping_error(priv->dev, phys)) |
734 | return -EIO; | 737 | return -EIO; |
735 | 738 | ||
736 | priv->rx_phys[i] = phys; | 739 | priv->rx_phys[i] = phys; |
@@ -764,7 +767,7 @@ static int hip04_mac_stop(struct net_device *ndev) | |||
764 | 767 | ||
765 | for (i = 0; i < RX_DESC_NUM; i++) { | 768 | for (i = 0; i < RX_DESC_NUM; i++) { |
766 | if (priv->rx_phys[i]) { | 769 | if (priv->rx_phys[i]) { |
767 | dma_unmap_single(&ndev->dev, priv->rx_phys[i], | 770 | dma_unmap_single(priv->dev, priv->rx_phys[i], |
768 | RX_BUF_SIZE, DMA_FROM_DEVICE); | 771 | RX_BUF_SIZE, DMA_FROM_DEVICE); |
769 | priv->rx_phys[i] = 0; | 772 | priv->rx_phys[i] = 0; |
770 | } | 773 | } |
@@ -907,6 +910,7 @@ static int hip04_mac_probe(struct platform_device *pdev) | |||
907 | return -ENOMEM; | 910 | return -ENOMEM; |
908 | 911 | ||
909 | priv = netdev_priv(ndev); | 912 | priv = netdev_priv(ndev); |
913 | priv->dev = d; | ||
910 | priv->ndev = ndev; | 914 | priv->ndev = ndev; |
911 | platform_set_drvdata(pdev, ndev); | 915 | platform_set_drvdata(pdev, ndev); |
912 | SET_NETDEV_DEV(ndev, &pdev->dev); | 916 | SET_NETDEV_DEV(ndev, &pdev->dev); |
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 4138a8480347..cca71ba7a74a 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
@@ -3251,7 +3251,7 @@ static int ehea_mem_notifier(struct notifier_block *nb, | |||
3251 | switch (action) { | 3251 | switch (action) { |
3252 | case MEM_CANCEL_OFFLINE: | 3252 | case MEM_CANCEL_OFFLINE: |
3253 | pr_info("memory offlining canceled"); | 3253 | pr_info("memory offlining canceled"); |
3254 | /* Fall through: re-add canceled memory block */ | 3254 | /* Fall through - re-add canceled memory block */ |
3255 | 3255 | ||
3256 | case MEM_ONLINE: | 3256 | case MEM_ONLINE: |
3257 | pr_info("memory is going online"); | 3257 | pr_info("memory is going online"); |
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index f660cc2b8258..0b9e851f3da4 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c | |||
@@ -319,20 +319,33 @@ static int orion_mdio_probe(struct platform_device *pdev) | |||
319 | 319 | ||
320 | init_waitqueue_head(&dev->smi_busy_wait); | 320 | init_waitqueue_head(&dev->smi_busy_wait); |
321 | 321 | ||
322 | for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { | 322 | if (pdev->dev.of_node) { |
323 | dev->clk[i] = of_clk_get(pdev->dev.of_node, i); | 323 | for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { |
324 | if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) { | 324 | dev->clk[i] = of_clk_get(pdev->dev.of_node, i); |
325 | if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) { | ||
326 | ret = -EPROBE_DEFER; | ||
327 | goto out_clk; | ||
328 | } | ||
329 | if (IS_ERR(dev->clk[i])) | ||
330 | break; | ||
331 | clk_prepare_enable(dev->clk[i]); | ||
332 | } | ||
333 | |||
334 | if (!IS_ERR(of_clk_get(pdev->dev.of_node, | ||
335 | ARRAY_SIZE(dev->clk)))) | ||
336 | dev_warn(&pdev->dev, | ||
337 | "unsupported number of clocks, limiting to the first " | ||
338 | __stringify(ARRAY_SIZE(dev->clk)) "\n"); | ||
339 | } else { | ||
340 | dev->clk[0] = clk_get(&pdev->dev, NULL); | ||
341 | if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) { | ||
325 | ret = -EPROBE_DEFER; | 342 | ret = -EPROBE_DEFER; |
326 | goto out_clk; | 343 | goto out_clk; |
327 | } | 344 | } |
328 | if (IS_ERR(dev->clk[i])) | 345 | if (!IS_ERR(dev->clk[0])) |
329 | break; | 346 | clk_prepare_enable(dev->clk[0]); |
330 | clk_prepare_enable(dev->clk[i]); | ||
331 | } | 347 | } |
332 | 348 | ||
333 | if (!IS_ERR(of_clk_get(pdev->dev.of_node, ARRAY_SIZE(dev->clk)))) | ||
334 | dev_warn(&pdev->dev, "unsupported number of clocks, limiting to the first " | ||
335 | __stringify(ARRAY_SIZE(dev->clk)) "\n"); | ||
336 | 349 | ||
337 | dev->err_interrupt = platform_get_irq(pdev, 0); | 350 | dev->err_interrupt = platform_get_irq(pdev, 0); |
338 | if (dev->err_interrupt > 0 && | 351 | if (dev->err_interrupt > 0 && |
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index e9d8ffe897e9..74fd9e171865 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | |||
@@ -811,6 +811,26 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) | |||
811 | return 0; | 811 | return 0; |
812 | } | 812 | } |
813 | 813 | ||
814 | static void mvpp2_set_hw_csum(struct mvpp2_port *port, | ||
815 | enum mvpp2_bm_pool_log_num new_long_pool) | ||
816 | { | ||
817 | const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | ||
818 | |||
819 | /* Update L4 checksum when jumbo enable/disable on port. | ||
820 | * Only port 0 supports hardware checksum offload due to | ||
821 | * the Tx FIFO size limitation. | ||
822 | * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor | ||
823 | * has 7 bits, so the maximum L3 offset is 128. | ||
824 | */ | ||
825 | if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { | ||
826 | port->dev->features &= ~csums; | ||
827 | port->dev->hw_features &= ~csums; | ||
828 | } else { | ||
829 | port->dev->features |= csums; | ||
830 | port->dev->hw_features |= csums; | ||
831 | } | ||
832 | } | ||
833 | |||
814 | static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) | 834 | static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) |
815 | { | 835 | { |
816 | struct mvpp2_port *port = netdev_priv(dev); | 836 | struct mvpp2_port *port = netdev_priv(dev); |
@@ -843,15 +863,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) | |||
843 | /* Add port to new short & long pool */ | 863 | /* Add port to new short & long pool */ |
844 | mvpp2_swf_bm_pool_init(port); | 864 | mvpp2_swf_bm_pool_init(port); |
845 | 865 | ||
846 | /* Update L4 checksum when jumbo enable/disable on port */ | 866 | mvpp2_set_hw_csum(port, new_long_pool); |
847 | if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { | ||
848 | dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); | ||
849 | dev->hw_features &= ~(NETIF_F_IP_CSUM | | ||
850 | NETIF_F_IPV6_CSUM); | ||
851 | } else { | ||
852 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | ||
853 | dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | ||
854 | } | ||
855 | } | 867 | } |
856 | 868 | ||
857 | dev->mtu = mtu; | 869 | dev->mtu = mtu; |
@@ -3701,6 +3713,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p) | |||
3701 | static int mvpp2_change_mtu(struct net_device *dev, int mtu) | 3713 | static int mvpp2_change_mtu(struct net_device *dev, int mtu) |
3702 | { | 3714 | { |
3703 | struct mvpp2_port *port = netdev_priv(dev); | 3715 | struct mvpp2_port *port = netdev_priv(dev); |
3716 | bool running = netif_running(dev); | ||
3704 | int err; | 3717 | int err; |
3705 | 3718 | ||
3706 | if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { | 3719 | if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { |
@@ -3709,40 +3722,24 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu) | |||
3709 | mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); | 3722 | mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); |
3710 | } | 3723 | } |
3711 | 3724 | ||
3712 | if (!netif_running(dev)) { | 3725 | if (running) |
3713 | err = mvpp2_bm_update_mtu(dev, mtu); | 3726 | mvpp2_stop_dev(port); |
3714 | if (!err) { | ||
3715 | port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); | ||
3716 | return 0; | ||
3717 | } | ||
3718 | |||
3719 | /* Reconfigure BM to the original MTU */ | ||
3720 | err = mvpp2_bm_update_mtu(dev, dev->mtu); | ||
3721 | if (err) | ||
3722 | goto log_error; | ||
3723 | } | ||
3724 | |||
3725 | mvpp2_stop_dev(port); | ||
3726 | 3727 | ||
3727 | err = mvpp2_bm_update_mtu(dev, mtu); | 3728 | err = mvpp2_bm_update_mtu(dev, mtu); |
3728 | if (!err) { | 3729 | if (err) { |
3730 | netdev_err(dev, "failed to change MTU\n"); | ||
3731 | /* Reconfigure BM to the original MTU */ | ||
3732 | mvpp2_bm_update_mtu(dev, dev->mtu); | ||
3733 | } else { | ||
3729 | port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); | 3734 | port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); |
3730 | goto out_start; | ||
3731 | } | 3735 | } |
3732 | 3736 | ||
3733 | /* Reconfigure BM to the original MTU */ | 3737 | if (running) { |
3734 | err = mvpp2_bm_update_mtu(dev, dev->mtu); | 3738 | mvpp2_start_dev(port); |
3735 | if (err) | 3739 | mvpp2_egress_enable(port); |
3736 | goto log_error; | 3740 | mvpp2_ingress_enable(port); |
3737 | 3741 | } | |
3738 | out_start: | ||
3739 | mvpp2_start_dev(port); | ||
3740 | mvpp2_egress_enable(port); | ||
3741 | mvpp2_ingress_enable(port); | ||
3742 | 3742 | ||
3743 | return 0; | ||
3744 | log_error: | ||
3745 | netdev_err(dev, "failed to change MTU\n"); | ||
3746 | return err; | 3743 | return err; |
3747 | } | 3744 | } |
3748 | 3745 | ||
@@ -4740,9 +4737,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, | |||
4740 | else | 4737 | else |
4741 | ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; | 4738 | ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; |
4742 | 4739 | ||
4743 | ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; | 4740 | ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | |
4744 | ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC | | 4741 | MVPP22_XLG_CTRL4_EN_IDLE_CHECK); |
4745 | MVPP22_XLG_CTRL4_EN_IDLE_CHECK; | 4742 | ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC; |
4746 | 4743 | ||
4747 | if (old_ctrl0 != ctrl0) | 4744 | if (old_ctrl0 != ctrl0) |
4748 | writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); | 4745 | writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); |
@@ -5207,10 +5204,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, | |||
5207 | dev->features |= NETIF_F_NTUPLE; | 5204 | dev->features |= NETIF_F_NTUPLE; |
5208 | } | 5205 | } |
5209 | 5206 | ||
5210 | if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) { | 5207 | mvpp2_set_hw_csum(port, port->pool_long->id); |
5211 | dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); | ||
5212 | dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); | ||
5213 | } | ||
5214 | 5208 | ||
5215 | dev->vlan_features |= features; | 5209 | dev->vlan_features |= features; |
5216 | dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; | 5210 | dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; |
@@ -5756,9 +5750,6 @@ static int mvpp2_remove(struct platform_device *pdev) | |||
5756 | 5750 | ||
5757 | mvpp2_dbgfs_cleanup(priv); | 5751 | mvpp2_dbgfs_cleanup(priv); |
5758 | 5752 | ||
5759 | flush_workqueue(priv->stats_queue); | ||
5760 | destroy_workqueue(priv->stats_queue); | ||
5761 | |||
5762 | fwnode_for_each_available_child_node(fwnode, port_fwnode) { | 5753 | fwnode_for_each_available_child_node(fwnode, port_fwnode) { |
5763 | if (priv->port_list[i]) { | 5754 | if (priv->port_list[i]) { |
5764 | mutex_destroy(&priv->port_list[i]->gather_stats_lock); | 5755 | mutex_destroy(&priv->port_list[i]->gather_stats_lock); |
@@ -5767,6 +5758,8 @@ static int mvpp2_remove(struct platform_device *pdev) | |||
5767 | i++; | 5758 | i++; |
5768 | } | 5759 | } |
5769 | 5760 | ||
5761 | destroy_workqueue(priv->stats_queue); | ||
5762 | |||
5770 | for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { | 5763 | for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { |
5771 | struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; | 5764 | struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; |
5772 | 5765 | ||
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 762fe0821923..c2e00bb587cd 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -4924,6 +4924,13 @@ static const struct dmi_system_id msi_blacklist[] = { | |||
4924 | DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"), | 4924 | DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"), |
4925 | }, | 4925 | }, |
4926 | }, | 4926 | }, |
4927 | { | ||
4928 | .ident = "ASUS P6T", | ||
4929 | .matches = { | ||
4930 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | ||
4931 | DMI_MATCH(DMI_BOARD_NAME, "P6T"), | ||
4932 | }, | ||
4933 | }, | ||
4927 | {} | 4934 | {} |
4928 | }; | 4935 | }; |
4929 | 4936 | ||
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index 263cd0909fe0..1f7fff81f24d 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig | |||
@@ -9,7 +9,6 @@ if NET_VENDOR_MEDIATEK | |||
9 | 9 | ||
10 | config NET_MEDIATEK_SOC | 10 | config NET_MEDIATEK_SOC |
11 | tristate "MediaTek SoC Gigabit Ethernet support" | 11 | tristate "MediaTek SoC Gigabit Ethernet support" |
12 | depends on NET_VENDOR_MEDIATEK | ||
13 | select PHYLIB | 12 | select PHYLIB |
14 | ---help--- | 13 | ---help--- |
15 | This driver supports the gigabit ethernet MACs in the | 14 | This driver supports the gigabit ethernet MACs in the |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 5bb6a26ea267..50862275544e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c | |||
@@ -213,7 +213,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev) | |||
213 | struct mlx5_interface *intf; | 213 | struct mlx5_interface *intf; |
214 | 214 | ||
215 | mutex_lock(&mlx5_intf_mutex); | 215 | mutex_lock(&mlx5_intf_mutex); |
216 | list_for_each_entry(intf, &intf_list, list) | 216 | list_for_each_entry_reverse(intf, &intf_list, list) |
217 | mlx5_remove_device(intf, priv); | 217 | mlx5_remove_device(intf, priv); |
218 | list_del(&priv->dev_list); | 218 | list_del(&priv->dev_list); |
219 | mutex_unlock(&mlx5_intf_mutex); | 219 | mutex_unlock(&mlx5_intf_mutex); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 30f13f81c965..0807992090b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
@@ -159,7 +159,7 @@ do { \ | |||
159 | enum mlx5e_rq_group { | 159 | enum mlx5e_rq_group { |
160 | MLX5E_RQ_GROUP_REGULAR, | 160 | MLX5E_RQ_GROUP_REGULAR, |
161 | MLX5E_RQ_GROUP_XSK, | 161 | MLX5E_RQ_GROUP_XSK, |
162 | MLX5E_NUM_RQ_GROUPS /* Keep last. */ | 162 | #define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g) |
163 | }; | 163 | }; |
164 | 164 | ||
165 | static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) | 165 | static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) |
@@ -182,14 +182,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) | |||
182 | min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS); | 182 | min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS); |
183 | } | 183 | } |
184 | 184 | ||
185 | /* Use this function to get max num channels after netdev was created */ | ||
186 | static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev) | ||
187 | { | ||
188 | return min_t(unsigned int, | ||
189 | netdev->num_rx_queues / MLX5E_NUM_RQ_GROUPS, | ||
190 | netdev->num_tx_queues); | ||
191 | } | ||
192 | |||
193 | struct mlx5e_tx_wqe { | 185 | struct mlx5e_tx_wqe { |
194 | struct mlx5_wqe_ctrl_seg ctrl; | 186 | struct mlx5_wqe_ctrl_seg ctrl; |
195 | struct mlx5_wqe_eth_seg eth; | 187 | struct mlx5_wqe_eth_seg eth; |
@@ -829,6 +821,7 @@ struct mlx5e_priv { | |||
829 | struct net_device *netdev; | 821 | struct net_device *netdev; |
830 | struct mlx5e_stats stats; | 822 | struct mlx5e_stats stats; |
831 | struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; | 823 | struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; |
824 | u16 max_nch; | ||
832 | u8 max_opened_tc; | 825 | u8 max_opened_tc; |
833 | struct hwtstamp_config tstamp; | 826 | struct hwtstamp_config tstamp; |
834 | u16 q_counter; | 827 | u16 q_counter; |
@@ -870,6 +863,7 @@ struct mlx5e_profile { | |||
870 | mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; | 863 | mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; |
871 | } rx_handlers; | 864 | } rx_handlers; |
872 | int max_tc; | 865 | int max_tc; |
866 | u8 rq_groups; | ||
873 | }; | 867 | }; |
874 | 868 | ||
875 | void mlx5e_build_ptys2ethtool_map(void); | 869 | void mlx5e_build_ptys2ethtool_map(void); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index bd882b5ee9a7..3a615d663d84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h | |||
@@ -66,9 +66,10 @@ static inline void mlx5e_qid_get_ch_and_group(struct mlx5e_params *params, | |||
66 | *group = qid / nch; | 66 | *group = qid / nch; |
67 | } | 67 | } |
68 | 68 | ||
69 | static inline bool mlx5e_qid_validate(struct mlx5e_params *params, u64 qid) | 69 | static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile, |
70 | struct mlx5e_params *params, u64 qid) | ||
70 | { | 71 | { |
71 | return qid < params->num_channels * MLX5E_NUM_RQ_GROUPS; | 72 | return qid < params->num_channels * profile->rq_groups; |
72 | } | 73 | } |
73 | 74 | ||
74 | /* Parameter calculations */ | 75 | /* Parameter calculations */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index d5e5afbdca6d..f777994f3005 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c | |||
@@ -78,9 +78,10 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { | |||
78 | }; | 78 | }; |
79 | 79 | ||
80 | static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, | 80 | static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, |
81 | const u32 **arr, u32 *size) | 81 | const u32 **arr, u32 *size, |
82 | bool force_legacy) | ||
82 | { | 83 | { |
83 | bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); | 84 | bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); |
84 | 85 | ||
85 | *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) : | 86 | *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) : |
86 | ARRAY_SIZE(mlx5e_link_speed); | 87 | ARRAY_SIZE(mlx5e_link_speed); |
@@ -152,7 +153,8 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable, | |||
152 | sizeof(out), MLX5_REG_PTYS, 0, 1); | 153 | sizeof(out), MLX5_REG_PTYS, 0, 1); |
153 | } | 154 | } |
154 | 155 | ||
155 | u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper) | 156 | u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper, |
157 | bool force_legacy) | ||
156 | { | 158 | { |
157 | unsigned long temp = eth_proto_oper; | 159 | unsigned long temp = eth_proto_oper; |
158 | const u32 *table; | 160 | const u32 *table; |
@@ -160,7 +162,7 @@ u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper) | |||
160 | u32 max_size; | 162 | u32 max_size; |
161 | int i; | 163 | int i; |
162 | 164 | ||
163 | mlx5e_port_get_speed_arr(mdev, &table, &max_size); | 165 | mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy); |
164 | i = find_first_bit(&temp, max_size); | 166 | i = find_first_bit(&temp, max_size); |
165 | if (i < max_size) | 167 | if (i < max_size) |
166 | speed = table[i]; | 168 | speed = table[i]; |
@@ -170,6 +172,7 @@ u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper) | |||
170 | int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) | 172 | int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) |
171 | { | 173 | { |
172 | struct mlx5e_port_eth_proto eproto; | 174 | struct mlx5e_port_eth_proto eproto; |
175 | bool force_legacy = false; | ||
173 | bool ext; | 176 | bool ext; |
174 | int err; | 177 | int err; |
175 | 178 | ||
@@ -177,8 +180,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) | |||
177 | err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); | 180 | err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); |
178 | if (err) | 181 | if (err) |
179 | goto out; | 182 | goto out; |
180 | 183 | if (ext && !eproto.admin) { | |
181 | *speed = mlx5e_port_ptys2speed(mdev, eproto.oper); | 184 | force_legacy = true; |
185 | err = mlx5_port_query_eth_proto(mdev, 1, false, &eproto); | ||
186 | if (err) | ||
187 | goto out; | ||
188 | } | ||
189 | *speed = mlx5e_port_ptys2speed(mdev, eproto.oper, force_legacy); | ||
182 | if (!(*speed)) | 190 | if (!(*speed)) |
183 | err = -EINVAL; | 191 | err = -EINVAL; |
184 | 192 | ||
@@ -201,7 +209,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) | |||
201 | if (err) | 209 | if (err) |
202 | return err; | 210 | return err; |
203 | 211 | ||
204 | mlx5e_port_get_speed_arr(mdev, &table, &max_size); | 212 | mlx5e_port_get_speed_arr(mdev, &table, &max_size, false); |
205 | for (i = 0; i < max_size; ++i) | 213 | for (i = 0; i < max_size; ++i) |
206 | if (eproto.cap & MLX5E_PROT_MASK(i)) | 214 | if (eproto.cap & MLX5E_PROT_MASK(i)) |
207 | max_speed = max(max_speed, table[i]); | 215 | max_speed = max(max_speed, table[i]); |
@@ -210,14 +218,15 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) | |||
210 | return 0; | 218 | return 0; |
211 | } | 219 | } |
212 | 220 | ||
213 | u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed) | 221 | u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed, |
222 | bool force_legacy) | ||
214 | { | 223 | { |
215 | u32 link_modes = 0; | 224 | u32 link_modes = 0; |
216 | const u32 *table; | 225 | const u32 *table; |
217 | u32 max_size; | 226 | u32 max_size; |
218 | int i; | 227 | int i; |
219 | 228 | ||
220 | mlx5e_port_get_speed_arr(mdev, &table, &max_size); | 229 | mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy); |
221 | for (i = 0; i < max_size; ++i) { | 230 | for (i = 0; i < max_size; ++i) { |
222 | if (table[i] == speed) | 231 | if (table[i] == speed) |
223 | link_modes |= MLX5E_PROT_MASK(i); | 232 | link_modes |= MLX5E_PROT_MASK(i); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h index 70f536ec51c4..4a7f4497692b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h | |||
@@ -48,10 +48,12 @@ void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status, | |||
48 | u8 *an_disable_cap, u8 *an_disable_admin); | 48 | u8 *an_disable_cap, u8 *an_disable_admin); |
49 | int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable, | 49 | int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable, |
50 | u32 proto_admin, bool ext); | 50 | u32 proto_admin, bool ext); |
51 | u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper); | 51 | u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper, |
52 | bool force_legacy); | ||
52 | int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); | 53 | int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); |
53 | int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); | 54 | int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); |
54 | u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed); | 55 | u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed, |
56 | bool force_legacy); | ||
55 | 57 | ||
56 | int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); | 58 | int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); |
57 | int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); | 59 | int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index ea032f54197e..3766545ce259 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | |||
@@ -412,7 +412,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, | |||
412 | goto out; | 412 | goto out; |
413 | 413 | ||
414 | tls_ctx = tls_get_ctx(skb->sk); | 414 | tls_ctx = tls_get_ctx(skb->sk); |
415 | if (unlikely(tls_ctx->netdev != netdev)) | 415 | if (unlikely(WARN_ON_ONCE(tls_ctx->netdev != netdev))) |
416 | goto err_out; | 416 | goto err_out; |
417 | 417 | ||
418 | priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx); | 418 | priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index a6b0eda0bd1a..02530b50609c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -391,7 +391,7 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv, | |||
391 | { | 391 | { |
392 | mutex_lock(&priv->state_lock); | 392 | mutex_lock(&priv->state_lock); |
393 | 393 | ||
394 | ch->max_combined = mlx5e_get_netdev_max_channels(priv->netdev); | 394 | ch->max_combined = priv->max_nch; |
395 | ch->combined_count = priv->channels.params.num_channels; | 395 | ch->combined_count = priv->channels.params.num_channels; |
396 | if (priv->xsk.refcnt) { | 396 | if (priv->xsk.refcnt) { |
397 | /* The upper half are XSK queues. */ | 397 | /* The upper half are XSK queues. */ |
@@ -785,7 +785,7 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings | |||
785 | } | 785 | } |
786 | 786 | ||
787 | static void get_speed_duplex(struct net_device *netdev, | 787 | static void get_speed_duplex(struct net_device *netdev, |
788 | u32 eth_proto_oper, | 788 | u32 eth_proto_oper, bool force_legacy, |
789 | struct ethtool_link_ksettings *link_ksettings) | 789 | struct ethtool_link_ksettings *link_ksettings) |
790 | { | 790 | { |
791 | struct mlx5e_priv *priv = netdev_priv(netdev); | 791 | struct mlx5e_priv *priv = netdev_priv(netdev); |
@@ -795,7 +795,7 @@ static void get_speed_duplex(struct net_device *netdev, | |||
795 | if (!netif_carrier_ok(netdev)) | 795 | if (!netif_carrier_ok(netdev)) |
796 | goto out; | 796 | goto out; |
797 | 797 | ||
798 | speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper); | 798 | speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy); |
799 | if (!speed) { | 799 | if (!speed) { |
800 | speed = SPEED_UNKNOWN; | 800 | speed = SPEED_UNKNOWN; |
801 | goto out; | 801 | goto out; |
@@ -914,8 +914,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
914 | /* Fields: eth_proto_admin and ext_eth_proto_admin are | 914 | /* Fields: eth_proto_admin and ext_eth_proto_admin are |
915 | * mutually exclusive. Hence try reading legacy advertising | 915 | * mutually exclusive. Hence try reading legacy advertising |
916 | * when extended advertising is zero. | 916 | * when extended advertising is zero. |
917 | * admin_ext indicates how eth_proto_admin should be | 917 | * admin_ext indicates which proto_admin (ext vs. legacy) |
918 | * interpreted | 918 | * should be read and interpreted |
919 | */ | 919 | */ |
920 | admin_ext = ext; | 920 | admin_ext = ext; |
921 | if (ext && !eth_proto_admin) { | 921 | if (ext && !eth_proto_admin) { |
@@ -924,7 +924,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
924 | admin_ext = false; | 924 | admin_ext = false; |
925 | } | 925 | } |
926 | 926 | ||
927 | eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, | 927 | eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, admin_ext, |
928 | eth_proto_oper); | 928 | eth_proto_oper); |
929 | eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); | 929 | eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); |
930 | an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); | 930 | an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); |
@@ -939,7 +939,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, | |||
939 | get_supported(mdev, eth_proto_cap, link_ksettings); | 939 | get_supported(mdev, eth_proto_cap, link_ksettings); |
940 | get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings, | 940 | get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings, |
941 | admin_ext); | 941 | admin_ext); |
942 | get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); | 942 | get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext, |
943 | link_ksettings); | ||
943 | 944 | ||
944 | eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; | 945 | eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; |
945 | 946 | ||
@@ -1016,45 +1017,69 @@ static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes) | |||
1016 | return ptys_modes; | 1017 | return ptys_modes; |
1017 | } | 1018 | } |
1018 | 1019 | ||
1020 | static bool ext_link_mode_requested(const unsigned long *adver) | ||
1021 | { | ||
1022 | #define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT | ||
1023 | int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT; | ||
1024 | __ETHTOOL_DECLARE_LINK_MODE_MASK(modes); | ||
1025 | |||
1026 | bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size); | ||
1027 | return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS); | ||
1028 | } | ||
1029 | |||
1030 | static bool ext_speed_requested(u32 speed) | ||
1031 | { | ||
1032 | #define MLX5E_MAX_PTYS_LEGACY_SPEED 100000 | ||
1033 | return !!(speed > MLX5E_MAX_PTYS_LEGACY_SPEED); | ||
1034 | } | ||
1035 | |||
1036 | static bool ext_requested(u8 autoneg, const unsigned long *adver, u32 speed) | ||
1037 | { | ||
1038 | bool ext_link_mode = ext_link_mode_requested(adver); | ||
1039 | bool ext_speed = ext_speed_requested(speed); | ||
1040 | |||
1041 | return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_speed; | ||
1042 | } | ||
1043 | |||
1019 | int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, | 1044 | int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, |
1020 | const struct ethtool_link_ksettings *link_ksettings) | 1045 | const struct ethtool_link_ksettings *link_ksettings) |
1021 | { | 1046 | { |
1022 | struct mlx5_core_dev *mdev = priv->mdev; | 1047 | struct mlx5_core_dev *mdev = priv->mdev; |
1023 | struct mlx5e_port_eth_proto eproto; | 1048 | struct mlx5e_port_eth_proto eproto; |
1049 | const unsigned long *adver; | ||
1024 | bool an_changes = false; | 1050 | bool an_changes = false; |
1025 | u8 an_disable_admin; | 1051 | u8 an_disable_admin; |
1026 | bool ext_supported; | 1052 | bool ext_supported; |
1027 | bool ext_requested; | ||
1028 | u8 an_disable_cap; | 1053 | u8 an_disable_cap; |
1029 | bool an_disable; | 1054 | bool an_disable; |
1030 | u32 link_modes; | 1055 | u32 link_modes; |
1031 | u8 an_status; | 1056 | u8 an_status; |
1057 | u8 autoneg; | ||
1032 | u32 speed; | 1058 | u32 speed; |
1059 | bool ext; | ||
1033 | int err; | 1060 | int err; |
1034 | 1061 | ||
1035 | u32 (*ethtool2ptys_adver_func)(const unsigned long *adver); | 1062 | u32 (*ethtool2ptys_adver_func)(const unsigned long *adver); |
1036 | 1063 | ||
1037 | #define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) | 1064 | adver = link_ksettings->link_modes.advertising; |
1065 | autoneg = link_ksettings->base.autoneg; | ||
1066 | speed = link_ksettings->base.speed; | ||
1038 | 1067 | ||
1039 | ext_requested = !!(link_ksettings->link_modes.advertising[0] > | 1068 | ext = ext_requested(autoneg, adver, speed), |
1040 | MLX5E_PTYS_EXT || | ||
1041 | link_ksettings->link_modes.advertising[1]); | ||
1042 | ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); | 1069 | ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); |
1043 | ext_requested &= ext_supported; | 1070 | if (!ext_supported && ext) |
1071 | return -EOPNOTSUPP; | ||
1044 | 1072 | ||
1045 | speed = link_ksettings->base.speed; | 1073 | ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link : |
1046 | ethtool2ptys_adver_func = ext_requested ? | ||
1047 | mlx5e_ethtool2ptys_ext_adver_link : | ||
1048 | mlx5e_ethtool2ptys_adver_link; | 1074 | mlx5e_ethtool2ptys_adver_link; |
1049 | err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto); | 1075 | err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); |
1050 | if (err) { | 1076 | if (err) { |
1051 | netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", | 1077 | netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", |
1052 | __func__, err); | 1078 | __func__, err); |
1053 | goto out; | 1079 | goto out; |
1054 | } | 1080 | } |
1055 | link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? | 1081 | link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) : |
1056 | ethtool2ptys_adver_func(link_ksettings->link_modes.advertising) : | 1082 | mlx5e_port_speed2linkmodes(mdev, speed, !ext); |
1057 | mlx5e_port_speed2linkmodes(mdev, speed); | ||
1058 | 1083 | ||
1059 | link_modes = link_modes & eproto.cap; | 1084 | link_modes = link_modes & eproto.cap; |
1060 | if (!link_modes) { | 1085 | if (!link_modes) { |
@@ -1067,14 +1092,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, | |||
1067 | mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap, | 1092 | mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap, |
1068 | &an_disable_admin); | 1093 | &an_disable_admin); |
1069 | 1094 | ||
1070 | an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE; | 1095 | an_disable = autoneg == AUTONEG_DISABLE; |
1071 | an_changes = ((!an_disable && an_disable_admin) || | 1096 | an_changes = ((!an_disable && an_disable_admin) || |
1072 | (an_disable && !an_disable_admin)); | 1097 | (an_disable && !an_disable_admin)); |
1073 | 1098 | ||
1074 | if (!an_changes && link_modes == eproto.admin) | 1099 | if (!an_changes && link_modes == eproto.admin) |
1075 | goto out; | 1100 | goto out; |
1076 | 1101 | ||
1077 | mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested); | 1102 | mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext); |
1078 | mlx5_toggle_port_link(mdev); | 1103 | mlx5_toggle_port_link(mdev); |
1079 | 1104 | ||
1080 | out: | 1105 | out: |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index a66589816e21..eed7101e8bb7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | |||
@@ -611,7 +611,8 @@ static int validate_flow(struct mlx5e_priv *priv, | |||
611 | return -ENOSPC; | 611 | return -ENOSPC; |
612 | 612 | ||
613 | if (fs->ring_cookie != RX_CLS_FLOW_DISC) | 613 | if (fs->ring_cookie != RX_CLS_FLOW_DISC) |
614 | if (!mlx5e_qid_validate(&priv->channels.params, fs->ring_cookie)) | 614 | if (!mlx5e_qid_validate(priv->profile, &priv->channels.params, |
615 | fs->ring_cookie)) | ||
615 | return -EINVAL; | 616 | return -EINVAL; |
616 | 617 | ||
617 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { | 618 | switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 4db595a7eb03..9a2fcef6e7f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -331,12 +331,11 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) | |||
331 | 331 | ||
332 | static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) | 332 | static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) |
333 | { | 333 | { |
334 | struct mlx5e_wqe_frag_info next_frag, *prev; | 334 | struct mlx5e_wqe_frag_info next_frag = {}; |
335 | struct mlx5e_wqe_frag_info *prev = NULL; | ||
335 | int i; | 336 | int i; |
336 | 337 | ||
337 | next_frag.di = &rq->wqe.di[0]; | 338 | next_frag.di = &rq->wqe.di[0]; |
338 | next_frag.offset = 0; | ||
339 | prev = NULL; | ||
340 | 339 | ||
341 | for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) { | 340 | for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) { |
342 | struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; | 341 | struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; |
@@ -1679,10 +1678,10 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, | |||
1679 | struct mlx5e_channel_param *cparam) | 1678 | struct mlx5e_channel_param *cparam) |
1680 | { | 1679 | { |
1681 | struct mlx5e_priv *priv = c->priv; | 1680 | struct mlx5e_priv *priv = c->priv; |
1682 | int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev); | 1681 | int err, tc; |
1683 | 1682 | ||
1684 | for (tc = 0; tc < params->num_tc; tc++) { | 1683 | for (tc = 0; tc < params->num_tc; tc++) { |
1685 | int txq_ix = c->ix + tc * max_nch; | 1684 | int txq_ix = c->ix + tc * priv->max_nch; |
1686 | 1685 | ||
1687 | err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix, | 1686 | err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix, |
1688 | params, &cparam->sq, &c->sq[tc], tc); | 1687 | params, &cparam->sq, &c->sq[tc], tc); |
@@ -2440,11 +2439,10 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv) | |||
2440 | 2439 | ||
2441 | int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) | 2440 | int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) |
2442 | { | 2441 | { |
2443 | const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); | ||
2444 | int err; | 2442 | int err; |
2445 | int ix; | 2443 | int ix; |
2446 | 2444 | ||
2447 | for (ix = 0; ix < max_nch; ix++) { | 2445 | for (ix = 0; ix < priv->max_nch; ix++) { |
2448 | err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt); | 2446 | err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt); |
2449 | if (unlikely(err)) | 2447 | if (unlikely(err)) |
2450 | goto err_destroy_rqts; | 2448 | goto err_destroy_rqts; |
@@ -2462,10 +2460,9 @@ err_destroy_rqts: | |||
2462 | 2460 | ||
2463 | void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) | 2461 | void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) |
2464 | { | 2462 | { |
2465 | const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); | ||
2466 | int i; | 2463 | int i; |
2467 | 2464 | ||
2468 | for (i = 0; i < max_nch; i++) | 2465 | for (i = 0; i < priv->max_nch; i++) |
2469 | mlx5e_destroy_rqt(priv, &tirs[i].rqt); | 2466 | mlx5e_destroy_rqt(priv, &tirs[i].rqt); |
2470 | } | 2467 | } |
2471 | 2468 | ||
@@ -2559,7 +2556,7 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv, | |||
2559 | mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); | 2556 | mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); |
2560 | } | 2557 | } |
2561 | 2558 | ||
2562 | for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) { | 2559 | for (ix = 0; ix < priv->max_nch; ix++) { |
2563 | struct mlx5e_redirect_rqt_param direct_rrp = { | 2560 | struct mlx5e_redirect_rqt_param direct_rrp = { |
2564 | .is_rss = false, | 2561 | .is_rss = false, |
2565 | { | 2562 | { |
@@ -2760,7 +2757,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) | |||
2760 | goto free_in; | 2757 | goto free_in; |
2761 | } | 2758 | } |
2762 | 2759 | ||
2763 | for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) { | 2760 | for (ix = 0; ix < priv->max_nch; ix++) { |
2764 | err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, | 2761 | err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, |
2765 | in, inlen); | 2762 | in, inlen); |
2766 | if (err) | 2763 | if (err) |
@@ -2860,12 +2857,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) | |||
2860 | 2857 | ||
2861 | static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv) | 2858 | static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv) |
2862 | { | 2859 | { |
2863 | int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); | ||
2864 | int i, tc; | 2860 | int i, tc; |
2865 | 2861 | ||
2866 | for (i = 0; i < max_nch; i++) | 2862 | for (i = 0; i < priv->max_nch; i++) |
2867 | for (tc = 0; tc < priv->profile->max_tc; tc++) | 2863 | for (tc = 0; tc < priv->profile->max_tc; tc++) |
2868 | priv->channel_tc2txq[i][tc] = i + tc * max_nch; | 2864 | priv->channel_tc2txq[i][tc] = i + tc * priv->max_nch; |
2869 | } | 2865 | } |
2870 | 2866 | ||
2871 | static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv) | 2867 | static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv) |
@@ -2886,7 +2882,7 @@ static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv) | |||
2886 | void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) | 2882 | void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) |
2887 | { | 2883 | { |
2888 | int num_txqs = priv->channels.num * priv->channels.params.num_tc; | 2884 | int num_txqs = priv->channels.num * priv->channels.params.num_tc; |
2889 | int num_rxqs = priv->channels.num * MLX5E_NUM_RQ_GROUPS; | 2885 | int num_rxqs = priv->channels.num * priv->profile->rq_groups; |
2890 | struct net_device *netdev = priv->netdev; | 2886 | struct net_device *netdev = priv->netdev; |
2891 | 2887 | ||
2892 | mlx5e_netdev_set_tcs(netdev); | 2888 | mlx5e_netdev_set_tcs(netdev); |
@@ -3308,7 +3304,6 @@ err_destroy_inner_tirs: | |||
3308 | 3304 | ||
3309 | int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) | 3305 | int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) |
3310 | { | 3306 | { |
3311 | const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); | ||
3312 | struct mlx5e_tir *tir; | 3307 | struct mlx5e_tir *tir; |
3313 | void *tirc; | 3308 | void *tirc; |
3314 | int inlen; | 3309 | int inlen; |
@@ -3321,7 +3316,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) | |||
3321 | if (!in) | 3316 | if (!in) |
3322 | return -ENOMEM; | 3317 | return -ENOMEM; |
3323 | 3318 | ||
3324 | for (ix = 0; ix < max_nch; ix++) { | 3319 | for (ix = 0; ix < priv->max_nch; ix++) { |
3325 | memset(in, 0, inlen); | 3320 | memset(in, 0, inlen); |
3326 | tir = &tirs[ix]; | 3321 | tir = &tirs[ix]; |
3327 | tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); | 3322 | tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); |
@@ -3360,10 +3355,9 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) | |||
3360 | 3355 | ||
3361 | void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) | 3356 | void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs) |
3362 | { | 3357 | { |
3363 | const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); | ||
3364 | int i; | 3358 | int i; |
3365 | 3359 | ||
3366 | for (i = 0; i < max_nch; i++) | 3360 | for (i = 0; i < priv->max_nch; i++) |
3367 | mlx5e_destroy_tir(priv->mdev, &tirs[i]); | 3361 | mlx5e_destroy_tir(priv->mdev, &tirs[i]); |
3368 | } | 3362 | } |
3369 | 3363 | ||
@@ -3489,7 +3483,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) | |||
3489 | { | 3483 | { |
3490 | int i; | 3484 | int i; |
3491 | 3485 | ||
3492 | for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) { | 3486 | for (i = 0; i < priv->max_nch; i++) { |
3493 | struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; | 3487 | struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; |
3494 | struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq; | 3488 | struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq; |
3495 | struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; | 3489 | struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; |
@@ -4964,8 +4958,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, | |||
4964 | return err; | 4958 | return err; |
4965 | 4959 | ||
4966 | mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params, | 4960 | mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params, |
4967 | mlx5e_get_netdev_max_channels(netdev), | 4961 | priv->max_nch, netdev->mtu); |
4968 | netdev->mtu); | ||
4969 | 4962 | ||
4970 | mlx5e_timestamp_init(priv); | 4963 | mlx5e_timestamp_init(priv); |
4971 | 4964 | ||
@@ -5168,6 +5161,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = { | |||
5168 | .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe, | 5161 | .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe, |
5169 | .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, | 5162 | .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, |
5170 | .max_tc = MLX5E_MAX_NUM_TC, | 5163 | .max_tc = MLX5E_MAX_NUM_TC, |
5164 | .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), | ||
5171 | }; | 5165 | }; |
5172 | 5166 | ||
5173 | /* mlx5e generic netdev management API (move to en_common.c) */ | 5167 | /* mlx5e generic netdev management API (move to en_common.c) */ |
@@ -5185,6 +5179,7 @@ int mlx5e_netdev_init(struct net_device *netdev, | |||
5185 | priv->profile = profile; | 5179 | priv->profile = profile; |
5186 | priv->ppriv = ppriv; | 5180 | priv->ppriv = ppriv; |
5187 | priv->msglevel = MLX5E_MSG_LEVEL; | 5181 | priv->msglevel = MLX5E_MSG_LEVEL; |
5182 | priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1); | ||
5188 | priv->max_opened_tc = 1; | 5183 | priv->max_opened_tc = 1; |
5189 | 5184 | ||
5190 | mutex_init(&priv->state_lock); | 5185 | mutex_init(&priv->state_lock); |
@@ -5222,7 +5217,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, | |||
5222 | 5217 | ||
5223 | netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), | 5218 | netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), |
5224 | nch * profile->max_tc, | 5219 | nch * profile->max_tc, |
5225 | nch * MLX5E_NUM_RQ_GROUPS); | 5220 | nch * profile->rq_groups); |
5226 | if (!netdev) { | 5221 | if (!netdev) { |
5227 | mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); | 5222 | mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); |
5228 | return NULL; | 5223 | return NULL; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index bf6f4835457e..fbb9de633578 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
@@ -1718,6 +1718,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { | |||
1718 | .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, | 1718 | .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, |
1719 | .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, | 1719 | .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, |
1720 | .max_tc = 1, | 1720 | .max_tc = 1, |
1721 | .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), | ||
1721 | }; | 1722 | }; |
1722 | 1723 | ||
1723 | static const struct mlx5e_profile mlx5e_uplink_rep_profile = { | 1724 | static const struct mlx5e_profile mlx5e_uplink_rep_profile = { |
@@ -1735,6 +1736,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = { | |||
1735 | .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, | 1736 | .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, |
1736 | .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, | 1737 | .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, |
1737 | .max_tc = MLX5E_MAX_NUM_TC, | 1738 | .max_tc = MLX5E_MAX_NUM_TC, |
1739 | .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), | ||
1738 | }; | 1740 | }; |
1739 | 1741 | ||
1740 | static bool | 1742 | static bool |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 6eee3c7d4b06..94a32c76c182 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | |||
@@ -174,7 +174,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) | |||
174 | 174 | ||
175 | memset(s, 0, sizeof(*s)); | 175 | memset(s, 0, sizeof(*s)); |
176 | 176 | ||
177 | for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) { | 177 | for (i = 0; i < priv->max_nch; i++) { |
178 | struct mlx5e_channel_stats *channel_stats = | 178 | struct mlx5e_channel_stats *channel_stats = |
179 | &priv->channel_stats[i]; | 179 | &priv->channel_stats[i]; |
180 | struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq; | 180 | struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq; |
@@ -1401,7 +1401,7 @@ static const struct counter_desc ch_stats_desc[] = { | |||
1401 | 1401 | ||
1402 | static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) | 1402 | static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) |
1403 | { | 1403 | { |
1404 | int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); | 1404 | int max_nch = priv->max_nch; |
1405 | 1405 | ||
1406 | return (NUM_RQ_STATS * max_nch) + | 1406 | return (NUM_RQ_STATS * max_nch) + |
1407 | (NUM_CH_STATS * max_nch) + | 1407 | (NUM_CH_STATS * max_nch) + |
@@ -1415,8 +1415,8 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) | |||
1415 | static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, | 1415 | static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, |
1416 | int idx) | 1416 | int idx) |
1417 | { | 1417 | { |
1418 | int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); | ||
1419 | bool is_xsk = priv->xsk.ever_used; | 1418 | bool is_xsk = priv->xsk.ever_used; |
1419 | int max_nch = priv->max_nch; | ||
1420 | int i, j, tc; | 1420 | int i, j, tc; |
1421 | 1421 | ||
1422 | for (i = 0; i < max_nch; i++) | 1422 | for (i = 0; i < max_nch; i++) |
@@ -1458,8 +1458,8 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, | |||
1458 | static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, | 1458 | static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, |
1459 | int idx) | 1459 | int idx) |
1460 | { | 1460 | { |
1461 | int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); | ||
1462 | bool is_xsk = priv->xsk.ever_used; | 1461 | bool is_xsk = priv->xsk.ever_used; |
1462 | int max_nch = priv->max_nch; | ||
1463 | int i, j, tc; | 1463 | int i, j, tc; |
1464 | 1464 | ||
1465 | for (i = 0; i < max_nch; i++) | 1465 | for (i = 0; i < max_nch; i++) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index c5d75e2ecf54..4d97cc47835f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -1320,13 +1320,13 @@ static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow) | |||
1320 | void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) | 1320 | void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) |
1321 | { | 1321 | { |
1322 | struct mlx5e_neigh *m_neigh = &nhe->m_neigh; | 1322 | struct mlx5e_neigh *m_neigh = &nhe->m_neigh; |
1323 | u64 bytes, packets, lastuse = 0; | ||
1324 | struct mlx5e_tc_flow *flow; | 1323 | struct mlx5e_tc_flow *flow; |
1325 | struct mlx5e_encap_entry *e; | 1324 | struct mlx5e_encap_entry *e; |
1326 | struct mlx5_fc *counter; | 1325 | struct mlx5_fc *counter; |
1327 | struct neigh_table *tbl; | 1326 | struct neigh_table *tbl; |
1328 | bool neigh_used = false; | 1327 | bool neigh_used = false; |
1329 | struct neighbour *n; | 1328 | struct neighbour *n; |
1329 | u64 lastuse; | ||
1330 | 1330 | ||
1331 | if (m_neigh->family == AF_INET) | 1331 | if (m_neigh->family == AF_INET) |
1332 | tbl = &arp_tbl; | 1332 | tbl = &arp_tbl; |
@@ -1349,7 +1349,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) | |||
1349 | 1349 | ||
1350 | if (mlx5e_is_offloaded_flow(flow)) { | 1350 | if (mlx5e_is_offloaded_flow(flow)) { |
1351 | counter = mlx5e_tc_get_counter(flow); | 1351 | counter = mlx5e_tc_get_counter(flow); |
1352 | mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); | 1352 | lastuse = mlx5_fc_query_lastuse(counter); |
1353 | if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { | 1353 | if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { |
1354 | mlx5e_flow_put(netdev_priv(e->out_dev), flow); | 1354 | mlx5e_flow_put(netdev_priv(e->out_dev), flow); |
1355 | neigh_used = true; | 1355 | neigh_used = true; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index c50b6f0769c8..49b06b256c92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | |||
@@ -49,7 +49,7 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) | |||
49 | static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) | 49 | static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) |
50 | { | 50 | { |
51 | struct mlx5e_sq_stats *stats = sq->stats; | 51 | struct mlx5e_sq_stats *stats = sq->stats; |
52 | struct dim_sample dim_sample; | 52 | struct dim_sample dim_sample = {}; |
53 | 53 | ||
54 | if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state))) | 54 | if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state))) |
55 | return; | 55 | return; |
@@ -61,7 +61,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) | |||
61 | static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) | 61 | static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) |
62 | { | 62 | { |
63 | struct mlx5e_rq_stats *stats = rq->stats; | 63 | struct mlx5e_rq_stats *stats = rq->stats; |
64 | struct dim_sample dim_sample; | 64 | struct dim_sample dim_sample = {}; |
65 | 65 | ||
66 | if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state))) | 66 | if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state))) |
67 | return; | 67 | return; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index c48c382f926f..c1252d6be0ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | |||
@@ -68,7 +68,7 @@ enum fs_flow_table_type { | |||
68 | FS_FT_SNIFFER_RX = 0X5, | 68 | FS_FT_SNIFFER_RX = 0X5, |
69 | FS_FT_SNIFFER_TX = 0X6, | 69 | FS_FT_SNIFFER_TX = 0X6, |
70 | FS_FT_RDMA_RX = 0X7, | 70 | FS_FT_RDMA_RX = 0X7, |
71 | FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX, | 71 | FS_FT_MAX_TYPE = FS_FT_RDMA_RX, |
72 | }; | 72 | }; |
73 | 73 | ||
74 | enum fs_flow_table_op_mod { | 74 | enum fs_flow_table_op_mod { |
@@ -275,7 +275,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev); | |||
275 | (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \ | 275 | (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \ |
276 | (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \ | 276 | (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \ |
277 | (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \ | 277 | (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \ |
278 | (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\ | 278 | (type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \ |
279 | (BUILD_BUG_ON_ZERO(FS_FT_RDMA_RX != FS_FT_MAX_TYPE))\ | ||
279 | ) | 280 | ) |
280 | 281 | ||
281 | #endif | 282 | #endif |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 51f1736c455d..1804cf3c3814 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | |||
@@ -426,6 +426,11 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, | |||
426 | } | 426 | } |
427 | EXPORT_SYMBOL(mlx5_fc_query); | 427 | EXPORT_SYMBOL(mlx5_fc_query); |
428 | 428 | ||
429 | u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter) | ||
430 | { | ||
431 | return counter->cache.lastuse; | ||
432 | } | ||
433 | |||
429 | void mlx5_fc_query_cached(struct mlx5_fc *counter, | 434 | void mlx5_fc_query_cached(struct mlx5_fc *counter, |
430 | u64 *bytes, u64 *packets, u64 *lastuse) | 435 | u64 *bytes, u64 *packets, u64 *lastuse) |
431 | { | 436 | { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 6bfaaab362dc..1a2560e3bf7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | |||
@@ -88,8 +88,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev, | |||
88 | netdev->mtu = netdev->max_mtu; | 88 | netdev->mtu = netdev->max_mtu; |
89 | 89 | ||
90 | mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params, | 90 | mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params, |
91 | mlx5e_get_netdev_max_channels(netdev), | 91 | priv->max_nch, netdev->mtu); |
92 | netdev->mtu); | ||
93 | mlx5i_build_nic_params(mdev, &priv->channels.params); | 92 | mlx5i_build_nic_params(mdev, &priv->channels.params); |
94 | 93 | ||
95 | mlx5e_timestamp_init(priv); | 94 | mlx5e_timestamp_init(priv); |
@@ -118,11 +117,10 @@ void mlx5i_cleanup(struct mlx5e_priv *priv) | |||
118 | 117 | ||
119 | static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) | 118 | static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) |
120 | { | 119 | { |
121 | int max_nch = mlx5e_get_netdev_max_channels(priv->netdev); | ||
122 | struct mlx5e_sw_stats s = { 0 }; | 120 | struct mlx5e_sw_stats s = { 0 }; |
123 | int i, j; | 121 | int i, j; |
124 | 122 | ||
125 | for (i = 0; i < max_nch; i++) { | 123 | for (i = 0; i < priv->max_nch; i++) { |
126 | struct mlx5e_channel_stats *channel_stats; | 124 | struct mlx5e_channel_stats *channel_stats; |
127 | struct mlx5e_rq_stats *rq_stats; | 125 | struct mlx5e_rq_stats *rq_stats; |
128 | 126 | ||
@@ -436,6 +434,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = { | |||
436 | .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, | 434 | .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, |
437 | .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ | 435 | .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ |
438 | .max_tc = MLX5I_MAX_NUM_TC, | 436 | .max_tc = MLX5I_MAX_NUM_TC, |
437 | .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), | ||
439 | }; | 438 | }; |
440 | 439 | ||
441 | /* mlx5i netdev NDos */ | 440 | /* mlx5i netdev NDos */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 6e56fa769d2e..c5a491e22e55 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | |||
@@ -355,6 +355,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = { | |||
355 | .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, | 355 | .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, |
356 | .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ | 356 | .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ |
357 | .max_tc = MLX5I_MAX_NUM_TC, | 357 | .max_tc = MLX5I_MAX_NUM_TC, |
358 | .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), | ||
358 | }; | 359 | }; |
359 | 360 | ||
360 | const struct mlx5e_profile *mlx5i_pkey_get_profile(void) | 361 | const struct mlx5e_profile *mlx5i_pkey_get_profile(void) |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 9277b3f125e8..5a8e94c0a95a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -6350,7 +6350,7 @@ static int __init mlxsw_sp_module_init(void) | |||
6350 | return 0; | 6350 | return 0; |
6351 | 6351 | ||
6352 | err_sp2_pci_driver_register: | 6352 | err_sp2_pci_driver_register: |
6353 | mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver); | 6353 | mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); |
6354 | err_sp1_pci_driver_register: | 6354 | err_sp1_pci_driver_register: |
6355 | mlxsw_core_driver_unregister(&mlxsw_sp2_driver); | 6355 | mlxsw_core_driver_unregister(&mlxsw_sp2_driver); |
6356 | err_sp2_core_driver_register: | 6356 | err_sp2_core_driver_register: |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index c78d93afbb9d..db17ba35ec84 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
@@ -954,4 +954,8 @@ void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port); | |||
954 | int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp); | 954 | int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp); |
955 | void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp); | 955 | void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp); |
956 | 956 | ||
957 | /* spectrum_nve_vxlan.c */ | ||
958 | int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp); | ||
959 | void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp); | ||
960 | |||
957 | #endif | 961 | #endif |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 1537f70bc26d..888ba4300bcc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | |||
@@ -437,8 +437,8 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = { | |||
437 | MLXSW_SP1_SB_PR_CPU_SIZE, true, false), | 437 | MLXSW_SP1_SB_PR_CPU_SIZE, true, false), |
438 | }; | 438 | }; |
439 | 439 | ||
440 | #define MLXSW_SP2_SB_PR_INGRESS_SIZE 38128752 | 440 | #define MLXSW_SP2_SB_PR_INGRESS_SIZE 35297568 |
441 | #define MLXSW_SP2_SB_PR_EGRESS_SIZE 38128752 | 441 | #define MLXSW_SP2_SB_PR_EGRESS_SIZE 35297568 |
442 | #define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000) | 442 | #define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000) |
443 | 443 | ||
444 | /* Order according to mlxsw_sp2_sb_pool_dess */ | 444 | /* Order according to mlxsw_sp2_sb_pool_dess */ |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c index 1df164a4b06d..17f334b46c40 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c | |||
@@ -775,6 +775,7 @@ static void mlxsw_sp_nve_tunnel_fini(struct mlxsw_sp *mlxsw_sp) | |||
775 | ops->fini(nve); | 775 | ops->fini(nve); |
776 | mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, | 776 | mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, |
777 | nve->tunnel_index); | 777 | nve->tunnel_index); |
778 | memset(&nve->config, 0, sizeof(nve->config)); | ||
778 | } | 779 | } |
779 | nve->num_nve_tunnels--; | 780 | nve->num_nve_tunnels--; |
780 | } | 781 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h index 0035640156a1..12f664f42f21 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h | |||
@@ -29,6 +29,7 @@ struct mlxsw_sp_nve { | |||
29 | unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX]; | 29 | unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX]; |
30 | u32 tunnel_index; | 30 | u32 tunnel_index; |
31 | u16 ul_rif_index; /* Reserved for Spectrum */ | 31 | u16 ul_rif_index; /* Reserved for Spectrum */ |
32 | unsigned int inc_parsing_depth_refs; | ||
32 | }; | 33 | }; |
33 | 34 | ||
34 | struct mlxsw_sp_nve_ops { | 35 | struct mlxsw_sp_nve_ops { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index 93ccd9fc2266..05517c7feaa5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c | |||
@@ -103,9 +103,9 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, | |||
103 | config->udp_dport = cfg->dst_port; | 103 | config->udp_dport = cfg->dst_port; |
104 | } | 104 | } |
105 | 105 | ||
106 | static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, | 106 | static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, |
107 | unsigned int parsing_depth, | 107 | unsigned int parsing_depth, |
108 | __be16 udp_dport) | 108 | __be16 udp_dport) |
109 | { | 109 | { |
110 | char mprs_pl[MLXSW_REG_MPRS_LEN]; | 110 | char mprs_pl[MLXSW_REG_MPRS_LEN]; |
111 | 111 | ||
@@ -113,6 +113,56 @@ static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, | |||
113 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); | 113 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); |
114 | } | 114 | } |
115 | 115 | ||
116 | static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, | ||
117 | __be16 udp_dport) | ||
118 | { | ||
119 | int parsing_depth = mlxsw_sp->nve->inc_parsing_depth_refs ? | ||
120 | MLXSW_SP_NVE_VXLAN_PARSING_DEPTH : | ||
121 | MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH; | ||
122 | |||
123 | return __mlxsw_sp_nve_parsing_set(mlxsw_sp, parsing_depth, udp_dport); | ||
124 | } | ||
125 | |||
126 | static int | ||
127 | __mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp, | ||
128 | __be16 udp_dport) | ||
129 | { | ||
130 | int err; | ||
131 | |||
132 | mlxsw_sp->nve->inc_parsing_depth_refs++; | ||
133 | |||
134 | err = mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport); | ||
135 | if (err) | ||
136 | goto err_nve_parsing_set; | ||
137 | return 0; | ||
138 | |||
139 | err_nve_parsing_set: | ||
140 | mlxsw_sp->nve->inc_parsing_depth_refs--; | ||
141 | return err; | ||
142 | } | ||
143 | |||
144 | static void | ||
145 | __mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp, | ||
146 | __be16 udp_dport) | ||
147 | { | ||
148 | mlxsw_sp->nve->inc_parsing_depth_refs--; | ||
149 | mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport); | ||
150 | } | ||
151 | |||
152 | int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp) | ||
153 | { | ||
154 | __be16 udp_dport = mlxsw_sp->nve->config.udp_dport; | ||
155 | |||
156 | return __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, udp_dport); | ||
157 | } | ||
158 | |||
159 | void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp) | ||
160 | { | ||
161 | __be16 udp_dport = mlxsw_sp->nve->config.udp_dport; | ||
162 | |||
163 | __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, udp_dport); | ||
164 | } | ||
165 | |||
116 | static void | 166 | static void |
117 | mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl, | 167 | mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl, |
118 | const struct mlxsw_sp_nve_config *config) | 168 | const struct mlxsw_sp_nve_config *config) |
@@ -176,9 +226,7 @@ static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve, | |||
176 | struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; | 226 | struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; |
177 | int err; | 227 | int err; |
178 | 228 | ||
179 | err = mlxsw_sp_nve_parsing_set(mlxsw_sp, | 229 | err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport); |
180 | MLXSW_SP_NVE_VXLAN_PARSING_DEPTH, | ||
181 | config->udp_dport); | ||
182 | if (err) | 230 | if (err) |
183 | return err; | 231 | return err; |
184 | 232 | ||
@@ -203,8 +251,7 @@ err_promote_decap: | |||
203 | err_rtdp_set: | 251 | err_rtdp_set: |
204 | mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); | 252 | mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); |
205 | err_config_set: | 253 | err_config_set: |
206 | mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH, | 254 | __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0); |
207 | config->udp_dport); | ||
208 | return err; | 255 | return err; |
209 | } | 256 | } |
210 | 257 | ||
@@ -216,8 +263,7 @@ static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve) | |||
216 | mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, | 263 | mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, |
217 | config->ul_proto, &config->ul_sip); | 264 | config->ul_proto, &config->ul_sip); |
218 | mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); | 265 | mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); |
219 | mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH, | 266 | __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0); |
220 | config->udp_dport); | ||
221 | } | 267 | } |
222 | 268 | ||
223 | static int | 269 | static int |
@@ -320,9 +366,7 @@ static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve, | |||
320 | struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; | 366 | struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; |
321 | int err; | 367 | int err; |
322 | 368 | ||
323 | err = mlxsw_sp_nve_parsing_set(mlxsw_sp, | 369 | err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport); |
324 | MLXSW_SP_NVE_VXLAN_PARSING_DEPTH, | ||
325 | config->udp_dport); | ||
326 | if (err) | 370 | if (err) |
327 | return err; | 371 | return err; |
328 | 372 | ||
@@ -348,8 +392,7 @@ err_promote_decap: | |||
348 | err_rtdp_set: | 392 | err_rtdp_set: |
349 | mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); | 393 | mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); |
350 | err_config_set: | 394 | err_config_set: |
351 | mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH, | 395 | __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0); |
352 | config->udp_dport); | ||
353 | return err; | 396 | return err; |
354 | } | 397 | } |
355 | 398 | ||
@@ -361,8 +404,7 @@ static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve) | |||
361 | mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, | 404 | mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, |
362 | config->ul_proto, &config->ul_sip); | 405 | config->ul_proto, &config->ul_sip); |
363 | mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); | 406 | mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); |
364 | mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH, | 407 | __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0); |
365 | config->udp_dport); | ||
366 | } | 408 | } |
367 | 409 | ||
368 | const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = { | 410 | const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index bd9c2bc2d5d6..63b07edd9d81 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | |||
@@ -979,6 +979,9 @@ static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port, | |||
979 | { | 979 | { |
980 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 980 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
981 | struct mlxsw_sp_port *tmp; | 981 | struct mlxsw_sp_port *tmp; |
982 | u16 orig_ing_types = 0; | ||
983 | u16 orig_egr_types = 0; | ||
984 | int err; | ||
982 | int i; | 985 | int i; |
983 | 986 | ||
984 | /* MTPPPC configures timestamping globally, not per port. Find the | 987 | /* MTPPPC configures timestamping globally, not per port. Find the |
@@ -986,12 +989,26 @@ static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port, | |||
986 | */ | 989 | */ |
987 | for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) { | 990 | for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) { |
988 | tmp = mlxsw_sp->ports[i]; | 991 | tmp = mlxsw_sp->ports[i]; |
992 | if (tmp) { | ||
993 | orig_ing_types |= tmp->ptp.ing_types; | ||
994 | orig_egr_types |= tmp->ptp.egr_types; | ||
995 | } | ||
989 | if (tmp && tmp != mlxsw_sp_port) { | 996 | if (tmp && tmp != mlxsw_sp_port) { |
990 | ing_types |= tmp->ptp.ing_types; | 997 | ing_types |= tmp->ptp.ing_types; |
991 | egr_types |= tmp->ptp.egr_types; | 998 | egr_types |= tmp->ptp.egr_types; |
992 | } | 999 | } |
993 | } | 1000 | } |
994 | 1001 | ||
1002 | if ((ing_types || egr_types) && !(orig_ing_types || orig_egr_types)) { | ||
1003 | err = mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp); | ||
1004 | if (err) { | ||
1005 | netdev_err(mlxsw_sp_port->dev, "Failed to increase parsing depth"); | ||
1006 | return err; | ||
1007 | } | ||
1008 | } | ||
1009 | if (!(ing_types || egr_types) && (orig_ing_types || orig_egr_types)) | ||
1010 | mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp); | ||
1011 | |||
995 | return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp, | 1012 | return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp, |
996 | ing_types, egr_types); | 1013 | ing_types, egr_types); |
997 | } | 1014 | } |
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index b71e4ecbe469..6932e615d4b0 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c | |||
@@ -1818,6 +1818,7 @@ EXPORT_SYMBOL(ocelot_init); | |||
1818 | 1818 | ||
1819 | void ocelot_deinit(struct ocelot *ocelot) | 1819 | void ocelot_deinit(struct ocelot *ocelot) |
1820 | { | 1820 | { |
1821 | cancel_delayed_work(&ocelot->stats_work); | ||
1821 | destroy_workqueue(ocelot->stats_queue); | 1822 | destroy_workqueue(ocelot->stats_queue); |
1822 | mutex_destroy(&ocelot->stats_lock); | 1823 | mutex_destroy(&ocelot->stats_lock); |
1823 | ocelot_ace_deinit(); | 1824 | ocelot_ace_deinit(); |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index d9cbe84ac6ad..1b840ee47339 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | |||
@@ -444,12 +444,12 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) | |||
444 | data = nfp_pr_et(data, "hw_rx_csum_complete"); | 444 | data = nfp_pr_et(data, "hw_rx_csum_complete"); |
445 | data = nfp_pr_et(data, "hw_rx_csum_err"); | 445 | data = nfp_pr_et(data, "hw_rx_csum_err"); |
446 | data = nfp_pr_et(data, "rx_replace_buf_alloc_fail"); | 446 | data = nfp_pr_et(data, "rx_replace_buf_alloc_fail"); |
447 | data = nfp_pr_et(data, "rx_tls_decrypted"); | 447 | data = nfp_pr_et(data, "rx_tls_decrypted_packets"); |
448 | data = nfp_pr_et(data, "hw_tx_csum"); | 448 | data = nfp_pr_et(data, "hw_tx_csum"); |
449 | data = nfp_pr_et(data, "hw_tx_inner_csum"); | 449 | data = nfp_pr_et(data, "hw_tx_inner_csum"); |
450 | data = nfp_pr_et(data, "tx_gather"); | 450 | data = nfp_pr_et(data, "tx_gather"); |
451 | data = nfp_pr_et(data, "tx_lso"); | 451 | data = nfp_pr_et(data, "tx_lso"); |
452 | data = nfp_pr_et(data, "tx_tls_encrypted"); | 452 | data = nfp_pr_et(data, "tx_tls_encrypted_packets"); |
453 | data = nfp_pr_et(data, "tx_tls_ooo"); | 453 | data = nfp_pr_et(data, "tx_tls_ooo"); |
454 | data = nfp_pr_et(data, "tx_tls_drop_no_sync_data"); | 454 | data = nfp_pr_et(data, "tx_tls_drop_no_sync_data"); |
455 | 455 | ||
diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig index 70b1a03c0953..01229190132d 100644 --- a/drivers/net/ethernet/ni/Kconfig +++ b/drivers/net/ethernet/ni/Kconfig | |||
@@ -11,7 +11,7 @@ config NET_VENDOR_NI | |||
11 | 11 | ||
12 | Note that the answer to this question doesn't directly affect the | 12 | Note that the answer to this question doesn't directly affect the |
13 | kernel: saying N will just cause the configurator to skip all | 13 | kernel: saying N will just cause the configurator to skip all |
14 | the questions about National Instrument devices. | 14 | the questions about National Instruments devices. |
15 | If you say Y, you will be asked for your specific device in the | 15 | If you say Y, you will be asked for your specific device in the |
16 | following questions. | 16 | following questions. |
17 | 17 | ||
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig index 8161e308e64b..ead3750b4489 100644 --- a/drivers/net/ethernet/packetengines/Kconfig +++ b/drivers/net/ethernet/packetengines/Kconfig | |||
@@ -1,10 +1,10 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0-only | 1 | # SPDX-License-Identifier: GPL-2.0-only |
2 | # | 2 | # |
3 | # Packet engine device configuration | 3 | # Packet Engines device configuration |
4 | # | 4 | # |
5 | 5 | ||
6 | config NET_VENDOR_PACKET_ENGINES | 6 | config NET_VENDOR_PACKET_ENGINES |
7 | bool "Packet Engine devices" | 7 | bool "Packet Engines devices" |
8 | default y | 8 | default y |
9 | depends on PCI | 9 | depends on PCI |
10 | ---help--- | 10 | ---help--- |
@@ -12,7 +12,7 @@ config NET_VENDOR_PACKET_ENGINES | |||
12 | 12 | ||
13 | Note that the answer to this question doesn't directly affect the | 13 | Note that the answer to this question doesn't directly affect the |
14 | kernel: saying N will just cause the configurator to skip all | 14 | kernel: saying N will just cause the configurator to skip all |
15 | the questions about packet engine devices. If you say Y, you will | 15 | the questions about Packet Engines devices. If you say Y, you will |
16 | be asked for your specific card in the following questions. | 16 | be asked for your specific card in the following questions. |
17 | 17 | ||
18 | if NET_VENDOR_PACKET_ENGINES | 18 | if NET_VENDOR_PACKET_ENGINES |
diff --git a/drivers/net/ethernet/packetengines/Makefile b/drivers/net/ethernet/packetengines/Makefile index 1553c9cfc254..cf054b796d11 100644 --- a/drivers/net/ethernet/packetengines/Makefile +++ b/drivers/net/ethernet/packetengines/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0-only | 1 | # SPDX-License-Identifier: GPL-2.0-only |
2 | # | 2 | # |
3 | # Makefile for the Packet Engine network device drivers. | 3 | # Makefile for the Packet Engines network device drivers. |
4 | # | 4 | # |
5 | 5 | ||
6 | obj-$(CONFIG_HAMACHI) += hamachi.o | 6 | obj-$(CONFIG_HAMACHI) += hamachi.o |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 4e8118a08654..9f5113639eaf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c | |||
@@ -1093,7 +1093,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, | |||
1093 | snprintf(bit_name, 30, | 1093 | snprintf(bit_name, 30, |
1094 | p_aeu->bit_name, num); | 1094 | p_aeu->bit_name, num); |
1095 | else | 1095 | else |
1096 | strncpy(bit_name, | 1096 | strlcpy(bit_name, |
1097 | p_aeu->bit_name, 30); | 1097 | p_aeu->bit_name, 30); |
1098 | 1098 | ||
1099 | /* We now need to pass bitmask in its | 1099 | /* We now need to pass bitmask in its |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 17c64e43d6c3..158ac0738911 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c | |||
@@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, | |||
442 | /* Vendor specific information */ | 442 | /* Vendor specific information */ |
443 | dev->vendor_id = cdev->vendor_id; | 443 | dev->vendor_id = cdev->vendor_id; |
444 | dev->vendor_part_id = cdev->device_id; | 444 | dev->vendor_part_id = cdev->device_id; |
445 | dev->hw_ver = 0; | 445 | dev->hw_ver = cdev->chip_rev; |
446 | dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | | 446 | dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | |
447 | (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION); | 447 | (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION); |
448 | 448 | ||
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index 60189923737a..21d38167f961 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c | |||
@@ -206,9 +206,9 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr, | |||
206 | ul_header->csum_insert_offset = skb->csum_offset; | 206 | ul_header->csum_insert_offset = skb->csum_offset; |
207 | ul_header->csum_enabled = 1; | 207 | ul_header->csum_enabled = 1; |
208 | if (ip4h->protocol == IPPROTO_UDP) | 208 | if (ip4h->protocol == IPPROTO_UDP) |
209 | ul_header->udp_ip4_ind = 1; | 209 | ul_header->udp_ind = 1; |
210 | else | 210 | else |
211 | ul_header->udp_ip4_ind = 0; | 211 | ul_header->udp_ind = 0; |
212 | 212 | ||
213 | /* Changing remaining fields to network order */ | 213 | /* Changing remaining fields to network order */ |
214 | hdr++; | 214 | hdr++; |
@@ -239,6 +239,7 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr, | |||
239 | struct rmnet_map_ul_csum_header *ul_header, | 239 | struct rmnet_map_ul_csum_header *ul_header, |
240 | struct sk_buff *skb) | 240 | struct sk_buff *skb) |
241 | { | 241 | { |
242 | struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr; | ||
242 | __be16 *hdr = (__be16 *)ul_header, offset; | 243 | __be16 *hdr = (__be16 *)ul_header, offset; |
243 | 244 | ||
244 | offset = htons((__force u16)(skb_transport_header(skb) - | 245 | offset = htons((__force u16)(skb_transport_header(skb) - |
@@ -246,7 +247,11 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr, | |||
246 | ul_header->csum_start_offset = offset; | 247 | ul_header->csum_start_offset = offset; |
247 | ul_header->csum_insert_offset = skb->csum_offset; | 248 | ul_header->csum_insert_offset = skb->csum_offset; |
248 | ul_header->csum_enabled = 1; | 249 | ul_header->csum_enabled = 1; |
249 | ul_header->udp_ip4_ind = 0; | 250 | |
251 | if (ip6h->nexthdr == IPPROTO_UDP) | ||
252 | ul_header->udp_ind = 1; | ||
253 | else | ||
254 | ul_header->udp_ind = 0; | ||
250 | 255 | ||
251 | /* Changing remaining fields to network order */ | 256 | /* Changing remaining fields to network order */ |
252 | hdr++; | 257 | hdr++; |
@@ -419,7 +424,7 @@ sw_csum: | |||
419 | ul_header->csum_start_offset = 0; | 424 | ul_header->csum_start_offset = 0; |
420 | ul_header->csum_insert_offset = 0; | 425 | ul_header->csum_insert_offset = 0; |
421 | ul_header->csum_enabled = 0; | 426 | ul_header->csum_enabled = 0; |
422 | ul_header->udp_ip4_ind = 0; | 427 | ul_header->udp_ind = 0; |
423 | 428 | ||
424 | priv->stats.csum_sw++; | 429 | priv->stats.csum_sw++; |
425 | } | 430 | } |
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index a10ff9e1efec..fa6eae2e7ed8 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c | |||
@@ -6098,10 +6098,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp) | |||
6098 | if (ret) | 6098 | if (ret) |
6099 | return ret; | 6099 | return ret; |
6100 | 6100 | ||
6101 | if (tp->supports_gmii) | 6101 | if (!tp->supports_gmii) |
6102 | phy_remove_link_mode(phydev, | ||
6103 | ETHTOOL_LINK_MODE_1000baseT_Half_BIT); | ||
6104 | else | ||
6105 | phy_set_max_speed(phydev, SPEED_100); | 6102 | phy_set_max_speed(phydev, SPEED_100); |
6106 | 6103 | ||
6107 | phy_support_asym_pause(phydev); | 6104 | phy_support_asym_pause(phydev); |
@@ -6552,13 +6549,18 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) | |||
6552 | { | 6549 | { |
6553 | unsigned int flags; | 6550 | unsigned int flags; |
6554 | 6551 | ||
6555 | if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { | 6552 | switch (tp->mac_version) { |
6553 | case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06: | ||
6556 | rtl_unlock_config_regs(tp); | 6554 | rtl_unlock_config_regs(tp); |
6557 | RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); | 6555 | RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); |
6558 | rtl_lock_config_regs(tp); | 6556 | rtl_lock_config_regs(tp); |
6557 | /* fall through */ | ||
6558 | case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24: | ||
6559 | flags = PCI_IRQ_LEGACY; | 6559 | flags = PCI_IRQ_LEGACY; |
6560 | } else { | 6560 | break; |
6561 | default: | ||
6561 | flags = PCI_IRQ_ALL_TYPES; | 6562 | flags = PCI_IRQ_ALL_TYPES; |
6563 | break; | ||
6562 | } | 6564 | } |
6563 | 6565 | ||
6564 | return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); | 6566 | return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); |
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 079f459c73a5..2c5d3f5b84dd 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c | |||
@@ -2208,10 +2208,12 @@ static int rocker_router_fib_event(struct notifier_block *nb, | |||
2208 | 2208 | ||
2209 | if (fen_info->fi->fib_nh_is_v6) { | 2209 | if (fen_info->fi->fib_nh_is_v6) { |
2210 | NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported"); | 2210 | NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported"); |
2211 | kfree(fib_work); | ||
2211 | return notifier_from_errno(-EINVAL); | 2212 | return notifier_from_errno(-EINVAL); |
2212 | } | 2213 | } |
2213 | if (fen_info->fi->nh) { | 2214 | if (fen_info->fi->nh) { |
2214 | NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported"); | 2215 | NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported"); |
2216 | kfree(fib_work); | ||
2215 | return notifier_from_errno(-EINVAL); | 2217 | return notifier_from_errno(-EINVAL); |
2216 | } | 2218 | } |
2217 | } | 2219 | } |
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig index 027938017579..e92a178a76df 100644 --- a/drivers/net/ethernet/samsung/Kconfig +++ b/drivers/net/ethernet/samsung/Kconfig | |||
@@ -11,7 +11,7 @@ config NET_VENDOR_SAMSUNG | |||
11 | say Y. | 11 | say Y. |
12 | 12 | ||
13 | Note that the answer to this question does not directly affect | 13 | Note that the answer to this question does not directly affect |
14 | the kernel: saying N will just case the configurator to skip all | 14 | the kernel: saying N will just cause the configurator to skip all |
15 | the questions about Samsung chipsets. If you say Y, you will be asked | 15 | the questions about Samsung chipsets. If you say Y, you will be asked |
16 | for your specific chipset/driver in the following questions. | 16 | for your specific chipset/driver in the following questions. |
17 | 17 | ||
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index bd14803545de..8d88e4083456 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c | |||
@@ -712,6 +712,7 @@ static void smc911x_phy_detect(struct net_device *dev) | |||
712 | /* Found an external PHY */ | 712 | /* Found an external PHY */ |
713 | break; | 713 | break; |
714 | } | 714 | } |
715 | /* Else, fall through */ | ||
715 | default: | 716 | default: |
716 | /* Internal media only */ | 717 | /* Internal media only */ |
717 | SMC_GET_PHY_ID1(lp, 1, id1); | 718 | SMC_GET_PHY_ID1(lp, 1, id1); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 01c2e2d83e76..fc9954e4a772 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | |||
@@ -85,6 +85,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw, | |||
85 | u32 value; | 85 | u32 value; |
86 | 86 | ||
87 | base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3; | 87 | base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3; |
88 | if (queue >= 4) | ||
89 | queue -= 4; | ||
88 | 90 | ||
89 | value = readl(ioaddr + base_register); | 91 | value = readl(ioaddr + base_register); |
90 | 92 | ||
@@ -102,6 +104,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw, | |||
102 | u32 value; | 104 | u32 value; |
103 | 105 | ||
104 | base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1; | 106 | base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1; |
107 | if (queue >= 4) | ||
108 | queue -= 4; | ||
105 | 109 | ||
106 | value = readl(ioaddr + base_register); | 110 | value = readl(ioaddr + base_register); |
107 | 111 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 7f86dffb264d..3174b701aa90 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | |||
@@ -44,11 +44,13 @@ | |||
44 | #define XGMAC_CORE_INIT_RX 0 | 44 | #define XGMAC_CORE_INIT_RX 0 |
45 | #define XGMAC_PACKET_FILTER 0x00000008 | 45 | #define XGMAC_PACKET_FILTER 0x00000008 |
46 | #define XGMAC_FILTER_RA BIT(31) | 46 | #define XGMAC_FILTER_RA BIT(31) |
47 | #define XGMAC_FILTER_HPF BIT(10) | ||
47 | #define XGMAC_FILTER_PCF BIT(7) | 48 | #define XGMAC_FILTER_PCF BIT(7) |
48 | #define XGMAC_FILTER_PM BIT(4) | 49 | #define XGMAC_FILTER_PM BIT(4) |
49 | #define XGMAC_FILTER_HMC BIT(2) | 50 | #define XGMAC_FILTER_HMC BIT(2) |
50 | #define XGMAC_FILTER_PR BIT(0) | 51 | #define XGMAC_FILTER_PR BIT(0) |
51 | #define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4) | 52 | #define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4) |
53 | #define XGMAC_MAX_HASH_TABLE 8 | ||
52 | #define XGMAC_RXQ_CTRL0 0x000000a0 | 54 | #define XGMAC_RXQ_CTRL0 0x000000a0 |
53 | #define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2) | 55 | #define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2) |
54 | #define XGMAC_RXQEN_SHIFT(x) ((x) * 2) | 56 | #define XGMAC_RXQEN_SHIFT(x) ((x) * 2) |
@@ -99,11 +101,12 @@ | |||
99 | #define XGMAC_MDIO_ADDR 0x00000200 | 101 | #define XGMAC_MDIO_ADDR 0x00000200 |
100 | #define XGMAC_MDIO_DATA 0x00000204 | 102 | #define XGMAC_MDIO_DATA 0x00000204 |
101 | #define XGMAC_MDIO_C22P 0x00000220 | 103 | #define XGMAC_MDIO_C22P 0x00000220 |
102 | #define XGMAC_ADDR0_HIGH 0x00000300 | 104 | #define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8) |
105 | #define XGMAC_ADDR_MAX 32 | ||
103 | #define XGMAC_AE BIT(31) | 106 | #define XGMAC_AE BIT(31) |
104 | #define XGMAC_DCS GENMASK(19, 16) | 107 | #define XGMAC_DCS GENMASK(19, 16) |
105 | #define XGMAC_DCS_SHIFT 16 | 108 | #define XGMAC_DCS_SHIFT 16 |
106 | #define XGMAC_ADDR0_LOW 0x00000304 | 109 | #define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8) |
107 | #define XGMAC_ARP_ADDR 0x00000c10 | 110 | #define XGMAC_ARP_ADDR 0x00000c10 |
108 | #define XGMAC_TIMESTAMP_STATUS 0x00000d20 | 111 | #define XGMAC_TIMESTAMP_STATUS 0x00000d20 |
109 | #define XGMAC_TXTSC BIT(15) | 112 | #define XGMAC_TXTSC BIT(15) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index 0a32c96a7854..85c68b7ee8c6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | |||
@@ -4,6 +4,8 @@ | |||
4 | * stmmac XGMAC support. | 4 | * stmmac XGMAC support. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/bitrev.h> | ||
8 | #include <linux/crc32.h> | ||
7 | #include "stmmac.h" | 9 | #include "stmmac.h" |
8 | #include "dwxgmac2.h" | 10 | #include "dwxgmac2.h" |
9 | 11 | ||
@@ -106,6 +108,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio, | |||
106 | u32 value, reg; | 108 | u32 value, reg; |
107 | 109 | ||
108 | reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3; | 110 | reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3; |
111 | if (queue >= 4) | ||
112 | queue -= 4; | ||
109 | 113 | ||
110 | value = readl(ioaddr + reg); | 114 | value = readl(ioaddr + reg); |
111 | value &= ~XGMAC_PSRQ(queue); | 115 | value &= ~XGMAC_PSRQ(queue); |
@@ -169,6 +173,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue, | |||
169 | u32 value, reg; | 173 | u32 value, reg; |
170 | 174 | ||
171 | reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1; | 175 | reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1; |
176 | if (queue >= 4) | ||
177 | queue -= 4; | ||
172 | 178 | ||
173 | value = readl(ioaddr + reg); | 179 | value = readl(ioaddr + reg); |
174 | value &= ~XGMAC_QxMDMACH(queue); | 180 | value &= ~XGMAC_QxMDMACH(queue); |
@@ -278,10 +284,10 @@ static void dwxgmac2_set_umac_addr(struct mac_device_info *hw, | |||
278 | u32 value; | 284 | u32 value; |
279 | 285 | ||
280 | value = (addr[5] << 8) | addr[4]; | 286 | value = (addr[5] << 8) | addr[4]; |
281 | writel(value | XGMAC_AE, ioaddr + XGMAC_ADDR0_HIGH); | 287 | writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n)); |
282 | 288 | ||
283 | value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; | 289 | value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; |
284 | writel(value, ioaddr + XGMAC_ADDR0_LOW); | 290 | writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n)); |
285 | } | 291 | } |
286 | 292 | ||
287 | static void dwxgmac2_get_umac_addr(struct mac_device_info *hw, | 293 | static void dwxgmac2_get_umac_addr(struct mac_device_info *hw, |
@@ -291,8 +297,8 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw, | |||
291 | u32 hi_addr, lo_addr; | 297 | u32 hi_addr, lo_addr; |
292 | 298 | ||
293 | /* Read the MAC address from the hardware */ | 299 | /* Read the MAC address from the hardware */ |
294 | hi_addr = readl(ioaddr + XGMAC_ADDR0_HIGH); | 300 | hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n)); |
295 | lo_addr = readl(ioaddr + XGMAC_ADDR0_LOW); | 301 | lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n)); |
296 | 302 | ||
297 | /* Extract the MAC address from the high and low words */ | 303 | /* Extract the MAC address from the high and low words */ |
298 | addr[0] = lo_addr & 0xff; | 304 | addr[0] = lo_addr & 0xff; |
@@ -303,19 +309,82 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw, | |||
303 | addr[5] = (hi_addr >> 8) & 0xff; | 309 | addr[5] = (hi_addr >> 8) & 0xff; |
304 | } | 310 | } |
305 | 311 | ||
312 | static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits, | ||
313 | int mcbitslog2) | ||
314 | { | ||
315 | int numhashregs, regs; | ||
316 | |||
317 | switch (mcbitslog2) { | ||
318 | case 6: | ||
319 | numhashregs = 2; | ||
320 | break; | ||
321 | case 7: | ||
322 | numhashregs = 4; | ||
323 | break; | ||
324 | case 8: | ||
325 | numhashregs = 8; | ||
326 | break; | ||
327 | default: | ||
328 | return; | ||
329 | } | ||
330 | |||
331 | for (regs = 0; regs < numhashregs; regs++) | ||
332 | writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs)); | ||
333 | } | ||
334 | |||
306 | static void dwxgmac2_set_filter(struct mac_device_info *hw, | 335 | static void dwxgmac2_set_filter(struct mac_device_info *hw, |
307 | struct net_device *dev) | 336 | struct net_device *dev) |
308 | { | 337 | { |
309 | void __iomem *ioaddr = (void __iomem *)dev->base_addr; | 338 | void __iomem *ioaddr = (void __iomem *)dev->base_addr; |
310 | u32 value = XGMAC_FILTER_RA; | 339 | u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); |
340 | int mcbitslog2 = hw->mcast_bits_log2; | ||
341 | u32 mc_filter[8]; | ||
342 | int i; | ||
343 | |||
344 | value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM); | ||
345 | value |= XGMAC_FILTER_HPF; | ||
346 | |||
347 | memset(mc_filter, 0, sizeof(mc_filter)); | ||
311 | 348 | ||
312 | if (dev->flags & IFF_PROMISC) { | 349 | if (dev->flags & IFF_PROMISC) { |
313 | value |= XGMAC_FILTER_PR | XGMAC_FILTER_PCF; | 350 | value |= XGMAC_FILTER_PR; |
351 | value |= XGMAC_FILTER_PCF; | ||
314 | } else if ((dev->flags & IFF_ALLMULTI) || | 352 | } else if ((dev->flags & IFF_ALLMULTI) || |
315 | (netdev_mc_count(dev) > HASH_TABLE_SIZE)) { | 353 | (netdev_mc_count(dev) > hw->multicast_filter_bins)) { |
316 | value |= XGMAC_FILTER_PM; | 354 | value |= XGMAC_FILTER_PM; |
317 | writel(~0x0, ioaddr + XGMAC_HASH_TABLE(0)); | 355 | |
318 | writel(~0x0, ioaddr + XGMAC_HASH_TABLE(1)); | 356 | for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++) |
357 | writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i)); | ||
358 | } else if (!netdev_mc_empty(dev)) { | ||
359 | struct netdev_hw_addr *ha; | ||
360 | |||
361 | value |= XGMAC_FILTER_HMC; | ||
362 | |||
363 | netdev_for_each_mc_addr(ha, dev) { | ||
364 | int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >> | ||
365 | (32 - mcbitslog2)); | ||
366 | mc_filter[nr >> 5] |= (1 << (nr & 0x1F)); | ||
367 | } | ||
368 | } | ||
369 | |||
370 | dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2); | ||
371 | |||
372 | /* Handle multiple unicast addresses */ | ||
373 | if (netdev_uc_count(dev) > XGMAC_ADDR_MAX) { | ||
374 | value |= XGMAC_FILTER_PR; | ||
375 | } else { | ||
376 | struct netdev_hw_addr *ha; | ||
377 | int reg = 1; | ||
378 | |||
379 | netdev_for_each_uc_addr(ha, dev) { | ||
380 | dwxgmac2_set_umac_addr(hw, ha->addr, reg); | ||
381 | reg++; | ||
382 | } | ||
383 | |||
384 | for ( ; reg < XGMAC_ADDR_MAX; reg++) { | ||
385 | writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg)); | ||
386 | writel(0, ioaddr + XGMAC_ADDRx_LOW(reg)); | ||
387 | } | ||
319 | } | 388 | } |
320 | 389 | ||
321 | writel(value, ioaddr + XGMAC_PACKET_FILTER); | 390 | writel(value, ioaddr + XGMAC_PACKET_FILTER); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c7c9e5f162e6..fd54c7c87485 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -814,20 +814,15 @@ static void stmmac_validate(struct phylink_config *config, | |||
814 | phylink_set(mac_supported, 10baseT_Full); | 814 | phylink_set(mac_supported, 10baseT_Full); |
815 | phylink_set(mac_supported, 100baseT_Half); | 815 | phylink_set(mac_supported, 100baseT_Half); |
816 | phylink_set(mac_supported, 100baseT_Full); | 816 | phylink_set(mac_supported, 100baseT_Full); |
817 | phylink_set(mac_supported, 1000baseT_Half); | ||
818 | phylink_set(mac_supported, 1000baseT_Full); | ||
819 | phylink_set(mac_supported, 1000baseKX_Full); | ||
817 | 820 | ||
818 | phylink_set(mac_supported, Autoneg); | 821 | phylink_set(mac_supported, Autoneg); |
819 | phylink_set(mac_supported, Pause); | 822 | phylink_set(mac_supported, Pause); |
820 | phylink_set(mac_supported, Asym_Pause); | 823 | phylink_set(mac_supported, Asym_Pause); |
821 | phylink_set_port_modes(mac_supported); | 824 | phylink_set_port_modes(mac_supported); |
822 | 825 | ||
823 | if (priv->plat->has_gmac || | ||
824 | priv->plat->has_gmac4 || | ||
825 | priv->plat->has_xgmac) { | ||
826 | phylink_set(mac_supported, 1000baseT_Half); | ||
827 | phylink_set(mac_supported, 1000baseT_Full); | ||
828 | phylink_set(mac_supported, 1000baseKX_Full); | ||
829 | } | ||
830 | |||
831 | /* Cut down 1G if asked to */ | 826 | /* Cut down 1G if asked to */ |
832 | if ((max_speed > 0) && (max_speed < 1000)) { | 827 | if ((max_speed > 0) && (max_speed < 1000)) { |
833 | phylink_set(mask, 1000baseT_Full); | 828 | phylink_set(mask, 1000baseT_Full); |
@@ -1295,6 +1290,8 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) | |||
1295 | "(%s) dma_rx_phy=0x%08x\n", __func__, | 1290 | "(%s) dma_rx_phy=0x%08x\n", __func__, |
1296 | (u32)rx_q->dma_rx_phy); | 1291 | (u32)rx_q->dma_rx_phy); |
1297 | 1292 | ||
1293 | stmmac_clear_rx_descriptors(priv, queue); | ||
1294 | |||
1298 | for (i = 0; i < DMA_RX_SIZE; i++) { | 1295 | for (i = 0; i < DMA_RX_SIZE; i++) { |
1299 | struct dma_desc *p; | 1296 | struct dma_desc *p; |
1300 | 1297 | ||
@@ -1312,8 +1309,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) | |||
1312 | rx_q->cur_rx = 0; | 1309 | rx_q->cur_rx = 0; |
1313 | rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); | 1310 | rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); |
1314 | 1311 | ||
1315 | stmmac_clear_rx_descriptors(priv, queue); | ||
1316 | |||
1317 | /* Setup the chained descriptor addresses */ | 1312 | /* Setup the chained descriptor addresses */ |
1318 | if (priv->mode == STMMAC_CHAIN_MODE) { | 1313 | if (priv->mode == STMMAC_CHAIN_MODE) { |
1319 | if (priv->extend_desc) | 1314 | if (priv->extend_desc) |
@@ -1555,9 +1550,8 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) | |||
1555 | goto err_dma; | 1550 | goto err_dma; |
1556 | } | 1551 | } |
1557 | 1552 | ||
1558 | rx_q->buf_pool = kmalloc_array(DMA_RX_SIZE, | 1553 | rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool), |
1559 | sizeof(*rx_q->buf_pool), | 1554 | GFP_KERNEL); |
1560 | GFP_KERNEL); | ||
1561 | if (!rx_q->buf_pool) | 1555 | if (!rx_q->buf_pool) |
1562 | goto err_dma; | 1556 | goto err_dma; |
1563 | 1557 | ||
@@ -1608,15 +1602,15 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) | |||
1608 | tx_q->queue_index = queue; | 1602 | tx_q->queue_index = queue; |
1609 | tx_q->priv_data = priv; | 1603 | tx_q->priv_data = priv; |
1610 | 1604 | ||
1611 | tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, | 1605 | tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE, |
1612 | sizeof(*tx_q->tx_skbuff_dma), | 1606 | sizeof(*tx_q->tx_skbuff_dma), |
1613 | GFP_KERNEL); | 1607 | GFP_KERNEL); |
1614 | if (!tx_q->tx_skbuff_dma) | 1608 | if (!tx_q->tx_skbuff_dma) |
1615 | goto err_dma; | 1609 | goto err_dma; |
1616 | 1610 | ||
1617 | tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, | 1611 | tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE, |
1618 | sizeof(struct sk_buff *), | 1612 | sizeof(struct sk_buff *), |
1619 | GFP_KERNEL); | 1613 | GFP_KERNEL); |
1620 | if (!tx_q->tx_skbuff) | 1614 | if (!tx_q->tx_skbuff) |
1621 | goto err_dma; | 1615 | goto err_dma; |
1622 | 1616 | ||
@@ -3277,9 +3271,11 @@ static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q) | |||
3277 | static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) | 3271 | static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) |
3278 | { | 3272 | { |
3279 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 3273 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
3280 | int dirty = stmmac_rx_dirty(priv, queue); | 3274 | int len, dirty = stmmac_rx_dirty(priv, queue); |
3281 | unsigned int entry = rx_q->dirty_rx; | 3275 | unsigned int entry = rx_q->dirty_rx; |
3282 | 3276 | ||
3277 | len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; | ||
3278 | |||
3283 | while (dirty-- > 0) { | 3279 | while (dirty-- > 0) { |
3284 | struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; | 3280 | struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; |
3285 | struct dma_desc *p; | 3281 | struct dma_desc *p; |
@@ -3297,6 +3293,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) | |||
3297 | } | 3293 | } |
3298 | 3294 | ||
3299 | buf->addr = page_pool_get_dma_addr(buf->page); | 3295 | buf->addr = page_pool_get_dma_addr(buf->page); |
3296 | |||
3297 | /* Sync whole allocation to device. This will invalidate old | ||
3298 | * data. | ||
3299 | */ | ||
3300 | dma_sync_single_for_device(priv->device, buf->addr, len, | ||
3301 | DMA_FROM_DEVICE); | ||
3302 | |||
3300 | stmmac_set_desc_addr(priv, p, buf->addr); | 3303 | stmmac_set_desc_addr(priv, p, buf->addr); |
3301 | stmmac_refill_desc3(priv, rx_q, p); | 3304 | stmmac_refill_desc3(priv, rx_q, p); |
3302 | 3305 | ||
@@ -3431,8 +3434,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
3431 | skb_copy_to_linear_data(skb, page_address(buf->page), | 3434 | skb_copy_to_linear_data(skb, page_address(buf->page), |
3432 | frame_len); | 3435 | frame_len); |
3433 | skb_put(skb, frame_len); | 3436 | skb_put(skb, frame_len); |
3434 | dma_sync_single_for_device(priv->device, buf->addr, | ||
3435 | frame_len, DMA_FROM_DEVICE); | ||
3436 | 3437 | ||
3437 | if (netif_msg_pktdata(priv)) { | 3438 | if (netif_msg_pktdata(priv)) { |
3438 | netdev_dbg(priv->dev, "frame received (%dbytes)", | 3439 | netdev_dbg(priv->dev, "frame received (%dbytes)", |
@@ -4319,8 +4320,9 @@ int stmmac_dvr_probe(struct device *device, | |||
4319 | NAPI_POLL_WEIGHT); | 4320 | NAPI_POLL_WEIGHT); |
4320 | } | 4321 | } |
4321 | if (queue < priv->plat->tx_queues_to_use) { | 4322 | if (queue < priv->plat->tx_queues_to_use) { |
4322 | netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx, | 4323 | netif_tx_napi_add(ndev, &ch->tx_napi, |
4323 | NAPI_POLL_WEIGHT); | 4324 | stmmac_napi_poll_tx, |
4325 | NAPI_POLL_WEIGHT); | ||
4324 | } | 4326 | } |
4325 | } | 4327 | } |
4326 | 4328 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 0f3e6ce7f6ec..eaf8f08f2e91 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -376,6 +376,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) | |||
376 | return ERR_PTR(-ENOMEM); | 376 | return ERR_PTR(-ENOMEM); |
377 | 377 | ||
378 | *mac = of_get_mac_address(np); | 378 | *mac = of_get_mac_address(np); |
379 | if (IS_ERR(*mac)) { | ||
380 | if (PTR_ERR(*mac) == -EPROBE_DEFER) | ||
381 | return ERR_CAST(*mac); | ||
382 | |||
383 | *mac = NULL; | ||
384 | } | ||
385 | |||
379 | plat->interface = of_get_phy_mode(np); | 386 | plat->interface = of_get_phy_mode(np); |
380 | 387 | ||
381 | /* Some wrapper drivers still rely on phy_node. Let's save it while | 388 | /* Some wrapper drivers still rely on phy_node. Let's save it while |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 58ea18af9813..37c0bc699cd9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | |||
@@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv, | |||
37 | entry = &priv->tc_entries[i]; | 37 | entry = &priv->tc_entries[i]; |
38 | if (!entry->in_use && !first && free) | 38 | if (!entry->in_use && !first && free) |
39 | first = entry; | 39 | first = entry; |
40 | if (entry->handle == loc && !free) | 40 | if ((entry->handle == loc) && !free && !entry->is_frag) |
41 | dup = entry; | 41 | dup = entry; |
42 | } | 42 | } |
43 | 43 | ||
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 5b196ebfed49..0f346761a2b2 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c | |||
@@ -788,6 +788,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) | |||
788 | /* fallthrough, if we release the descriptors | 788 | /* fallthrough, if we release the descriptors |
789 | * brutally (then we don't care about | 789 | * brutally (then we don't care about |
790 | * SPIDER_NET_DESCR_CARDOWNED) */ | 790 | * SPIDER_NET_DESCR_CARDOWNED) */ |
791 | /* Fall through */ | ||
791 | 792 | ||
792 | case SPIDER_NET_DESCR_RESPONSE_ERROR: | 793 | case SPIDER_NET_DESCR_RESPONSE_ERROR: |
793 | case SPIDER_NET_DESCR_PROTECTION_ERROR: | 794 | case SPIDER_NET_DESCR_PROTECTION_ERROR: |
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig index 2f354ba029a6..cd0a8f46e7c6 100644 --- a/drivers/net/ethernet/xscale/Kconfig +++ b/drivers/net/ethernet/xscale/Kconfig | |||
@@ -13,7 +13,7 @@ config NET_VENDOR_XSCALE | |||
13 | 13 | ||
14 | Note that the answer to this question does not directly affect the | 14 | Note that the answer to this question does not directly affect the |
15 | kernel: saying N will just cause the configurator to skip all | 15 | kernel: saying N will just cause the configurator to skip all |
16 | the questions about XSacle IXP devices. If you say Y, you will be | 16 | the questions about XScale IXP devices. If you say Y, you will be |
17 | asked for your specific card in the following questions. | 17 | asked for your specific card in the following questions. |
18 | 18 | ||
19 | if NET_VENDOR_XSCALE | 19 | if NET_VENDOR_XSCALE |
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index daab2c07d891..9303aeb2595f 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c | |||
@@ -500,8 +500,9 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat) | |||
500 | } | 500 | } |
501 | break; | 501 | break; |
502 | } | 502 | } |
503 | /* fall through */ | ||
503 | 504 | ||
504 | default: /* fall through */ | 505 | default: |
505 | if (bc->hdlctx.calibrate <= 0) | 506 | if (bc->hdlctx.calibrate <= 0) |
506 | return 0; | 507 | return 0; |
507 | i = min_t(int, cnt, bc->hdlctx.calibrate); | 508 | i = min_t(int, cnt, bc->hdlctx.calibrate); |
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 3ffe46df249e..7c5265fd2b94 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c | |||
@@ -216,8 +216,10 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np) | |||
216 | if (IS_ERR(gpiod)) { | 216 | if (IS_ERR(gpiod)) { |
217 | if (PTR_ERR(gpiod) == -EPROBE_DEFER) | 217 | if (PTR_ERR(gpiod) == -EPROBE_DEFER) |
218 | return gpiod; | 218 | return gpiod; |
219 | pr_err("error getting GPIO for fixed link %pOF, proceed without\n", | 219 | |
220 | fixed_link_node); | 220 | if (PTR_ERR(gpiod) != -ENOENT) |
221 | pr_err("error getting GPIO for fixed link %pOF, proceed without\n", | ||
222 | fixed_link_node); | ||
221 | gpiod = NULL; | 223 | gpiod = NULL; |
222 | } | 224 | } |
223 | 225 | ||
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 28676af97b42..645d354ffb48 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c | |||
@@ -2226,8 +2226,8 @@ static int vsc8514_probe(struct phy_device *phydev) | |||
2226 | vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES; | 2226 | vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES; |
2227 | vsc8531->hw_stats = vsc85xx_hw_stats; | 2227 | vsc8531->hw_stats = vsc85xx_hw_stats; |
2228 | vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats); | 2228 | vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats); |
2229 | vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, | 2229 | vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats, |
2230 | sizeof(u64), GFP_KERNEL); | 2230 | sizeof(u64), GFP_KERNEL); |
2231 | if (!vsc8531->stats) | 2231 | if (!vsc8531->stats) |
2232 | return -ENOMEM; | 2232 | return -ENOMEM; |
2233 | 2233 | ||
@@ -2251,8 +2251,8 @@ static int vsc8574_probe(struct phy_device *phydev) | |||
2251 | vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES; | 2251 | vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES; |
2252 | vsc8531->hw_stats = vsc8584_hw_stats; | 2252 | vsc8531->hw_stats = vsc8584_hw_stats; |
2253 | vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats); | 2253 | vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats); |
2254 | vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, | 2254 | vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats, |
2255 | sizeof(u64), GFP_KERNEL); | 2255 | sizeof(u64), GFP_KERNEL); |
2256 | if (!vsc8531->stats) | 2256 | if (!vsc8531->stats) |
2257 | return -ENOMEM; | 2257 | return -ENOMEM; |
2258 | 2258 | ||
@@ -2281,8 +2281,8 @@ static int vsc8584_probe(struct phy_device *phydev) | |||
2281 | vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES; | 2281 | vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES; |
2282 | vsc8531->hw_stats = vsc8584_hw_stats; | 2282 | vsc8531->hw_stats = vsc8584_hw_stats; |
2283 | vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats); | 2283 | vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats); |
2284 | vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, | 2284 | vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats, |
2285 | sizeof(u64), GFP_KERNEL); | 2285 | sizeof(u64), GFP_KERNEL); |
2286 | if (!vsc8531->stats) | 2286 | if (!vsc8531->stats) |
2287 | return -ENOMEM; | 2287 | return -ENOMEM; |
2288 | 2288 | ||
@@ -2311,8 +2311,8 @@ static int vsc85xx_probe(struct phy_device *phydev) | |||
2311 | vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES; | 2311 | vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES; |
2312 | vsc8531->hw_stats = vsc85xx_hw_stats; | 2312 | vsc8531->hw_stats = vsc85xx_hw_stats; |
2313 | vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats); | 2313 | vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats); |
2314 | vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats, | 2314 | vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats, |
2315 | sizeof(u64), GFP_KERNEL); | 2315 | sizeof(u64), GFP_KERNEL); |
2316 | if (!vsc8531->stats) | 2316 | if (!vsc8531->stats) |
2317 | return -ENOMEM; | 2317 | return -ENOMEM; |
2318 | 2318 | ||
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 6b5cb87f3866..7ddd91df99e3 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -1774,6 +1774,12 @@ done: | |||
1774 | phydev->link = status & BMSR_LSTATUS ? 1 : 0; | 1774 | phydev->link = status & BMSR_LSTATUS ? 1 : 0; |
1775 | phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0; | 1775 | phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0; |
1776 | 1776 | ||
1777 | /* Consider the case that autoneg was started and "aneg complete" | ||
1778 | * bit has been reset, but "link up" bit not yet. | ||
1779 | */ | ||
1780 | if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete) | ||
1781 | phydev->link = 0; | ||
1782 | |||
1777 | return 0; | 1783 | return 0; |
1778 | } | 1784 | } |
1779 | EXPORT_SYMBOL(genphy_update_link); | 1785 | EXPORT_SYMBOL(genphy_update_link); |
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c index b86a4b2116f8..59a94e07e7c5 100644 --- a/drivers/net/phy/phy_led_triggers.c +++ b/drivers/net/phy/phy_led_triggers.c | |||
@@ -48,8 +48,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy) | |||
48 | if (!phy->last_triggered) | 48 | if (!phy->last_triggered) |
49 | led_trigger_event(&phy->led_link_trigger->trigger, | 49 | led_trigger_event(&phy->led_link_trigger->trigger, |
50 | LED_FULL); | 50 | LED_FULL); |
51 | else | ||
52 | led_trigger_event(&phy->last_triggered->trigger, LED_OFF); | ||
51 | 53 | ||
52 | led_trigger_event(&phy->last_triggered->trigger, LED_OFF); | ||
53 | led_trigger_event(&plt->trigger, LED_FULL); | 54 | led_trigger_event(&plt->trigger, LED_FULL); |
54 | phy->last_triggered = plt; | 55 | phy->last_triggered = plt; |
55 | } | 56 | } |
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 5d0af041b8f9..a45c5de96ab1 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c | |||
@@ -216,6 +216,8 @@ static int phylink_parse_fixedlink(struct phylink *pl, | |||
216 | pl->supported, true); | 216 | pl->supported, true); |
217 | linkmode_zero(pl->supported); | 217 | linkmode_zero(pl->supported); |
218 | phylink_set(pl->supported, MII); | 218 | phylink_set(pl->supported, MII); |
219 | phylink_set(pl->supported, Pause); | ||
220 | phylink_set(pl->supported, Asym_Pause); | ||
219 | if (s) { | 221 | if (s) { |
220 | __set_bit(s->bit, pl->supported); | 222 | __set_bit(s->bit, pl->supported); |
221 | } else { | 223 | } else { |
@@ -990,10 +992,10 @@ void phylink_start(struct phylink *pl) | |||
990 | } | 992 | } |
991 | if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state) | 993 | if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state) |
992 | mod_timer(&pl->link_poll, jiffies + HZ); | 994 | mod_timer(&pl->link_poll, jiffies + HZ); |
993 | if (pl->sfp_bus) | ||
994 | sfp_upstream_start(pl->sfp_bus); | ||
995 | if (pl->phydev) | 995 | if (pl->phydev) |
996 | phy_start(pl->phydev); | 996 | phy_start(pl->phydev); |
997 | if (pl->sfp_bus) | ||
998 | sfp_upstream_start(pl->sfp_bus); | ||
997 | } | 999 | } |
998 | EXPORT_SYMBOL_GPL(phylink_start); | 1000 | EXPORT_SYMBOL_GPL(phylink_start); |
999 | 1001 | ||
@@ -1010,10 +1012,10 @@ void phylink_stop(struct phylink *pl) | |||
1010 | { | 1012 | { |
1011 | ASSERT_RTNL(); | 1013 | ASSERT_RTNL(); |
1012 | 1014 | ||
1013 | if (pl->phydev) | ||
1014 | phy_stop(pl->phydev); | ||
1015 | if (pl->sfp_bus) | 1015 | if (pl->sfp_bus) |
1016 | sfp_upstream_stop(pl->sfp_bus); | 1016 | sfp_upstream_stop(pl->sfp_bus); |
1017 | if (pl->phydev) | ||
1018 | phy_stop(pl->phydev); | ||
1017 | del_timer_sync(&pl->link_poll); | 1019 | del_timer_sync(&pl->link_poll); |
1018 | if (pl->link_irq) { | 1020 | if (pl->link_irq) { |
1019 | free_irq(pl->link_irq, pl); | 1021 | free_irq(pl->link_irq, pl); |
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 1d902ecb4aa8..a44dd3c8af63 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c | |||
@@ -1115,6 +1115,9 @@ static const struct proto_ops pppoe_ops = { | |||
1115 | .recvmsg = pppoe_recvmsg, | 1115 | .recvmsg = pppoe_recvmsg, |
1116 | .mmap = sock_no_mmap, | 1116 | .mmap = sock_no_mmap, |
1117 | .ioctl = pppox_ioctl, | 1117 | .ioctl = pppox_ioctl, |
1118 | #ifdef CONFIG_COMPAT | ||
1119 | .compat_ioctl = pppox_compat_ioctl, | ||
1120 | #endif | ||
1118 | }; | 1121 | }; |
1119 | 1122 | ||
1120 | static const struct pppox_proto pppoe_proto = { | 1123 | static const struct pppox_proto pppoe_proto = { |
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c index 5ef422a43d70..08364f10a43f 100644 --- a/drivers/net/ppp/pppox.c +++ b/drivers/net/ppp/pppox.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/compat.h> | ||
20 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
21 | #include <linux/netdevice.h> | 22 | #include <linux/netdevice.h> |
22 | #include <linux/net.h> | 23 | #include <linux/net.h> |
@@ -98,6 +99,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | |||
98 | 99 | ||
99 | EXPORT_SYMBOL(pppox_ioctl); | 100 | EXPORT_SYMBOL(pppox_ioctl); |
100 | 101 | ||
102 | #ifdef CONFIG_COMPAT | ||
103 | int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) | ||
104 | { | ||
105 | if (cmd == PPPOEIOCSFWD32) | ||
106 | cmd = PPPOEIOCSFWD; | ||
107 | |||
108 | return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); | ||
109 | } | ||
110 | |||
111 | EXPORT_SYMBOL(pppox_compat_ioctl); | ||
112 | #endif | ||
113 | |||
101 | static int pppox_create(struct net *net, struct socket *sock, int protocol, | 114 | static int pppox_create(struct net *net, struct socket *sock, int protocol, |
102 | int kern) | 115 | int kern) |
103 | { | 116 | { |
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index a8e52c8e4128..734de7de03f7 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c | |||
@@ -623,6 +623,9 @@ static const struct proto_ops pptp_ops = { | |||
623 | .recvmsg = sock_no_recvmsg, | 623 | .recvmsg = sock_no_recvmsg, |
624 | .mmap = sock_no_mmap, | 624 | .mmap = sock_no_mmap, |
625 | .ioctl = pppox_ioctl, | 625 | .ioctl = pppox_ioctl, |
626 | #ifdef CONFIG_COMPAT | ||
627 | .compat_ioctl = pppox_compat_ioctl, | ||
628 | #endif | ||
626 | }; | 629 | }; |
627 | 630 | ||
628 | static const struct pppox_proto pppox_pptp_proto = { | 631 | static const struct pppox_proto pppox_pptp_proto = { |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 3d443597bd04..db16d7a13e00 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1599,7 +1599,8 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, | |||
1599 | return true; | 1599 | return true; |
1600 | } | 1600 | } |
1601 | 1601 | ||
1602 | static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf, | 1602 | static struct sk_buff *__tun_build_skb(struct tun_file *tfile, |
1603 | struct page_frag *alloc_frag, char *buf, | ||
1603 | int buflen, int len, int pad) | 1604 | int buflen, int len, int pad) |
1604 | { | 1605 | { |
1605 | struct sk_buff *skb = build_skb(buf, buflen); | 1606 | struct sk_buff *skb = build_skb(buf, buflen); |
@@ -1609,6 +1610,7 @@ static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf, | |||
1609 | 1610 | ||
1610 | skb_reserve(skb, pad); | 1611 | skb_reserve(skb, pad); |
1611 | skb_put(skb, len); | 1612 | skb_put(skb, len); |
1613 | skb_set_owner_w(skb, tfile->socket.sk); | ||
1612 | 1614 | ||
1613 | get_page(alloc_frag->page); | 1615 | get_page(alloc_frag->page); |
1614 | alloc_frag->offset += buflen; | 1616 | alloc_frag->offset += buflen; |
@@ -1686,7 +1688,8 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1686 | */ | 1688 | */ |
1687 | if (hdr->gso_type || !xdp_prog) { | 1689 | if (hdr->gso_type || !xdp_prog) { |
1688 | *skb_xdp = 1; | 1690 | *skb_xdp = 1; |
1689 | return __tun_build_skb(alloc_frag, buf, buflen, len, pad); | 1691 | return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, |
1692 | pad); | ||
1690 | } | 1693 | } |
1691 | 1694 | ||
1692 | *skb_xdp = 0; | 1695 | *skb_xdp = 0; |
@@ -1723,7 +1726,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1723 | rcu_read_unlock(); | 1726 | rcu_read_unlock(); |
1724 | local_bh_enable(); | 1727 | local_bh_enable(); |
1725 | 1728 | ||
1726 | return __tun_build_skb(alloc_frag, buf, buflen, len, pad); | 1729 | return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad); |
1727 | 1730 | ||
1728 | err_xdp: | 1731 | err_xdp: |
1729 | put_page(alloc_frag->page); | 1732 | put_page(alloc_frag->page); |
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 6d25dea5ad4b..f7d117d80cfb 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c | |||
@@ -282,7 +282,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val) | |||
282 | static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) | 282 | static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) |
283 | { | 283 | { |
284 | int i; | 284 | int i; |
285 | __u8 tmp; | 285 | __u8 tmp = 0; |
286 | __le16 retdatai; | 286 | __le16 retdatai; |
287 | int ret; | 287 | int ret; |
288 | 288 | ||
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 69e0a2acfcb0..b6dc5d714b5e 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -1295,6 +1295,7 @@ static const struct usb_device_id products[] = { | |||
1295 | {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */ | 1295 | {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */ |
1296 | {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ | 1296 | {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ |
1297 | {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ | 1297 | {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ |
1298 | {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */ | ||
1298 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ | 1299 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ |
1299 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ | 1300 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ |
1300 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ | 1301 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 39e0768d734d..0cc03a9ff545 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -50,7 +50,7 @@ | |||
50 | #define PLA_TEREDO_WAKE_BASE 0xc0c4 | 50 | #define PLA_TEREDO_WAKE_BASE 0xc0c4 |
51 | #define PLA_MAR 0xcd00 | 51 | #define PLA_MAR 0xcd00 |
52 | #define PLA_BACKUP 0xd000 | 52 | #define PLA_BACKUP 0xd000 |
53 | #define PAL_BDC_CR 0xd1a0 | 53 | #define PLA_BDC_CR 0xd1a0 |
54 | #define PLA_TEREDO_TIMER 0xd2cc | 54 | #define PLA_TEREDO_TIMER 0xd2cc |
55 | #define PLA_REALWOW_TIMER 0xd2e8 | 55 | #define PLA_REALWOW_TIMER 0xd2e8 |
56 | #define PLA_SUSPEND_FLAG 0xd38a | 56 | #define PLA_SUSPEND_FLAG 0xd38a |
@@ -274,7 +274,7 @@ | |||
274 | #define TEREDO_RS_EVENT_MASK 0x00fe | 274 | #define TEREDO_RS_EVENT_MASK 0x00fe |
275 | #define OOB_TEREDO_EN 0x0001 | 275 | #define OOB_TEREDO_EN 0x0001 |
276 | 276 | ||
277 | /* PAL_BDC_CR */ | 277 | /* PLA_BDC_CR */ |
278 | #define ALDPS_PROXY_MODE 0x0001 | 278 | #define ALDPS_PROXY_MODE 0x0001 |
279 | 279 | ||
280 | /* PLA_EFUSE_CMD */ | 280 | /* PLA_EFUSE_CMD */ |
@@ -3191,9 +3191,9 @@ static void r8152b_enter_oob(struct r8152 *tp) | |||
3191 | 3191 | ||
3192 | rtl_rx_vlan_en(tp, true); | 3192 | rtl_rx_vlan_en(tp, true); |
3193 | 3193 | ||
3194 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR); | 3194 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR); |
3195 | ocp_data |= ALDPS_PROXY_MODE; | 3195 | ocp_data |= ALDPS_PROXY_MODE; |
3196 | ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data); | 3196 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data); |
3197 | 3197 | ||
3198 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); | 3198 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); |
3199 | ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; | 3199 | ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; |
@@ -3577,9 +3577,9 @@ static void r8153_enter_oob(struct r8152 *tp) | |||
3577 | 3577 | ||
3578 | rtl_rx_vlan_en(tp, true); | 3578 | rtl_rx_vlan_en(tp, true); |
3579 | 3579 | ||
3580 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR); | 3580 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR); |
3581 | ocp_data |= ALDPS_PROXY_MODE; | 3581 | ocp_data |= ALDPS_PROXY_MODE; |
3582 | ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data); | 3582 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data); |
3583 | 3583 | ||
3584 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); | 3584 | ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); |
3585 | ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; | 3585 | ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; |
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index a9ac3f37b904..e2e679a01b65 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c | |||
@@ -413,6 +413,7 @@ static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int | |||
413 | case SDLA_RET_NO_BUFS: | 413 | case SDLA_RET_NO_BUFS: |
414 | if (cmd == SDLA_INFORMATION_WRITE) | 414 | if (cmd == SDLA_INFORMATION_WRITE) |
415 | break; | 415 | break; |
416 | /* Else, fall through */ | ||
416 | 417 | ||
417 | default: | 418 | default: |
418 | netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n", | 419 | netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n", |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index d55312ef58c9..9b0bb89599fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h | |||
@@ -776,7 +776,6 @@ struct iwl_rss_config_cmd { | |||
776 | u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; | 776 | u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; |
777 | } __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ | 777 | } __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ |
778 | 778 | ||
779 | #define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128 | ||
780 | #define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0 | 779 | #define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0 |
781 | #define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf | 780 | #define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf |
782 | 781 | ||
@@ -812,10 +811,12 @@ struct iwl_rxq_sync_notification { | |||
812 | * | 811 | * |
813 | * @IWL_MVM_RXQ_EMPTY: empty sync notification | 812 | * @IWL_MVM_RXQ_EMPTY: empty sync notification |
814 | * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA | 813 | * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA |
814 | * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN | ||
815 | */ | 815 | */ |
816 | enum iwl_mvm_rxq_notif_type { | 816 | enum iwl_mvm_rxq_notif_type { |
817 | IWL_MVM_RXQ_EMPTY, | 817 | IWL_MVM_RXQ_EMPTY, |
818 | IWL_MVM_RXQ_NOTIF_DEL_BA, | 818 | IWL_MVM_RXQ_NOTIF_DEL_BA, |
819 | IWL_MVM_RXQ_NSSN_SYNC, | ||
819 | }; | 820 | }; |
820 | 821 | ||
821 | /** | 822 | /** |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index e411ac98290d..4d81776f576d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c | |||
@@ -2438,17 +2438,19 @@ static void iwl_fw_dbg_info_apply(struct iwl_fw_runtime *fwrt, | |||
2438 | { | 2438 | { |
2439 | u32 img_name_len = le32_to_cpu(dbg_info->img_name_len); | 2439 | u32 img_name_len = le32_to_cpu(dbg_info->img_name_len); |
2440 | u32 dbg_cfg_name_len = le32_to_cpu(dbg_info->dbg_cfg_name_len); | 2440 | u32 dbg_cfg_name_len = le32_to_cpu(dbg_info->dbg_cfg_name_len); |
2441 | const char err_str[] = | ||
2442 | "WRT: ext=%d. Invalid %s name length %d, expected %d\n"; | ||
2443 | 2441 | ||
2444 | if (img_name_len != IWL_FW_INI_MAX_IMG_NAME_LEN) { | 2442 | if (img_name_len != IWL_FW_INI_MAX_IMG_NAME_LEN) { |
2445 | IWL_WARN(fwrt, err_str, ext, "image", img_name_len, | 2443 | IWL_WARN(fwrt, |
2444 | "WRT: ext=%d. Invalid image name length %d, expected %d\n", | ||
2445 | ext, img_name_len, | ||
2446 | IWL_FW_INI_MAX_IMG_NAME_LEN); | 2446 | IWL_FW_INI_MAX_IMG_NAME_LEN); |
2447 | return; | 2447 | return; |
2448 | } | 2448 | } |
2449 | 2449 | ||
2450 | if (dbg_cfg_name_len != IWL_FW_INI_MAX_DBG_CFG_NAME_LEN) { | 2450 | if (dbg_cfg_name_len != IWL_FW_INI_MAX_DBG_CFG_NAME_LEN) { |
2451 | IWL_WARN(fwrt, err_str, ext, "debug cfg", dbg_cfg_name_len, | 2451 | IWL_WARN(fwrt, |
2452 | "WRT: ext=%d. Invalid debug cfg name length %d, expected %d\n", | ||
2453 | ext, dbg_cfg_name_len, | ||
2452 | IWL_FW_INI_MAX_DBG_CFG_NAME_LEN); | 2454 | IWL_FW_INI_MAX_DBG_CFG_NAME_LEN); |
2453 | return; | 2455 | return; |
2454 | } | 2456 | } |
@@ -2775,8 +2777,6 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, | |||
2775 | struct iwl_ucode_tlv *tlv = iter; | 2777 | struct iwl_ucode_tlv *tlv = iter; |
2776 | void *ini_tlv = (void *)tlv->data; | 2778 | void *ini_tlv = (void *)tlv->data; |
2777 | u32 type = le32_to_cpu(tlv->type); | 2779 | u32 type = le32_to_cpu(tlv->type); |
2778 | const char invalid_ap_str[] = | ||
2779 | "WRT: ext=%d. Invalid apply point %d for %s\n"; | ||
2780 | 2780 | ||
2781 | switch (type) { | 2781 | switch (type) { |
2782 | case IWL_UCODE_TLV_TYPE_DEBUG_INFO: | 2782 | case IWL_UCODE_TLV_TYPE_DEBUG_INFO: |
@@ -2786,8 +2786,9 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, | |||
2786 | struct iwl_fw_ini_allocation_data *buf_alloc = ini_tlv; | 2786 | struct iwl_fw_ini_allocation_data *buf_alloc = ini_tlv; |
2787 | 2787 | ||
2788 | if (pnt != IWL_FW_INI_APPLY_EARLY) { | 2788 | if (pnt != IWL_FW_INI_APPLY_EARLY) { |
2789 | IWL_ERR(fwrt, invalid_ap_str, ext, pnt, | 2789 | IWL_ERR(fwrt, |
2790 | "buffer allocation"); | 2790 | "WRT: ext=%d. Invalid apply point %d for buffer allocation\n", |
2791 | ext, pnt); | ||
2791 | goto next; | 2792 | goto next; |
2792 | } | 2793 | } |
2793 | 2794 | ||
@@ -2797,8 +2798,9 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, | |||
2797 | } | 2798 | } |
2798 | case IWL_UCODE_TLV_TYPE_HCMD: | 2799 | case IWL_UCODE_TLV_TYPE_HCMD: |
2799 | if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) { | 2800 | if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) { |
2800 | IWL_ERR(fwrt, invalid_ap_str, ext, pnt, | 2801 | IWL_ERR(fwrt, |
2801 | "host command"); | 2802 | "WRT: ext=%d. Invalid apply point %d for host command\n", |
2803 | ext, pnt); | ||
2802 | goto next; | 2804 | goto next; |
2803 | } | 2805 | } |
2804 | iwl_fw_dbg_send_hcmd(fwrt, tlv, ext); | 2806 | iwl_fw_dbg_send_hcmd(fwrt, tlv, ext); |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 57d09049e615..38672dd5aae9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c | |||
@@ -1640,6 +1640,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) | |||
1640 | init_completion(&drv->request_firmware_complete); | 1640 | init_completion(&drv->request_firmware_complete); |
1641 | INIT_LIST_HEAD(&drv->list); | 1641 | INIT_LIST_HEAD(&drv->list); |
1642 | 1642 | ||
1643 | iwl_load_fw_dbg_tlv(drv->trans->dev, drv->trans); | ||
1644 | |||
1643 | #ifdef CONFIG_IWLWIFI_DEBUGFS | 1645 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
1644 | /* Create the device debugfs entries. */ | 1646 | /* Create the device debugfs entries. */ |
1645 | drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev), | 1647 | drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev), |
@@ -1660,8 +1662,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) | |||
1660 | err_fw: | 1662 | err_fw: |
1661 | #ifdef CONFIG_IWLWIFI_DEBUGFS | 1663 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
1662 | debugfs_remove_recursive(drv->dbgfs_drv); | 1664 | debugfs_remove_recursive(drv->dbgfs_drv); |
1663 | iwl_fw_dbg_free(drv->trans); | ||
1664 | #endif | 1665 | #endif |
1666 | iwl_fw_dbg_free(drv->trans); | ||
1665 | kfree(drv); | 1667 | kfree(drv); |
1666 | err: | 1668 | err: |
1667 | return ERR_PTR(ret); | 1669 | return ERR_PTR(ret); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 1d608e9e9101..5de54d1559dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c | |||
@@ -755,7 +755,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) | |||
755 | 755 | ||
756 | for (i = 0; i < n_profiles; i++) { | 756 | for (i = 0; i < n_profiles; i++) { |
757 | /* the tables start at element 3 */ | 757 | /* the tables start at element 3 */ |
758 | static int pos = 3; | 758 | int pos = 3; |
759 | 759 | ||
760 | /* The EWRD profiles officially go from 2 to 4, but we | 760 | /* The EWRD profiles officially go from 2 to 4, but we |
761 | * save them in sar_profiles[1-3] (because we don't | 761 | * save them in sar_profiles[1-3] (because we don't |
@@ -880,6 +880,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) | |||
880 | return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); | 880 | return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); |
881 | } | 881 | } |
882 | 882 | ||
883 | static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm) | ||
884 | { | ||
885 | /* | ||
886 | * The GEO_TX_POWER_LIMIT command is not supported on earlier | ||
887 | * firmware versions. Unfortunately, we don't have a TLV API | ||
888 | * flag to rely on, so rely on the major version which is in | ||
889 | * the first byte of ucode_ver. This was implemented | ||
890 | * initially on version 38 and then backported to 36, 29 and | ||
891 | * 17. | ||
892 | */ | ||
893 | return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 || | ||
894 | IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 || | ||
895 | IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 || | ||
896 | IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17; | ||
897 | } | ||
898 | |||
883 | int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) | 899 | int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) |
884 | { | 900 | { |
885 | struct iwl_geo_tx_power_profiles_resp *resp; | 901 | struct iwl_geo_tx_power_profiles_resp *resp; |
@@ -909,6 +925,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) | |||
909 | .data = { data }, | 925 | .data = { data }, |
910 | }; | 926 | }; |
911 | 927 | ||
928 | if (!iwl_mvm_sar_geo_support(mvm)) | ||
929 | return -EOPNOTSUPP; | ||
930 | |||
912 | ret = iwl_mvm_send_cmd(mvm, &cmd); | 931 | ret = iwl_mvm_send_cmd(mvm, &cmd); |
913 | if (ret) { | 932 | if (ret) { |
914 | IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret); | 933 | IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret); |
@@ -934,13 +953,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) | |||
934 | int ret, i, j; | 953 | int ret, i, j; |
935 | u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT); | 954 | u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT); |
936 | 955 | ||
937 | /* | 956 | if (!iwl_mvm_sar_geo_support(mvm)) |
938 | * This command is not supported on earlier firmware versions. | ||
939 | * Unfortunately, we don't have a TLV API flag to rely on, so | ||
940 | * rely on the major version which is in the first byte of | ||
941 | * ucode_ver. | ||
942 | */ | ||
943 | if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41) | ||
944 | return 0; | 957 | return 0; |
945 | 958 | ||
946 | ret = iwl_mvm_sar_get_wgds_table(mvm); | 959 | ret = iwl_mvm_sar_get_wgds_table(mvm); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index e63623251d61..b74bd58f3f45 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
@@ -207,11 +207,11 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { | |||
207 | }, | 207 | }, |
208 | }; | 208 | }; |
209 | 209 | ||
210 | static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | 210 | static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, |
211 | enum set_key_cmd cmd, | 211 | enum set_key_cmd cmd, |
212 | struct ieee80211_vif *vif, | 212 | struct ieee80211_vif *vif, |
213 | struct ieee80211_sta *sta, | 213 | struct ieee80211_sta *sta, |
214 | struct ieee80211_key_conf *key); | 214 | struct ieee80211_key_conf *key); |
215 | 215 | ||
216 | void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) | 216 | void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) |
217 | { | 217 | { |
@@ -474,7 +474,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) | |||
474 | ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); | 474 | ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); |
475 | ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); | 475 | ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); |
476 | ieee80211_hw_set(hw, STA_MMPDU_TXQ); | 476 | ieee80211_hw_set(hw, STA_MMPDU_TXQ); |
477 | ieee80211_hw_set(hw, TX_AMSDU); | 477 | /* |
478 | * On older devices, enabling TX A-MSDU occasionally leads to | ||
479 | * something getting messed up, the command read from the FIFO | ||
480 | * gets out of sync and isn't a TX command, so that we have an | ||
481 | * assert EDC. | ||
482 | * | ||
483 | * It's not clear where the bug is, but since we didn't used to | ||
484 | * support A-MSDU until moving the mac80211 iTXQs, just leave it | ||
485 | * for older devices. We also don't see this issue on any newer | ||
486 | * devices. | ||
487 | */ | ||
488 | if (mvm->cfg->device_family >= IWL_DEVICE_FAMILY_9000) | ||
489 | ieee80211_hw_set(hw, TX_AMSDU); | ||
478 | ieee80211_hw_set(hw, TX_FRAG_LIST); | 490 | ieee80211_hw_set(hw, TX_FRAG_LIST); |
479 | 491 | ||
480 | if (iwl_mvm_has_tlc_offload(mvm)) { | 492 | if (iwl_mvm_has_tlc_offload(mvm)) { |
@@ -2726,7 +2738,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, | |||
2726 | 2738 | ||
2727 | mvmvif->ap_early_keys[i] = NULL; | 2739 | mvmvif->ap_early_keys[i] = NULL; |
2728 | 2740 | ||
2729 | ret = iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); | 2741 | ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); |
2730 | if (ret) | 2742 | if (ret) |
2731 | goto out_quota_failed; | 2743 | goto out_quota_failed; |
2732 | } | 2744 | } |
@@ -3494,11 +3506,11 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, | |||
3494 | return ret; | 3506 | return ret; |
3495 | } | 3507 | } |
3496 | 3508 | ||
3497 | static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | 3509 | static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, |
3498 | enum set_key_cmd cmd, | 3510 | enum set_key_cmd cmd, |
3499 | struct ieee80211_vif *vif, | 3511 | struct ieee80211_vif *vif, |
3500 | struct ieee80211_sta *sta, | 3512 | struct ieee80211_sta *sta, |
3501 | struct ieee80211_key_conf *key) | 3513 | struct ieee80211_key_conf *key) |
3502 | { | 3514 | { |
3503 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); | 3515 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); |
3504 | struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); | 3516 | struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); |
@@ -3553,8 +3565,6 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | |||
3553 | return -EOPNOTSUPP; | 3565 | return -EOPNOTSUPP; |
3554 | } | 3566 | } |
3555 | 3567 | ||
3556 | mutex_lock(&mvm->mutex); | ||
3557 | |||
3558 | switch (cmd) { | 3568 | switch (cmd) { |
3559 | case SET_KEY: | 3569 | case SET_KEY: |
3560 | if ((vif->type == NL80211_IFTYPE_ADHOC || | 3570 | if ((vif->type == NL80211_IFTYPE_ADHOC || |
@@ -3700,7 +3710,22 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | |||
3700 | ret = -EINVAL; | 3710 | ret = -EINVAL; |
3701 | } | 3711 | } |
3702 | 3712 | ||
3713 | return ret; | ||
3714 | } | ||
3715 | |||
3716 | static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | ||
3717 | enum set_key_cmd cmd, | ||
3718 | struct ieee80211_vif *vif, | ||
3719 | struct ieee80211_sta *sta, | ||
3720 | struct ieee80211_key_conf *key) | ||
3721 | { | ||
3722 | struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); | ||
3723 | int ret; | ||
3724 | |||
3725 | mutex_lock(&mvm->mutex); | ||
3726 | ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key); | ||
3703 | mutex_unlock(&mvm->mutex); | 3727 | mutex_unlock(&mvm->mutex); |
3728 | |||
3704 | return ret; | 3729 | return ret; |
3705 | } | 3730 | } |
3706 | 3731 | ||
@@ -5042,7 +5067,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, | |||
5042 | u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; | 5067 | u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; |
5043 | int ret; | 5068 | int ret; |
5044 | 5069 | ||
5045 | lockdep_assert_held(&mvm->mutex); | ||
5046 | 5070 | ||
5047 | if (!iwl_mvm_has_new_rx_api(mvm)) | 5071 | if (!iwl_mvm_has_new_rx_api(mvm)) |
5048 | return; | 5072 | return; |
@@ -5053,13 +5077,15 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, | |||
5053 | atomic_set(&mvm->queue_sync_counter, | 5077 | atomic_set(&mvm->queue_sync_counter, |
5054 | mvm->trans->num_rx_queues); | 5078 | mvm->trans->num_rx_queues); |
5055 | 5079 | ||
5056 | ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size); | 5080 | ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, |
5081 | size, !notif->sync); | ||
5057 | if (ret) { | 5082 | if (ret) { |
5058 | IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); | 5083 | IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); |
5059 | goto out; | 5084 | goto out; |
5060 | } | 5085 | } |
5061 | 5086 | ||
5062 | if (notif->sync) { | 5087 | if (notif->sync) { |
5088 | lockdep_assert_held(&mvm->mutex); | ||
5063 | ret = wait_event_timeout(mvm->rx_sync_waitq, | 5089 | ret = wait_event_timeout(mvm->rx_sync_waitq, |
5064 | atomic_read(&mvm->queue_sync_counter) == 0 || | 5090 | atomic_read(&mvm->queue_sync_counter) == 0 || |
5065 | iwl_mvm_is_radio_killed(mvm), | 5091 | iwl_mvm_is_radio_killed(mvm), |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 48c77af54e99..a263cc629d75 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | |||
@@ -1664,9 +1664,9 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, | |||
1664 | void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, | 1664 | void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, |
1665 | struct iwl_rx_cmd_buffer *rxb, int queue); | 1665 | struct iwl_rx_cmd_buffer *rxb, int queue); |
1666 | int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, | 1666 | int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, |
1667 | const u8 *data, u32 count); | 1667 | const u8 *data, u32 count, bool async); |
1668 | void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | 1668 | void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, |
1669 | int queue); | 1669 | struct iwl_rx_cmd_buffer *rxb, int queue); |
1670 | void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); | 1670 | void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); |
1671 | void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, | 1671 | void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, |
1672 | struct iwl_rx_cmd_buffer *rxb); | 1672 | struct iwl_rx_cmd_buffer *rxb); |
@@ -1813,7 +1813,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
1813 | #endif /* CONFIG_IWLWIFI_DEBUGFS */ | 1813 | #endif /* CONFIG_IWLWIFI_DEBUGFS */ |
1814 | 1814 | ||
1815 | /* rate scaling */ | 1815 | /* rate scaling */ |
1816 | int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync); | 1816 | int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq); |
1817 | void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); | 1817 | void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg); |
1818 | int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); | 1818 | int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate); |
1819 | void rs_update_last_rssi(struct iwl_mvm *mvm, | 1819 | void rs_update_last_rssi(struct iwl_mvm *mvm, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 719f793b3487..a9bb43a2f27b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | |||
@@ -620,7 +620,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, | |||
620 | enum iwl_mcc_source src; | 620 | enum iwl_mcc_source src; |
621 | char mcc[3]; | 621 | char mcc[3]; |
622 | struct ieee80211_regdomain *regd; | 622 | struct ieee80211_regdomain *regd; |
623 | u32 wgds_tbl_idx; | 623 | int wgds_tbl_idx; |
624 | 624 | ||
625 | lockdep_assert_held(&mvm->mutex); | 625 | lockdep_assert_held(&mvm->mutex); |
626 | 626 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index d7d6f3398f86..4888054dc3d8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
@@ -1088,7 +1088,7 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, | |||
1088 | iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); | 1088 | iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); |
1089 | else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, | 1089 | else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, |
1090 | RX_QUEUES_NOTIFICATION))) | 1090 | RX_QUEUES_NOTIFICATION))) |
1091 | iwl_mvm_rx_queue_notif(mvm, rxb, 0); | 1091 | iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0); |
1092 | else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) | 1092 | else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) |
1093 | iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); | 1093 | iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); |
1094 | else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) | 1094 | else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) |
@@ -1812,7 +1812,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, | |||
1812 | iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); | 1812 | iwl_mvm_rx_frame_release(mvm, napi, rxb, queue); |
1813 | else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, | 1813 | else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP, |
1814 | RX_QUEUES_NOTIFICATION))) | 1814 | RX_QUEUES_NOTIFICATION))) |
1815 | iwl_mvm_rx_queue_notif(mvm, rxb, queue); | 1815 | iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue); |
1816 | else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) | 1816 | else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) |
1817 | iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); | 1817 | iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); |
1818 | } | 1818 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 009e72abcd51..e4415e58fa78 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c | |||
@@ -1197,239 +1197,6 @@ static u8 rs_get_tid(struct ieee80211_hdr *hdr) | |||
1197 | return tid; | 1197 | return tid; |
1198 | } | 1198 | } |
1199 | 1199 | ||
1200 | void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | ||
1201 | int tid, struct ieee80211_tx_info *info, bool ndp) | ||
1202 | { | ||
1203 | int legacy_success; | ||
1204 | int retries; | ||
1205 | int i; | ||
1206 | struct iwl_lq_cmd *table; | ||
1207 | u32 lq_hwrate; | ||
1208 | struct rs_rate lq_rate, tx_resp_rate; | ||
1209 | struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; | ||
1210 | u32 tlc_info = (uintptr_t)info->status.status_driver_data[0]; | ||
1211 | u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK; | ||
1212 | u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info); | ||
1213 | u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; | ||
1214 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); | ||
1215 | struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv; | ||
1216 | |||
1217 | /* Treat uninitialized rate scaling data same as non-existing. */ | ||
1218 | if (!lq_sta) { | ||
1219 | IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n"); | ||
1220 | return; | ||
1221 | } else if (!lq_sta->pers.drv) { | ||
1222 | IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n"); | ||
1223 | return; | ||
1224 | } | ||
1225 | |||
1226 | /* This packet was aggregated but doesn't carry status info */ | ||
1227 | if ((info->flags & IEEE80211_TX_CTL_AMPDU) && | ||
1228 | !(info->flags & IEEE80211_TX_STAT_AMPDU)) | ||
1229 | return; | ||
1230 | |||
1231 | if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, | ||
1232 | &tx_resp_rate)) { | ||
1233 | WARN_ON_ONCE(1); | ||
1234 | return; | ||
1235 | } | ||
1236 | |||
1237 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
1238 | /* Disable last tx check if we are debugging with fixed rate but | ||
1239 | * update tx stats */ | ||
1240 | if (lq_sta->pers.dbg_fixed_rate) { | ||
1241 | int index = tx_resp_rate.index; | ||
1242 | enum rs_column column; | ||
1243 | int attempts, success; | ||
1244 | |||
1245 | column = rs_get_column_from_rate(&tx_resp_rate); | ||
1246 | if (WARN_ONCE(column == RS_COLUMN_INVALID, | ||
1247 | "Can't map rate 0x%x to column", | ||
1248 | tx_resp_hwrate)) | ||
1249 | return; | ||
1250 | |||
1251 | if (info->flags & IEEE80211_TX_STAT_AMPDU) { | ||
1252 | attempts = info->status.ampdu_len; | ||
1253 | success = info->status.ampdu_ack_len; | ||
1254 | } else { | ||
1255 | attempts = info->status.rates[0].count; | ||
1256 | success = !!(info->flags & IEEE80211_TX_STAT_ACK); | ||
1257 | } | ||
1258 | |||
1259 | lq_sta->pers.tx_stats[column][index].total += attempts; | ||
1260 | lq_sta->pers.tx_stats[column][index].success += success; | ||
1261 | |||
1262 | IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n", | ||
1263 | tx_resp_hwrate, success, attempts); | ||
1264 | return; | ||
1265 | } | ||
1266 | #endif | ||
1267 | |||
1268 | if (time_after(jiffies, | ||
1269 | (unsigned long)(lq_sta->last_tx + | ||
1270 | (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { | ||
1271 | IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); | ||
1272 | iwl_mvm_rs_rate_init(mvm, sta, info->band, true); | ||
1273 | return; | ||
1274 | } | ||
1275 | lq_sta->last_tx = jiffies; | ||
1276 | |||
1277 | /* Ignore this Tx frame response if its initial rate doesn't match | ||
1278 | * that of latest Link Quality command. There may be stragglers | ||
1279 | * from a previous Link Quality command, but we're no longer interested | ||
1280 | * in those; they're either from the "active" mode while we're trying | ||
1281 | * to check "search" mode, or a prior "search" mode after we've moved | ||
1282 | * to a new "search" mode (which might become the new "active" mode). | ||
1283 | */ | ||
1284 | table = &lq_sta->lq; | ||
1285 | lq_hwrate = le32_to_cpu(table->rs_table[0]); | ||
1286 | if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) { | ||
1287 | WARN_ON_ONCE(1); | ||
1288 | return; | ||
1289 | } | ||
1290 | |||
1291 | /* Here we actually compare this rate to the latest LQ command */ | ||
1292 | if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) { | ||
1293 | IWL_DEBUG_RATE(mvm, | ||
1294 | "tx resp color 0x%x does not match 0x%x\n", | ||
1295 | lq_color, LQ_FLAG_COLOR_GET(table->flags)); | ||
1296 | |||
1297 | /* | ||
1298 | * Since rates mis-match, the last LQ command may have failed. | ||
1299 | * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with | ||
1300 | * ... driver. | ||
1301 | */ | ||
1302 | lq_sta->missed_rate_counter++; | ||
1303 | if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) { | ||
1304 | lq_sta->missed_rate_counter = 0; | ||
1305 | IWL_DEBUG_RATE(mvm, | ||
1306 | "Too many rates mismatch. Send sync LQ. rs_state %d\n", | ||
1307 | lq_sta->rs_state); | ||
1308 | iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false); | ||
1309 | } | ||
1310 | /* Regardless, ignore this status info for outdated rate */ | ||
1311 | return; | ||
1312 | } else | ||
1313 | /* Rate did match, so reset the missed_rate_counter */ | ||
1314 | lq_sta->missed_rate_counter = 0; | ||
1315 | |||
1316 | if (!lq_sta->search_better_tbl) { | ||
1317 | curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); | ||
1318 | other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); | ||
1319 | } else { | ||
1320 | curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); | ||
1321 | other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); | ||
1322 | } | ||
1323 | |||
1324 | if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) { | ||
1325 | IWL_DEBUG_RATE(mvm, | ||
1326 | "Neither active nor search matches tx rate\n"); | ||
1327 | tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); | ||
1328 | rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE"); | ||
1329 | tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); | ||
1330 | rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH"); | ||
1331 | rs_dump_rate(mvm, &lq_rate, "ACTUAL"); | ||
1332 | |||
1333 | /* | ||
1334 | * no matching table found, let's by-pass the data collection | ||
1335 | * and continue to perform rate scale to find the rate table | ||
1336 | */ | ||
1337 | rs_stay_in_table(lq_sta, true); | ||
1338 | goto done; | ||
1339 | } | ||
1340 | |||
1341 | /* | ||
1342 | * Updating the frame history depends on whether packets were | ||
1343 | * aggregated. | ||
1344 | * | ||
1345 | * For aggregation, all packets were transmitted at the same rate, the | ||
1346 | * first index into rate scale table. | ||
1347 | */ | ||
1348 | if (info->flags & IEEE80211_TX_STAT_AMPDU) { | ||
1349 | rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index, | ||
1350 | info->status.ampdu_len, | ||
1351 | info->status.ampdu_ack_len, | ||
1352 | reduced_txp); | ||
1353 | |||
1354 | /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat | ||
1355 | * it as a single frame loss as we don't want the success ratio | ||
1356 | * to dip too quickly because a BA wasn't received. | ||
1357 | * For TPC, there's no need for this optimisation since we want | ||
1358 | * to recover very quickly from a bad power reduction and, | ||
1359 | * therefore we'd like the success ratio to get an immediate hit | ||
1360 | * when failing to get a BA, so we'd switch back to a lower or | ||
1361 | * zero power reduction. When FW transmits agg with a rate | ||
1362 | * different from the initial rate, it will not use reduced txp | ||
1363 | * and will send BA notification twice (one empty with reduced | ||
1364 | * txp equal to the value from LQ and one with reduced txp 0). | ||
1365 | * We need to update counters for each txp level accordingly. | ||
1366 | */ | ||
1367 | if (info->status.ampdu_ack_len == 0) | ||
1368 | info->status.ampdu_len = 1; | ||
1369 | |||
1370 | rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, tx_resp_rate.index, | ||
1371 | info->status.ampdu_len, | ||
1372 | info->status.ampdu_ack_len); | ||
1373 | |||
1374 | /* Update success/fail counts if not searching for new mode */ | ||
1375 | if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { | ||
1376 | lq_sta->total_success += info->status.ampdu_ack_len; | ||
1377 | lq_sta->total_failed += (info->status.ampdu_len - | ||
1378 | info->status.ampdu_ack_len); | ||
1379 | } | ||
1380 | } else { | ||
1381 | /* For legacy, update frame history with for each Tx retry. */ | ||
1382 | retries = info->status.rates[0].count - 1; | ||
1383 | /* HW doesn't send more than 15 retries */ | ||
1384 | retries = min(retries, 15); | ||
1385 | |||
1386 | /* The last transmission may have been successful */ | ||
1387 | legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); | ||
1388 | /* Collect data for each rate used during failed TX attempts */ | ||
1389 | for (i = 0; i <= retries; ++i) { | ||
1390 | lq_hwrate = le32_to_cpu(table->rs_table[i]); | ||
1391 | if (rs_rate_from_ucode_rate(lq_hwrate, info->band, | ||
1392 | &lq_rate)) { | ||
1393 | WARN_ON_ONCE(1); | ||
1394 | return; | ||
1395 | } | ||
1396 | |||
1397 | /* | ||
1398 | * Only collect stats if retried rate is in the same RS | ||
1399 | * table as active/search. | ||
1400 | */ | ||
1401 | if (rs_rate_column_match(&lq_rate, &curr_tbl->rate)) | ||
1402 | tmp_tbl = curr_tbl; | ||
1403 | else if (rs_rate_column_match(&lq_rate, | ||
1404 | &other_tbl->rate)) | ||
1405 | tmp_tbl = other_tbl; | ||
1406 | else | ||
1407 | continue; | ||
1408 | |||
1409 | rs_collect_tpc_data(mvm, lq_sta, tmp_tbl, | ||
1410 | tx_resp_rate.index, 1, | ||
1411 | i < retries ? 0 : legacy_success, | ||
1412 | reduced_txp); | ||
1413 | rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl, | ||
1414 | tx_resp_rate.index, 1, | ||
1415 | i < retries ? 0 : legacy_success); | ||
1416 | } | ||
1417 | |||
1418 | /* Update success/fail counts if not searching for new mode */ | ||
1419 | if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { | ||
1420 | lq_sta->total_success += legacy_success; | ||
1421 | lq_sta->total_failed += retries + (1 - legacy_success); | ||
1422 | } | ||
1423 | } | ||
1424 | /* The last TX rate is cached in lq_sta; it's set in if/else above */ | ||
1425 | lq_sta->last_rate_n_flags = lq_hwrate; | ||
1426 | IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp); | ||
1427 | done: | ||
1428 | /* See if there's a better rate or modulation mode to try. */ | ||
1429 | if (sta->supp_rates[info->band]) | ||
1430 | rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp); | ||
1431 | } | ||
1432 | |||
1433 | /* | 1200 | /* |
1434 | * mac80211 sends us Tx status | 1201 | * mac80211 sends us Tx status |
1435 | */ | 1202 | */ |
@@ -1442,8 +1209,9 @@ static void rs_drv_mac80211_tx_status(void *mvm_r, | |||
1442 | struct iwl_op_mode *op_mode = mvm_r; | 1209 | struct iwl_op_mode *op_mode = mvm_r; |
1443 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 1210 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
1444 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1211 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1212 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); | ||
1445 | 1213 | ||
1446 | if (!iwl_mvm_sta_from_mac80211(sta)->vif) | 1214 | if (!mvmsta->vif) |
1447 | return; | 1215 | return; |
1448 | 1216 | ||
1449 | if (!ieee80211_is_data(hdr->frame_control) || | 1217 | if (!ieee80211_is_data(hdr->frame_control) || |
@@ -1584,6 +1352,18 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, | |||
1584 | tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw); | 1352 | tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw); |
1585 | } | 1353 | } |
1586 | 1354 | ||
1355 | /* rs uses two tables, one is active and the second is for searching better | ||
1356 | * configuration. This function, according to the index of the currently | ||
1357 | * active table returns the search table, which is located at the | ||
1358 | * index complementary to 1 according to the active table (active = 1, | ||
1359 | * search = 0 or active = 0, search = 1). | ||
1360 | * Since lq_info is an arary of size 2, make sure index cannot be out of bounds. | ||
1361 | */ | ||
1362 | static inline u8 rs_search_tbl(u8 active_tbl) | ||
1363 | { | ||
1364 | return (active_tbl ^ 1) & 1; | ||
1365 | } | ||
1366 | |||
1587 | static s32 rs_get_best_rate(struct iwl_mvm *mvm, | 1367 | static s32 rs_get_best_rate(struct iwl_mvm *mvm, |
1588 | struct iwl_lq_sta *lq_sta, | 1368 | struct iwl_lq_sta *lq_sta, |
1589 | struct iwl_scale_tbl_info *tbl, /* "search" */ | 1369 | struct iwl_scale_tbl_info *tbl, /* "search" */ |
@@ -1794,7 +1574,7 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm, | |||
1794 | struct iwl_scale_tbl_info *tbl) | 1574 | struct iwl_scale_tbl_info *tbl) |
1795 | { | 1575 | { |
1796 | rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate); | 1576 | rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate); |
1797 | iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false); | 1577 | iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq); |
1798 | } | 1578 | } |
1799 | 1579 | ||
1800 | static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm, | 1580 | static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm, |
@@ -1931,9 +1711,9 @@ static int rs_switch_to_column(struct iwl_mvm *mvm, | |||
1931 | struct ieee80211_sta *sta, | 1711 | struct ieee80211_sta *sta, |
1932 | enum rs_column col_id) | 1712 | enum rs_column col_id) |
1933 | { | 1713 | { |
1934 | struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); | 1714 | struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; |
1935 | struct iwl_scale_tbl_info *search_tbl = | 1715 | struct iwl_scale_tbl_info *search_tbl = |
1936 | &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); | 1716 | &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)]; |
1937 | struct rs_rate *rate = &search_tbl->rate; | 1717 | struct rs_rate *rate = &search_tbl->rate; |
1938 | const struct rs_tx_column *column = &rs_tx_columns[col_id]; | 1718 | const struct rs_tx_column *column = &rs_tx_columns[col_id]; |
1939 | const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column]; | 1719 | const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column]; |
@@ -2341,7 +2121,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, | |||
2341 | if (!lq_sta->search_better_tbl) | 2121 | if (!lq_sta->search_better_tbl) |
2342 | active_tbl = lq_sta->active_tbl; | 2122 | active_tbl = lq_sta->active_tbl; |
2343 | else | 2123 | else |
2344 | active_tbl = 1 - lq_sta->active_tbl; | 2124 | active_tbl = rs_search_tbl(lq_sta->active_tbl); |
2345 | 2125 | ||
2346 | tbl = &(lq_sta->lq_info[active_tbl]); | 2126 | tbl = &(lq_sta->lq_info[active_tbl]); |
2347 | rate = &tbl->rate; | 2127 | rate = &tbl->rate; |
@@ -2565,7 +2345,7 @@ lq_update: | |||
2565 | /* If new "search" mode was selected, set up in uCode table */ | 2345 | /* If new "search" mode was selected, set up in uCode table */ |
2566 | if (lq_sta->search_better_tbl) { | 2346 | if (lq_sta->search_better_tbl) { |
2567 | /* Access the "search" table, clear its history. */ | 2347 | /* Access the "search" table, clear its history. */ |
2568 | tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); | 2348 | tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)]; |
2569 | rs_rate_scale_clear_tbl_windows(mvm, tbl); | 2349 | rs_rate_scale_clear_tbl_windows(mvm, tbl); |
2570 | 2350 | ||
2571 | /* Use new "search" start rate */ | 2351 | /* Use new "search" start rate */ |
@@ -2896,7 +2676,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, | |||
2896 | static void rs_initialize_lq(struct iwl_mvm *mvm, | 2676 | static void rs_initialize_lq(struct iwl_mvm *mvm, |
2897 | struct ieee80211_sta *sta, | 2677 | struct ieee80211_sta *sta, |
2898 | struct iwl_lq_sta *lq_sta, | 2678 | struct iwl_lq_sta *lq_sta, |
2899 | enum nl80211_band band, bool update) | 2679 | enum nl80211_band band) |
2900 | { | 2680 | { |
2901 | struct iwl_scale_tbl_info *tbl; | 2681 | struct iwl_scale_tbl_info *tbl; |
2902 | struct rs_rate *rate; | 2682 | struct rs_rate *rate; |
@@ -2908,7 +2688,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, | |||
2908 | if (!lq_sta->search_better_tbl) | 2688 | if (!lq_sta->search_better_tbl) |
2909 | active_tbl = lq_sta->active_tbl; | 2689 | active_tbl = lq_sta->active_tbl; |
2910 | else | 2690 | else |
2911 | active_tbl = 1 - lq_sta->active_tbl; | 2691 | active_tbl = rs_search_tbl(lq_sta->active_tbl); |
2912 | 2692 | ||
2913 | tbl = &(lq_sta->lq_info[active_tbl]); | 2693 | tbl = &(lq_sta->lq_info[active_tbl]); |
2914 | rate = &tbl->rate; | 2694 | rate = &tbl->rate; |
@@ -2926,7 +2706,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, | |||
2926 | rs_set_expected_tpt_table(lq_sta, tbl); | 2706 | rs_set_expected_tpt_table(lq_sta, tbl); |
2927 | rs_fill_lq_cmd(mvm, sta, lq_sta, rate); | 2707 | rs_fill_lq_cmd(mvm, sta, lq_sta, rate); |
2928 | /* TODO restore station should remember the lq cmd */ | 2708 | /* TODO restore station should remember the lq cmd */ |
2929 | iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update); | 2709 | iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq); |
2930 | } | 2710 | } |
2931 | 2711 | ||
2932 | static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, | 2712 | static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, |
@@ -3175,7 +2955,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) | |||
3175 | * Called after adding a new station to initialize rate scaling | 2955 | * Called after adding a new station to initialize rate scaling |
3176 | */ | 2956 | */ |
3177 | static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 2957 | static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
3178 | enum nl80211_band band, bool update) | 2958 | enum nl80211_band band) |
3179 | { | 2959 | { |
3180 | int i, j; | 2960 | int i, j; |
3181 | struct ieee80211_hw *hw = mvm->hw; | 2961 | struct ieee80211_hw *hw = mvm->hw; |
@@ -3186,6 +2966,8 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
3186 | struct ieee80211_supported_band *sband; | 2966 | struct ieee80211_supported_band *sband; |
3187 | unsigned long supp; /* must be unsigned long for for_each_set_bit */ | 2967 | unsigned long supp; /* must be unsigned long for for_each_set_bit */ |
3188 | 2968 | ||
2969 | lockdep_assert_held(&mvmsta->lq_sta.rs_drv.pers.lock); | ||
2970 | |||
3189 | /* clear all non-persistent lq data */ | 2971 | /* clear all non-persistent lq data */ |
3190 | memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); | 2972 | memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers)); |
3191 | 2973 | ||
@@ -3255,7 +3037,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
3255 | #ifdef CONFIG_IWLWIFI_DEBUGFS | 3037 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
3256 | iwl_mvm_reset_frame_stats(mvm); | 3038 | iwl_mvm_reset_frame_stats(mvm); |
3257 | #endif | 3039 | #endif |
3258 | rs_initialize_lq(mvm, sta, lq_sta, band, update); | 3040 | rs_initialize_lq(mvm, sta, lq_sta, band); |
3259 | } | 3041 | } |
3260 | 3042 | ||
3261 | static void rs_drv_rate_update(void *mvm_r, | 3043 | static void rs_drv_rate_update(void *mvm_r, |
@@ -3278,6 +3060,258 @@ static void rs_drv_rate_update(void *mvm_r, | |||
3278 | iwl_mvm_rs_rate_init(mvm, sta, sband->band, true); | 3060 | iwl_mvm_rs_rate_init(mvm, sta, sband->band, true); |
3279 | } | 3061 | } |
3280 | 3062 | ||
3063 | static void __iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, | ||
3064 | struct ieee80211_sta *sta, | ||
3065 | int tid, struct ieee80211_tx_info *info, | ||
3066 | bool ndp) | ||
3067 | { | ||
3068 | int legacy_success; | ||
3069 | int retries; | ||
3070 | int i; | ||
3071 | struct iwl_lq_cmd *table; | ||
3072 | u32 lq_hwrate; | ||
3073 | struct rs_rate lq_rate, tx_resp_rate; | ||
3074 | struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; | ||
3075 | u32 tlc_info = (uintptr_t)info->status.status_driver_data[0]; | ||
3076 | u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK; | ||
3077 | u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info); | ||
3078 | u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; | ||
3079 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); | ||
3080 | struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv; | ||
3081 | |||
3082 | /* Treat uninitialized rate scaling data same as non-existing. */ | ||
3083 | if (!lq_sta) { | ||
3084 | IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n"); | ||
3085 | return; | ||
3086 | } else if (!lq_sta->pers.drv) { | ||
3087 | IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n"); | ||
3088 | return; | ||
3089 | } | ||
3090 | |||
3091 | /* This packet was aggregated but doesn't carry status info */ | ||
3092 | if ((info->flags & IEEE80211_TX_CTL_AMPDU) && | ||
3093 | !(info->flags & IEEE80211_TX_STAT_AMPDU)) | ||
3094 | return; | ||
3095 | |||
3096 | if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, | ||
3097 | &tx_resp_rate)) { | ||
3098 | WARN_ON_ONCE(1); | ||
3099 | return; | ||
3100 | } | ||
3101 | |||
3102 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
3103 | /* Disable last tx check if we are debugging with fixed rate but | ||
3104 | * update tx stats | ||
3105 | */ | ||
3106 | if (lq_sta->pers.dbg_fixed_rate) { | ||
3107 | int index = tx_resp_rate.index; | ||
3108 | enum rs_column column; | ||
3109 | int attempts, success; | ||
3110 | |||
3111 | column = rs_get_column_from_rate(&tx_resp_rate); | ||
3112 | if (WARN_ONCE(column == RS_COLUMN_INVALID, | ||
3113 | "Can't map rate 0x%x to column", | ||
3114 | tx_resp_hwrate)) | ||
3115 | return; | ||
3116 | |||
3117 | if (info->flags & IEEE80211_TX_STAT_AMPDU) { | ||
3118 | attempts = info->status.ampdu_len; | ||
3119 | success = info->status.ampdu_ack_len; | ||
3120 | } else { | ||
3121 | attempts = info->status.rates[0].count; | ||
3122 | success = !!(info->flags & IEEE80211_TX_STAT_ACK); | ||
3123 | } | ||
3124 | |||
3125 | lq_sta->pers.tx_stats[column][index].total += attempts; | ||
3126 | lq_sta->pers.tx_stats[column][index].success += success; | ||
3127 | |||
3128 | IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n", | ||
3129 | tx_resp_hwrate, success, attempts); | ||
3130 | return; | ||
3131 | } | ||
3132 | #endif | ||
3133 | |||
3134 | if (time_after(jiffies, | ||
3135 | (unsigned long)(lq_sta->last_tx + | ||
3136 | (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { | ||
3137 | IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); | ||
3138 | /* reach here only in case of driver RS, call directly | ||
3139 | * the unlocked version | ||
3140 | */ | ||
3141 | rs_drv_rate_init(mvm, sta, info->band); | ||
3142 | return; | ||
3143 | } | ||
3144 | lq_sta->last_tx = jiffies; | ||
3145 | |||
3146 | /* Ignore this Tx frame response if its initial rate doesn't match | ||
3147 | * that of latest Link Quality command. There may be stragglers | ||
3148 | * from a previous Link Quality command, but we're no longer interested | ||
3149 | * in those; they're either from the "active" mode while we're trying | ||
3150 | * to check "search" mode, or a prior "search" mode after we've moved | ||
3151 | * to a new "search" mode (which might become the new "active" mode). | ||
3152 | */ | ||
3153 | table = &lq_sta->lq; | ||
3154 | lq_hwrate = le32_to_cpu(table->rs_table[0]); | ||
3155 | if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) { | ||
3156 | WARN_ON_ONCE(1); | ||
3157 | return; | ||
3158 | } | ||
3159 | |||
3160 | /* Here we actually compare this rate to the latest LQ command */ | ||
3161 | if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) { | ||
3162 | IWL_DEBUG_RATE(mvm, | ||
3163 | "tx resp color 0x%x does not match 0x%x\n", | ||
3164 | lq_color, LQ_FLAG_COLOR_GET(table->flags)); | ||
3165 | |||
3166 | /* Since rates mis-match, the last LQ command may have failed. | ||
3167 | * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with | ||
3168 | * ... driver. | ||
3169 | */ | ||
3170 | lq_sta->missed_rate_counter++; | ||
3171 | if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) { | ||
3172 | lq_sta->missed_rate_counter = 0; | ||
3173 | IWL_DEBUG_RATE(mvm, | ||
3174 | "Too many rates mismatch. Send sync LQ. rs_state %d\n", | ||
3175 | lq_sta->rs_state); | ||
3176 | iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq); | ||
3177 | } | ||
3178 | /* Regardless, ignore this status info for outdated rate */ | ||
3179 | return; | ||
3180 | } | ||
3181 | |||
3182 | /* Rate did match, so reset the missed_rate_counter */ | ||
3183 | lq_sta->missed_rate_counter = 0; | ||
3184 | |||
3185 | if (!lq_sta->search_better_tbl) { | ||
3186 | curr_tbl = &lq_sta->lq_info[lq_sta->active_tbl]; | ||
3187 | other_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)]; | ||
3188 | } else { | ||
3189 | curr_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)]; | ||
3190 | other_tbl = &lq_sta->lq_info[lq_sta->active_tbl]; | ||
3191 | } | ||
3192 | |||
3193 | if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) { | ||
3194 | IWL_DEBUG_RATE(mvm, | ||
3195 | "Neither active nor search matches tx rate\n"); | ||
3196 | tmp_tbl = &lq_sta->lq_info[lq_sta->active_tbl]; | ||
3197 | rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE"); | ||
3198 | tmp_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)]; | ||
3199 | rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH"); | ||
3200 | rs_dump_rate(mvm, &lq_rate, "ACTUAL"); | ||
3201 | |||
3202 | /* no matching table found, let's by-pass the data collection | ||
3203 | * and continue to perform rate scale to find the rate table | ||
3204 | */ | ||
3205 | rs_stay_in_table(lq_sta, true); | ||
3206 | goto done; | ||
3207 | } | ||
3208 | |||
3209 | /* Updating the frame history depends on whether packets were | ||
3210 | * aggregated. | ||
3211 | * | ||
3212 | * For aggregation, all packets were transmitted at the same rate, the | ||
3213 | * first index into rate scale table. | ||
3214 | */ | ||
3215 | if (info->flags & IEEE80211_TX_STAT_AMPDU) { | ||
3216 | rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index, | ||
3217 | info->status.ampdu_len, | ||
3218 | info->status.ampdu_ack_len, | ||
3219 | reduced_txp); | ||
3220 | |||
3221 | /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat | ||
3222 | * it as a single frame loss as we don't want the success ratio | ||
3223 | * to dip too quickly because a BA wasn't received. | ||
3224 | * For TPC, there's no need for this optimisation since we want | ||
3225 | * to recover very quickly from a bad power reduction and, | ||
3226 | * therefore we'd like the success ratio to get an immediate hit | ||
3227 | * when failing to get a BA, so we'd switch back to a lower or | ||
3228 | * zero power reduction. When FW transmits agg with a rate | ||
3229 | * different from the initial rate, it will not use reduced txp | ||
3230 | * and will send BA notification twice (one empty with reduced | ||
3231 | * txp equal to the value from LQ and one with reduced txp 0). | ||
3232 | * We need to update counters for each txp level accordingly. | ||
3233 | */ | ||
3234 | if (info->status.ampdu_ack_len == 0) | ||
3235 | info->status.ampdu_len = 1; | ||
3236 | |||
3237 | rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, | ||
3238 | tx_resp_rate.index, | ||
3239 | info->status.ampdu_len, | ||
3240 | info->status.ampdu_ack_len); | ||
3241 | |||
3242 | /* Update success/fail counts if not searching for new mode */ | ||
3243 | if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { | ||
3244 | lq_sta->total_success += info->status.ampdu_ack_len; | ||
3245 | lq_sta->total_failed += (info->status.ampdu_len - | ||
3246 | info->status.ampdu_ack_len); | ||
3247 | } | ||
3248 | } else { | ||
3249 | /* For legacy, update frame history with for each Tx retry. */ | ||
3250 | retries = info->status.rates[0].count - 1; | ||
3251 | /* HW doesn't send more than 15 retries */ | ||
3252 | retries = min(retries, 15); | ||
3253 | |||
3254 | /* The last transmission may have been successful */ | ||
3255 | legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); | ||
3256 | /* Collect data for each rate used during failed TX attempts */ | ||
3257 | for (i = 0; i <= retries; ++i) { | ||
3258 | lq_hwrate = le32_to_cpu(table->rs_table[i]); | ||
3259 | if (rs_rate_from_ucode_rate(lq_hwrate, info->band, | ||
3260 | &lq_rate)) { | ||
3261 | WARN_ON_ONCE(1); | ||
3262 | return; | ||
3263 | } | ||
3264 | |||
3265 | /* Only collect stats if retried rate is in the same RS | ||
3266 | * table as active/search. | ||
3267 | */ | ||
3268 | if (rs_rate_column_match(&lq_rate, &curr_tbl->rate)) | ||
3269 | tmp_tbl = curr_tbl; | ||
3270 | else if (rs_rate_column_match(&lq_rate, | ||
3271 | &other_tbl->rate)) | ||
3272 | tmp_tbl = other_tbl; | ||
3273 | else | ||
3274 | continue; | ||
3275 | |||
3276 | rs_collect_tpc_data(mvm, lq_sta, tmp_tbl, | ||
3277 | tx_resp_rate.index, 1, | ||
3278 | i < retries ? 0 : legacy_success, | ||
3279 | reduced_txp); | ||
3280 | rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl, | ||
3281 | tx_resp_rate.index, 1, | ||
3282 | i < retries ? 0 : legacy_success); | ||
3283 | } | ||
3284 | |||
3285 | /* Update success/fail counts if not searching for new mode */ | ||
3286 | if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { | ||
3287 | lq_sta->total_success += legacy_success; | ||
3288 | lq_sta->total_failed += retries + (1 - legacy_success); | ||
3289 | } | ||
3290 | } | ||
3291 | /* The last TX rate is cached in lq_sta; it's set in if/else above */ | ||
3292 | lq_sta->last_rate_n_flags = lq_hwrate; | ||
3293 | IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp); | ||
3294 | done: | ||
3295 | /* See if there's a better rate or modulation mode to try. */ | ||
3296 | if (sta->supp_rates[info->band]) | ||
3297 | rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp); | ||
3298 | } | ||
3299 | |||
3300 | void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | ||
3301 | int tid, struct ieee80211_tx_info *info, bool ndp) | ||
3302 | { | ||
3303 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); | ||
3304 | |||
3305 | /* If it's locked we are in middle of init flow | ||
3306 | * just wait for next tx status to update the lq_sta data | ||
3307 | */ | ||
3308 | if (!spin_trylock(&mvmsta->lq_sta.rs_drv.pers.lock)) | ||
3309 | return; | ||
3310 | |||
3311 | __iwl_mvm_rs_tx_status(mvm, sta, tid, info, ndp); | ||
3312 | spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock); | ||
3313 | } | ||
3314 | |||
3281 | #ifdef CONFIG_MAC80211_DEBUGFS | 3315 | #ifdef CONFIG_MAC80211_DEBUGFS |
3282 | static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, | 3316 | static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, |
3283 | struct iwl_lq_cmd *lq_cmd, | 3317 | struct iwl_lq_cmd *lq_cmd, |
@@ -3569,7 +3603,7 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm, | |||
3569 | 3603 | ||
3570 | bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED; | 3604 | bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED; |
3571 | bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params); | 3605 | bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params); |
3572 | iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false); | 3606 | iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd); |
3573 | 3607 | ||
3574 | ss_params |= LQ_SS_BFER_ALLOWED; | 3608 | ss_params |= LQ_SS_BFER_ALLOWED; |
3575 | IWL_DEBUG_RATE(mvm, | 3609 | IWL_DEBUG_RATE(mvm, |
@@ -3735,7 +3769,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm, | |||
3735 | 3769 | ||
3736 | if (lq_sta->pers.dbg_fixed_rate) { | 3770 | if (lq_sta->pers.dbg_fixed_rate) { |
3737 | rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL); | 3771 | rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL); |
3738 | iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false); | 3772 | iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq); |
3739 | } | 3773 | } |
3740 | } | 3774 | } |
3741 | 3775 | ||
@@ -4127,10 +4161,15 @@ static const struct rate_control_ops rs_mvm_ops_drv = { | |||
4127 | void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 4161 | void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
4128 | enum nl80211_band band, bool update) | 4162 | enum nl80211_band band, bool update) |
4129 | { | 4163 | { |
4130 | if (iwl_mvm_has_tlc_offload(mvm)) | 4164 | if (iwl_mvm_has_tlc_offload(mvm)) { |
4131 | rs_fw_rate_init(mvm, sta, band, update); | 4165 | rs_fw_rate_init(mvm, sta, band, update); |
4132 | else | 4166 | } else { |
4133 | rs_drv_rate_init(mvm, sta, band, update); | 4167 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
4168 | |||
4169 | spin_lock(&mvmsta->lq_sta.rs_drv.pers.lock); | ||
4170 | rs_drv_rate_init(mvm, sta, band); | ||
4171 | spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock); | ||
4172 | } | ||
4134 | } | 4173 | } |
4135 | 4174 | ||
4136 | int iwl_mvm_rate_control_register(void) | 4175 | int iwl_mvm_rate_control_register(void) |
@@ -4160,7 +4199,7 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, | |||
4160 | lq->flags &= ~LQ_FLAG_USE_RTS_MSK; | 4199 | lq->flags &= ~LQ_FLAG_USE_RTS_MSK; |
4161 | } | 4200 | } |
4162 | 4201 | ||
4163 | return iwl_mvm_send_lq_cmd(mvm, lq, false); | 4202 | return iwl_mvm_send_lq_cmd(mvm, lq); |
4164 | } | 4203 | } |
4165 | 4204 | ||
4166 | /** | 4205 | /** |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index f7eb60dbaf20..428642e66658 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. | 4 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. |
5 | * Copyright(c) 2015 Intel Mobile Communications GmbH | 5 | * Copyright(c) 2015 Intel Mobile Communications GmbH |
6 | * Copyright(c) 2017 Intel Deutschland GmbH | 6 | * Copyright(c) 2017 Intel Deutschland GmbH |
7 | * Copyright(c) 2018 Intel Corporation | 7 | * Copyright(c) 2018 - 2019 Intel Corporation |
8 | * | 8 | * |
9 | * Contact Information: | 9 | * Contact Information: |
10 | * Intel Linux Wireless <linuxwifi@intel.com> | 10 | * Intel Linux Wireless <linuxwifi@intel.com> |
@@ -390,6 +390,7 @@ struct iwl_lq_sta { | |||
390 | s8 last_rssi; | 390 | s8 last_rssi; |
391 | struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT]; | 391 | struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT]; |
392 | struct iwl_mvm *drv; | 392 | struct iwl_mvm *drv; |
393 | spinlock_t lock; /* for races in reinit/update table */ | ||
393 | } pers; | 394 | } pers; |
394 | }; | 395 | }; |
395 | 396 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 64f950501287..854edd7d7103 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | |||
@@ -463,20 +463,22 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, | |||
463 | } | 463 | } |
464 | 464 | ||
465 | int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, | 465 | int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, |
466 | const u8 *data, u32 count) | 466 | const u8 *data, u32 count, bool async) |
467 | { | 467 | { |
468 | struct iwl_rxq_sync_cmd *cmd; | 468 | u8 buf[sizeof(struct iwl_rxq_sync_cmd) + |
469 | sizeof(struct iwl_mvm_rss_sync_notif)]; | ||
470 | struct iwl_rxq_sync_cmd *cmd = (void *)buf; | ||
469 | u32 data_size = sizeof(*cmd) + count; | 471 | u32 data_size = sizeof(*cmd) + count; |
470 | int ret; | 472 | int ret; |
471 | 473 | ||
472 | /* should be DWORD aligned */ | 474 | /* |
473 | if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE)) | 475 | * size must be a multiple of DWORD |
476 | * Ensure we don't overflow buf | ||
477 | */ | ||
478 | if (WARN_ON(count & 3 || | ||
479 | count > sizeof(struct iwl_mvm_rss_sync_notif))) | ||
474 | return -EINVAL; | 480 | return -EINVAL; |
475 | 481 | ||
476 | cmd = kzalloc(data_size, GFP_KERNEL); | ||
477 | if (!cmd) | ||
478 | return -ENOMEM; | ||
479 | |||
480 | cmd->rxq_mask = cpu_to_le32(rxq_mask); | 482 | cmd->rxq_mask = cpu_to_le32(rxq_mask); |
481 | cmd->count = cpu_to_le32(count); | 483 | cmd->count = cpu_to_le32(count); |
482 | cmd->flags = 0; | 484 | cmd->flags = 0; |
@@ -485,9 +487,8 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, | |||
485 | ret = iwl_mvm_send_cmd_pdu(mvm, | 487 | ret = iwl_mvm_send_cmd_pdu(mvm, |
486 | WIDE_ID(DATA_PATH_GROUP, | 488 | WIDE_ID(DATA_PATH_GROUP, |
487 | TRIGGER_RX_QUEUES_NOTIF_CMD), | 489 | TRIGGER_RX_QUEUES_NOTIF_CMD), |
488 | 0, data_size, cmd); | 490 | async ? CMD_ASYNC : 0, data_size, cmd); |
489 | 491 | ||
490 | kfree(cmd); | ||
491 | return ret; | 492 | return ret; |
492 | } | 493 | } |
493 | 494 | ||
@@ -503,14 +504,31 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) | |||
503 | !ieee80211_sn_less(sn1, sn2 - buffer_size); | 504 | !ieee80211_sn_less(sn1, sn2 - buffer_size); |
504 | } | 505 | } |
505 | 506 | ||
507 | static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn) | ||
508 | { | ||
509 | struct iwl_mvm_rss_sync_notif notif = { | ||
510 | .metadata.type = IWL_MVM_RXQ_NSSN_SYNC, | ||
511 | .metadata.sync = 0, | ||
512 | .nssn_sync.baid = baid, | ||
513 | .nssn_sync.nssn = nssn, | ||
514 | }; | ||
515 | |||
516 | iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif)); | ||
517 | } | ||
518 | |||
506 | #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) | 519 | #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) |
507 | 520 | ||
521 | enum iwl_mvm_release_flags { | ||
522 | IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0), | ||
523 | IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1), | ||
524 | }; | ||
525 | |||
508 | static void iwl_mvm_release_frames(struct iwl_mvm *mvm, | 526 | static void iwl_mvm_release_frames(struct iwl_mvm *mvm, |
509 | struct ieee80211_sta *sta, | 527 | struct ieee80211_sta *sta, |
510 | struct napi_struct *napi, | 528 | struct napi_struct *napi, |
511 | struct iwl_mvm_baid_data *baid_data, | 529 | struct iwl_mvm_baid_data *baid_data, |
512 | struct iwl_mvm_reorder_buffer *reorder_buf, | 530 | struct iwl_mvm_reorder_buffer *reorder_buf, |
513 | u16 nssn) | 531 | u16 nssn, u32 flags) |
514 | { | 532 | { |
515 | struct iwl_mvm_reorder_buf_entry *entries = | 533 | struct iwl_mvm_reorder_buf_entry *entries = |
516 | &baid_data->entries[reorder_buf->queue * | 534 | &baid_data->entries[reorder_buf->queue * |
@@ -519,6 +537,18 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, | |||
519 | 537 | ||
520 | lockdep_assert_held(&reorder_buf->lock); | 538 | lockdep_assert_held(&reorder_buf->lock); |
521 | 539 | ||
540 | /* | ||
541 | * We keep the NSSN not too far behind, if we are sync'ing it and it | ||
542 | * is more than 2048 ahead of us, it must be behind us. Discard it. | ||
543 | * This can happen if the queue that hit the 0 / 2048 seqno was lagging | ||
544 | * behind and this queue already processed packets. The next if | ||
545 | * would have caught cases where this queue would have processed less | ||
546 | * than 64 packets, but it may have processed more than 64 packets. | ||
547 | */ | ||
548 | if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) && | ||
549 | ieee80211_sn_less(nssn, ssn)) | ||
550 | goto set_timer; | ||
551 | |||
522 | /* ignore nssn smaller than head sn - this can happen due to timeout */ | 552 | /* ignore nssn smaller than head sn - this can happen due to timeout */ |
523 | if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) | 553 | if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size)) |
524 | goto set_timer; | 554 | goto set_timer; |
@@ -529,6 +559,9 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, | |||
529 | struct sk_buff *skb; | 559 | struct sk_buff *skb; |
530 | 560 | ||
531 | ssn = ieee80211_sn_inc(ssn); | 561 | ssn = ieee80211_sn_inc(ssn); |
562 | if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) && | ||
563 | (ssn == 2048 || ssn == 0)) | ||
564 | iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn); | ||
532 | 565 | ||
533 | /* | 566 | /* |
534 | * Empty the list. Will have more than one frame for A-MSDU. | 567 | * Empty the list. Will have more than one frame for A-MSDU. |
@@ -615,7 +648,8 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t) | |||
615 | sta_id, sn); | 648 | sta_id, sn); |
616 | iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif, | 649 | iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif, |
617 | sta, baid_data->tid); | 650 | sta, baid_data->tid); |
618 | iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn); | 651 | iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, |
652 | buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC); | ||
619 | rcu_read_unlock(); | 653 | rcu_read_unlock(); |
620 | } else { | 654 | } else { |
621 | /* | 655 | /* |
@@ -657,7 +691,8 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, | |||
657 | spin_lock_bh(&reorder_buf->lock); | 691 | spin_lock_bh(&reorder_buf->lock); |
658 | iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf, | 692 | iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf, |
659 | ieee80211_sn_add(reorder_buf->head_sn, | 693 | ieee80211_sn_add(reorder_buf->head_sn, |
660 | reorder_buf->buf_size)); | 694 | reorder_buf->buf_size), |
695 | 0); | ||
661 | spin_unlock_bh(&reorder_buf->lock); | 696 | spin_unlock_bh(&reorder_buf->lock); |
662 | del_timer_sync(&reorder_buf->reorder_timer); | 697 | del_timer_sync(&reorder_buf->reorder_timer); |
663 | 698 | ||
@@ -665,8 +700,54 @@ out: | |||
665 | rcu_read_unlock(); | 700 | rcu_read_unlock(); |
666 | } | 701 | } |
667 | 702 | ||
668 | void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | 703 | static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm, |
669 | int queue) | 704 | struct napi_struct *napi, |
705 | u8 baid, u16 nssn, int queue, | ||
706 | u32 flags) | ||
707 | { | ||
708 | struct ieee80211_sta *sta; | ||
709 | struct iwl_mvm_reorder_buffer *reorder_buf; | ||
710 | struct iwl_mvm_baid_data *ba_data; | ||
711 | |||
712 | IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n", | ||
713 | baid, nssn); | ||
714 | |||
715 | if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID || | ||
716 | baid >= ARRAY_SIZE(mvm->baid_map))) | ||
717 | return; | ||
718 | |||
719 | rcu_read_lock(); | ||
720 | |||
721 | ba_data = rcu_dereference(mvm->baid_map[baid]); | ||
722 | if (WARN_ON_ONCE(!ba_data)) | ||
723 | goto out; | ||
724 | |||
725 | sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); | ||
726 | if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) | ||
727 | goto out; | ||
728 | |||
729 | reorder_buf = &ba_data->reorder_buf[queue]; | ||
730 | |||
731 | spin_lock_bh(&reorder_buf->lock); | ||
732 | iwl_mvm_release_frames(mvm, sta, napi, ba_data, | ||
733 | reorder_buf, nssn, flags); | ||
734 | spin_unlock_bh(&reorder_buf->lock); | ||
735 | |||
736 | out: | ||
737 | rcu_read_unlock(); | ||
738 | } | ||
739 | |||
740 | static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm, | ||
741 | struct napi_struct *napi, int queue, | ||
742 | const struct iwl_mvm_nssn_sync_data *data) | ||
743 | { | ||
744 | iwl_mvm_release_frames_from_notif(mvm, napi, data->baid, | ||
745 | data->nssn, queue, | ||
746 | IWL_MVM_RELEASE_FROM_RSS_SYNC); | ||
747 | } | ||
748 | |||
749 | void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, | ||
750 | struct iwl_rx_cmd_buffer *rxb, int queue) | ||
670 | { | 751 | { |
671 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | 752 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
672 | struct iwl_rxq_sync_notification *notif; | 753 | struct iwl_rxq_sync_notification *notif; |
@@ -687,6 +768,10 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | |||
687 | case IWL_MVM_RXQ_NOTIF_DEL_BA: | 768 | case IWL_MVM_RXQ_NOTIF_DEL_BA: |
688 | iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); | 769 | iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); |
689 | break; | 770 | break; |
771 | case IWL_MVM_RXQ_NSSN_SYNC: | ||
772 | iwl_mvm_nssn_sync(mvm, napi, queue, | ||
773 | (void *)internal_notif->data); | ||
774 | break; | ||
690 | default: | 775 | default: |
691 | WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); | 776 | WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); |
692 | } | 777 | } |
@@ -785,7 +870,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, | |||
785 | } | 870 | } |
786 | 871 | ||
787 | if (ieee80211_is_back_req(hdr->frame_control)) { | 872 | if (ieee80211_is_back_req(hdr->frame_control)) { |
788 | iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn); | 873 | iwl_mvm_release_frames(mvm, sta, napi, baid_data, |
874 | buffer, nssn, 0); | ||
789 | goto drop; | 875 | goto drop; |
790 | } | 876 | } |
791 | 877 | ||
@@ -794,7 +880,10 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, | |||
794 | * If the SN is smaller than the NSSN it might need to first go into | 880 | * If the SN is smaller than the NSSN it might need to first go into |
795 | * the reorder buffer, in which case we just release up to it and the | 881 | * the reorder buffer, in which case we just release up to it and the |
796 | * rest of the function will take care of storing it and releasing up to | 882 | * rest of the function will take care of storing it and releasing up to |
797 | * the nssn | 883 | * the nssn. |
884 | * This should not happen. This queue has been lagging and it should | ||
885 | * have been updated by a IWL_MVM_RXQ_NSSN_SYNC notification. Be nice | ||
886 | * and update the other queues. | ||
798 | */ | 887 | */ |
799 | if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, | 888 | if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, |
800 | buffer->buf_size) || | 889 | buffer->buf_size) || |
@@ -802,7 +891,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, | |||
802 | u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn; | 891 | u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn; |
803 | 892 | ||
804 | iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, | 893 | iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, |
805 | min_sn); | 894 | min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC); |
806 | } | 895 | } |
807 | 896 | ||
808 | /* drop any oudated packets */ | 897 | /* drop any oudated packets */ |
@@ -813,8 +902,23 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, | |||
813 | if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { | 902 | if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { |
814 | if (iwl_mvm_is_sn_less(buffer->head_sn, nssn, | 903 | if (iwl_mvm_is_sn_less(buffer->head_sn, nssn, |
815 | buffer->buf_size) && | 904 | buffer->buf_size) && |
816 | (!amsdu || last_subframe)) | 905 | (!amsdu || last_subframe)) { |
906 | /* | ||
907 | * If we crossed the 2048 or 0 SN, notify all the | ||
908 | * queues. This is done in order to avoid having a | ||
909 | * head_sn that lags behind for too long. When that | ||
910 | * happens, we can get to a situation where the head_sn | ||
911 | * is within the interval [nssn - buf_size : nssn] | ||
912 | * which will make us think that the nssn is a packet | ||
913 | * that we already freed because of the reordering | ||
914 | * buffer and we will ignore it. So maintain the | ||
915 | * head_sn somewhat updated across all the queues: | ||
916 | * when it crosses 0 and 2048. | ||
917 | */ | ||
918 | if (sn == 2048 || sn == 0) | ||
919 | iwl_mvm_sync_nssn(mvm, baid, sn); | ||
817 | buffer->head_sn = nssn; | 920 | buffer->head_sn = nssn; |
921 | } | ||
818 | /* No need to update AMSDU last SN - we are moving the head */ | 922 | /* No need to update AMSDU last SN - we are moving the head */ |
819 | spin_unlock_bh(&buffer->lock); | 923 | spin_unlock_bh(&buffer->lock); |
820 | return false; | 924 | return false; |
@@ -829,8 +933,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, | |||
829 | * while technically there is no hole and we can move forward. | 933 | * while technically there is no hole and we can move forward. |
830 | */ | 934 | */ |
831 | if (!buffer->num_stored && sn == buffer->head_sn) { | 935 | if (!buffer->num_stored && sn == buffer->head_sn) { |
832 | if (!amsdu || last_subframe) | 936 | if (!amsdu || last_subframe) { |
937 | if (sn == 2048 || sn == 0) | ||
938 | iwl_mvm_sync_nssn(mvm, baid, sn); | ||
833 | buffer->head_sn = ieee80211_sn_inc(buffer->head_sn); | 939 | buffer->head_sn = ieee80211_sn_inc(buffer->head_sn); |
940 | } | ||
834 | /* No need to update AMSDU last SN - we are moving the head */ | 941 | /* No need to update AMSDU last SN - we are moving the head */ |
835 | spin_unlock_bh(&buffer->lock); | 942 | spin_unlock_bh(&buffer->lock); |
836 | return false; | 943 | return false; |
@@ -875,7 +982,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, | |||
875 | * release notification with up to date NSSN. | 982 | * release notification with up to date NSSN. |
876 | */ | 983 | */ |
877 | if (!amsdu || last_subframe) | 984 | if (!amsdu || last_subframe) |
878 | iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn); | 985 | iwl_mvm_release_frames(mvm, sta, napi, baid_data, |
986 | buffer, nssn, | ||
987 | IWL_MVM_RELEASE_SEND_RSS_SYNC); | ||
879 | 988 | ||
880 | spin_unlock_bh(&buffer->lock); | 989 | spin_unlock_bh(&buffer->lock); |
881 | return true; | 990 | return true; |
@@ -1840,40 +1949,14 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, | |||
1840 | out: | 1949 | out: |
1841 | rcu_read_unlock(); | 1950 | rcu_read_unlock(); |
1842 | } | 1951 | } |
1952 | |||
1843 | void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, | 1953 | void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, |
1844 | struct iwl_rx_cmd_buffer *rxb, int queue) | 1954 | struct iwl_rx_cmd_buffer *rxb, int queue) |
1845 | { | 1955 | { |
1846 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | 1956 | struct iwl_rx_packet *pkt = rxb_addr(rxb); |
1847 | struct iwl_frame_release *release = (void *)pkt->data; | 1957 | struct iwl_frame_release *release = (void *)pkt->data; |
1848 | struct ieee80211_sta *sta; | ||
1849 | struct iwl_mvm_reorder_buffer *reorder_buf; | ||
1850 | struct iwl_mvm_baid_data *ba_data; | ||
1851 | |||
1852 | int baid = release->baid; | ||
1853 | |||
1854 | IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n", | ||
1855 | release->baid, le16_to_cpu(release->nssn)); | ||
1856 | 1958 | ||
1857 | if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) | 1959 | iwl_mvm_release_frames_from_notif(mvm, napi, release->baid, |
1858 | return; | 1960 | le16_to_cpu(release->nssn), |
1859 | 1961 | queue, 0); | |
1860 | rcu_read_lock(); | ||
1861 | |||
1862 | ba_data = rcu_dereference(mvm->baid_map[baid]); | ||
1863 | if (WARN_ON_ONCE(!ba_data)) | ||
1864 | goto out; | ||
1865 | |||
1866 | sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]); | ||
1867 | if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) | ||
1868 | goto out; | ||
1869 | |||
1870 | reorder_buf = &ba_data->reorder_buf[queue]; | ||
1871 | |||
1872 | spin_lock_bh(&reorder_buf->lock); | ||
1873 | iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf, | ||
1874 | le16_to_cpu(release->nssn)); | ||
1875 | spin_unlock_bh(&reorder_buf->lock); | ||
1876 | |||
1877 | out: | ||
1878 | rcu_read_unlock(); | ||
1879 | } | 1962 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index f545a737a92d..10f18536dd0d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c | |||
@@ -1684,6 +1684,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, | |||
1684 | */ | 1684 | */ |
1685 | if (iwl_mvm_has_tlc_offload(mvm)) | 1685 | if (iwl_mvm_has_tlc_offload(mvm)) |
1686 | iwl_mvm_rs_add_sta(mvm, mvm_sta); | 1686 | iwl_mvm_rs_add_sta(mvm, mvm_sta); |
1687 | else | ||
1688 | spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock); | ||
1687 | 1689 | ||
1688 | iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); | 1690 | iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); |
1689 | 1691 | ||
@@ -2421,7 +2423,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
2421 | 2423 | ||
2422 | static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) | 2424 | static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) |
2423 | { | 2425 | { |
2424 | struct iwl_mvm_delba_notif notif = { | 2426 | struct iwl_mvm_rss_sync_notif notif = { |
2425 | .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA, | 2427 | .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA, |
2426 | .metadata.sync = 1, | 2428 | .metadata.sync = 1, |
2427 | .delba.baid = baid, | 2429 | .delba.baid = baid, |
@@ -2972,7 +2974,7 @@ out: | |||
2972 | IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", | 2974 | IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", |
2973 | sta->addr, tid); | 2975 | sta->addr, tid); |
2974 | 2976 | ||
2975 | return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false); | 2977 | return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq); |
2976 | } | 2978 | } |
2977 | 2979 | ||
2978 | static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, | 2980 | static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 4487cc3e07c1..8d70093847cb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h | |||
@@ -343,9 +343,17 @@ struct iwl_mvm_delba_data { | |||
343 | u32 baid; | 343 | u32 baid; |
344 | } __packed; | 344 | } __packed; |
345 | 345 | ||
346 | struct iwl_mvm_delba_notif { | 346 | struct iwl_mvm_nssn_sync_data { |
347 | u32 baid; | ||
348 | u32 nssn; | ||
349 | } __packed; | ||
350 | |||
351 | struct iwl_mvm_rss_sync_notif { | ||
347 | struct iwl_mvm_internal_rxq_notif metadata; | 352 | struct iwl_mvm_internal_rxq_notif metadata; |
348 | struct iwl_mvm_delba_data delba; | 353 | union { |
354 | struct iwl_mvm_delba_data delba; | ||
355 | struct iwl_mvm_nssn_sync_data nssn_sync; | ||
356 | }; | ||
349 | } __packed; | 357 | } __packed; |
350 | 358 | ||
351 | /** | 359 | /** |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index a3e5d88f1c07..6ac114a393cc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
@@ -831,6 +831,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, | |||
831 | unsigned int tcp_payload_len; | 831 | unsigned int tcp_payload_len; |
832 | unsigned int mss = skb_shinfo(skb)->gso_size; | 832 | unsigned int mss = skb_shinfo(skb)->gso_size; |
833 | bool ipv4 = (skb->protocol == htons(ETH_P_IP)); | 833 | bool ipv4 = (skb->protocol == htons(ETH_P_IP)); |
834 | bool qos = ieee80211_is_data_qos(hdr->frame_control); | ||
834 | u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; | 835 | u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; |
835 | 836 | ||
836 | skb_shinfo(skb)->gso_size = num_subframes * mss; | 837 | skb_shinfo(skb)->gso_size = num_subframes * mss; |
@@ -864,7 +865,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, | |||
864 | if (tcp_payload_len > mss) { | 865 | if (tcp_payload_len > mss) { |
865 | skb_shinfo(tmp)->gso_size = mss; | 866 | skb_shinfo(tmp)->gso_size = mss; |
866 | } else { | 867 | } else { |
867 | if (ieee80211_is_data_qos(hdr->frame_control)) { | 868 | if (qos) { |
868 | u8 *qc; | 869 | u8 *qc; |
869 | 870 | ||
870 | if (ipv4) | 871 | if (ipv4) |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 9ecd5f09615a..b8e20a01c192 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c | |||
@@ -653,12 +653,12 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, | |||
653 | * this case to clear the state indicating that station creation is in | 653 | * this case to clear the state indicating that station creation is in |
654 | * progress. | 654 | * progress. |
655 | */ | 655 | */ |
656 | int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync) | 656 | int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq) |
657 | { | 657 | { |
658 | struct iwl_host_cmd cmd = { | 658 | struct iwl_host_cmd cmd = { |
659 | .id = LQ_CMD, | 659 | .id = LQ_CMD, |
660 | .len = { sizeof(struct iwl_lq_cmd), }, | 660 | .len = { sizeof(struct iwl_lq_cmd), }, |
661 | .flags = sync ? 0 : CMD_ASYNC, | 661 | .flags = CMD_ASYNC, |
662 | .data = { lq, }, | 662 | .data = { lq, }, |
663 | }; | 663 | }; |
664 | 664 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index ea2a03d4bf55..de711c1160d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c | |||
@@ -604,10 +604,13 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
604 | {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, | 604 | {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, |
605 | {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, | 605 | {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, |
606 | {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, | 606 | {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, |
607 | {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)}, | ||
607 | {IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)}, | 608 | {IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)}, |
608 | {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)}, | 609 | {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)}, |
609 | {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)}, | 610 | {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)}, |
610 | {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)}, | 611 | {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)}, |
612 | {IWL_PCI_DEVICE(0x2526, 0xE010, iwl9260_2ac_160_cfg)}, | ||
613 | {IWL_PCI_DEVICE(0x2526, 0xE014, iwl9260_2ac_160_cfg)}, | ||
611 | {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, | 614 | {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, |
612 | {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, | 615 | {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, |
613 | {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, | 616 | {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index fa4245d0d4a8..2f0ba7ef53b8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c | |||
@@ -435,6 +435,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, | |||
435 | DMA_TO_DEVICE); | 435 | DMA_TO_DEVICE); |
436 | } | 436 | } |
437 | 437 | ||
438 | meta->tbs = 0; | ||
439 | |||
438 | if (trans->cfg->use_tfh) { | 440 | if (trans->cfg->use_tfh) { |
439 | struct iwl_tfh_tfd *tfd_fh = (void *)tfd; | 441 | struct iwl_tfh_tfd *tfd_fh = (void *)tfd; |
440 | 442 | ||
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 3aeff7a3c3d8..f86c2891310a 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -3619,10 +3619,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb, | |||
3619 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, | 3619 | hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, |
3620 | cb->nlh->nlmsg_seq, &hwsim_genl_family, | 3620 | cb->nlh->nlmsg_seq, &hwsim_genl_family, |
3621 | NLM_F_MULTI, HWSIM_CMD_GET_RADIO); | 3621 | NLM_F_MULTI, HWSIM_CMD_GET_RADIO); |
3622 | if (!hdr) | 3622 | if (hdr) { |
3623 | genl_dump_check_consistent(cb, hdr); | ||
3624 | genlmsg_end(skb, hdr); | ||
3625 | } else { | ||
3623 | res = -EMSGSIZE; | 3626 | res = -EMSGSIZE; |
3624 | genl_dump_check_consistent(cb, hdr); | 3627 | } |
3625 | genlmsg_end(skb, hdr); | ||
3626 | } | 3628 | } |
3627 | 3629 | ||
3628 | done: | 3630 | done: |
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 3e442c7f7882..095837fba300 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h | |||
@@ -124,6 +124,7 @@ enum { | |||
124 | 124 | ||
125 | #define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S) | 125 | #define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S) |
126 | 126 | ||
127 | #define WPA_GTK_OUI_OFFSET 2 | ||
127 | #define RSN_GTK_OUI_OFFSET 2 | 128 | #define RSN_GTK_OUI_OFFSET 2 |
128 | 129 | ||
129 | #define MWIFIEX_OUI_NOT_PRESENT 0 | 130 | #define MWIFIEX_OUI_NOT_PRESENT 0 |
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 0d6d41727037..21dda385f6c6 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c | |||
@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher) | |||
181 | u8 ret = MWIFIEX_OUI_NOT_PRESENT; | 181 | u8 ret = MWIFIEX_OUI_NOT_PRESENT; |
182 | 182 | ||
183 | if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) { | 183 | if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) { |
184 | iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data; | 184 | iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data + |
185 | WPA_GTK_OUI_OFFSET); | ||
185 | oui = &mwifiex_wpa_oui[cipher][0]; | 186 | oui = &mwifiex_wpa_oui[cipher][0]; |
186 | ret = mwifiex_search_oui_in_ie(iebody, oui); | 187 | ret = mwifiex_search_oui_in_ie(iebody, oui); |
187 | if (ret) | 188 | if (ret) |
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c index e65d027b91fa..529be35ac178 100644 --- a/drivers/nfc/nfcmrvl/main.c +++ b/drivers/nfc/nfcmrvl/main.c | |||
@@ -244,7 +244,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv) | |||
244 | /* Reset possible fault of previous session */ | 244 | /* Reset possible fault of previous session */ |
245 | clear_bit(NFCMRVL_PHY_ERROR, &priv->flags); | 245 | clear_bit(NFCMRVL_PHY_ERROR, &priv->flags); |
246 | 246 | ||
247 | if (priv->config.reset_n_io) { | 247 | if (gpio_is_valid(priv->config.reset_n_io)) { |
248 | nfc_info(priv->dev, "reset the chip\n"); | 248 | nfc_info(priv->dev, "reset the chip\n"); |
249 | gpio_set_value(priv->config.reset_n_io, 0); | 249 | gpio_set_value(priv->config.reset_n_io, 0); |
250 | usleep_range(5000, 10000); | 250 | usleep_range(5000, 10000); |
@@ -255,7 +255,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv) | |||
255 | 255 | ||
256 | void nfcmrvl_chip_halt(struct nfcmrvl_private *priv) | 256 | void nfcmrvl_chip_halt(struct nfcmrvl_private *priv) |
257 | { | 257 | { |
258 | if (priv->config.reset_n_io) | 258 | if (gpio_is_valid(priv->config.reset_n_io)) |
259 | gpio_set_value(priv->config.reset_n_io, 0); | 259 | gpio_set_value(priv->config.reset_n_io, 0); |
260 | } | 260 | } |
261 | 261 | ||
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c index 9a22056e8d9e..e5a622ce4b95 100644 --- a/drivers/nfc/nfcmrvl/uart.c +++ b/drivers/nfc/nfcmrvl/uart.c | |||
@@ -26,7 +26,7 @@ | |||
26 | static unsigned int hci_muxed; | 26 | static unsigned int hci_muxed; |
27 | static unsigned int flow_control; | 27 | static unsigned int flow_control; |
28 | static unsigned int break_control; | 28 | static unsigned int break_control; |
29 | static unsigned int reset_n_io; | 29 | static int reset_n_io = -EINVAL; |
30 | 30 | ||
31 | /* | 31 | /* |
32 | ** NFCMRVL NCI OPS | 32 | ** NFCMRVL NCI OPS |
@@ -231,5 +231,5 @@ MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal."); | |||
231 | module_param(hci_muxed, uint, 0); | 231 | module_param(hci_muxed, uint, 0); |
232 | MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one."); | 232 | MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one."); |
233 | 233 | ||
234 | module_param(reset_n_io, uint, 0); | 234 | module_param(reset_n_io, int, 0); |
235 | MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal."); | 235 | MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal."); |
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c index 945cc903d8f1..888e298f610b 100644 --- a/drivers/nfc/nfcmrvl/usb.c +++ b/drivers/nfc/nfcmrvl/usb.c | |||
@@ -305,6 +305,7 @@ static int nfcmrvl_probe(struct usb_interface *intf, | |||
305 | 305 | ||
306 | /* No configuration for USB */ | 306 | /* No configuration for USB */ |
307 | memset(&config, 0, sizeof(config)); | 307 | memset(&config, 0, sizeof(config)); |
308 | config.reset_n_io = -EINVAL; | ||
308 | 309 | ||
309 | nfc_info(&udev->dev, "intf %p id %p\n", intf, id); | 310 | nfc_info(&udev->dev, "intf %p id %p\n", intf, id); |
310 | 311 | ||
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index c3e10b6ab3a4..f25f1ec5f9e9 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c | |||
@@ -333,6 +333,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev, | |||
333 | 333 | ||
334 | transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, | 334 | transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, |
335 | skb->len - 2, GFP_KERNEL); | 335 | skb->len - 2, GFP_KERNEL); |
336 | if (!transaction) | ||
337 | return -ENOMEM; | ||
336 | 338 | ||
337 | transaction->aid_len = skb->data[1]; | 339 | transaction->aid_len = skb->data[1]; |
338 | memcpy(transaction->aid, &skb->data[2], transaction->aid_len); | 340 | memcpy(transaction->aid, &skb->data[2], transaction->aid_len); |
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index 06fc542fd198..6586378cacb0 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c | |||
@@ -317,6 +317,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, | |||
317 | 317 | ||
318 | transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, | 318 | transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, |
319 | skb->len - 2, GFP_KERNEL); | 319 | skb->len - 2, GFP_KERNEL); |
320 | if (!transaction) | ||
321 | return -ENOMEM; | ||
320 | 322 | ||
321 | transaction->aid_len = skb->data[1]; | 323 | transaction->aid_len = skb->data[1]; |
322 | memcpy(transaction->aid, &skb->data[2], | 324 | memcpy(transaction->aid, &skb->data[2], |
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 62d00fffa4af..3508a79110c7 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c | |||
@@ -62,14 +62,14 @@ static ssize_t sector_size_store(struct device *dev, | |||
62 | struct nd_btt *nd_btt = to_nd_btt(dev); | 62 | struct nd_btt *nd_btt = to_nd_btt(dev); |
63 | ssize_t rc; | 63 | ssize_t rc; |
64 | 64 | ||
65 | device_lock(dev); | 65 | nd_device_lock(dev); |
66 | nvdimm_bus_lock(dev); | 66 | nvdimm_bus_lock(dev); |
67 | rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, | 67 | rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, |
68 | btt_lbasize_supported); | 68 | btt_lbasize_supported); |
69 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, | 69 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, |
70 | buf[len - 1] == '\n' ? "" : "\n"); | 70 | buf[len - 1] == '\n' ? "" : "\n"); |
71 | nvdimm_bus_unlock(dev); | 71 | nvdimm_bus_unlock(dev); |
72 | device_unlock(dev); | 72 | nd_device_unlock(dev); |
73 | 73 | ||
74 | return rc ? rc : len; | 74 | return rc ? rc : len; |
75 | } | 75 | } |
@@ -91,11 +91,11 @@ static ssize_t uuid_store(struct device *dev, | |||
91 | struct nd_btt *nd_btt = to_nd_btt(dev); | 91 | struct nd_btt *nd_btt = to_nd_btt(dev); |
92 | ssize_t rc; | 92 | ssize_t rc; |
93 | 93 | ||
94 | device_lock(dev); | 94 | nd_device_lock(dev); |
95 | rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len); | 95 | rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len); |
96 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, | 96 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, |
97 | buf[len - 1] == '\n' ? "" : "\n"); | 97 | buf[len - 1] == '\n' ? "" : "\n"); |
98 | device_unlock(dev); | 98 | nd_device_unlock(dev); |
99 | 99 | ||
100 | return rc ? rc : len; | 100 | return rc ? rc : len; |
101 | } | 101 | } |
@@ -120,13 +120,13 @@ static ssize_t namespace_store(struct device *dev, | |||
120 | struct nd_btt *nd_btt = to_nd_btt(dev); | 120 | struct nd_btt *nd_btt = to_nd_btt(dev); |
121 | ssize_t rc; | 121 | ssize_t rc; |
122 | 122 | ||
123 | device_lock(dev); | 123 | nd_device_lock(dev); |
124 | nvdimm_bus_lock(dev); | 124 | nvdimm_bus_lock(dev); |
125 | rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); | 125 | rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); |
126 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, | 126 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, |
127 | buf[len - 1] == '\n' ? "" : "\n"); | 127 | buf[len - 1] == '\n' ? "" : "\n"); |
128 | nvdimm_bus_unlock(dev); | 128 | nvdimm_bus_unlock(dev); |
129 | device_unlock(dev); | 129 | nd_device_unlock(dev); |
130 | 130 | ||
131 | return rc; | 131 | return rc; |
132 | } | 132 | } |
@@ -138,14 +138,14 @@ static ssize_t size_show(struct device *dev, | |||
138 | struct nd_btt *nd_btt = to_nd_btt(dev); | 138 | struct nd_btt *nd_btt = to_nd_btt(dev); |
139 | ssize_t rc; | 139 | ssize_t rc; |
140 | 140 | ||
141 | device_lock(dev); | 141 | nd_device_lock(dev); |
142 | if (dev->driver) | 142 | if (dev->driver) |
143 | rc = sprintf(buf, "%llu\n", nd_btt->size); | 143 | rc = sprintf(buf, "%llu\n", nd_btt->size); |
144 | else { | 144 | else { |
145 | /* no size to convey if the btt instance is disabled */ | 145 | /* no size to convey if the btt instance is disabled */ |
146 | rc = -ENXIO; | 146 | rc = -ENXIO; |
147 | } | 147 | } |
148 | device_unlock(dev); | 148 | nd_device_unlock(dev); |
149 | 149 | ||
150 | return rc; | 150 | return rc; |
151 | } | 151 | } |
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 2dca3034fee0..798c5c4aea9c 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | int nvdimm_major; | 27 | int nvdimm_major; |
28 | static int nvdimm_bus_major; | 28 | static int nvdimm_bus_major; |
29 | static struct class *nd_class; | 29 | struct class *nd_class; |
30 | static DEFINE_IDA(nd_ida); | 30 | static DEFINE_IDA(nd_ida); |
31 | 31 | ||
32 | static int to_nd_device_type(struct device *dev) | 32 | static int to_nd_device_type(struct device *dev) |
@@ -73,7 +73,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus) | |||
73 | { | 73 | { |
74 | nvdimm_bus_lock(&nvdimm_bus->dev); | 74 | nvdimm_bus_lock(&nvdimm_bus->dev); |
75 | if (--nvdimm_bus->probe_active == 0) | 75 | if (--nvdimm_bus->probe_active == 0) |
76 | wake_up(&nvdimm_bus->probe_wait); | 76 | wake_up(&nvdimm_bus->wait); |
77 | nvdimm_bus_unlock(&nvdimm_bus->dev); | 77 | nvdimm_bus_unlock(&nvdimm_bus->dev); |
78 | } | 78 | } |
79 | 79 | ||
@@ -91,7 +91,10 @@ static int nvdimm_bus_probe(struct device *dev) | |||
91 | dev->driver->name, dev_name(dev)); | 91 | dev->driver->name, dev_name(dev)); |
92 | 92 | ||
93 | nvdimm_bus_probe_start(nvdimm_bus); | 93 | nvdimm_bus_probe_start(nvdimm_bus); |
94 | debug_nvdimm_lock(dev); | ||
94 | rc = nd_drv->probe(dev); | 95 | rc = nd_drv->probe(dev); |
96 | debug_nvdimm_unlock(dev); | ||
97 | |||
95 | if (rc == 0) | 98 | if (rc == 0) |
96 | nd_region_probe_success(nvdimm_bus, dev); | 99 | nd_region_probe_success(nvdimm_bus, dev); |
97 | else | 100 | else |
@@ -113,8 +116,11 @@ static int nvdimm_bus_remove(struct device *dev) | |||
113 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); | 116 | struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); |
114 | int rc = 0; | 117 | int rc = 0; |
115 | 118 | ||
116 | if (nd_drv->remove) | 119 | if (nd_drv->remove) { |
120 | debug_nvdimm_lock(dev); | ||
117 | rc = nd_drv->remove(dev); | 121 | rc = nd_drv->remove(dev); |
122 | debug_nvdimm_unlock(dev); | ||
123 | } | ||
118 | nd_region_disable(nvdimm_bus, dev); | 124 | nd_region_disable(nvdimm_bus, dev); |
119 | 125 | ||
120 | dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name, | 126 | dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name, |
@@ -140,7 +146,7 @@ static void nvdimm_bus_shutdown(struct device *dev) | |||
140 | 146 | ||
141 | void nd_device_notify(struct device *dev, enum nvdimm_event event) | 147 | void nd_device_notify(struct device *dev, enum nvdimm_event event) |
142 | { | 148 | { |
143 | device_lock(dev); | 149 | nd_device_lock(dev); |
144 | if (dev->driver) { | 150 | if (dev->driver) { |
145 | struct nd_device_driver *nd_drv; | 151 | struct nd_device_driver *nd_drv; |
146 | 152 | ||
@@ -148,7 +154,7 @@ void nd_device_notify(struct device *dev, enum nvdimm_event event) | |||
148 | if (nd_drv->notify) | 154 | if (nd_drv->notify) |
149 | nd_drv->notify(dev, event); | 155 | nd_drv->notify(dev, event); |
150 | } | 156 | } |
151 | device_unlock(dev); | 157 | nd_device_unlock(dev); |
152 | } | 158 | } |
153 | EXPORT_SYMBOL(nd_device_notify); | 159 | EXPORT_SYMBOL(nd_device_notify); |
154 | 160 | ||
@@ -296,7 +302,7 @@ static void nvdimm_bus_release(struct device *dev) | |||
296 | kfree(nvdimm_bus); | 302 | kfree(nvdimm_bus); |
297 | } | 303 | } |
298 | 304 | ||
299 | static bool is_nvdimm_bus(struct device *dev) | 305 | bool is_nvdimm_bus(struct device *dev) |
300 | { | 306 | { |
301 | return dev->release == nvdimm_bus_release; | 307 | return dev->release == nvdimm_bus_release; |
302 | } | 308 | } |
@@ -341,7 +347,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent, | |||
341 | return NULL; | 347 | return NULL; |
342 | INIT_LIST_HEAD(&nvdimm_bus->list); | 348 | INIT_LIST_HEAD(&nvdimm_bus->list); |
343 | INIT_LIST_HEAD(&nvdimm_bus->mapping_list); | 349 | INIT_LIST_HEAD(&nvdimm_bus->mapping_list); |
344 | init_waitqueue_head(&nvdimm_bus->probe_wait); | 350 | init_waitqueue_head(&nvdimm_bus->wait); |
345 | nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL); | 351 | nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL); |
346 | if (nvdimm_bus->id < 0) { | 352 | if (nvdimm_bus->id < 0) { |
347 | kfree(nvdimm_bus); | 353 | kfree(nvdimm_bus); |
@@ -426,6 +432,9 @@ static int nd_bus_remove(struct device *dev) | |||
426 | list_del_init(&nvdimm_bus->list); | 432 | list_del_init(&nvdimm_bus->list); |
427 | mutex_unlock(&nvdimm_bus_list_mutex); | 433 | mutex_unlock(&nvdimm_bus_list_mutex); |
428 | 434 | ||
435 | wait_event(nvdimm_bus->wait, | ||
436 | atomic_read(&nvdimm_bus->ioctl_active) == 0); | ||
437 | |||
429 | nd_synchronize(); | 438 | nd_synchronize(); |
430 | device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister); | 439 | device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister); |
431 | 440 | ||
@@ -547,13 +556,38 @@ EXPORT_SYMBOL(nd_device_register); | |||
547 | 556 | ||
548 | void nd_device_unregister(struct device *dev, enum nd_async_mode mode) | 557 | void nd_device_unregister(struct device *dev, enum nd_async_mode mode) |
549 | { | 558 | { |
559 | bool killed; | ||
560 | |||
550 | switch (mode) { | 561 | switch (mode) { |
551 | case ND_ASYNC: | 562 | case ND_ASYNC: |
563 | /* | ||
564 | * In the async case this is being triggered with the | ||
565 | * device lock held and the unregistration work needs to | ||
566 | * be moved out of line iff this thread has won the | ||
567 | * race to schedule the deletion. | ||
568 | */ | ||
569 | if (!kill_device(dev)) | ||
570 | return; | ||
571 | |||
552 | get_device(dev); | 572 | get_device(dev); |
553 | async_schedule_domain(nd_async_device_unregister, dev, | 573 | async_schedule_domain(nd_async_device_unregister, dev, |
554 | &nd_async_domain); | 574 | &nd_async_domain); |
555 | break; | 575 | break; |
556 | case ND_SYNC: | 576 | case ND_SYNC: |
577 | /* | ||
578 | * In the sync case the device is being unregistered due | ||
579 | * to a state change of the parent. Claim the kill state | ||
580 | * to synchronize against other unregistration requests, | ||
581 | * or otherwise let the async path handle it if the | ||
582 | * unregistration was already queued. | ||
583 | */ | ||
584 | nd_device_lock(dev); | ||
585 | killed = kill_device(dev); | ||
586 | nd_device_unlock(dev); | ||
587 | |||
588 | if (!killed) | ||
589 | return; | ||
590 | |||
557 | nd_synchronize(); | 591 | nd_synchronize(); |
558 | device_unregister(dev); | 592 | device_unregister(dev); |
559 | break; | 593 | break; |
@@ -859,10 +893,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev) | |||
859 | do { | 893 | do { |
860 | if (nvdimm_bus->probe_active == 0) | 894 | if (nvdimm_bus->probe_active == 0) |
861 | break; | 895 | break; |
862 | nvdimm_bus_unlock(&nvdimm_bus->dev); | 896 | nvdimm_bus_unlock(dev); |
863 | wait_event(nvdimm_bus->probe_wait, | 897 | nd_device_unlock(dev); |
898 | wait_event(nvdimm_bus->wait, | ||
864 | nvdimm_bus->probe_active == 0); | 899 | nvdimm_bus->probe_active == 0); |
865 | nvdimm_bus_lock(&nvdimm_bus->dev); | 900 | nd_device_lock(dev); |
901 | nvdimm_bus_lock(dev); | ||
866 | } while (true); | 902 | } while (true); |
867 | } | 903 | } |
868 | 904 | ||
@@ -945,20 +981,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, | |||
945 | int read_only, unsigned int ioctl_cmd, unsigned long arg) | 981 | int read_only, unsigned int ioctl_cmd, unsigned long arg) |
946 | { | 982 | { |
947 | struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; | 983 | struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; |
948 | static char out_env[ND_CMD_MAX_ENVELOPE]; | ||
949 | static char in_env[ND_CMD_MAX_ENVELOPE]; | ||
950 | const struct nd_cmd_desc *desc = NULL; | 984 | const struct nd_cmd_desc *desc = NULL; |
951 | unsigned int cmd = _IOC_NR(ioctl_cmd); | 985 | unsigned int cmd = _IOC_NR(ioctl_cmd); |
952 | struct device *dev = &nvdimm_bus->dev; | 986 | struct device *dev = &nvdimm_bus->dev; |
953 | void __user *p = (void __user *) arg; | 987 | void __user *p = (void __user *) arg; |
988 | char *out_env = NULL, *in_env = NULL; | ||
954 | const char *cmd_name, *dimm_name; | 989 | const char *cmd_name, *dimm_name; |
955 | u32 in_len = 0, out_len = 0; | 990 | u32 in_len = 0, out_len = 0; |
956 | unsigned int func = cmd; | 991 | unsigned int func = cmd; |
957 | unsigned long cmd_mask; | 992 | unsigned long cmd_mask; |
958 | struct nd_cmd_pkg pkg; | 993 | struct nd_cmd_pkg pkg; |
959 | int rc, i, cmd_rc; | 994 | int rc, i, cmd_rc; |
995 | void *buf = NULL; | ||
960 | u64 buf_len = 0; | 996 | u64 buf_len = 0; |
961 | void *buf; | ||
962 | 997 | ||
963 | if (nvdimm) { | 998 | if (nvdimm) { |
964 | desc = nd_cmd_dimm_desc(cmd); | 999 | desc = nd_cmd_dimm_desc(cmd); |
@@ -989,7 +1024,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, | |||
989 | case ND_CMD_ARS_START: | 1024 | case ND_CMD_ARS_START: |
990 | case ND_CMD_CLEAR_ERROR: | 1025 | case ND_CMD_CLEAR_ERROR: |
991 | case ND_CMD_CALL: | 1026 | case ND_CMD_CALL: |
992 | dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n", | 1027 | dev_dbg(dev, "'%s' command while read-only.\n", |
993 | nvdimm ? nvdimm_cmd_name(cmd) | 1028 | nvdimm ? nvdimm_cmd_name(cmd) |
994 | : nvdimm_bus_cmd_name(cmd)); | 1029 | : nvdimm_bus_cmd_name(cmd)); |
995 | return -EPERM; | 1030 | return -EPERM; |
@@ -998,6 +1033,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, | |||
998 | } | 1033 | } |
999 | 1034 | ||
1000 | /* process an input envelope */ | 1035 | /* process an input envelope */ |
1036 | in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); | ||
1037 | if (!in_env) | ||
1038 | return -ENOMEM; | ||
1001 | for (i = 0; i < desc->in_num; i++) { | 1039 | for (i = 0; i < desc->in_num; i++) { |
1002 | u32 in_size, copy; | 1040 | u32 in_size, copy; |
1003 | 1041 | ||
@@ -1005,14 +1043,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, | |||
1005 | if (in_size == UINT_MAX) { | 1043 | if (in_size == UINT_MAX) { |
1006 | dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n", | 1044 | dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n", |
1007 | __func__, dimm_name, cmd_name, i); | 1045 | __func__, dimm_name, cmd_name, i); |
1008 | return -ENXIO; | 1046 | rc = -ENXIO; |
1047 | goto out; | ||
1009 | } | 1048 | } |
1010 | if (in_len < sizeof(in_env)) | 1049 | if (in_len < ND_CMD_MAX_ENVELOPE) |
1011 | copy = min_t(u32, sizeof(in_env) - in_len, in_size); | 1050 | copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size); |
1012 | else | 1051 | else |
1013 | copy = 0; | 1052 | copy = 0; |
1014 | if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) | 1053 | if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) { |
1015 | return -EFAULT; | 1054 | rc = -EFAULT; |
1055 | goto out; | ||
1056 | } | ||
1016 | in_len += in_size; | 1057 | in_len += in_size; |
1017 | } | 1058 | } |
1018 | 1059 | ||
@@ -1024,6 +1065,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, | |||
1024 | } | 1065 | } |
1025 | 1066 | ||
1026 | /* process an output envelope */ | 1067 | /* process an output envelope */ |
1068 | out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); | ||
1069 | if (!out_env) { | ||
1070 | rc = -ENOMEM; | ||
1071 | goto out; | ||
1072 | } | ||
1073 | |||
1027 | for (i = 0; i < desc->out_num; i++) { | 1074 | for (i = 0; i < desc->out_num; i++) { |
1028 | u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, | 1075 | u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, |
1029 | (u32 *) in_env, (u32 *) out_env, 0); | 1076 | (u32 *) in_env, (u32 *) out_env, 0); |
@@ -1032,15 +1079,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, | |||
1032 | if (out_size == UINT_MAX) { | 1079 | if (out_size == UINT_MAX) { |
1033 | dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n", | 1080 | dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n", |
1034 | dimm_name, cmd_name, i); | 1081 | dimm_name, cmd_name, i); |
1035 | return -EFAULT; | 1082 | rc = -EFAULT; |
1083 | goto out; | ||
1036 | } | 1084 | } |
1037 | if (out_len < sizeof(out_env)) | 1085 | if (out_len < ND_CMD_MAX_ENVELOPE) |
1038 | copy = min_t(u32, sizeof(out_env) - out_len, out_size); | 1086 | copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size); |
1039 | else | 1087 | else |
1040 | copy = 0; | 1088 | copy = 0; |
1041 | if (copy && copy_from_user(&out_env[out_len], | 1089 | if (copy && copy_from_user(&out_env[out_len], |
1042 | p + in_len + out_len, copy)) | 1090 | p + in_len + out_len, copy)) { |
1043 | return -EFAULT; | 1091 | rc = -EFAULT; |
1092 | goto out; | ||
1093 | } | ||
1044 | out_len += out_size; | 1094 | out_len += out_size; |
1045 | } | 1095 | } |
1046 | 1096 | ||
@@ -1048,19 +1098,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, | |||
1048 | if (buf_len > ND_IOCTL_MAX_BUFLEN) { | 1098 | if (buf_len > ND_IOCTL_MAX_BUFLEN) { |
1049 | dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name, | 1099 | dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name, |
1050 | cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN); | 1100 | cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN); |
1051 | return -EINVAL; | 1101 | rc = -EINVAL; |
1102 | goto out; | ||
1052 | } | 1103 | } |
1053 | 1104 | ||
1054 | buf = vmalloc(buf_len); | 1105 | buf = vmalloc(buf_len); |
1055 | if (!buf) | 1106 | if (!buf) { |
1056 | return -ENOMEM; | 1107 | rc = -ENOMEM; |
1108 | goto out; | ||
1109 | } | ||
1057 | 1110 | ||
1058 | if (copy_from_user(buf, p, buf_len)) { | 1111 | if (copy_from_user(buf, p, buf_len)) { |
1059 | rc = -EFAULT; | 1112 | rc = -EFAULT; |
1060 | goto out; | 1113 | goto out; |
1061 | } | 1114 | } |
1062 | 1115 | ||
1063 | nvdimm_bus_lock(&nvdimm_bus->dev); | 1116 | nd_device_lock(dev); |
1117 | nvdimm_bus_lock(dev); | ||
1064 | rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf); | 1118 | rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf); |
1065 | if (rc) | 1119 | if (rc) |
1066 | goto out_unlock; | 1120 | goto out_unlock; |
@@ -1075,39 +1129,24 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, | |||
1075 | nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address, | 1129 | nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address, |
1076 | clear_err->cleared); | 1130 | clear_err->cleared); |
1077 | } | 1131 | } |
1078 | nvdimm_bus_unlock(&nvdimm_bus->dev); | ||
1079 | 1132 | ||
1080 | if (copy_to_user(p, buf, buf_len)) | 1133 | if (copy_to_user(p, buf, buf_len)) |
1081 | rc = -EFAULT; | 1134 | rc = -EFAULT; |
1082 | 1135 | ||
1083 | vfree(buf); | 1136 | out_unlock: |
1084 | return rc; | 1137 | nvdimm_bus_unlock(dev); |
1085 | 1138 | nd_device_unlock(dev); | |
1086 | out_unlock: | 1139 | out: |
1087 | nvdimm_bus_unlock(&nvdimm_bus->dev); | 1140 | kfree(in_env); |
1088 | out: | 1141 | kfree(out_env); |
1089 | vfree(buf); | 1142 | vfree(buf); |
1090 | return rc; | 1143 | return rc; |
1091 | } | 1144 | } |
1092 | 1145 | ||
1093 | static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 1146 | enum nd_ioctl_mode { |
1094 | { | 1147 | BUS_IOCTL, |
1095 | long id = (long) file->private_data; | 1148 | DIMM_IOCTL, |
1096 | int rc = -ENXIO, ro; | 1149 | }; |
1097 | struct nvdimm_bus *nvdimm_bus; | ||
1098 | |||
1099 | ro = ((file->f_flags & O_ACCMODE) == O_RDONLY); | ||
1100 | mutex_lock(&nvdimm_bus_list_mutex); | ||
1101 | list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) { | ||
1102 | if (nvdimm_bus->id == id) { | ||
1103 | rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg); | ||
1104 | break; | ||
1105 | } | ||
1106 | } | ||
1107 | mutex_unlock(&nvdimm_bus_list_mutex); | ||
1108 | |||
1109 | return rc; | ||
1110 | } | ||
1111 | 1150 | ||
1112 | static int match_dimm(struct device *dev, void *data) | 1151 | static int match_dimm(struct device *dev, void *data) |
1113 | { | 1152 | { |
@@ -1122,31 +1161,62 @@ static int match_dimm(struct device *dev, void *data) | |||
1122 | return 0; | 1161 | return 0; |
1123 | } | 1162 | } |
1124 | 1163 | ||
1125 | static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 1164 | static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg, |
1165 | enum nd_ioctl_mode mode) | ||
1166 | |||
1126 | { | 1167 | { |
1127 | int rc = -ENXIO, ro; | 1168 | struct nvdimm_bus *nvdimm_bus, *found = NULL; |
1128 | struct nvdimm_bus *nvdimm_bus; | 1169 | long id = (long) file->private_data; |
1170 | struct nvdimm *nvdimm = NULL; | ||
1171 | int rc, ro; | ||
1129 | 1172 | ||
1130 | ro = ((file->f_flags & O_ACCMODE) == O_RDONLY); | 1173 | ro = ((file->f_flags & O_ACCMODE) == O_RDONLY); |
1131 | mutex_lock(&nvdimm_bus_list_mutex); | 1174 | mutex_lock(&nvdimm_bus_list_mutex); |
1132 | list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) { | 1175 | list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) { |
1133 | struct device *dev = device_find_child(&nvdimm_bus->dev, | 1176 | if (mode == DIMM_IOCTL) { |
1134 | file->private_data, match_dimm); | 1177 | struct device *dev; |
1135 | struct nvdimm *nvdimm; | 1178 | |
1136 | 1179 | dev = device_find_child(&nvdimm_bus->dev, | |
1137 | if (!dev) | 1180 | file->private_data, match_dimm); |
1138 | continue; | 1181 | if (!dev) |
1182 | continue; | ||
1183 | nvdimm = to_nvdimm(dev); | ||
1184 | found = nvdimm_bus; | ||
1185 | } else if (nvdimm_bus->id == id) { | ||
1186 | found = nvdimm_bus; | ||
1187 | } | ||
1139 | 1188 | ||
1140 | nvdimm = to_nvdimm(dev); | 1189 | if (found) { |
1141 | rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg); | 1190 | atomic_inc(&nvdimm_bus->ioctl_active); |
1142 | put_device(dev); | 1191 | break; |
1143 | break; | 1192 | } |
1144 | } | 1193 | } |
1145 | mutex_unlock(&nvdimm_bus_list_mutex); | 1194 | mutex_unlock(&nvdimm_bus_list_mutex); |
1146 | 1195 | ||
1196 | if (!found) | ||
1197 | return -ENXIO; | ||
1198 | |||
1199 | nvdimm_bus = found; | ||
1200 | rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg); | ||
1201 | |||
1202 | if (nvdimm) | ||
1203 | put_device(&nvdimm->dev); | ||
1204 | if (atomic_dec_and_test(&nvdimm_bus->ioctl_active)) | ||
1205 | wake_up(&nvdimm_bus->wait); | ||
1206 | |||
1147 | return rc; | 1207 | return rc; |
1148 | } | 1208 | } |
1149 | 1209 | ||
1210 | static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
1211 | { | ||
1212 | return nd_ioctl(file, cmd, arg, BUS_IOCTL); | ||
1213 | } | ||
1214 | |||
1215 | static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
1216 | { | ||
1217 | return nd_ioctl(file, cmd, arg, DIMM_IOCTL); | ||
1218 | } | ||
1219 | |||
1150 | static int nd_open(struct inode *inode, struct file *file) | 1220 | static int nd_open(struct inode *inode, struct file *file) |
1151 | { | 1221 | { |
1152 | long minor = iminor(inode); | 1222 | long minor = iminor(inode); |
@@ -1158,16 +1228,16 @@ static int nd_open(struct inode *inode, struct file *file) | |||
1158 | static const struct file_operations nvdimm_bus_fops = { | 1228 | static const struct file_operations nvdimm_bus_fops = { |
1159 | .owner = THIS_MODULE, | 1229 | .owner = THIS_MODULE, |
1160 | .open = nd_open, | 1230 | .open = nd_open, |
1161 | .unlocked_ioctl = nd_ioctl, | 1231 | .unlocked_ioctl = bus_ioctl, |
1162 | .compat_ioctl = nd_ioctl, | 1232 | .compat_ioctl = bus_ioctl, |
1163 | .llseek = noop_llseek, | 1233 | .llseek = noop_llseek, |
1164 | }; | 1234 | }; |
1165 | 1235 | ||
1166 | static const struct file_operations nvdimm_fops = { | 1236 | static const struct file_operations nvdimm_fops = { |
1167 | .owner = THIS_MODULE, | 1237 | .owner = THIS_MODULE, |
1168 | .open = nd_open, | 1238 | .open = nd_open, |
1169 | .unlocked_ioctl = nvdimm_ioctl, | 1239 | .unlocked_ioctl = dimm_ioctl, |
1170 | .compat_ioctl = nvdimm_ioctl, | 1240 | .compat_ioctl = dimm_ioctl, |
1171 | .llseek = noop_llseek, | 1241 | .llseek = noop_llseek, |
1172 | }; | 1242 | }; |
1173 | 1243 | ||
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 5e1f060547bf..9204f1e9fd14 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c | |||
@@ -246,7 +246,7 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf, | |||
246 | * | 246 | * |
247 | * Enforce that uuids can only be changed while the device is disabled | 247 | * Enforce that uuids can only be changed while the device is disabled |
248 | * (driver detached) | 248 | * (driver detached) |
249 | * LOCKING: expects device_lock() is held on entry | 249 | * LOCKING: expects nd_device_lock() is held on entry |
250 | */ | 250 | */ |
251 | int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf, | 251 | int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf, |
252 | size_t len) | 252 | size_t len) |
@@ -347,15 +347,15 @@ static DEVICE_ATTR_RO(provider); | |||
347 | 347 | ||
348 | static int flush_namespaces(struct device *dev, void *data) | 348 | static int flush_namespaces(struct device *dev, void *data) |
349 | { | 349 | { |
350 | device_lock(dev); | 350 | nd_device_lock(dev); |
351 | device_unlock(dev); | 351 | nd_device_unlock(dev); |
352 | return 0; | 352 | return 0; |
353 | } | 353 | } |
354 | 354 | ||
355 | static int flush_regions_dimms(struct device *dev, void *data) | 355 | static int flush_regions_dimms(struct device *dev, void *data) |
356 | { | 356 | { |
357 | device_lock(dev); | 357 | nd_device_lock(dev); |
358 | device_unlock(dev); | 358 | nd_device_unlock(dev); |
359 | device_for_each_child(dev, NULL, flush_namespaces); | 359 | device_for_each_child(dev, NULL, flush_namespaces); |
360 | return 0; | 360 | return 0; |
361 | } | 361 | } |
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index dfecd6e17043..29a065e769ea 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c | |||
@@ -484,12 +484,12 @@ static ssize_t security_store(struct device *dev, | |||
484 | * done while probing is idle and the DIMM is not in active use | 484 | * done while probing is idle and the DIMM is not in active use |
485 | * in any region. | 485 | * in any region. |
486 | */ | 486 | */ |
487 | device_lock(dev); | 487 | nd_device_lock(dev); |
488 | nvdimm_bus_lock(dev); | 488 | nvdimm_bus_lock(dev); |
489 | wait_nvdimm_bus_probe_idle(dev); | 489 | wait_nvdimm_bus_probe_idle(dev); |
490 | rc = __security_store(dev, buf, len); | 490 | rc = __security_store(dev, buf, len); |
491 | nvdimm_bus_unlock(dev); | 491 | nvdimm_bus_unlock(dev); |
492 | device_unlock(dev); | 492 | nd_device_unlock(dev); |
493 | 493 | ||
494 | return rc; | 494 | return rc; |
495 | } | 495 | } |
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 2d8d7e554877..a16e52251a30 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c | |||
@@ -410,7 +410,7 @@ static ssize_t alt_name_store(struct device *dev, | |||
410 | struct nd_region *nd_region = to_nd_region(dev->parent); | 410 | struct nd_region *nd_region = to_nd_region(dev->parent); |
411 | ssize_t rc; | 411 | ssize_t rc; |
412 | 412 | ||
413 | device_lock(dev); | 413 | nd_device_lock(dev); |
414 | nvdimm_bus_lock(dev); | 414 | nvdimm_bus_lock(dev); |
415 | wait_nvdimm_bus_probe_idle(dev); | 415 | wait_nvdimm_bus_probe_idle(dev); |
416 | rc = __alt_name_store(dev, buf, len); | 416 | rc = __alt_name_store(dev, buf, len); |
@@ -418,7 +418,7 @@ static ssize_t alt_name_store(struct device *dev, | |||
418 | rc = nd_namespace_label_update(nd_region, dev); | 418 | rc = nd_namespace_label_update(nd_region, dev); |
419 | dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); | 419 | dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); |
420 | nvdimm_bus_unlock(dev); | 420 | nvdimm_bus_unlock(dev); |
421 | device_unlock(dev); | 421 | nd_device_unlock(dev); |
422 | 422 | ||
423 | return rc < 0 ? rc : len; | 423 | return rc < 0 ? rc : len; |
424 | } | 424 | } |
@@ -1077,7 +1077,7 @@ static ssize_t size_store(struct device *dev, | |||
1077 | if (rc) | 1077 | if (rc) |
1078 | return rc; | 1078 | return rc; |
1079 | 1079 | ||
1080 | device_lock(dev); | 1080 | nd_device_lock(dev); |
1081 | nvdimm_bus_lock(dev); | 1081 | nvdimm_bus_lock(dev); |
1082 | wait_nvdimm_bus_probe_idle(dev); | 1082 | wait_nvdimm_bus_probe_idle(dev); |
1083 | rc = __size_store(dev, val); | 1083 | rc = __size_store(dev, val); |
@@ -1103,7 +1103,7 @@ static ssize_t size_store(struct device *dev, | |||
1103 | dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); | 1103 | dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); |
1104 | 1104 | ||
1105 | nvdimm_bus_unlock(dev); | 1105 | nvdimm_bus_unlock(dev); |
1106 | device_unlock(dev); | 1106 | nd_device_unlock(dev); |
1107 | 1107 | ||
1108 | return rc < 0 ? rc : len; | 1108 | return rc < 0 ? rc : len; |
1109 | } | 1109 | } |
@@ -1286,7 +1286,7 @@ static ssize_t uuid_store(struct device *dev, | |||
1286 | } else | 1286 | } else |
1287 | return -ENXIO; | 1287 | return -ENXIO; |
1288 | 1288 | ||
1289 | device_lock(dev); | 1289 | nd_device_lock(dev); |
1290 | nvdimm_bus_lock(dev); | 1290 | nvdimm_bus_lock(dev); |
1291 | wait_nvdimm_bus_probe_idle(dev); | 1291 | wait_nvdimm_bus_probe_idle(dev); |
1292 | if (to_ndns(dev)->claim) | 1292 | if (to_ndns(dev)->claim) |
@@ -1302,7 +1302,7 @@ static ssize_t uuid_store(struct device *dev, | |||
1302 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, | 1302 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, |
1303 | buf[len - 1] == '\n' ? "" : "\n"); | 1303 | buf[len - 1] == '\n' ? "" : "\n"); |
1304 | nvdimm_bus_unlock(dev); | 1304 | nvdimm_bus_unlock(dev); |
1305 | device_unlock(dev); | 1305 | nd_device_unlock(dev); |
1306 | 1306 | ||
1307 | return rc < 0 ? rc : len; | 1307 | return rc < 0 ? rc : len; |
1308 | } | 1308 | } |
@@ -1376,7 +1376,7 @@ static ssize_t sector_size_store(struct device *dev, | |||
1376 | } else | 1376 | } else |
1377 | return -ENXIO; | 1377 | return -ENXIO; |
1378 | 1378 | ||
1379 | device_lock(dev); | 1379 | nd_device_lock(dev); |
1380 | nvdimm_bus_lock(dev); | 1380 | nvdimm_bus_lock(dev); |
1381 | if (to_ndns(dev)->claim) | 1381 | if (to_ndns(dev)->claim) |
1382 | rc = -EBUSY; | 1382 | rc = -EBUSY; |
@@ -1387,7 +1387,7 @@ static ssize_t sector_size_store(struct device *dev, | |||
1387 | dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote", | 1387 | dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote", |
1388 | buf, buf[len - 1] == '\n' ? "" : "\n"); | 1388 | buf, buf[len - 1] == '\n' ? "" : "\n"); |
1389 | nvdimm_bus_unlock(dev); | 1389 | nvdimm_bus_unlock(dev); |
1390 | device_unlock(dev); | 1390 | nd_device_unlock(dev); |
1391 | 1391 | ||
1392 | return rc ? rc : len; | 1392 | return rc ? rc : len; |
1393 | } | 1393 | } |
@@ -1502,9 +1502,9 @@ static ssize_t holder_show(struct device *dev, | |||
1502 | struct nd_namespace_common *ndns = to_ndns(dev); | 1502 | struct nd_namespace_common *ndns = to_ndns(dev); |
1503 | ssize_t rc; | 1503 | ssize_t rc; |
1504 | 1504 | ||
1505 | device_lock(dev); | 1505 | nd_device_lock(dev); |
1506 | rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : ""); | 1506 | rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : ""); |
1507 | device_unlock(dev); | 1507 | nd_device_unlock(dev); |
1508 | 1508 | ||
1509 | return rc; | 1509 | return rc; |
1510 | } | 1510 | } |
@@ -1541,7 +1541,7 @@ static ssize_t holder_class_store(struct device *dev, | |||
1541 | struct nd_region *nd_region = to_nd_region(dev->parent); | 1541 | struct nd_region *nd_region = to_nd_region(dev->parent); |
1542 | ssize_t rc; | 1542 | ssize_t rc; |
1543 | 1543 | ||
1544 | device_lock(dev); | 1544 | nd_device_lock(dev); |
1545 | nvdimm_bus_lock(dev); | 1545 | nvdimm_bus_lock(dev); |
1546 | wait_nvdimm_bus_probe_idle(dev); | 1546 | wait_nvdimm_bus_probe_idle(dev); |
1547 | rc = __holder_class_store(dev, buf); | 1547 | rc = __holder_class_store(dev, buf); |
@@ -1549,7 +1549,7 @@ static ssize_t holder_class_store(struct device *dev, | |||
1549 | rc = nd_namespace_label_update(nd_region, dev); | 1549 | rc = nd_namespace_label_update(nd_region, dev); |
1550 | dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); | 1550 | dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); |
1551 | nvdimm_bus_unlock(dev); | 1551 | nvdimm_bus_unlock(dev); |
1552 | device_unlock(dev); | 1552 | nd_device_unlock(dev); |
1553 | 1553 | ||
1554 | return rc < 0 ? rc : len; | 1554 | return rc < 0 ? rc : len; |
1555 | } | 1555 | } |
@@ -1560,7 +1560,7 @@ static ssize_t holder_class_show(struct device *dev, | |||
1560 | struct nd_namespace_common *ndns = to_ndns(dev); | 1560 | struct nd_namespace_common *ndns = to_ndns(dev); |
1561 | ssize_t rc; | 1561 | ssize_t rc; |
1562 | 1562 | ||
1563 | device_lock(dev); | 1563 | nd_device_lock(dev); |
1564 | if (ndns->claim_class == NVDIMM_CCLASS_NONE) | 1564 | if (ndns->claim_class == NVDIMM_CCLASS_NONE) |
1565 | rc = sprintf(buf, "\n"); | 1565 | rc = sprintf(buf, "\n"); |
1566 | else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) || | 1566 | else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) || |
@@ -1572,7 +1572,7 @@ static ssize_t holder_class_show(struct device *dev, | |||
1572 | rc = sprintf(buf, "dax\n"); | 1572 | rc = sprintf(buf, "dax\n"); |
1573 | else | 1573 | else |
1574 | rc = sprintf(buf, "<unknown>\n"); | 1574 | rc = sprintf(buf, "<unknown>\n"); |
1575 | device_unlock(dev); | 1575 | nd_device_unlock(dev); |
1576 | 1576 | ||
1577 | return rc; | 1577 | return rc; |
1578 | } | 1578 | } |
@@ -1586,7 +1586,7 @@ static ssize_t mode_show(struct device *dev, | |||
1586 | char *mode; | 1586 | char *mode; |
1587 | ssize_t rc; | 1587 | ssize_t rc; |
1588 | 1588 | ||
1589 | device_lock(dev); | 1589 | nd_device_lock(dev); |
1590 | claim = ndns->claim; | 1590 | claim = ndns->claim; |
1591 | if (claim && is_nd_btt(claim)) | 1591 | if (claim && is_nd_btt(claim)) |
1592 | mode = "safe"; | 1592 | mode = "safe"; |
@@ -1599,7 +1599,7 @@ static ssize_t mode_show(struct device *dev, | |||
1599 | else | 1599 | else |
1600 | mode = "raw"; | 1600 | mode = "raw"; |
1601 | rc = sprintf(buf, "%s\n", mode); | 1601 | rc = sprintf(buf, "%s\n", mode); |
1602 | device_unlock(dev); | 1602 | nd_device_unlock(dev); |
1603 | 1603 | ||
1604 | return rc; | 1604 | return rc; |
1605 | } | 1605 | } |
@@ -1703,8 +1703,8 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) | |||
1703 | * Flush any in-progess probes / removals in the driver | 1703 | * Flush any in-progess probes / removals in the driver |
1704 | * for the raw personality of this namespace. | 1704 | * for the raw personality of this namespace. |
1705 | */ | 1705 | */ |
1706 | device_lock(&ndns->dev); | 1706 | nd_device_lock(&ndns->dev); |
1707 | device_unlock(&ndns->dev); | 1707 | nd_device_unlock(&ndns->dev); |
1708 | if (ndns->dev.driver) { | 1708 | if (ndns->dev.driver) { |
1709 | dev_dbg(&ndns->dev, "is active, can't bind %s\n", | 1709 | dev_dbg(&ndns->dev, "is active, can't bind %s\n", |
1710 | dev_name(dev)); | 1710 | dev_name(dev)); |
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 391e88de3a29..0ac52b6eb00e 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/sizes.h> | 9 | #include <linux/sizes.h> |
10 | #include <linux/mutex.h> | 10 | #include <linux/mutex.h> |
11 | #include <linux/nd.h> | 11 | #include <linux/nd.h> |
12 | #include "nd.h" | ||
12 | 13 | ||
13 | extern struct list_head nvdimm_bus_list; | 14 | extern struct list_head nvdimm_bus_list; |
14 | extern struct mutex nvdimm_bus_list_mutex; | 15 | extern struct mutex nvdimm_bus_list_mutex; |
@@ -17,10 +18,11 @@ extern struct workqueue_struct *nvdimm_wq; | |||
17 | 18 | ||
18 | struct nvdimm_bus { | 19 | struct nvdimm_bus { |
19 | struct nvdimm_bus_descriptor *nd_desc; | 20 | struct nvdimm_bus_descriptor *nd_desc; |
20 | wait_queue_head_t probe_wait; | 21 | wait_queue_head_t wait; |
21 | struct list_head list; | 22 | struct list_head list; |
22 | struct device dev; | 23 | struct device dev; |
23 | int id, probe_active; | 24 | int id, probe_active; |
25 | atomic_t ioctl_active; | ||
24 | struct list_head mapping_list; | 26 | struct list_head mapping_list; |
25 | struct mutex reconfig_mutex; | 27 | struct mutex reconfig_mutex; |
26 | struct badrange badrange; | 28 | struct badrange badrange; |
@@ -181,4 +183,71 @@ ssize_t nd_namespace_store(struct device *dev, | |||
181 | struct nd_namespace_common **_ndns, const char *buf, | 183 | struct nd_namespace_common **_ndns, const char *buf, |
182 | size_t len); | 184 | size_t len); |
183 | struct nd_pfn *to_nd_pfn_safe(struct device *dev); | 185 | struct nd_pfn *to_nd_pfn_safe(struct device *dev); |
186 | bool is_nvdimm_bus(struct device *dev); | ||
187 | |||
188 | #ifdef CONFIG_PROVE_LOCKING | ||
189 | extern struct class *nd_class; | ||
190 | |||
191 | enum { | ||
192 | LOCK_BUS, | ||
193 | LOCK_NDCTL, | ||
194 | LOCK_REGION, | ||
195 | LOCK_DIMM = LOCK_REGION, | ||
196 | LOCK_NAMESPACE, | ||
197 | LOCK_CLAIM, | ||
198 | }; | ||
199 | |||
200 | static inline void debug_nvdimm_lock(struct device *dev) | ||
201 | { | ||
202 | if (is_nd_region(dev)) | ||
203 | mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION); | ||
204 | else if (is_nvdimm(dev)) | ||
205 | mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM); | ||
206 | else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev)) | ||
207 | mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM); | ||
208 | else if (dev->parent && (is_nd_region(dev->parent))) | ||
209 | mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE); | ||
210 | else if (is_nvdimm_bus(dev)) | ||
211 | mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS); | ||
212 | else if (dev->class && dev->class == nd_class) | ||
213 | mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL); | ||
214 | else | ||
215 | dev_WARN(dev, "unknown lock level\n"); | ||
216 | } | ||
217 | |||
218 | static inline void debug_nvdimm_unlock(struct device *dev) | ||
219 | { | ||
220 | mutex_unlock(&dev->lockdep_mutex); | ||
221 | } | ||
222 | |||
223 | static inline void nd_device_lock(struct device *dev) | ||
224 | { | ||
225 | device_lock(dev); | ||
226 | debug_nvdimm_lock(dev); | ||
227 | } | ||
228 | |||
229 | static inline void nd_device_unlock(struct device *dev) | ||
230 | { | ||
231 | debug_nvdimm_unlock(dev); | ||
232 | device_unlock(dev); | ||
233 | } | ||
234 | #else | ||
235 | static inline void nd_device_lock(struct device *dev) | ||
236 | { | ||
237 | device_lock(dev); | ||
238 | } | ||
239 | |||
240 | static inline void nd_device_unlock(struct device *dev) | ||
241 | { | ||
242 | device_unlock(dev); | ||
243 | } | ||
244 | |||
245 | static inline void debug_nvdimm_lock(struct device *dev) | ||
246 | { | ||
247 | } | ||
248 | |||
249 | static inline void debug_nvdimm_unlock(struct device *dev) | ||
250 | { | ||
251 | } | ||
252 | #endif | ||
184 | #endif /* __ND_CORE_H__ */ | 253 | #endif /* __ND_CORE_H__ */ |
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index df2bdbd22450..3e7b11cf1aae 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c | |||
@@ -67,7 +67,7 @@ static ssize_t mode_store(struct device *dev, | |||
67 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); | 67 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); |
68 | ssize_t rc = 0; | 68 | ssize_t rc = 0; |
69 | 69 | ||
70 | device_lock(dev); | 70 | nd_device_lock(dev); |
71 | nvdimm_bus_lock(dev); | 71 | nvdimm_bus_lock(dev); |
72 | if (dev->driver) | 72 | if (dev->driver) |
73 | rc = -EBUSY; | 73 | rc = -EBUSY; |
@@ -89,7 +89,7 @@ static ssize_t mode_store(struct device *dev, | |||
89 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, | 89 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, |
90 | buf[len - 1] == '\n' ? "" : "\n"); | 90 | buf[len - 1] == '\n' ? "" : "\n"); |
91 | nvdimm_bus_unlock(dev); | 91 | nvdimm_bus_unlock(dev); |
92 | device_unlock(dev); | 92 | nd_device_unlock(dev); |
93 | 93 | ||
94 | return rc ? rc : len; | 94 | return rc ? rc : len; |
95 | } | 95 | } |
@@ -132,14 +132,14 @@ static ssize_t align_store(struct device *dev, | |||
132 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); | 132 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); |
133 | ssize_t rc; | 133 | ssize_t rc; |
134 | 134 | ||
135 | device_lock(dev); | 135 | nd_device_lock(dev); |
136 | nvdimm_bus_lock(dev); | 136 | nvdimm_bus_lock(dev); |
137 | rc = nd_size_select_store(dev, buf, &nd_pfn->align, | 137 | rc = nd_size_select_store(dev, buf, &nd_pfn->align, |
138 | nd_pfn_supported_alignments()); | 138 | nd_pfn_supported_alignments()); |
139 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, | 139 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, |
140 | buf[len - 1] == '\n' ? "" : "\n"); | 140 | buf[len - 1] == '\n' ? "" : "\n"); |
141 | nvdimm_bus_unlock(dev); | 141 | nvdimm_bus_unlock(dev); |
142 | device_unlock(dev); | 142 | nd_device_unlock(dev); |
143 | 143 | ||
144 | return rc ? rc : len; | 144 | return rc ? rc : len; |
145 | } | 145 | } |
@@ -161,11 +161,11 @@ static ssize_t uuid_store(struct device *dev, | |||
161 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); | 161 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); |
162 | ssize_t rc; | 162 | ssize_t rc; |
163 | 163 | ||
164 | device_lock(dev); | 164 | nd_device_lock(dev); |
165 | rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len); | 165 | rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len); |
166 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, | 166 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, |
167 | buf[len - 1] == '\n' ? "" : "\n"); | 167 | buf[len - 1] == '\n' ? "" : "\n"); |
168 | device_unlock(dev); | 168 | nd_device_unlock(dev); |
169 | 169 | ||
170 | return rc ? rc : len; | 170 | return rc ? rc : len; |
171 | } | 171 | } |
@@ -190,13 +190,13 @@ static ssize_t namespace_store(struct device *dev, | |||
190 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); | 190 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); |
191 | ssize_t rc; | 191 | ssize_t rc; |
192 | 192 | ||
193 | device_lock(dev); | 193 | nd_device_lock(dev); |
194 | nvdimm_bus_lock(dev); | 194 | nvdimm_bus_lock(dev); |
195 | rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); | 195 | rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); |
196 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, | 196 | dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, |
197 | buf[len - 1] == '\n' ? "" : "\n"); | 197 | buf[len - 1] == '\n' ? "" : "\n"); |
198 | nvdimm_bus_unlock(dev); | 198 | nvdimm_bus_unlock(dev); |
199 | device_unlock(dev); | 199 | nd_device_unlock(dev); |
200 | 200 | ||
201 | return rc; | 201 | return rc; |
202 | } | 202 | } |
@@ -208,7 +208,7 @@ static ssize_t resource_show(struct device *dev, | |||
208 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); | 208 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); |
209 | ssize_t rc; | 209 | ssize_t rc; |
210 | 210 | ||
211 | device_lock(dev); | 211 | nd_device_lock(dev); |
212 | if (dev->driver) { | 212 | if (dev->driver) { |
213 | struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; | 213 | struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; |
214 | u64 offset = __le64_to_cpu(pfn_sb->dataoff); | 214 | u64 offset = __le64_to_cpu(pfn_sb->dataoff); |
@@ -222,7 +222,7 @@ static ssize_t resource_show(struct device *dev, | |||
222 | /* no address to convey if the pfn instance is disabled */ | 222 | /* no address to convey if the pfn instance is disabled */ |
223 | rc = -ENXIO; | 223 | rc = -ENXIO; |
224 | } | 224 | } |
225 | device_unlock(dev); | 225 | nd_device_unlock(dev); |
226 | 226 | ||
227 | return rc; | 227 | return rc; |
228 | } | 228 | } |
@@ -234,7 +234,7 @@ static ssize_t size_show(struct device *dev, | |||
234 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); | 234 | struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); |
235 | ssize_t rc; | 235 | ssize_t rc; |
236 | 236 | ||
237 | device_lock(dev); | 237 | nd_device_lock(dev); |
238 | if (dev->driver) { | 238 | if (dev->driver) { |
239 | struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; | 239 | struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; |
240 | u64 offset = __le64_to_cpu(pfn_sb->dataoff); | 240 | u64 offset = __le64_to_cpu(pfn_sb->dataoff); |
@@ -250,7 +250,7 @@ static ssize_t size_show(struct device *dev, | |||
250 | /* no size to convey if the pfn instance is disabled */ | 250 | /* no size to convey if the pfn instance is disabled */ |
251 | rc = -ENXIO; | 251 | rc = -ENXIO; |
252 | } | 252 | } |
253 | device_unlock(dev); | 253 | nd_device_unlock(dev); |
254 | 254 | ||
255 | return rc; | 255 | return rc; |
256 | } | 256 | } |
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 2bf3acd69613..4c121dd03dd9 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
@@ -522,8 +522,8 @@ static int nd_pmem_remove(struct device *dev) | |||
522 | nvdimm_namespace_detach_btt(to_nd_btt(dev)); | 522 | nvdimm_namespace_detach_btt(to_nd_btt(dev)); |
523 | else { | 523 | else { |
524 | /* | 524 | /* |
525 | * Note, this assumes device_lock() context to not race | 525 | * Note, this assumes nd_device_lock() context to not |
526 | * nd_pmem_notify() | 526 | * race nd_pmem_notify() |
527 | */ | 527 | */ |
528 | sysfs_put(pmem->bb_state); | 528 | sysfs_put(pmem->bb_state); |
529 | pmem->bb_state = NULL; | 529 | pmem->bb_state = NULL; |
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c index ef46cc3a71ae..37bf8719a2a4 100644 --- a/drivers/nvdimm/region.c +++ b/drivers/nvdimm/region.c | |||
@@ -34,17 +34,6 @@ static int nd_region_probe(struct device *dev) | |||
34 | if (rc) | 34 | if (rc) |
35 | return rc; | 35 | return rc; |
36 | 36 | ||
37 | rc = nd_region_register_namespaces(nd_region, &err); | ||
38 | if (rc < 0) | ||
39 | return rc; | ||
40 | |||
41 | ndrd = dev_get_drvdata(dev); | ||
42 | ndrd->ns_active = rc; | ||
43 | ndrd->ns_count = rc + err; | ||
44 | |||
45 | if (rc && err && rc == err) | ||
46 | return -ENODEV; | ||
47 | |||
48 | if (is_nd_pmem(&nd_region->dev)) { | 37 | if (is_nd_pmem(&nd_region->dev)) { |
49 | struct resource ndr_res; | 38 | struct resource ndr_res; |
50 | 39 | ||
@@ -60,6 +49,17 @@ static int nd_region_probe(struct device *dev) | |||
60 | nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res); | 49 | nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res); |
61 | } | 50 | } |
62 | 51 | ||
52 | rc = nd_region_register_namespaces(nd_region, &err); | ||
53 | if (rc < 0) | ||
54 | return rc; | ||
55 | |||
56 | ndrd = dev_get_drvdata(dev); | ||
57 | ndrd->ns_active = rc; | ||
58 | ndrd->ns_count = rc + err; | ||
59 | |||
60 | if (rc && err && rc == err) | ||
61 | return -ENODEV; | ||
62 | |||
63 | nd_region->btt_seed = nd_btt_create(nd_region); | 63 | nd_region->btt_seed = nd_btt_create(nd_region); |
64 | nd_region->pfn_seed = nd_pfn_create(nd_region); | 64 | nd_region->pfn_seed = nd_pfn_create(nd_region); |
65 | nd_region->dax_seed = nd_dax_create(nd_region); | 65 | nd_region->dax_seed = nd_dax_create(nd_region); |
@@ -102,7 +102,7 @@ static int nd_region_remove(struct device *dev) | |||
102 | nvdimm_bus_unlock(dev); | 102 | nvdimm_bus_unlock(dev); |
103 | 103 | ||
104 | /* | 104 | /* |
105 | * Note, this assumes device_lock() context to not race | 105 | * Note, this assumes nd_device_lock() context to not race |
106 | * nd_region_notify() | 106 | * nd_region_notify() |
107 | */ | 107 | */ |
108 | sysfs_put(nd_region->bb_state); | 108 | sysfs_put(nd_region->bb_state); |
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 56f2227f192a..af30cbe7a8ea 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c | |||
@@ -331,7 +331,7 @@ static ssize_t set_cookie_show(struct device *dev, | |||
331 | * the v1.1 namespace label cookie definition. To read all this | 331 | * the v1.1 namespace label cookie definition. To read all this |
332 | * data we need to wait for probing to settle. | 332 | * data we need to wait for probing to settle. |
333 | */ | 333 | */ |
334 | device_lock(dev); | 334 | nd_device_lock(dev); |
335 | nvdimm_bus_lock(dev); | 335 | nvdimm_bus_lock(dev); |
336 | wait_nvdimm_bus_probe_idle(dev); | 336 | wait_nvdimm_bus_probe_idle(dev); |
337 | if (nd_region->ndr_mappings) { | 337 | if (nd_region->ndr_mappings) { |
@@ -348,7 +348,7 @@ static ssize_t set_cookie_show(struct device *dev, | |||
348 | } | 348 | } |
349 | } | 349 | } |
350 | nvdimm_bus_unlock(dev); | 350 | nvdimm_bus_unlock(dev); |
351 | device_unlock(dev); | 351 | nd_device_unlock(dev); |
352 | 352 | ||
353 | if (rc) | 353 | if (rc) |
354 | return rc; | 354 | return rc; |
@@ -424,10 +424,12 @@ static ssize_t available_size_show(struct device *dev, | |||
424 | * memory nvdimm_bus_lock() is dropped, but that's userspace's | 424 | * memory nvdimm_bus_lock() is dropped, but that's userspace's |
425 | * problem to not race itself. | 425 | * problem to not race itself. |
426 | */ | 426 | */ |
427 | nd_device_lock(dev); | ||
427 | nvdimm_bus_lock(dev); | 428 | nvdimm_bus_lock(dev); |
428 | wait_nvdimm_bus_probe_idle(dev); | 429 | wait_nvdimm_bus_probe_idle(dev); |
429 | available = nd_region_available_dpa(nd_region); | 430 | available = nd_region_available_dpa(nd_region); |
430 | nvdimm_bus_unlock(dev); | 431 | nvdimm_bus_unlock(dev); |
432 | nd_device_unlock(dev); | ||
431 | 433 | ||
432 | return sprintf(buf, "%llu\n", available); | 434 | return sprintf(buf, "%llu\n", available); |
433 | } | 435 | } |
@@ -439,10 +441,12 @@ static ssize_t max_available_extent_show(struct device *dev, | |||
439 | struct nd_region *nd_region = to_nd_region(dev); | 441 | struct nd_region *nd_region = to_nd_region(dev); |
440 | unsigned long long available = 0; | 442 | unsigned long long available = 0; |
441 | 443 | ||
444 | nd_device_lock(dev); | ||
442 | nvdimm_bus_lock(dev); | 445 | nvdimm_bus_lock(dev); |
443 | wait_nvdimm_bus_probe_idle(dev); | 446 | wait_nvdimm_bus_probe_idle(dev); |
444 | available = nd_region_allocatable_dpa(nd_region); | 447 | available = nd_region_allocatable_dpa(nd_region); |
445 | nvdimm_bus_unlock(dev); | 448 | nvdimm_bus_unlock(dev); |
449 | nd_device_unlock(dev); | ||
446 | 450 | ||
447 | return sprintf(buf, "%llu\n", available); | 451 | return sprintf(buf, "%llu\n", available); |
448 | } | 452 | } |
@@ -561,12 +565,12 @@ static ssize_t region_badblocks_show(struct device *dev, | |||
561 | struct nd_region *nd_region = to_nd_region(dev); | 565 | struct nd_region *nd_region = to_nd_region(dev); |
562 | ssize_t rc; | 566 | ssize_t rc; |
563 | 567 | ||
564 | device_lock(dev); | 568 | nd_device_lock(dev); |
565 | if (dev->driver) | 569 | if (dev->driver) |
566 | rc = badblocks_show(&nd_region->bb, buf, 0); | 570 | rc = badblocks_show(&nd_region->bb, buf, 0); |
567 | else | 571 | else |
568 | rc = -ENXIO; | 572 | rc = -ENXIO; |
569 | device_unlock(dev); | 573 | nd_device_unlock(dev); |
570 | 574 | ||
571 | return rc; | 575 | return rc; |
572 | } | 576 | } |
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index cc09b81fc7f4..8f3fbe5ca937 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -2311,17 +2311,15 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct | |||
2311 | memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); | 2311 | memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); |
2312 | } | 2312 | } |
2313 | 2313 | ||
2314 | static void __nvme_release_subsystem(struct nvme_subsystem *subsys) | 2314 | static void nvme_release_subsystem(struct device *dev) |
2315 | { | 2315 | { |
2316 | struct nvme_subsystem *subsys = | ||
2317 | container_of(dev, struct nvme_subsystem, dev); | ||
2318 | |||
2316 | ida_simple_remove(&nvme_subsystems_ida, subsys->instance); | 2319 | ida_simple_remove(&nvme_subsystems_ida, subsys->instance); |
2317 | kfree(subsys); | 2320 | kfree(subsys); |
2318 | } | 2321 | } |
2319 | 2322 | ||
2320 | static void nvme_release_subsystem(struct device *dev) | ||
2321 | { | ||
2322 | __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev)); | ||
2323 | } | ||
2324 | |||
2325 | static void nvme_destroy_subsystem(struct kref *ref) | 2323 | static void nvme_destroy_subsystem(struct kref *ref) |
2326 | { | 2324 | { |
2327 | struct nvme_subsystem *subsys = | 2325 | struct nvme_subsystem *subsys = |
@@ -2477,7 +2475,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) | |||
2477 | mutex_lock(&nvme_subsystems_lock); | 2475 | mutex_lock(&nvme_subsystems_lock); |
2478 | found = __nvme_find_get_subsystem(subsys->subnqn); | 2476 | found = __nvme_find_get_subsystem(subsys->subnqn); |
2479 | if (found) { | 2477 | if (found) { |
2480 | __nvme_release_subsystem(subsys); | 2478 | put_device(&subsys->dev); |
2481 | subsys = found; | 2479 | subsys = found; |
2482 | 2480 | ||
2483 | if (!nvme_validate_cntlid(subsys, ctrl, id)) { | 2481 | if (!nvme_validate_cntlid(subsys, ctrl, id)) { |
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index a9a927677970..4f0d0d12744e 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c | |||
@@ -12,11 +12,6 @@ module_param(multipath, bool, 0444); | |||
12 | MODULE_PARM_DESC(multipath, | 12 | MODULE_PARM_DESC(multipath, |
13 | "turn on native support for multiple controllers per subsystem"); | 13 | "turn on native support for multiple controllers per subsystem"); |
14 | 14 | ||
15 | inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) | ||
16 | { | ||
17 | return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3)); | ||
18 | } | ||
19 | |||
20 | /* | 15 | /* |
21 | * If multipathing is enabled we need to always use the subsystem instance | 16 | * If multipathing is enabled we need to always use the subsystem instance |
22 | * number for numbering our devices to avoid conflicts between subsystems that | 17 | * number for numbering our devices to avoid conflicts between subsystems that |
@@ -622,7 +617,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) | |||
622 | { | 617 | { |
623 | int error; | 618 | int error; |
624 | 619 | ||
625 | if (!nvme_ctrl_use_ana(ctrl)) | 620 | /* check if multipath is enabled and we have the capability */ |
621 | if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3))) | ||
626 | return 0; | 622 | return 0; |
627 | 623 | ||
628 | ctrl->anacap = id->anacap; | 624 | ctrl->anacap = id->anacap; |
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 716a876119c8..26b563f9985b 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
@@ -485,7 +485,11 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[]; | |||
485 | extern const struct block_device_operations nvme_ns_head_ops; | 485 | extern const struct block_device_operations nvme_ns_head_ops; |
486 | 486 | ||
487 | #ifdef CONFIG_NVME_MULTIPATH | 487 | #ifdef CONFIG_NVME_MULTIPATH |
488 | bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl); | 488 | static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) |
489 | { | ||
490 | return ctrl->ana_log_buf != NULL; | ||
491 | } | ||
492 | |||
489 | void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, | 493 | void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, |
490 | struct nvme_ctrl *ctrl, int *flags); | 494 | struct nvme_ctrl *ctrl, int *flags); |
491 | void nvme_failover_req(struct request *req); | 495 | void nvme_failover_req(struct request *req); |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index bb970ca82517..db160cee42ad 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -2254,9 +2254,7 @@ static int nvme_dev_add(struct nvme_dev *dev) | |||
2254 | if (!dev->ctrl.tagset) { | 2254 | if (!dev->ctrl.tagset) { |
2255 | dev->tagset.ops = &nvme_mq_ops; | 2255 | dev->tagset.ops = &nvme_mq_ops; |
2256 | dev->tagset.nr_hw_queues = dev->online_queues - 1; | 2256 | dev->tagset.nr_hw_queues = dev->online_queues - 1; |
2257 | dev->tagset.nr_maps = 1; /* default */ | 2257 | dev->tagset.nr_maps = 2; /* default + read */ |
2258 | if (dev->io_queues[HCTX_TYPE_READ]) | ||
2259 | dev->tagset.nr_maps++; | ||
2260 | if (dev->io_queues[HCTX_TYPE_POLL]) | 2258 | if (dev->io_queues[HCTX_TYPE_POLL]) |
2261 | dev->tagset.nr_maps++; | 2259 | dev->tagset.nr_maps++; |
2262 | dev->tagset.timeout = NVME_IO_TIMEOUT; | 2260 | dev->tagset.timeout = NVME_IO_TIMEOUT; |
@@ -3029,6 +3027,8 @@ static const struct pci_device_id nvme_id_table[] = { | |||
3029 | .driver_data = NVME_QUIRK_LIGHTNVM, }, | 3027 | .driver_data = NVME_QUIRK_LIGHTNVM, }, |
3030 | { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ | 3028 | { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ |
3031 | .driver_data = NVME_QUIRK_LIGHTNVM, }, | 3029 | .driver_data = NVME_QUIRK_LIGHTNVM, }, |
3030 | { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ | ||
3031 | .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, | ||
3032 | { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, | 3032 | { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, |
3033 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, | 3033 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, |
3034 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, | 3034 | { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, |
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 2d06b8095a19..df352b334ea7 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
@@ -723,8 +723,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd, | |||
723 | cpu_pm_pmu_setup(armpmu, cmd); | 723 | cpu_pm_pmu_setup(armpmu, cmd); |
724 | break; | 724 | break; |
725 | case CPU_PM_EXIT: | 725 | case CPU_PM_EXIT: |
726 | cpu_pm_pmu_setup(armpmu, cmd); | ||
727 | case CPU_PM_ENTER_FAILED: | 726 | case CPU_PM_ENTER_FAILED: |
727 | cpu_pm_pmu_setup(armpmu, cmd); | ||
728 | armpmu->start(armpmu); | 728 | armpmu->start(armpmu); |
729 | break; | 729 | break; |
730 | default: | 730 | default: |
diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c index 48d6f0d87583..83ed1fbf73cf 100644 --- a/drivers/platform/olpc/olpc-xo175-ec.c +++ b/drivers/platform/olpc/olpc-xo175-ec.c | |||
@@ -736,6 +736,12 @@ static const struct of_device_id olpc_xo175_ec_of_match[] = { | |||
736 | }; | 736 | }; |
737 | MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match); | 737 | MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match); |
738 | 738 | ||
739 | static const struct spi_device_id olpc_xo175_ec_id_table[] = { | ||
740 | { "xo1.75-ec", 0 }, | ||
741 | {} | ||
742 | }; | ||
743 | MODULE_DEVICE_TABLE(spi, olpc_xo175_ec_id_table); | ||
744 | |||
739 | static struct spi_driver olpc_xo175_ec_spi_driver = { | 745 | static struct spi_driver olpc_xo175_ec_spi_driver = { |
740 | .driver = { | 746 | .driver = { |
741 | .name = "olpc-xo175-ec", | 747 | .name = "olpc-xo175-ec", |
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 235c0b89f824..c510d0d72475 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c | |||
@@ -812,6 +812,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { | |||
812 | INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map), | 812 | INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map), |
813 | INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map), | 813 | INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map), |
814 | INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map), | 814 | INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map), |
815 | INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), | ||
815 | {} | 816 | {} |
816 | }; | 817 | }; |
817 | 818 | ||
diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c index b0d3110ae378..e4c68efac0c2 100644 --- a/drivers/platform/x86/pcengines-apuv2.c +++ b/drivers/platform/x86/pcengines-apuv2.c | |||
@@ -93,7 +93,7 @@ static struct gpiod_lookup_table gpios_led_table = { | |||
93 | 93 | ||
94 | static struct gpio_keys_button apu2_keys_buttons[] = { | 94 | static struct gpio_keys_button apu2_keys_buttons[] = { |
95 | { | 95 | { |
96 | .code = KEY_SETUP, | 96 | .code = KEY_RESTART, |
97 | .active_low = 1, | 97 | .active_low = 1, |
98 | .desc = "front button", | 98 | .desc = "front button", |
99 | .type = EV_KEY, | 99 | .type = EV_KEY, |
@@ -255,6 +255,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver"); | |||
255 | MODULE_LICENSE("GPL"); | 255 | MODULE_LICENSE("GPL"); |
256 | MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table); | 256 | MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table); |
257 | MODULE_ALIAS("platform:pcengines-apuv2"); | 257 | MODULE_ALIAS("platform:pcengines-apuv2"); |
258 | MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME); | 258 | MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled"); |
259 | MODULE_SOFTDEP("pre: platform:leds-gpio"); | ||
260 | MODULE_SOFTDEP("pre: platform:gpio_keys_polled"); | ||
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 9fd6dd342169..6df481896b5f 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c | |||
@@ -1454,7 +1454,7 @@ static void __exit rapl_exit(void) | |||
1454 | unregister_pm_notifier(&rapl_pm_notifier); | 1454 | unregister_pm_notifier(&rapl_pm_notifier); |
1455 | } | 1455 | } |
1456 | 1456 | ||
1457 | module_init(rapl_init); | 1457 | fs_initcall(rapl_init); |
1458 | module_exit(rapl_exit); | 1458 | module_exit(rapl_exit); |
1459 | 1459 | ||
1460 | MODULE_DESCRIPTION("Intel Runtime Average Power Limit (RAPL) common code"); | 1460 | MODULE_DESCRIPTION("Intel Runtime Average Power Limit (RAPL) common code"); |
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 540e8aafc990..f808c5fa9838 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c | |||
@@ -671,7 +671,7 @@ static int __init powercap_init(void) | |||
671 | return class_register(&powercap_class); | 671 | return class_register(&powercap_class); |
672 | } | 672 | } |
673 | 673 | ||
674 | device_initcall(powercap_init); | 674 | fs_initcall(powercap_init); |
675 | 675 | ||
676 | MODULE_DESCRIPTION("PowerCap sysfs Driver"); | 676 | MODULE_DESCRIPTION("PowerCap sysfs Driver"); |
677 | MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); | 677 | MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); |
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 152053361862..989506bd90b1 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c | |||
@@ -174,14 +174,14 @@ | |||
174 | #define AXP803_DCDC5_1140mV_STEPS 35 | 174 | #define AXP803_DCDC5_1140mV_STEPS 35 |
175 | #define AXP803_DCDC5_1140mV_END \ | 175 | #define AXP803_DCDC5_1140mV_END \ |
176 | (AXP803_DCDC5_1140mV_START + AXP803_DCDC5_1140mV_STEPS) | 176 | (AXP803_DCDC5_1140mV_START + AXP803_DCDC5_1140mV_STEPS) |
177 | #define AXP803_DCDC5_NUM_VOLTAGES 68 | 177 | #define AXP803_DCDC5_NUM_VOLTAGES 69 |
178 | 178 | ||
179 | #define AXP803_DCDC6_600mV_START 0x00 | 179 | #define AXP803_DCDC6_600mV_START 0x00 |
180 | #define AXP803_DCDC6_600mV_STEPS 50 | 180 | #define AXP803_DCDC6_600mV_STEPS 50 |
181 | #define AXP803_DCDC6_600mV_END \ | 181 | #define AXP803_DCDC6_600mV_END \ |
182 | (AXP803_DCDC6_600mV_START + AXP803_DCDC6_600mV_STEPS) | 182 | (AXP803_DCDC6_600mV_START + AXP803_DCDC6_600mV_STEPS) |
183 | #define AXP803_DCDC6_1120mV_START 0x33 | 183 | #define AXP803_DCDC6_1120mV_START 0x33 |
184 | #define AXP803_DCDC6_1120mV_STEPS 14 | 184 | #define AXP803_DCDC6_1120mV_STEPS 20 |
185 | #define AXP803_DCDC6_1120mV_END \ | 185 | #define AXP803_DCDC6_1120mV_END \ |
186 | (AXP803_DCDC6_1120mV_START + AXP803_DCDC6_1120mV_STEPS) | 186 | (AXP803_DCDC6_1120mV_START + AXP803_DCDC6_1120mV_STEPS) |
187 | #define AXP803_DCDC6_NUM_VOLTAGES 72 | 187 | #define AXP803_DCDC6_NUM_VOLTAGES 72 |
@@ -240,7 +240,7 @@ | |||
240 | #define AXP806_DCDCA_600mV_END \ | 240 | #define AXP806_DCDCA_600mV_END \ |
241 | (AXP806_DCDCA_600mV_START + AXP806_DCDCA_600mV_STEPS) | 241 | (AXP806_DCDCA_600mV_START + AXP806_DCDCA_600mV_STEPS) |
242 | #define AXP806_DCDCA_1120mV_START 0x33 | 242 | #define AXP806_DCDCA_1120mV_START 0x33 |
243 | #define AXP806_DCDCA_1120mV_STEPS 14 | 243 | #define AXP806_DCDCA_1120mV_STEPS 20 |
244 | #define AXP806_DCDCA_1120mV_END \ | 244 | #define AXP806_DCDCA_1120mV_END \ |
245 | (AXP806_DCDCA_1120mV_START + AXP806_DCDCA_1120mV_STEPS) | 245 | (AXP806_DCDCA_1120mV_START + AXP806_DCDCA_1120mV_STEPS) |
246 | #define AXP806_DCDCA_NUM_VOLTAGES 72 | 246 | #define AXP806_DCDCA_NUM_VOLTAGES 72 |
@@ -774,8 +774,8 @@ static const struct regulator_linear_range axp806_dcdcd_ranges[] = { | |||
774 | AXP806_DCDCD_600mV_END, | 774 | AXP806_DCDCD_600mV_END, |
775 | 20000), | 775 | 20000), |
776 | REGULATOR_LINEAR_RANGE(1600000, | 776 | REGULATOR_LINEAR_RANGE(1600000, |
777 | AXP806_DCDCD_600mV_START, | 777 | AXP806_DCDCD_1600mV_START, |
778 | AXP806_DCDCD_600mV_END, | 778 | AXP806_DCDCD_1600mV_END, |
779 | 100000), | 779 | 100000), |
780 | }; | 780 | }; |
781 | 781 | ||
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c index 5d067f7c2116..0c440c5e2832 100644 --- a/drivers/regulator/lp87565-regulator.c +++ b/drivers/regulator/lp87565-regulator.c | |||
@@ -163,7 +163,7 @@ static int lp87565_regulator_probe(struct platform_device *pdev) | |||
163 | struct lp87565 *lp87565 = dev_get_drvdata(pdev->dev.parent); | 163 | struct lp87565 *lp87565 = dev_get_drvdata(pdev->dev.parent); |
164 | struct regulator_config config = { }; | 164 | struct regulator_config config = { }; |
165 | struct regulator_dev *rdev; | 165 | struct regulator_dev *rdev; |
166 | int i, min_idx = LP87565_BUCK_0, max_idx = LP87565_BUCK_3; | 166 | int i, min_idx, max_idx; |
167 | 167 | ||
168 | platform_set_drvdata(pdev, lp87565); | 168 | platform_set_drvdata(pdev, lp87565); |
169 | 169 | ||
@@ -182,9 +182,9 @@ static int lp87565_regulator_probe(struct platform_device *pdev) | |||
182 | max_idx = LP87565_BUCK_3210; | 182 | max_idx = LP87565_BUCK_3210; |
183 | break; | 183 | break; |
184 | default: | 184 | default: |
185 | dev_err(lp87565->dev, "Invalid lp config %d\n", | 185 | min_idx = LP87565_BUCK_0; |
186 | lp87565->dev_type); | 186 | max_idx = LP87565_BUCK_3; |
187 | return -EINVAL; | 187 | break; |
188 | } | 188 | } |
189 | 189 | ||
190 | for (i = min_idx; i <= max_idx; i++) { | 190 | for (i = min_idx; i <= max_idx; i++) { |
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 397918ebba55..9112faa6a9a0 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c | |||
@@ -416,8 +416,10 @@ device_node *regulator_of_get_init_node(struct device *dev, | |||
416 | if (!name) | 416 | if (!name) |
417 | name = child->name; | 417 | name = child->name; |
418 | 418 | ||
419 | if (!strcmp(desc->of_match, name)) | 419 | if (!strcmp(desc->of_match, name)) { |
420 | of_node_put(search); | ||
420 | return of_node_get(child); | 421 | return of_node_get(child); |
422 | } | ||
421 | } | 423 | } |
422 | 424 | ||
423 | of_node_put(search); | 425 | of_node_put(search); |
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c index b9ce93e9df89..99f86612f775 100644 --- a/drivers/s390/block/dasd_alias.c +++ b/drivers/s390/block/dasd_alias.c | |||
@@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr) | |||
383 | char msg_format; | 383 | char msg_format; |
384 | char msg_no; | 384 | char msg_no; |
385 | 385 | ||
386 | /* | ||
387 | * intrc values ENODEV, ENOLINK and EPERM | ||
388 | * will be optained from sleep_on to indicate that no | ||
389 | * IO operation can be started | ||
390 | */ | ||
391 | if (cqr->intrc == -ENODEV) | ||
392 | return 1; | ||
393 | |||
394 | if (cqr->intrc == -ENOLINK) | ||
395 | return 1; | ||
396 | |||
397 | if (cqr->intrc == -EPERM) | ||
398 | return 1; | ||
399 | |||
386 | sense = dasd_get_sense(&cqr->irb); | 400 | sense = dasd_get_sense(&cqr->irb); |
387 | if (!sense) | 401 | if (!sense) |
388 | return 0; | 402 | return 0; |
@@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device, | |||
447 | lcu->flags &= ~NEED_UAC_UPDATE; | 461 | lcu->flags &= ~NEED_UAC_UPDATE; |
448 | spin_unlock_irqrestore(&lcu->lock, flags); | 462 | spin_unlock_irqrestore(&lcu->lock, flags); |
449 | 463 | ||
450 | do { | 464 | rc = dasd_sleep_on(cqr); |
451 | rc = dasd_sleep_on(cqr); | 465 | if (rc && !suborder_not_supported(cqr)) { |
452 | if (rc && suborder_not_supported(cqr)) | ||
453 | return -EOPNOTSUPP; | ||
454 | } while (rc && (cqr->retries > 0)); | ||
455 | if (rc) { | ||
456 | spin_lock_irqsave(&lcu->lock, flags); | 466 | spin_lock_irqsave(&lcu->lock, flags); |
457 | lcu->flags |= NEED_UAC_UPDATE; | 467 | lcu->flags |= NEED_UAC_UPDATE; |
458 | spin_unlock_irqrestore(&lcu->lock, flags); | 468 | spin_unlock_irqrestore(&lcu->lock, flags); |
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 8c9d412b6d33..e7cf0a1d4f71 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
@@ -398,6 +398,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, | |||
398 | } | 398 | } |
399 | if (dstat == 0x08) | 399 | if (dstat == 0x08) |
400 | break; | 400 | break; |
401 | /* else, fall through */ | ||
401 | case 0x04: | 402 | case 0x04: |
402 | /* Device end interrupt. */ | 403 | /* Device end interrupt. */ |
403 | if ((raw = req->info) == NULL) | 404 | if ((raw = req->info) == NULL) |
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 8d3370da2dfc..3e0b2f63a9d2 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
@@ -677,6 +677,7 @@ tape_generic_remove(struct ccw_device *cdev) | |||
677 | switch (device->tape_state) { | 677 | switch (device->tape_state) { |
678 | case TS_INIT: | 678 | case TS_INIT: |
679 | tape_state_set(device, TS_NOT_OPER); | 679 | tape_state_set(device, TS_NOT_OPER); |
680 | /* fallthrough */ | ||
680 | case TS_NOT_OPER: | 681 | case TS_NOT_OPER: |
681 | /* | 682 | /* |
682 | * Nothing to do. | 683 | * Nothing to do. |
@@ -949,6 +950,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request) | |||
949 | break; | 950 | break; |
950 | if (device->tape_state == TS_UNUSED) | 951 | if (device->tape_state == TS_UNUSED) |
951 | break; | 952 | break; |
953 | /* fallthrough */ | ||
952 | default: | 954 | default: |
953 | if (device->tape_state == TS_BLKUSE) | 955 | if (device->tape_state == TS_BLKUSE) |
954 | break; | 956 | break; |
@@ -1116,6 +1118,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1116 | case -ETIMEDOUT: | 1118 | case -ETIMEDOUT: |
1117 | DBF_LH(1, "(%08x): Request timed out\n", | 1119 | DBF_LH(1, "(%08x): Request timed out\n", |
1118 | device->cdev_id); | 1120 | device->cdev_id); |
1121 | /* fallthrough */ | ||
1119 | case -EIO: | 1122 | case -EIO: |
1120 | __tape_end_request(device, request, -EIO); | 1123 | __tape_end_request(device, request, -EIO); |
1121 | break; | 1124 | break; |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 730c4e68094b..4142c85e77d8 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -319,9 +319,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, | |||
319 | int retries = 0, cc; | 319 | int retries = 0, cc; |
320 | unsigned long laob = 0; | 320 | unsigned long laob = 0; |
321 | 321 | ||
322 | WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) || | 322 | if (aob) { |
323 | !q->u.out.use_cq)); | ||
324 | if (q->u.out.use_cq && aob != 0) { | ||
325 | fc = QDIO_SIGA_WRITEQ; | 323 | fc = QDIO_SIGA_WRITEQ; |
326 | laob = aob; | 324 | laob = aob; |
327 | } | 325 | } |
@@ -621,9 +619,6 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, | |||
621 | { | 619 | { |
622 | unsigned long phys_aob = 0; | 620 | unsigned long phys_aob = 0; |
623 | 621 | ||
624 | if (!q->use_cq) | ||
625 | return 0; | ||
626 | |||
627 | if (!q->aobs[bufnr]) { | 622 | if (!q->aobs[bufnr]) { |
628 | struct qaob *aob = qdio_allocate_aob(); | 623 | struct qaob *aob = qdio_allocate_aob(); |
629 | q->aobs[bufnr] = aob; | 624 | q->aobs[bufnr] = aob; |
@@ -1308,6 +1303,8 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) | |||
1308 | 1303 | ||
1309 | for_each_output_queue(irq_ptr, q, i) { | 1304 | for_each_output_queue(irq_ptr, q, i) { |
1310 | if (use_cq) { | 1305 | if (use_cq) { |
1306 | if (multicast_outbound(q)) | ||
1307 | continue; | ||
1311 | if (qdio_enable_async_operation(&q->u.out) < 0) { | 1308 | if (qdio_enable_async_operation(&q->u.out) < 0) { |
1312 | use_cq = 0; | 1309 | use_cq = 0; |
1313 | continue; | 1310 | continue; |
@@ -1553,18 +1550,19 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, | |||
1553 | /* One SIGA-W per buffer required for unicast HSI */ | 1550 | /* One SIGA-W per buffer required for unicast HSI */ |
1554 | WARN_ON_ONCE(count > 1 && !multicast_outbound(q)); | 1551 | WARN_ON_ONCE(count > 1 && !multicast_outbound(q)); |
1555 | 1552 | ||
1556 | phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); | 1553 | if (q->u.out.use_cq) |
1554 | phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); | ||
1557 | 1555 | ||
1558 | rc = qdio_kick_outbound_q(q, phys_aob); | 1556 | rc = qdio_kick_outbound_q(q, phys_aob); |
1559 | } else if (need_siga_sync(q)) { | 1557 | } else if (need_siga_sync(q)) { |
1560 | rc = qdio_siga_sync_q(q); | 1558 | rc = qdio_siga_sync_q(q); |
1559 | } else if (count < QDIO_MAX_BUFFERS_PER_Q && | ||
1560 | get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 && | ||
1561 | state == SLSB_CU_OUTPUT_PRIMED) { | ||
1562 | /* The previous buffer is not processed yet, tack on. */ | ||
1563 | qperf_inc(q, fast_requeue); | ||
1561 | } else { | 1564 | } else { |
1562 | /* try to fast requeue buffers */ | 1565 | rc = qdio_kick_outbound_q(q, 0); |
1563 | get_buf_state(q, prev_buf(bufnr), &state, 0); | ||
1564 | if (state != SLSB_CU_OUTPUT_PRIMED) | ||
1565 | rc = qdio_kick_outbound_q(q, 0); | ||
1566 | else | ||
1567 | qperf_inc(q, fast_requeue); | ||
1568 | } | 1566 | } |
1569 | 1567 | ||
1570 | /* in case of SIGA errors we must process the error immediately */ | 1568 | /* in case of SIGA errors we must process the error immediately */ |
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c index 8c1d2357ef5b..7a838e3d7c0f 100644 --- a/drivers/s390/cio/vfio_ccw_async.c +++ b/drivers/s390/cio/vfio_ccw_async.c | |||
@@ -70,7 +70,7 @@ static void vfio_ccw_async_region_release(struct vfio_ccw_private *private, | |||
70 | 70 | ||
71 | } | 71 | } |
72 | 72 | ||
73 | const struct vfio_ccw_regops vfio_ccw_async_region_ops = { | 73 | static const struct vfio_ccw_regops vfio_ccw_async_region_ops = { |
74 | .read = vfio_ccw_async_region_read, | 74 | .read = vfio_ccw_async_region_read, |
75 | .write = vfio_ccw_async_region_write, | 75 | .write = vfio_ccw_async_region_write, |
76 | .release = vfio_ccw_async_region_release, | 76 | .release = vfio_ccw_async_region_release, |
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index 1d4c893ead23..3645d1720c4b 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c | |||
@@ -72,8 +72,10 @@ static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len) | |||
72 | sizeof(*pa->pa_iova_pfn) + | 72 | sizeof(*pa->pa_iova_pfn) + |
73 | sizeof(*pa->pa_pfn), | 73 | sizeof(*pa->pa_pfn), |
74 | GFP_KERNEL); | 74 | GFP_KERNEL); |
75 | if (unlikely(!pa->pa_iova_pfn)) | 75 | if (unlikely(!pa->pa_iova_pfn)) { |
76 | pa->pa_nr = 0; | ||
76 | return -ENOMEM; | 77 | return -ENOMEM; |
78 | } | ||
77 | pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr; | 79 | pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr; |
78 | 80 | ||
79 | pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; | 81 | pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; |
@@ -421,7 +423,7 @@ static int ccwchain_loop_tic(struct ccwchain *chain, | |||
421 | static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) | 423 | static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) |
422 | { | 424 | { |
423 | struct ccwchain *chain; | 425 | struct ccwchain *chain; |
424 | int len; | 426 | int len, ret; |
425 | 427 | ||
426 | /* Copy 2K (the most we support today) of possible CCWs */ | 428 | /* Copy 2K (the most we support today) of possible CCWs */ |
427 | len = copy_from_iova(cp->mdev, cp->guest_cp, cda, | 429 | len = copy_from_iova(cp->mdev, cp->guest_cp, cda, |
@@ -448,7 +450,12 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) | |||
448 | memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1)); | 450 | memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1)); |
449 | 451 | ||
450 | /* Loop for tics on this new chain. */ | 452 | /* Loop for tics on this new chain. */ |
451 | return ccwchain_loop_tic(chain, cp); | 453 | ret = ccwchain_loop_tic(chain, cp); |
454 | |||
455 | if (ret) | ||
456 | ccwchain_free(chain); | ||
457 | |||
458 | return ret; | ||
452 | } | 459 | } |
453 | 460 | ||
454 | /* Loop for TICs. */ | 461 | /* Loop for TICs. */ |
@@ -642,17 +649,16 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) | |||
642 | 649 | ||
643 | /* Build a ccwchain for the first CCW segment */ | 650 | /* Build a ccwchain for the first CCW segment */ |
644 | ret = ccwchain_handle_ccw(orb->cmd.cpa, cp); | 651 | ret = ccwchain_handle_ccw(orb->cmd.cpa, cp); |
645 | if (ret) | ||
646 | cp_free(cp); | ||
647 | |||
648 | /* It is safe to force: if not set but idals used | ||
649 | * ccwchain_calc_length returns an error. | ||
650 | */ | ||
651 | cp->orb.cmd.c64 = 1; | ||
652 | 652 | ||
653 | if (!ret) | 653 | if (!ret) { |
654 | cp->initialized = true; | 654 | cp->initialized = true; |
655 | 655 | ||
656 | /* It is safe to force: if it was not set but idals used | ||
657 | * ccwchain_calc_length would have returned an error. | ||
658 | */ | ||
659 | cp->orb.cmd.c64 = 1; | ||
660 | } | ||
661 | |||
656 | return ret; | 662 | return ret; |
657 | } | 663 | } |
658 | 664 | ||
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 2b90a5ecaeb9..9208c0e56c33 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c | |||
@@ -88,7 +88,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) | |||
88 | (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); | 88 | (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); |
89 | if (scsw_is_solicited(&irb->scsw)) { | 89 | if (scsw_is_solicited(&irb->scsw)) { |
90 | cp_update_scsw(&private->cp, &irb->scsw); | 90 | cp_update_scsw(&private->cp, &irb->scsw); |
91 | if (is_final) | 91 | if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) |
92 | cp_free(&private->cp); | 92 | cp_free(&private->cp); |
93 | } | 93 | } |
94 | mutex_lock(&private->io_mutex); | 94 | mutex_lock(&private->io_mutex); |
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 5ea83dc4f1d7..dad2be333d82 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c | |||
@@ -152,6 +152,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq) | |||
152 | ap_msg->receive(aq, ap_msg, aq->reply); | 152 | ap_msg->receive(aq, ap_msg, aq->reply); |
153 | break; | 153 | break; |
154 | } | 154 | } |
155 | /* fall through */ | ||
155 | case AP_RESPONSE_NO_PENDING_REPLY: | 156 | case AP_RESPONSE_NO_PENDING_REPLY: |
156 | if (!status.queue_empty || aq->queue_count <= 0) | 157 | if (!status.queue_empty || aq->queue_count <= 0) |
157 | break; | 158 | break; |
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 12fe9deb265e..a36251d138fb 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c | |||
@@ -801,10 +801,7 @@ static int convert_response_ica(struct zcrypt_queue *zq, | |||
801 | if (msg->cprbx.cprb_ver_id == 0x02) | 801 | if (msg->cprbx.cprb_ver_id == 0x02) |
802 | return convert_type86_ica(zq, reply, | 802 | return convert_type86_ica(zq, reply, |
803 | outputdata, outputdatalength); | 803 | outputdata, outputdatalength); |
804 | /* | 804 | /* fall through - wrong cprb version is an unknown response */ |
805 | * Fall through, no break, incorrect cprb version is an unknown | ||
806 | * response | ||
807 | */ | ||
808 | default: /* Unknown response type, this should NEVER EVER happen */ | 805 | default: /* Unknown response type, this should NEVER EVER happen */ |
809 | zq->online = 0; | 806 | zq->online = 0; |
810 | pr_err("Cryptographic device %02x.%04x failed and was set offline\n", | 807 | pr_err("Cryptographic device %02x.%04x failed and was set offline\n", |
@@ -837,10 +834,7 @@ static int convert_response_xcrb(struct zcrypt_queue *zq, | |||
837 | } | 834 | } |
838 | if (msg->cprbx.cprb_ver_id == 0x02) | 835 | if (msg->cprbx.cprb_ver_id == 0x02) |
839 | return convert_type86_xcrb(zq, reply, xcRB); | 836 | return convert_type86_xcrb(zq, reply, xcRB); |
840 | /* | 837 | /* fall through - wrong cprb version is an unknown response */ |
841 | * Fall through, no break, incorrect cprb version is an unknown | ||
842 | * response | ||
843 | */ | ||
844 | default: /* Unknown response type, this should NEVER EVER happen */ | 838 | default: /* Unknown response type, this should NEVER EVER happen */ |
845 | xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ | 839 | xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ |
846 | zq->online = 0; | 840 | zq->online = 0; |
@@ -870,7 +864,7 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq, | |||
870 | return convert_error(zq, reply); | 864 | return convert_error(zq, reply); |
871 | if (msg->cprbx.cprb_ver_id == 0x04) | 865 | if (msg->cprbx.cprb_ver_id == 0x04) |
872 | return convert_type86_ep11_xcrb(zq, reply, xcRB); | 866 | return convert_type86_ep11_xcrb(zq, reply, xcRB); |
873 | /* Fall through, no break, incorrect cprb version is an unknown resp.*/ | 867 | /* fall through - wrong cprb version is an unknown resp */ |
874 | default: /* Unknown response type, this should NEVER EVER happen */ | 868 | default: /* Unknown response type, this should NEVER EVER happen */ |
875 | zq->online = 0; | 869 | zq->online = 0; |
876 | pr_err("Cryptographic device %02x.%04x failed and was set offline\n", | 870 | pr_err("Cryptographic device %02x.%04x failed and was set offline\n", |
@@ -900,10 +894,7 @@ static int convert_response_rng(struct zcrypt_queue *zq, | |||
900 | return -EINVAL; | 894 | return -EINVAL; |
901 | if (msg->cprbx.cprb_ver_id == 0x02) | 895 | if (msg->cprbx.cprb_ver_id == 0x02) |
902 | return convert_type86_rng(zq, reply, data); | 896 | return convert_type86_rng(zq, reply, data); |
903 | /* | 897 | /* fall through - wrong cprb version is an unknown response */ |
904 | * Fall through, no break, incorrect cprb version is an unknown | ||
905 | * response | ||
906 | */ | ||
907 | default: /* Unknown response type, this should NEVER EVER happen */ | 898 | default: /* Unknown response type, this should NEVER EVER happen */ |
908 | zq->online = 0; | 899 | zq->online = 0; |
909 | pr_err("Cryptographic device %02x.%04x failed and was set offline\n", | 900 | pr_err("Cryptographic device %02x.%04x failed and was set offline\n", |
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 1a55e5942d36..957889a42d2e 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c | |||
@@ -145,6 +145,8 @@ struct airq_info { | |||
145 | struct airq_iv *aiv; | 145 | struct airq_iv *aiv; |
146 | }; | 146 | }; |
147 | static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; | 147 | static struct airq_info *airq_areas[MAX_AIRQ_AREAS]; |
148 | static DEFINE_MUTEX(airq_areas_lock); | ||
149 | |||
148 | static u8 *summary_indicators; | 150 | static u8 *summary_indicators; |
149 | 151 | ||
150 | static inline u8 *get_summary_indicator(struct airq_info *info) | 152 | static inline u8 *get_summary_indicator(struct airq_info *info) |
@@ -265,9 +267,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, | |||
265 | unsigned long bit, flags; | 267 | unsigned long bit, flags; |
266 | 268 | ||
267 | for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) { | 269 | for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) { |
270 | mutex_lock(&airq_areas_lock); | ||
268 | if (!airq_areas[i]) | 271 | if (!airq_areas[i]) |
269 | airq_areas[i] = new_airq_info(i); | 272 | airq_areas[i] = new_airq_info(i); |
270 | info = airq_areas[i]; | 273 | info = airq_areas[i]; |
274 | mutex_unlock(&airq_areas_lock); | ||
271 | if (!info) | 275 | if (!info) |
272 | return 0; | 276 | return 0; |
273 | write_lock_irqsave(&info->lock, flags); | 277 | write_lock_irqsave(&info->lock, flags); |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 75f66f8ad3ea..1b92f3c19ff3 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -1523,10 +1523,10 @@ config SCSI_VIRTIO | |||
1523 | 1523 | ||
1524 | source "drivers/scsi/csiostor/Kconfig" | 1524 | source "drivers/scsi/csiostor/Kconfig" |
1525 | 1525 | ||
1526 | endif # SCSI_LOWLEVEL | ||
1527 | |||
1528 | source "drivers/scsi/pcmcia/Kconfig" | 1526 | source "drivers/scsi/pcmcia/Kconfig" |
1529 | 1527 | ||
1528 | endif # SCSI_LOWLEVEL | ||
1529 | |||
1530 | source "drivers/scsi/device_handler/Kconfig" | 1530 | source "drivers/scsi/device_handler/Kconfig" |
1531 | 1531 | ||
1532 | endmenu | 1532 | endmenu |
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index f0066f8a1786..4971104b1817 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #define ALUA_FAILOVER_TIMEOUT 60 | 40 | #define ALUA_FAILOVER_TIMEOUT 60 |
41 | #define ALUA_FAILOVER_RETRIES 5 | 41 | #define ALUA_FAILOVER_RETRIES 5 |
42 | #define ALUA_RTPG_DELAY_MSECS 5 | 42 | #define ALUA_RTPG_DELAY_MSECS 5 |
43 | #define ALUA_RTPG_RETRY_DELAY 2 | ||
43 | 44 | ||
44 | /* device handler flags */ | 45 | /* device handler flags */ |
45 | #define ALUA_OPTIMIZE_STPG 0x01 | 46 | #define ALUA_OPTIMIZE_STPG 0x01 |
@@ -682,7 +683,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) | |||
682 | case SCSI_ACCESS_STATE_TRANSITIONING: | 683 | case SCSI_ACCESS_STATE_TRANSITIONING: |
683 | if (time_before(jiffies, pg->expiry)) { | 684 | if (time_before(jiffies, pg->expiry)) { |
684 | /* State transition, retry */ | 685 | /* State transition, retry */ |
685 | pg->interval = 2; | 686 | pg->interval = ALUA_RTPG_RETRY_DELAY; |
686 | err = SCSI_DH_RETRY; | 687 | err = SCSI_DH_RETRY; |
687 | } else { | 688 | } else { |
688 | struct alua_dh_data *h; | 689 | struct alua_dh_data *h; |
@@ -807,6 +808,8 @@ static void alua_rtpg_work(struct work_struct *work) | |||
807 | spin_lock_irqsave(&pg->lock, flags); | 808 | spin_lock_irqsave(&pg->lock, flags); |
808 | pg->flags &= ~ALUA_PG_RUNNING; | 809 | pg->flags &= ~ALUA_PG_RUNNING; |
809 | pg->flags |= ALUA_PG_RUN_RTPG; | 810 | pg->flags |= ALUA_PG_RUN_RTPG; |
811 | if (!pg->interval) | ||
812 | pg->interval = ALUA_RTPG_RETRY_DELAY; | ||
810 | spin_unlock_irqrestore(&pg->lock, flags); | 813 | spin_unlock_irqrestore(&pg->lock, flags); |
811 | queue_delayed_work(kaluad_wq, &pg->rtpg_work, | 814 | queue_delayed_work(kaluad_wq, &pg->rtpg_work, |
812 | pg->interval * HZ); | 815 | pg->interval * HZ); |
@@ -818,6 +821,8 @@ static void alua_rtpg_work(struct work_struct *work) | |||
818 | spin_lock_irqsave(&pg->lock, flags); | 821 | spin_lock_irqsave(&pg->lock, flags); |
819 | if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { | 822 | if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { |
820 | pg->flags &= ~ALUA_PG_RUNNING; | 823 | pg->flags &= ~ALUA_PG_RUNNING; |
824 | if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG)) | ||
825 | pg->interval = ALUA_RTPG_RETRY_DELAY; | ||
821 | pg->flags |= ALUA_PG_RUN_RTPG; | 826 | pg->flags |= ALUA_PG_RUN_RTPG; |
822 | spin_unlock_irqrestore(&pg->lock, flags); | 827 | spin_unlock_irqrestore(&pg->lock, flags); |
823 | queue_delayed_work(kaluad_wq, &pg->rtpg_work, | 828 | queue_delayed_work(kaluad_wq, &pg->rtpg_work, |
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 590ec8009f52..1791a393795d 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c | |||
@@ -1019,7 +1019,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
1019 | { | 1019 | { |
1020 | struct fcoe_fcf *fcf; | 1020 | struct fcoe_fcf *fcf; |
1021 | struct fcoe_fcf new; | 1021 | struct fcoe_fcf new; |
1022 | unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV); | 1022 | unsigned long sol_tov = msecs_to_jiffies(FCOE_CTLR_SOL_TOV); |
1023 | int first = 0; | 1023 | int first = 0; |
1024 | int mtu_valid; | 1024 | int mtu_valid; |
1025 | int found = 0; | 1025 | int found = 0; |
@@ -2005,7 +2005,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); | |||
2005 | */ | 2005 | */ |
2006 | static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata) | 2006 | static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata) |
2007 | { | 2007 | { |
2008 | return (struct fcoe_rport *)(rdata + 1); | 2008 | return container_of(rdata, struct fcoe_rport, rdata); |
2009 | } | 2009 | } |
2010 | 2010 | ||
2011 | /** | 2011 | /** |
@@ -2269,7 +2269,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip) | |||
2269 | */ | 2269 | */ |
2270 | static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, | 2270 | static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, |
2271 | struct sk_buff *skb, | 2271 | struct sk_buff *skb, |
2272 | struct fc_rport_priv *rdata) | 2272 | struct fcoe_rport *frport) |
2273 | { | 2273 | { |
2274 | struct fip_header *fiph; | 2274 | struct fip_header *fiph; |
2275 | struct fip_desc *desc = NULL; | 2275 | struct fip_desc *desc = NULL; |
@@ -2277,16 +2277,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, | |||
2277 | struct fip_wwn_desc *wwn = NULL; | 2277 | struct fip_wwn_desc *wwn = NULL; |
2278 | struct fip_vn_desc *vn = NULL; | 2278 | struct fip_vn_desc *vn = NULL; |
2279 | struct fip_size_desc *size = NULL; | 2279 | struct fip_size_desc *size = NULL; |
2280 | struct fcoe_rport *frport; | ||
2281 | size_t rlen; | 2280 | size_t rlen; |
2282 | size_t dlen; | 2281 | size_t dlen; |
2283 | u32 desc_mask = 0; | 2282 | u32 desc_mask = 0; |
2284 | u32 dtype; | 2283 | u32 dtype; |
2285 | u8 sub; | 2284 | u8 sub; |
2286 | 2285 | ||
2287 | memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); | ||
2288 | frport = fcoe_ctlr_rport(rdata); | ||
2289 | |||
2290 | fiph = (struct fip_header *)skb->data; | 2286 | fiph = (struct fip_header *)skb->data; |
2291 | frport->flags = ntohs(fiph->fip_flags); | 2287 | frport->flags = ntohs(fiph->fip_flags); |
2292 | 2288 | ||
@@ -2349,15 +2345,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip, | |||
2349 | if (dlen != sizeof(struct fip_wwn_desc)) | 2345 | if (dlen != sizeof(struct fip_wwn_desc)) |
2350 | goto len_err; | 2346 | goto len_err; |
2351 | wwn = (struct fip_wwn_desc *)desc; | 2347 | wwn = (struct fip_wwn_desc *)desc; |
2352 | rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); | 2348 | frport->rdata.ids.node_name = |
2349 | get_unaligned_be64(&wwn->fd_wwn); | ||
2353 | break; | 2350 | break; |
2354 | case FIP_DT_VN_ID: | 2351 | case FIP_DT_VN_ID: |
2355 | if (dlen != sizeof(struct fip_vn_desc)) | 2352 | if (dlen != sizeof(struct fip_vn_desc)) |
2356 | goto len_err; | 2353 | goto len_err; |
2357 | vn = (struct fip_vn_desc *)desc; | 2354 | vn = (struct fip_vn_desc *)desc; |
2358 | memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN); | 2355 | memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN); |
2359 | rdata->ids.port_id = ntoh24(vn->fd_fc_id); | 2356 | frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id); |
2360 | rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn); | 2357 | frport->rdata.ids.port_name = |
2358 | get_unaligned_be64(&vn->fd_wwpn); | ||
2361 | break; | 2359 | break; |
2362 | case FIP_DT_FC4F: | 2360 | case FIP_DT_FC4F: |
2363 | if (dlen != sizeof(struct fip_fc4_feat)) | 2361 | if (dlen != sizeof(struct fip_fc4_feat)) |
@@ -2403,16 +2401,14 @@ static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip) | |||
2403 | /** | 2401 | /** |
2404 | * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request. | 2402 | * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request. |
2405 | * @fip: The FCoE controller | 2403 | * @fip: The FCoE controller |
2406 | * @rdata: parsed remote port with frport from the probe request | 2404 | * @frport: parsed FCoE rport from the probe request |
2407 | * | 2405 | * |
2408 | * Called with ctlr_mutex held. | 2406 | * Called with ctlr_mutex held. |
2409 | */ | 2407 | */ |
2410 | static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, | 2408 | static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, |
2411 | struct fc_rport_priv *rdata) | 2409 | struct fcoe_rport *frport) |
2412 | { | 2410 | { |
2413 | struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); | 2411 | if (frport->rdata.ids.port_id != fip->port_id) |
2414 | |||
2415 | if (rdata->ids.port_id != fip->port_id) | ||
2416 | return; | 2412 | return; |
2417 | 2413 | ||
2418 | switch (fip->state) { | 2414 | switch (fip->state) { |
@@ -2432,7 +2428,7 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, | |||
2432 | * Probe's REC bit is not set. | 2428 | * Probe's REC bit is not set. |
2433 | * If we don't reply, we will change our address. | 2429 | * If we don't reply, we will change our address. |
2434 | */ | 2430 | */ |
2435 | if (fip->lp->wwpn > rdata->ids.port_name && | 2431 | if (fip->lp->wwpn > frport->rdata.ids.port_name && |
2436 | !(frport->flags & FIP_FL_REC_OR_P2P)) { | 2432 | !(frport->flags & FIP_FL_REC_OR_P2P)) { |
2437 | LIBFCOE_FIP_DBG(fip, "vn_probe_req: " | 2433 | LIBFCOE_FIP_DBG(fip, "vn_probe_req: " |
2438 | "port_id collision\n"); | 2434 | "port_id collision\n"); |
@@ -2456,14 +2452,14 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip, | |||
2456 | /** | 2452 | /** |
2457 | * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply. | 2453 | * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply. |
2458 | * @fip: The FCoE controller | 2454 | * @fip: The FCoE controller |
2459 | * @rdata: parsed remote port with frport from the probe request | 2455 | * @frport: parsed FCoE rport from the probe request |
2460 | * | 2456 | * |
2461 | * Called with ctlr_mutex held. | 2457 | * Called with ctlr_mutex held. |
2462 | */ | 2458 | */ |
2463 | static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, | 2459 | static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, |
2464 | struct fc_rport_priv *rdata) | 2460 | struct fcoe_rport *frport) |
2465 | { | 2461 | { |
2466 | if (rdata->ids.port_id != fip->port_id) | 2462 | if (frport->rdata.ids.port_id != fip->port_id) |
2467 | return; | 2463 | return; |
2468 | switch (fip->state) { | 2464 | switch (fip->state) { |
2469 | case FIP_ST_VNMP_START: | 2465 | case FIP_ST_VNMP_START: |
@@ -2486,11 +2482,11 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip, | |||
2486 | /** | 2482 | /** |
2487 | * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply. | 2483 | * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply. |
2488 | * @fip: The FCoE controller | 2484 | * @fip: The FCoE controller |
2489 | * @new: newly-parsed remote port with frport as a template for new rdata | 2485 | * @new: newly-parsed FCoE rport as a template for new rdata |
2490 | * | 2486 | * |
2491 | * Called with ctlr_mutex held. | 2487 | * Called with ctlr_mutex held. |
2492 | */ | 2488 | */ |
2493 | static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) | 2489 | static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fcoe_rport *new) |
2494 | { | 2490 | { |
2495 | struct fc_lport *lport = fip->lp; | 2491 | struct fc_lport *lport = fip->lp; |
2496 | struct fc_rport_priv *rdata; | 2492 | struct fc_rport_priv *rdata; |
@@ -2498,7 +2494,7 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) | |||
2498 | struct fcoe_rport *frport; | 2494 | struct fcoe_rport *frport; |
2499 | u32 port_id; | 2495 | u32 port_id; |
2500 | 2496 | ||
2501 | port_id = new->ids.port_id; | 2497 | port_id = new->rdata.ids.port_id; |
2502 | if (port_id == fip->port_id) | 2498 | if (port_id == fip->port_id) |
2503 | return; | 2499 | return; |
2504 | 2500 | ||
@@ -2515,22 +2511,28 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new) | |||
2515 | rdata->disc_id = lport->disc.disc_id; | 2511 | rdata->disc_id = lport->disc.disc_id; |
2516 | 2512 | ||
2517 | ids = &rdata->ids; | 2513 | ids = &rdata->ids; |
2518 | if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) || | 2514 | if ((ids->port_name != -1 && |
2519 | (ids->node_name != -1 && ids->node_name != new->ids.node_name)) { | 2515 | ids->port_name != new->rdata.ids.port_name) || |
2516 | (ids->node_name != -1 && | ||
2517 | ids->node_name != new->rdata.ids.node_name)) { | ||
2520 | mutex_unlock(&rdata->rp_mutex); | 2518 | mutex_unlock(&rdata->rp_mutex); |
2521 | LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id); | 2519 | LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id); |
2522 | fc_rport_logoff(rdata); | 2520 | fc_rport_logoff(rdata); |
2523 | mutex_lock(&rdata->rp_mutex); | 2521 | mutex_lock(&rdata->rp_mutex); |
2524 | } | 2522 | } |
2525 | ids->port_name = new->ids.port_name; | 2523 | ids->port_name = new->rdata.ids.port_name; |
2526 | ids->node_name = new->ids.node_name; | 2524 | ids->node_name = new->rdata.ids.node_name; |
2527 | mutex_unlock(&rdata->rp_mutex); | 2525 | mutex_unlock(&rdata->rp_mutex); |
2528 | 2526 | ||
2529 | frport = fcoe_ctlr_rport(rdata); | 2527 | frport = fcoe_ctlr_rport(rdata); |
2530 | LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n", | 2528 | LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n", |
2531 | port_id, frport->fcoe_len ? "old" : "new", | 2529 | port_id, frport->fcoe_len ? "old" : "new", |
2532 | rdata->rp_state); | 2530 | rdata->rp_state); |
2533 | *frport = *fcoe_ctlr_rport(new); | 2531 | frport->fcoe_len = new->fcoe_len; |
2532 | frport->flags = new->flags; | ||
2533 | frport->login_count = new->login_count; | ||
2534 | memcpy(frport->enode_mac, new->enode_mac, ETH_ALEN); | ||
2535 | memcpy(frport->vn_mac, new->vn_mac, ETH_ALEN); | ||
2534 | frport->time = 0; | 2536 | frport->time = 0; |
2535 | } | 2537 | } |
2536 | 2538 | ||
@@ -2562,16 +2564,14 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac) | |||
2562 | /** | 2564 | /** |
2563 | * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification | 2565 | * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification |
2564 | * @fip: The FCoE controller | 2566 | * @fip: The FCoE controller |
2565 | * @new: newly-parsed remote port with frport as a template for new rdata | 2567 | * @new: newly-parsed FCoE rport as a template for new rdata |
2566 | * | 2568 | * |
2567 | * Called with ctlr_mutex held. | 2569 | * Called with ctlr_mutex held. |
2568 | */ | 2570 | */ |
2569 | static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, | 2571 | static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, |
2570 | struct fc_rport_priv *new) | 2572 | struct fcoe_rport *new) |
2571 | { | 2573 | { |
2572 | struct fcoe_rport *frport = fcoe_ctlr_rport(new); | 2574 | if (new->flags & FIP_FL_REC_OR_P2P) { |
2573 | |||
2574 | if (frport->flags & FIP_FL_REC_OR_P2P) { | ||
2575 | LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n"); | 2575 | LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n"); |
2576 | fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); | 2576 | fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); |
2577 | return; | 2577 | return; |
@@ -2580,7 +2580,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, | |||
2580 | case FIP_ST_VNMP_START: | 2580 | case FIP_ST_VNMP_START: |
2581 | case FIP_ST_VNMP_PROBE1: | 2581 | case FIP_ST_VNMP_PROBE1: |
2582 | case FIP_ST_VNMP_PROBE2: | 2582 | case FIP_ST_VNMP_PROBE2: |
2583 | if (new->ids.port_id == fip->port_id) { | 2583 | if (new->rdata.ids.port_id == fip->port_id) { |
2584 | LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " | 2584 | LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " |
2585 | "restart, state %d\n", | 2585 | "restart, state %d\n", |
2586 | fip->state); | 2586 | fip->state); |
@@ -2589,8 +2589,8 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, | |||
2589 | break; | 2589 | break; |
2590 | case FIP_ST_VNMP_CLAIM: | 2590 | case FIP_ST_VNMP_CLAIM: |
2591 | case FIP_ST_VNMP_UP: | 2591 | case FIP_ST_VNMP_UP: |
2592 | if (new->ids.port_id == fip->port_id) { | 2592 | if (new->rdata.ids.port_id == fip->port_id) { |
2593 | if (new->ids.port_name > fip->lp->wwpn) { | 2593 | if (new->rdata.ids.port_name > fip->lp->wwpn) { |
2594 | LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " | 2594 | LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " |
2595 | "restart, port_id collision\n"); | 2595 | "restart, port_id collision\n"); |
2596 | fcoe_ctlr_vn_restart(fip); | 2596 | fcoe_ctlr_vn_restart(fip); |
@@ -2602,15 +2602,16 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, | |||
2602 | break; | 2602 | break; |
2603 | } | 2603 | } |
2604 | LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n", | 2604 | LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n", |
2605 | new->ids.port_id); | 2605 | new->rdata.ids.port_id); |
2606 | fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac, | 2606 | fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, new->enode_mac, |
2607 | min((u32)frport->fcoe_len, | 2607 | min((u32)new->fcoe_len, |
2608 | fcoe_ctlr_fcoe_size(fip))); | 2608 | fcoe_ctlr_fcoe_size(fip))); |
2609 | fcoe_ctlr_vn_add(fip, new); | 2609 | fcoe_ctlr_vn_add(fip, new); |
2610 | break; | 2610 | break; |
2611 | default: | 2611 | default: |
2612 | LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " | 2612 | LIBFCOE_FIP_DBG(fip, "vn_claim_notify: " |
2613 | "ignoring claim from %x\n", new->ids.port_id); | 2613 | "ignoring claim from %x\n", |
2614 | new->rdata.ids.port_id); | ||
2614 | break; | 2615 | break; |
2615 | } | 2616 | } |
2616 | } | 2617 | } |
@@ -2618,15 +2619,15 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip, | |||
2618 | /** | 2619 | /** |
2619 | * fcoe_ctlr_vn_claim_resp() - handle received Claim Response | 2620 | * fcoe_ctlr_vn_claim_resp() - handle received Claim Response |
2620 | * @fip: The FCoE controller that received the frame | 2621 | * @fip: The FCoE controller that received the frame |
2621 | * @new: newly-parsed remote port with frport from the Claim Response | 2622 | * @new: newly-parsed FCoE rport from the Claim Response |
2622 | * | 2623 | * |
2623 | * Called with ctlr_mutex held. | 2624 | * Called with ctlr_mutex held. |
2624 | */ | 2625 | */ |
2625 | static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, | 2626 | static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, |
2626 | struct fc_rport_priv *new) | 2627 | struct fcoe_rport *new) |
2627 | { | 2628 | { |
2628 | LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n", | 2629 | LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n", |
2629 | new->ids.port_id, fcoe_ctlr_state(fip->state)); | 2630 | new->rdata.ids.port_id, fcoe_ctlr_state(fip->state)); |
2630 | if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM) | 2631 | if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM) |
2631 | fcoe_ctlr_vn_add(fip, new); | 2632 | fcoe_ctlr_vn_add(fip, new); |
2632 | } | 2633 | } |
@@ -2634,28 +2635,28 @@ static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip, | |||
2634 | /** | 2635 | /** |
2635 | * fcoe_ctlr_vn_beacon() - handle received beacon. | 2636 | * fcoe_ctlr_vn_beacon() - handle received beacon. |
2636 | * @fip: The FCoE controller that received the frame | 2637 | * @fip: The FCoE controller that received the frame |
2637 | * @new: newly-parsed remote port with frport from the Beacon | 2638 | * @new: newly-parsed FCoE rport from the Beacon |
2638 | * | 2639 | * |
2639 | * Called with ctlr_mutex held. | 2640 | * Called with ctlr_mutex held. |
2640 | */ | 2641 | */ |
2641 | static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, | 2642 | static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, |
2642 | struct fc_rport_priv *new) | 2643 | struct fcoe_rport *new) |
2643 | { | 2644 | { |
2644 | struct fc_lport *lport = fip->lp; | 2645 | struct fc_lport *lport = fip->lp; |
2645 | struct fc_rport_priv *rdata; | 2646 | struct fc_rport_priv *rdata; |
2646 | struct fcoe_rport *frport; | 2647 | struct fcoe_rport *frport; |
2647 | 2648 | ||
2648 | frport = fcoe_ctlr_rport(new); | 2649 | if (new->flags & FIP_FL_REC_OR_P2P) { |
2649 | if (frport->flags & FIP_FL_REC_OR_P2P) { | ||
2650 | LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n"); | 2650 | LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n"); |
2651 | fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); | 2651 | fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0); |
2652 | return; | 2652 | return; |
2653 | } | 2653 | } |
2654 | rdata = fc_rport_lookup(lport, new->ids.port_id); | 2654 | rdata = fc_rport_lookup(lport, new->rdata.ids.port_id); |
2655 | if (rdata) { | 2655 | if (rdata) { |
2656 | if (rdata->ids.node_name == new->ids.node_name && | 2656 | if (rdata->ids.node_name == new->rdata.ids.node_name && |
2657 | rdata->ids.port_name == new->ids.port_name) { | 2657 | rdata->ids.port_name == new->rdata.ids.port_name) { |
2658 | frport = fcoe_ctlr_rport(rdata); | 2658 | frport = fcoe_ctlr_rport(rdata); |
2659 | |||
2659 | LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n", | 2660 | LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n", |
2660 | rdata->ids.port_id); | 2661 | rdata->ids.port_id); |
2661 | if (!frport->time && fip->state == FIP_ST_VNMP_UP) { | 2662 | if (!frport->time && fip->state == FIP_ST_VNMP_UP) { |
@@ -2678,7 +2679,7 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip, | |||
2678 | * Don't add the neighbor yet. | 2679 | * Don't add the neighbor yet. |
2679 | */ | 2680 | */ |
2680 | LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n", | 2681 | LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n", |
2681 | new->ids.port_id); | 2682 | new->rdata.ids.port_id); |
2682 | if (time_after(jiffies, | 2683 | if (time_after(jiffies, |
2683 | fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT))) | 2684 | fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT))) |
2684 | fcoe_ctlr_vn_send_claim(fip); | 2685 | fcoe_ctlr_vn_send_claim(fip); |
@@ -2738,10 +2739,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
2738 | { | 2739 | { |
2739 | struct fip_header *fiph; | 2740 | struct fip_header *fiph; |
2740 | enum fip_vn2vn_subcode sub; | 2741 | enum fip_vn2vn_subcode sub; |
2741 | struct { | 2742 | struct fcoe_rport frport = { }; |
2742 | struct fc_rport_priv rdata; | ||
2743 | struct fcoe_rport frport; | ||
2744 | } buf; | ||
2745 | int rc, vlan_id = 0; | 2743 | int rc, vlan_id = 0; |
2746 | 2744 | ||
2747 | fiph = (struct fip_header *)skb->data; | 2745 | fiph = (struct fip_header *)skb->data; |
@@ -2757,7 +2755,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
2757 | goto drop; | 2755 | goto drop; |
2758 | } | 2756 | } |
2759 | 2757 | ||
2760 | rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata); | 2758 | rc = fcoe_ctlr_vn_parse(fip, skb, &frport); |
2761 | if (rc) { | 2759 | if (rc) { |
2762 | LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc); | 2760 | LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc); |
2763 | goto drop; | 2761 | goto drop; |
@@ -2766,19 +2764,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
2766 | mutex_lock(&fip->ctlr_mutex); | 2764 | mutex_lock(&fip->ctlr_mutex); |
2767 | switch (sub) { | 2765 | switch (sub) { |
2768 | case FIP_SC_VN_PROBE_REQ: | 2766 | case FIP_SC_VN_PROBE_REQ: |
2769 | fcoe_ctlr_vn_probe_req(fip, &buf.rdata); | 2767 | fcoe_ctlr_vn_probe_req(fip, &frport); |
2770 | break; | 2768 | break; |
2771 | case FIP_SC_VN_PROBE_REP: | 2769 | case FIP_SC_VN_PROBE_REP: |
2772 | fcoe_ctlr_vn_probe_reply(fip, &buf.rdata); | 2770 | fcoe_ctlr_vn_probe_reply(fip, &frport); |
2773 | break; | 2771 | break; |
2774 | case FIP_SC_VN_CLAIM_NOTIFY: | 2772 | case FIP_SC_VN_CLAIM_NOTIFY: |
2775 | fcoe_ctlr_vn_claim_notify(fip, &buf.rdata); | 2773 | fcoe_ctlr_vn_claim_notify(fip, &frport); |
2776 | break; | 2774 | break; |
2777 | case FIP_SC_VN_CLAIM_REP: | 2775 | case FIP_SC_VN_CLAIM_REP: |
2778 | fcoe_ctlr_vn_claim_resp(fip, &buf.rdata); | 2776 | fcoe_ctlr_vn_claim_resp(fip, &frport); |
2779 | break; | 2777 | break; |
2780 | case FIP_SC_VN_BEACON: | 2778 | case FIP_SC_VN_BEACON: |
2781 | fcoe_ctlr_vn_beacon(fip, &buf.rdata); | 2779 | fcoe_ctlr_vn_beacon(fip, &frport); |
2782 | break; | 2780 | break; |
2783 | default: | 2781 | default: |
2784 | LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub); | 2782 | LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub); |
@@ -2802,22 +2800,18 @@ drop: | |||
2802 | */ | 2800 | */ |
2803 | static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, | 2801 | static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, |
2804 | struct sk_buff *skb, | 2802 | struct sk_buff *skb, |
2805 | struct fc_rport_priv *rdata) | 2803 | struct fcoe_rport *frport) |
2806 | { | 2804 | { |
2807 | struct fip_header *fiph; | 2805 | struct fip_header *fiph; |
2808 | struct fip_desc *desc = NULL; | 2806 | struct fip_desc *desc = NULL; |
2809 | struct fip_mac_desc *macd = NULL; | 2807 | struct fip_mac_desc *macd = NULL; |
2810 | struct fip_wwn_desc *wwn = NULL; | 2808 | struct fip_wwn_desc *wwn = NULL; |
2811 | struct fcoe_rport *frport; | ||
2812 | size_t rlen; | 2809 | size_t rlen; |
2813 | size_t dlen; | 2810 | size_t dlen; |
2814 | u32 desc_mask = 0; | 2811 | u32 desc_mask = 0; |
2815 | u32 dtype; | 2812 | u32 dtype; |
2816 | u8 sub; | 2813 | u8 sub; |
2817 | 2814 | ||
2818 | memset(rdata, 0, sizeof(*rdata) + sizeof(*frport)); | ||
2819 | frport = fcoe_ctlr_rport(rdata); | ||
2820 | |||
2821 | fiph = (struct fip_header *)skb->data; | 2815 | fiph = (struct fip_header *)skb->data; |
2822 | frport->flags = ntohs(fiph->fip_flags); | 2816 | frport->flags = ntohs(fiph->fip_flags); |
2823 | 2817 | ||
@@ -2871,7 +2865,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip, | |||
2871 | if (dlen != sizeof(struct fip_wwn_desc)) | 2865 | if (dlen != sizeof(struct fip_wwn_desc)) |
2872 | goto len_err; | 2866 | goto len_err; |
2873 | wwn = (struct fip_wwn_desc *)desc; | 2867 | wwn = (struct fip_wwn_desc *)desc; |
2874 | rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn); | 2868 | frport->rdata.ids.node_name = |
2869 | get_unaligned_be64(&wwn->fd_wwn); | ||
2875 | break; | 2870 | break; |
2876 | default: | 2871 | default: |
2877 | LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " | 2872 | LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " |
@@ -2957,13 +2952,13 @@ static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip, | |||
2957 | /** | 2952 | /** |
2958 | * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification. | 2953 | * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification. |
2959 | * @fip: The FCoE controller | 2954 | * @fip: The FCoE controller |
2955 | * @frport: The newly-parsed FCoE rport from the Discovery Request | ||
2960 | * | 2956 | * |
2961 | * Called with ctlr_mutex held. | 2957 | * Called with ctlr_mutex held. |
2962 | */ | 2958 | */ |
2963 | static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip, | 2959 | static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip, |
2964 | struct fc_rport_priv *rdata) | 2960 | struct fcoe_rport *frport) |
2965 | { | 2961 | { |
2966 | struct fcoe_rport *frport = fcoe_ctlr_rport(rdata); | ||
2967 | enum fip_vlan_subcode sub = FIP_SC_VL_NOTE; | 2962 | enum fip_vlan_subcode sub = FIP_SC_VL_NOTE; |
2968 | 2963 | ||
2969 | if (fip->mode == FIP_MODE_VN2VN) | 2964 | if (fip->mode == FIP_MODE_VN2VN) |
@@ -2982,22 +2977,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
2982 | { | 2977 | { |
2983 | struct fip_header *fiph; | 2978 | struct fip_header *fiph; |
2984 | enum fip_vlan_subcode sub; | 2979 | enum fip_vlan_subcode sub; |
2985 | struct { | 2980 | struct fcoe_rport frport = { }; |
2986 | struct fc_rport_priv rdata; | ||
2987 | struct fcoe_rport frport; | ||
2988 | } buf; | ||
2989 | int rc; | 2981 | int rc; |
2990 | 2982 | ||
2991 | fiph = (struct fip_header *)skb->data; | 2983 | fiph = (struct fip_header *)skb->data; |
2992 | sub = fiph->fip_subcode; | 2984 | sub = fiph->fip_subcode; |
2993 | rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata); | 2985 | rc = fcoe_ctlr_vlan_parse(fip, skb, &frport); |
2994 | if (rc) { | 2986 | if (rc) { |
2995 | LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc); | 2987 | LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc); |
2996 | goto drop; | 2988 | goto drop; |
2997 | } | 2989 | } |
2998 | mutex_lock(&fip->ctlr_mutex); | 2990 | mutex_lock(&fip->ctlr_mutex); |
2999 | if (sub == FIP_SC_VL_REQ) | 2991 | if (sub == FIP_SC_VL_REQ) |
3000 | fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata); | 2992 | fcoe_ctlr_vlan_disc_reply(fip, &frport); |
3001 | mutex_unlock(&fip->ctlr_mutex); | 2993 | mutex_unlock(&fip->ctlr_mutex); |
3002 | 2994 | ||
3003 | drop: | 2995 | drop: |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 43a6b5350775..1bb6aada93fa 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -2334,6 +2334,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, | |||
2334 | case IOACCEL2_SERV_RESPONSE_COMPLETE: | 2334 | case IOACCEL2_SERV_RESPONSE_COMPLETE: |
2335 | switch (c2->error_data.status) { | 2335 | switch (c2->error_data.status) { |
2336 | case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: | 2336 | case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: |
2337 | if (cmd) | ||
2338 | cmd->result = 0; | ||
2337 | break; | 2339 | break; |
2338 | case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: | 2340 | case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: |
2339 | cmd->result |= SAM_STAT_CHECK_CONDITION; | 2341 | cmd->result |= SAM_STAT_CHECK_CONDITION; |
@@ -2483,8 +2485,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h, | |||
2483 | 2485 | ||
2484 | /* check for good status */ | 2486 | /* check for good status */ |
2485 | if (likely(c2->error_data.serv_response == 0 && | 2487 | if (likely(c2->error_data.serv_response == 0 && |
2486 | c2->error_data.status == 0)) | 2488 | c2->error_data.status == 0)) { |
2489 | cmd->result = 0; | ||
2487 | return hpsa_cmd_free_and_done(h, c, cmd); | 2490 | return hpsa_cmd_free_and_done(h, c, cmd); |
2491 | } | ||
2488 | 2492 | ||
2489 | /* | 2493 | /* |
2490 | * Any RAID offload error results in retry which will use | 2494 | * Any RAID offload error results in retry which will use |
@@ -5654,6 +5658,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | |||
5654 | return SCSI_MLQUEUE_DEVICE_BUSY; | 5658 | return SCSI_MLQUEUE_DEVICE_BUSY; |
5655 | 5659 | ||
5656 | /* | 5660 | /* |
5661 | * This is necessary because the SML doesn't zero out this field during | ||
5662 | * error recovery. | ||
5663 | */ | ||
5664 | cmd->result = 0; | ||
5665 | |||
5666 | /* | ||
5657 | * Call alternate submit routine for I/O accelerated commands. | 5667 | * Call alternate submit routine for I/O accelerated commands. |
5658 | * Retries always go down the normal I/O path. | 5668 | * Retries always go down the normal I/O path. |
5659 | */ | 5669 | */ |
@@ -6081,8 +6091,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, | |||
6081 | if (idx != h->last_collision_tag) { /* Print once per tag */ | 6091 | if (idx != h->last_collision_tag) { /* Print once per tag */ |
6082 | dev_warn(&h->pdev->dev, | 6092 | dev_warn(&h->pdev->dev, |
6083 | "%s: tag collision (tag=%d)\n", __func__, idx); | 6093 | "%s: tag collision (tag=%d)\n", __func__, idx); |
6084 | if (c->scsi_cmd != NULL) | ||
6085 | scsi_print_command(c->scsi_cmd); | ||
6086 | if (scmd) | 6094 | if (scmd) |
6087 | scsi_print_command(scmd); | 6095 | scsi_print_command(scmd); |
6088 | h->last_collision_tag = idx; | 6096 | h->last_collision_tag = idx; |
@@ -7798,7 +7806,7 @@ static void hpsa_free_pci_init(struct ctlr_info *h) | |||
7798 | hpsa_disable_interrupt_mode(h); /* pci_init 2 */ | 7806 | hpsa_disable_interrupt_mode(h); /* pci_init 2 */ |
7799 | /* | 7807 | /* |
7800 | * call pci_disable_device before pci_release_regions per | 7808 | * call pci_disable_device before pci_release_regions per |
7801 | * Documentation/PCI/pci.rst | 7809 | * Documentation/driver-api/pci/pci.rst |
7802 | */ | 7810 | */ |
7803 | pci_disable_device(h->pdev); /* pci_init 1 */ | 7811 | pci_disable_device(h->pdev); /* pci_init 1 */ |
7804 | pci_release_regions(h->pdev); /* pci_init 2 */ | 7812 | pci_release_regions(h->pdev); /* pci_init 2 */ |
@@ -7881,7 +7889,7 @@ clean2: /* intmode+region, pci */ | |||
7881 | clean1: | 7889 | clean1: |
7882 | /* | 7890 | /* |
7883 | * call pci_disable_device before pci_release_regions per | 7891 | * call pci_disable_device before pci_release_regions per |
7884 | * Documentation/PCI/pci.rst | 7892 | * Documentation/driver-api/pci/pci.rst |
7885 | */ | 7893 | */ |
7886 | pci_disable_device(h->pdev); | 7894 | pci_disable_device(h->pdev); |
7887 | pci_release_regions(h->pdev); | 7895 | pci_release_regions(h->pdev); |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index acd16e0d52cf..8cdbac076a1b 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
@@ -4864,8 +4864,8 @@ static int ibmvfc_remove(struct vio_dev *vdev) | |||
4864 | 4864 | ||
4865 | spin_lock_irqsave(vhost->host->host_lock, flags); | 4865 | spin_lock_irqsave(vhost->host->host_lock, flags); |
4866 | ibmvfc_purge_requests(vhost, DID_ERROR); | 4866 | ibmvfc_purge_requests(vhost, DID_ERROR); |
4867 | ibmvfc_free_event_pool(vhost); | ||
4868 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | 4867 | spin_unlock_irqrestore(vhost->host->host_lock, flags); |
4868 | ibmvfc_free_event_pool(vhost); | ||
4869 | 4869 | ||
4870 | ibmvfc_free_mem(vhost); | 4870 | ibmvfc_free_mem(vhost); |
4871 | spin_lock(&ibmvfc_driver_lock); | 4871 | spin_lock(&ibmvfc_driver_lock); |
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index e0f3852fdad1..da6e97d8dc3b 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -128,6 +128,7 @@ EXPORT_SYMBOL(fc_rport_lookup); | |||
128 | struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) | 128 | struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) |
129 | { | 129 | { |
130 | struct fc_rport_priv *rdata; | 130 | struct fc_rport_priv *rdata; |
131 | size_t rport_priv_size = sizeof(*rdata); | ||
131 | 132 | ||
132 | lockdep_assert_held(&lport->disc.disc_mutex); | 133 | lockdep_assert_held(&lport->disc.disc_mutex); |
133 | 134 | ||
@@ -135,7 +136,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) | |||
135 | if (rdata) | 136 | if (rdata) |
136 | return rdata; | 137 | return rdata; |
137 | 138 | ||
138 | rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL); | 139 | if (lport->rport_priv_size > 0) |
140 | rport_priv_size = lport->rport_priv_size; | ||
141 | rdata = kzalloc(rport_priv_size, GFP_KERNEL); | ||
139 | if (!rdata) | 142 | if (!rdata) |
140 | return NULL; | 143 | return NULL; |
141 | 144 | ||
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index b2339d04a700..f9f07935556e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -3163,6 +3163,7 @@ fw_crash_buffer_show(struct device *cdev, | |||
3163 | (struct megasas_instance *) shost->hostdata; | 3163 | (struct megasas_instance *) shost->hostdata; |
3164 | u32 size; | 3164 | u32 size; |
3165 | unsigned long dmachunk = CRASH_DMA_BUF_SIZE; | 3165 | unsigned long dmachunk = CRASH_DMA_BUF_SIZE; |
3166 | unsigned long chunk_left_bytes; | ||
3166 | unsigned long src_addr; | 3167 | unsigned long src_addr; |
3167 | unsigned long flags; | 3168 | unsigned long flags; |
3168 | u32 buff_offset; | 3169 | u32 buff_offset; |
@@ -3186,6 +3187,8 @@ fw_crash_buffer_show(struct device *cdev, | |||
3186 | } | 3187 | } |
3187 | 3188 | ||
3188 | size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; | 3189 | size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset; |
3190 | chunk_left_bytes = dmachunk - (buff_offset % dmachunk); | ||
3191 | size = (size > chunk_left_bytes) ? chunk_left_bytes : size; | ||
3189 | size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; | 3192 | size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; |
3190 | 3193 | ||
3191 | src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + | 3194 | src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] + |
@@ -8763,7 +8766,7 @@ static int __init megasas_init(void) | |||
8763 | 8766 | ||
8764 | if ((event_log_level < MFI_EVT_CLASS_DEBUG) || | 8767 | if ((event_log_level < MFI_EVT_CLASS_DEBUG) || |
8765 | (event_log_level > MFI_EVT_CLASS_DEAD)) { | 8768 | (event_log_level > MFI_EVT_CLASS_DEAD)) { |
8766 | printk(KERN_WARNING "megarid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); | 8769 | pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n"); |
8767 | event_log_level = MFI_EVT_CLASS_CRITICAL; | 8770 | event_log_level = MFI_EVT_CLASS_CRITICAL; |
8768 | } | 8771 | } |
8769 | 8772 | ||
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index a32b3f0fcd15..120e3c4de8c2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -537,7 +537,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance) | |||
537 | return 0; | 537 | return 0; |
538 | } | 538 | } |
539 | 539 | ||
540 | int | 540 | static int |
541 | megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) | 541 | megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) |
542 | { | 542 | { |
543 | u32 max_mpt_cmd, i, j; | 543 | u32 max_mpt_cmd, i, j; |
@@ -576,7 +576,8 @@ megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) | |||
576 | 576 | ||
577 | return 0; | 577 | return 0; |
578 | } | 578 | } |
579 | int | 579 | |
580 | static int | ||
580 | megasas_alloc_request_fusion(struct megasas_instance *instance) | 581 | megasas_alloc_request_fusion(struct megasas_instance *instance) |
581 | { | 582 | { |
582 | struct fusion_context *fusion; | 583 | struct fusion_context *fusion; |
@@ -657,7 +658,7 @@ retry_alloc: | |||
657 | return 0; | 658 | return 0; |
658 | } | 659 | } |
659 | 660 | ||
660 | int | 661 | static int |
661 | megasas_alloc_reply_fusion(struct megasas_instance *instance) | 662 | megasas_alloc_reply_fusion(struct megasas_instance *instance) |
662 | { | 663 | { |
663 | int i, count; | 664 | int i, count; |
@@ -734,7 +735,7 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance) | |||
734 | return 0; | 735 | return 0; |
735 | } | 736 | } |
736 | 737 | ||
737 | int | 738 | static int |
738 | megasas_alloc_rdpq_fusion(struct megasas_instance *instance) | 739 | megasas_alloc_rdpq_fusion(struct megasas_instance *instance) |
739 | { | 740 | { |
740 | int i, j, k, msix_count; | 741 | int i, j, k, msix_count; |
@@ -916,7 +917,7 @@ megasas_free_reply_fusion(struct megasas_instance *instance) { | |||
916 | * and is used as SMID of the cmd. | 917 | * and is used as SMID of the cmd. |
917 | * SMID value range is from 1 to max_fw_cmds. | 918 | * SMID value range is from 1 to max_fw_cmds. |
918 | */ | 919 | */ |
919 | int | 920 | static int |
920 | megasas_alloc_cmds_fusion(struct megasas_instance *instance) | 921 | megasas_alloc_cmds_fusion(struct megasas_instance *instance) |
921 | { | 922 | { |
922 | int i; | 923 | int i; |
@@ -1736,7 +1737,7 @@ static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance) | |||
1736 | * | 1737 | * |
1737 | * This is the main function for initializing firmware. | 1738 | * This is the main function for initializing firmware. |
1738 | */ | 1739 | */ |
1739 | u32 | 1740 | static u32 |
1740 | megasas_init_adapter_fusion(struct megasas_instance *instance) | 1741 | megasas_init_adapter_fusion(struct megasas_instance *instance) |
1741 | { | 1742 | { |
1742 | struct fusion_context *fusion; | 1743 | struct fusion_context *fusion; |
@@ -1962,7 +1963,7 @@ megasas_fusion_stop_watchdog(struct megasas_instance *instance) | |||
1962 | * @ext_status : ext status of cmd returned by FW | 1963 | * @ext_status : ext status of cmd returned by FW |
1963 | */ | 1964 | */ |
1964 | 1965 | ||
1965 | void | 1966 | static void |
1966 | map_cmd_status(struct fusion_context *fusion, | 1967 | map_cmd_status(struct fusion_context *fusion, |
1967 | struct scsi_cmnd *scmd, u8 status, u8 ext_status, | 1968 | struct scsi_cmnd *scmd, u8 status, u8 ext_status, |
1968 | u32 data_length, u8 *sense) | 1969 | u32 data_length, u8 *sense) |
@@ -2375,7 +2376,7 @@ int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
2375 | * | 2376 | * |
2376 | * Used to set the PD LBA in CDB for FP IOs | 2377 | * Used to set the PD LBA in CDB for FP IOs |
2377 | */ | 2378 | */ |
2378 | void | 2379 | static void |
2379 | megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, | 2380 | megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, |
2380 | struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, | 2381 | struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, |
2381 | struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) | 2382 | struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) |
@@ -2714,7 +2715,7 @@ megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion, | |||
2714 | * Prepares the io_request and chain elements (sg_frame) for IO | 2715 | * Prepares the io_request and chain elements (sg_frame) for IO |
2715 | * The IO can be for PD (Fast Path) or LD | 2716 | * The IO can be for PD (Fast Path) or LD |
2716 | */ | 2717 | */ |
2717 | void | 2718 | static void |
2718 | megasas_build_ldio_fusion(struct megasas_instance *instance, | 2719 | megasas_build_ldio_fusion(struct megasas_instance *instance, |
2719 | struct scsi_cmnd *scp, | 2720 | struct scsi_cmnd *scp, |
2720 | struct megasas_cmd_fusion *cmd) | 2721 | struct megasas_cmd_fusion *cmd) |
@@ -3211,7 +3212,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, | |||
3211 | * Invokes helper functions to prepare request frames | 3212 | * Invokes helper functions to prepare request frames |
3212 | * and sets flags appropriate for IO/Non-IO cmd | 3213 | * and sets flags appropriate for IO/Non-IO cmd |
3213 | */ | 3214 | */ |
3214 | int | 3215 | static int |
3215 | megasas_build_io_fusion(struct megasas_instance *instance, | 3216 | megasas_build_io_fusion(struct megasas_instance *instance, |
3216 | struct scsi_cmnd *scp, | 3217 | struct scsi_cmnd *scp, |
3217 | struct megasas_cmd_fusion *cmd) | 3218 | struct megasas_cmd_fusion *cmd) |
@@ -3325,9 +3326,9 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) | |||
3325 | /* megasas_prepate_secondRaid1_IO | 3326 | /* megasas_prepate_secondRaid1_IO |
3326 | * It prepares the raid 1 second IO | 3327 | * It prepares the raid 1 second IO |
3327 | */ | 3328 | */ |
3328 | void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance, | 3329 | static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance, |
3329 | struct megasas_cmd_fusion *cmd, | 3330 | struct megasas_cmd_fusion *cmd, |
3330 | struct megasas_cmd_fusion *r1_cmd) | 3331 | struct megasas_cmd_fusion *r1_cmd) |
3331 | { | 3332 | { |
3332 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL; | 3333 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL; |
3333 | struct fusion_context *fusion; | 3334 | struct fusion_context *fusion; |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 684662888792..050c0f029ef9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
@@ -2703,6 +2703,8 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) | |||
2703 | { | 2703 | { |
2704 | u64 required_mask, coherent_mask; | 2704 | u64 required_mask, coherent_mask; |
2705 | struct sysinfo s; | 2705 | struct sysinfo s; |
2706 | /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ | ||
2707 | int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64; | ||
2706 | 2708 | ||
2707 | if (ioc->is_mcpu_endpoint) | 2709 | if (ioc->is_mcpu_endpoint) |
2708 | goto try_32bit; | 2710 | goto try_32bit; |
@@ -2712,17 +2714,17 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) | |||
2712 | goto try_32bit; | 2714 | goto try_32bit; |
2713 | 2715 | ||
2714 | if (ioc->dma_mask) | 2716 | if (ioc->dma_mask) |
2715 | coherent_mask = DMA_BIT_MASK(64); | 2717 | coherent_mask = DMA_BIT_MASK(dma_mask); |
2716 | else | 2718 | else |
2717 | coherent_mask = DMA_BIT_MASK(32); | 2719 | coherent_mask = DMA_BIT_MASK(32); |
2718 | 2720 | ||
2719 | if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) || | 2721 | if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) || |
2720 | dma_set_coherent_mask(&pdev->dev, coherent_mask)) | 2722 | dma_set_coherent_mask(&pdev->dev, coherent_mask)) |
2721 | goto try_32bit; | 2723 | goto try_32bit; |
2722 | 2724 | ||
2723 | ioc->base_add_sg_single = &_base_add_sg_single_64; | 2725 | ioc->base_add_sg_single = &_base_add_sg_single_64; |
2724 | ioc->sge_size = sizeof(Mpi2SGESimple64_t); | 2726 | ioc->sge_size = sizeof(Mpi2SGESimple64_t); |
2725 | ioc->dma_mask = 64; | 2727 | ioc->dma_mask = dma_mask; |
2726 | goto out; | 2728 | goto out; |
2727 | 2729 | ||
2728 | try_32bit: | 2730 | try_32bit: |
@@ -2744,7 +2746,7 @@ static int | |||
2744 | _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc, | 2746 | _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc, |
2745 | struct pci_dev *pdev) | 2747 | struct pci_dev *pdev) |
2746 | { | 2748 | { |
2747 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { | 2749 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) { |
2748 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) | 2750 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) |
2749 | return -ENODEV; | 2751 | return -ENODEV; |
2750 | } | 2752 | } |
@@ -4989,7 +4991,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) | |||
4989 | total_sz += sz; | 4991 | total_sz += sz; |
4990 | } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count)); | 4992 | } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count)); |
4991 | 4993 | ||
4992 | if (ioc->dma_mask == 64) { | 4994 | if (ioc->dma_mask > 32) { |
4993 | if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) { | 4995 | if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) { |
4994 | ioc_warn(ioc, "no suitable consistent DMA mask for %s\n", | 4996 | ioc_warn(ioc, "no suitable consistent DMA mask for %s\n", |
4995 | pci_name(ioc->pdev)); | 4997 | pci_name(ioc->pdev)); |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 4059655639d9..da83034d4759 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -4877,7 +4877,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) | |||
4877 | ql_log(ql_log_warn, vha, 0xd049, | 4877 | ql_log(ql_log_warn, vha, 0xd049, |
4878 | "Failed to allocate ct_sns request.\n"); | 4878 | "Failed to allocate ct_sns request.\n"); |
4879 | kfree(fcport); | 4879 | kfree(fcport); |
4880 | fcport = NULL; | 4880 | return NULL; |
4881 | } | 4881 | } |
4882 | 4882 | ||
4883 | INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); | 4883 | INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9381171c2fc0..11e64b50497f 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1784,8 +1784,10 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) | |||
1784 | blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); | 1784 | blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); |
1785 | } | 1785 | } |
1786 | 1786 | ||
1787 | shost->max_sectors = min_t(unsigned int, shost->max_sectors, | 1787 | if (dev->dma_mask) { |
1788 | dma_max_mapping_size(dev) << SECTOR_SHIFT); | 1788 | shost->max_sectors = min_t(unsigned int, shost->max_sectors, |
1789 | dma_max_mapping_size(dev) >> SECTOR_SHIFT); | ||
1790 | } | ||
1789 | blk_queue_max_hw_sectors(q, shost->max_sectors); | 1791 | blk_queue_max_hw_sectors(q, shost->max_sectors); |
1790 | if (shost->unchecked_isa_dma) | 1792 | if (shost->unchecked_isa_dma) |
1791 | blk_queue_bounce_limit(q, BLK_BOUNCE_ISA); | 1793 | blk_queue_bounce_limit(q, BLK_BOUNCE_ISA); |
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c index 62c6ba17991a..c9519e62308c 100644 --- a/drivers/soc/fsl/qe/qe.c +++ b/drivers/soc/fsl/qe/qe.c | |||
@@ -419,7 +419,7 @@ static void qe_upload_microcode(const void *base, | |||
419 | /* | 419 | /* |
420 | * Upload a microcode to the I-RAM at a specific address. | 420 | * Upload a microcode to the I-RAM at a specific address. |
421 | * | 421 | * |
422 | * See Documentation/powerpc/qe_firmware.txt for information on QE microcode | 422 | * See Documentation/powerpc/qe_firmware.rst for information on QE microcode |
423 | * uploading. | 423 | * uploading. |
424 | * | 424 | * |
425 | * Currently, only version 1 is supported, so the 'version' field must be | 425 | * Currently, only version 1 is supported, so the 'version' field must be |
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 6f243a90c844..840b1b8ff3dc 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c | |||
@@ -834,7 +834,8 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr, | |||
834 | bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); | 834 | bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); |
835 | 835 | ||
836 | /* handle all the 3-wire mode */ | 836 | /* handle all the 3-wire mode */ |
837 | if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf)) | 837 | if (spi->mode & SPI_3WIRE && tfr->rx_buf && |
838 | tfr->rx_buf != ctlr->dummy_rx) | ||
838 | cs |= BCM2835_SPI_CS_REN; | 839 | cs |= BCM2835_SPI_CS_REN; |
839 | else | 840 | else |
840 | cs &= ~BCM2835_SPI_CS_REN; | 841 | cs &= ~BCM2835_SPI_CS_REN; |
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index 41a49b93ca60..448c00e4065b 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c | |||
@@ -206,7 +206,7 @@ static const struct fsl_qspi_devtype_data imx6sx_data = { | |||
206 | }; | 206 | }; |
207 | 207 | ||
208 | static const struct fsl_qspi_devtype_data imx7d_data = { | 208 | static const struct fsl_qspi_devtype_data imx7d_data = { |
209 | .rxfifo = SZ_512, | 209 | .rxfifo = SZ_128, |
210 | .txfifo = SZ_512, | 210 | .txfifo = SZ_512, |
211 | .ahb_buf_size = SZ_1K, | 211 | .ahb_buf_size = SZ_1K, |
212 | .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK, | 212 | .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK, |
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index eca9d52ecf65..9eb82150666e 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c | |||
@@ -410,6 +410,12 @@ static int spi_gpio_probe(struct platform_device *pdev) | |||
410 | 410 | ||
411 | bb = &spi_gpio->bitbang; | 411 | bb = &spi_gpio->bitbang; |
412 | bb->master = master; | 412 | bb->master = master; |
413 | /* | ||
414 | * There is some additional business, apart from driving the CS GPIO | ||
415 | * line, that we need to do on selection. This makes the local | ||
416 | * callback for chipselect always get called. | ||
417 | */ | ||
418 | master->flags |= SPI_MASTER_GPIO_SS; | ||
413 | bb->chipselect = spi_gpio_chipselect; | 419 | bb->chipselect = spi_gpio_chipselect; |
414 | bb->set_line_direction = spi_gpio_set_direction; | 420 | bb->set_line_direction = spi_gpio_set_direction; |
415 | 421 | ||
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index fc7ab4b26880..bb6a14d1ab0f 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -1457,6 +1457,14 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { | |||
1457 | { PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP }, | 1457 | { PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP }, |
1458 | { PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP }, | 1458 | { PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP }, |
1459 | { PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP }, | 1459 | { PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP }, |
1460 | /* TGL-LP */ | ||
1461 | { PCI_VDEVICE(INTEL, 0xa0aa), LPSS_CNL_SSP }, | ||
1462 | { PCI_VDEVICE(INTEL, 0xa0ab), LPSS_CNL_SSP }, | ||
1463 | { PCI_VDEVICE(INTEL, 0xa0de), LPSS_CNL_SSP }, | ||
1464 | { PCI_VDEVICE(INTEL, 0xa0df), LPSS_CNL_SSP }, | ||
1465 | { PCI_VDEVICE(INTEL, 0xa0fb), LPSS_CNL_SSP }, | ||
1466 | { PCI_VDEVICE(INTEL, 0xa0fd), LPSS_CNL_SSP }, | ||
1467 | { PCI_VDEVICE(INTEL, 0xa0fe), LPSS_CNL_SSP }, | ||
1460 | { }, | 1468 | { }, |
1461 | }; | 1469 | }; |
1462 | 1470 | ||
@@ -1831,14 +1839,16 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1831 | status = devm_spi_register_controller(&pdev->dev, controller); | 1839 | status = devm_spi_register_controller(&pdev->dev, controller); |
1832 | if (status != 0) { | 1840 | if (status != 0) { |
1833 | dev_err(&pdev->dev, "problem registering spi controller\n"); | 1841 | dev_err(&pdev->dev, "problem registering spi controller\n"); |
1834 | goto out_error_clock_enabled; | 1842 | goto out_error_pm_runtime_enabled; |
1835 | } | 1843 | } |
1836 | 1844 | ||
1837 | return status; | 1845 | return status; |
1838 | 1846 | ||
1839 | out_error_clock_enabled: | 1847 | out_error_pm_runtime_enabled: |
1840 | pm_runtime_put_noidle(&pdev->dev); | 1848 | pm_runtime_put_noidle(&pdev->dev); |
1841 | pm_runtime_disable(&pdev->dev); | 1849 | pm_runtime_disable(&pdev->dev); |
1850 | |||
1851 | out_error_clock_enabled: | ||
1842 | clk_disable_unprepare(ssp->clk); | 1852 | clk_disable_unprepare(ssp->clk); |
1843 | 1853 | ||
1844 | out_error_dma_irq_alloc: | 1854 | out_error_dma_irq_alloc: |
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c index 22dd4c457d6a..c70caf4ea490 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c | |||
@@ -875,10 +875,12 @@ static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port) | |||
875 | return 0; | 875 | return 0; |
876 | 876 | ||
877 | if (caps & DCB_CAP_DCBX_VER_IEEE) { | 877 | if (caps & DCB_CAP_DCBX_VER_IEEE) { |
878 | iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY; | 878 | iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM; |
879 | |||
880 | ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); | 879 | ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); |
881 | 880 | if (!ret) { | |
881 | iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY; | ||
882 | ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app); | ||
883 | } | ||
882 | } else if (caps & DCB_CAP_DCBX_VER_CEE) { | 884 | } else if (caps & DCB_CAP_DCBX_VER_CEE) { |
883 | iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM; | 885 | iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM; |
884 | 886 | ||
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c index 343b129c2cfa..e877b917c15f 100644 --- a/drivers/target/iscsi/cxgbit/cxgbit_main.c +++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c | |||
@@ -589,7 +589,8 @@ static void cxgbit_dcb_workfn(struct work_struct *work) | |||
589 | iscsi_app = &dcb_work->dcb_app; | 589 | iscsi_app = &dcb_work->dcb_app; |
590 | 590 | ||
591 | if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) { | 591 | if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) { |
592 | if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY) | 592 | if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) && |
593 | (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)) | ||
593 | goto out; | 594 | goto out; |
594 | 595 | ||
595 | priority = iscsi_app->app.priority; | 596 | priority = iscsi_app->app.priority; |
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c index 213ab3cc6b80..d3446acf9bbd 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c | |||
@@ -487,6 +487,7 @@ static int proc_thermal_rapl_add(struct pci_dev *pdev, | |||
487 | rapl_mmio_cpu_online, rapl_mmio_cpu_down_prep); | 487 | rapl_mmio_cpu_online, rapl_mmio_cpu_down_prep); |
488 | if (ret < 0) { | 488 | if (ret < 0) { |
489 | powercap_unregister_control_type(rapl_mmio_priv.control_type); | 489 | powercap_unregister_control_type(rapl_mmio_priv.control_type); |
490 | rapl_mmio_priv.control_type = NULL; | ||
490 | return ret; | 491 | return ret; |
491 | } | 492 | } |
492 | rapl_mmio_priv.pcap_rapl_online = ret; | 493 | rapl_mmio_priv.pcap_rapl_online = ret; |
@@ -496,6 +497,9 @@ static int proc_thermal_rapl_add(struct pci_dev *pdev, | |||
496 | 497 | ||
497 | static void proc_thermal_rapl_remove(void) | 498 | static void proc_thermal_rapl_remove(void) |
498 | { | 499 | { |
500 | if (IS_ERR_OR_NULL(rapl_mmio_priv.control_type)) | ||
501 | return; | ||
502 | |||
499 | cpuhp_remove_state(rapl_mmio_priv.pcap_rapl_online); | 503 | cpuhp_remove_state(rapl_mmio_priv.pcap_rapl_online); |
500 | powercap_unregister_control_type(rapl_mmio_priv.control_type); | 504 | powercap_unregister_control_type(rapl_mmio_priv.control_type); |
501 | } | 505 | } |
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c index cb4db1b3ca3c..5fb214e67d73 100644 --- a/drivers/tty/hvc/hvcs.c +++ b/drivers/tty/hvc/hvcs.c | |||
@@ -47,7 +47,7 @@ | |||
47 | * using the 2.6 Linux kernel kref construct. | 47 | * using the 2.6 Linux kernel kref construct. |
48 | * | 48 | * |
49 | * For direction on installation and usage of this driver please reference | 49 | * For direction on installation and usage of this driver please reference |
50 | * Documentation/powerpc/hvcs.txt. | 50 | * Documentation/powerpc/hvcs.rst. |
51 | */ | 51 | */ |
52 | 52 | ||
53 | #include <linux/device.h> | 53 | #include <linux/device.h> |
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index fd385c8c53a5..3083dbae35f7 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig | |||
@@ -1035,25 +1035,6 @@ config SERIAL_VT8500_CONSOLE | |||
1035 | depends on SERIAL_VT8500=y | 1035 | depends on SERIAL_VT8500=y |
1036 | select SERIAL_CORE_CONSOLE | 1036 | select SERIAL_CORE_CONSOLE |
1037 | 1037 | ||
1038 | config SERIAL_NETX | ||
1039 | tristate "NetX serial port support" | ||
1040 | depends on ARCH_NETX | ||
1041 | select SERIAL_CORE | ||
1042 | help | ||
1043 | If you have a machine based on a Hilscher NetX SoC you | ||
1044 | can enable its onboard serial port by enabling this option. | ||
1045 | |||
1046 | To compile this driver as a module, choose M here: the | ||
1047 | module will be called netx-serial. | ||
1048 | |||
1049 | config SERIAL_NETX_CONSOLE | ||
1050 | bool "Console on NetX serial port" | ||
1051 | depends on SERIAL_NETX=y | ||
1052 | select SERIAL_CORE_CONSOLE | ||
1053 | help | ||
1054 | If you have enabled the serial port on the Hilscher NetX SoC | ||
1055 | you can make it the console by answering Y to this option. | ||
1056 | |||
1057 | config SERIAL_OMAP | 1038 | config SERIAL_OMAP |
1058 | tristate "OMAP serial port support" | 1039 | tristate "OMAP serial port support" |
1059 | depends on ARCH_OMAP2PLUS | 1040 | depends on ARCH_OMAP2PLUS |
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 7cd7cabfa6c4..15a0fccadf7e 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile | |||
@@ -59,7 +59,6 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o | |||
59 | obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o | 59 | obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o |
60 | obj-$(CONFIG_SERIAL_MSM) += msm_serial.o | 60 | obj-$(CONFIG_SERIAL_MSM) += msm_serial.o |
61 | obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o | 61 | obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o |
62 | obj-$(CONFIG_SERIAL_NETX) += netx-serial.o | ||
63 | obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o | 62 | obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o |
64 | obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o | 63 | obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o |
65 | obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o | 64 | obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o |
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c deleted file mode 100644 index b3556863491f..000000000000 --- a/drivers/tty/serial/netx-serial.c +++ /dev/null | |||
@@ -1,733 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix | ||
4 | */ | ||
5 | |||
6 | #if defined(CONFIG_SERIAL_NETX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) | ||
7 | #define SUPPORT_SYSRQ | ||
8 | #endif | ||
9 | |||
10 | #include <linux/device.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/ioport.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/console.h> | ||
15 | #include <linux/sysrq.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/tty.h> | ||
18 | #include <linux/tty_flip.h> | ||
19 | #include <linux/serial_core.h> | ||
20 | #include <linux/serial.h> | ||
21 | |||
22 | #include <asm/io.h> | ||
23 | #include <asm/irq.h> | ||
24 | #include <mach/hardware.h> | ||
25 | #include <mach/netx-regs.h> | ||
26 | |||
27 | /* We've been assigned a range on the "Low-density serial ports" major */ | ||
28 | #define SERIAL_NX_MAJOR 204 | ||
29 | #define MINOR_START 170 | ||
30 | |||
31 | enum uart_regs { | ||
32 | UART_DR = 0x00, | ||
33 | UART_SR = 0x04, | ||
34 | UART_LINE_CR = 0x08, | ||
35 | UART_BAUDDIV_MSB = 0x0c, | ||
36 | UART_BAUDDIV_LSB = 0x10, | ||
37 | UART_CR = 0x14, | ||
38 | UART_FR = 0x18, | ||
39 | UART_IIR = 0x1c, | ||
40 | UART_ILPR = 0x20, | ||
41 | UART_RTS_CR = 0x24, | ||
42 | UART_RTS_LEAD = 0x28, | ||
43 | UART_RTS_TRAIL = 0x2c, | ||
44 | UART_DRV_ENABLE = 0x30, | ||
45 | UART_BRM_CR = 0x34, | ||
46 | UART_RXFIFO_IRQLEVEL = 0x38, | ||
47 | UART_TXFIFO_IRQLEVEL = 0x3c, | ||
48 | }; | ||
49 | |||
50 | #define SR_FE (1<<0) | ||
51 | #define SR_PE (1<<1) | ||
52 | #define SR_BE (1<<2) | ||
53 | #define SR_OE (1<<3) | ||
54 | |||
55 | #define LINE_CR_BRK (1<<0) | ||
56 | #define LINE_CR_PEN (1<<1) | ||
57 | #define LINE_CR_EPS (1<<2) | ||
58 | #define LINE_CR_STP2 (1<<3) | ||
59 | #define LINE_CR_FEN (1<<4) | ||
60 | #define LINE_CR_5BIT (0<<5) | ||
61 | #define LINE_CR_6BIT (1<<5) | ||
62 | #define LINE_CR_7BIT (2<<5) | ||
63 | #define LINE_CR_8BIT (3<<5) | ||
64 | #define LINE_CR_BITS_MASK (3<<5) | ||
65 | |||
66 | #define CR_UART_EN (1<<0) | ||
67 | #define CR_SIREN (1<<1) | ||
68 | #define CR_SIRLP (1<<2) | ||
69 | #define CR_MSIE (1<<3) | ||
70 | #define CR_RIE (1<<4) | ||
71 | #define CR_TIE (1<<5) | ||
72 | #define CR_RTIE (1<<6) | ||
73 | #define CR_LBE (1<<7) | ||
74 | |||
75 | #define FR_CTS (1<<0) | ||
76 | #define FR_DSR (1<<1) | ||
77 | #define FR_DCD (1<<2) | ||
78 | #define FR_BUSY (1<<3) | ||
79 | #define FR_RXFE (1<<4) | ||
80 | #define FR_TXFF (1<<5) | ||
81 | #define FR_RXFF (1<<6) | ||
82 | #define FR_TXFE (1<<7) | ||
83 | |||
84 | #define IIR_MIS (1<<0) | ||
85 | #define IIR_RIS (1<<1) | ||
86 | #define IIR_TIS (1<<2) | ||
87 | #define IIR_RTIS (1<<3) | ||
88 | #define IIR_MASK 0xf | ||
89 | |||
90 | #define RTS_CR_AUTO (1<<0) | ||
91 | #define RTS_CR_RTS (1<<1) | ||
92 | #define RTS_CR_COUNT (1<<2) | ||
93 | #define RTS_CR_MOD2 (1<<3) | ||
94 | #define RTS_CR_RTS_POL (1<<4) | ||
95 | #define RTS_CR_CTS_CTR (1<<5) | ||
96 | #define RTS_CR_CTS_POL (1<<6) | ||
97 | #define RTS_CR_STICK (1<<7) | ||
98 | |||
99 | #define UART_PORT_SIZE 0x40 | ||
100 | #define DRIVER_NAME "netx-uart" | ||
101 | |||
102 | struct netx_port { | ||
103 | struct uart_port port; | ||
104 | }; | ||
105 | |||
106 | static void netx_stop_tx(struct uart_port *port) | ||
107 | { | ||
108 | unsigned int val; | ||
109 | val = readl(port->membase + UART_CR); | ||
110 | writel(val & ~CR_TIE, port->membase + UART_CR); | ||
111 | } | ||
112 | |||
113 | static void netx_stop_rx(struct uart_port *port) | ||
114 | { | ||
115 | unsigned int val; | ||
116 | val = readl(port->membase + UART_CR); | ||
117 | writel(val & ~CR_RIE, port->membase + UART_CR); | ||
118 | } | ||
119 | |||
120 | static void netx_enable_ms(struct uart_port *port) | ||
121 | { | ||
122 | unsigned int val; | ||
123 | val = readl(port->membase + UART_CR); | ||
124 | writel(val | CR_MSIE, port->membase + UART_CR); | ||
125 | } | ||
126 | |||
127 | static inline void netx_transmit_buffer(struct uart_port *port) | ||
128 | { | ||
129 | struct circ_buf *xmit = &port->state->xmit; | ||
130 | |||
131 | if (port->x_char) { | ||
132 | writel(port->x_char, port->membase + UART_DR); | ||
133 | port->icount.tx++; | ||
134 | port->x_char = 0; | ||
135 | return; | ||
136 | } | ||
137 | |||
138 | if (uart_tx_stopped(port) || uart_circ_empty(xmit)) { | ||
139 | netx_stop_tx(port); | ||
140 | return; | ||
141 | } | ||
142 | |||
143 | do { | ||
144 | /* send xmit->buf[xmit->tail] | ||
145 | * out the port here */ | ||
146 | writel(xmit->buf[xmit->tail], port->membase + UART_DR); | ||
147 | xmit->tail = (xmit->tail + 1) & | ||
148 | (UART_XMIT_SIZE - 1); | ||
149 | port->icount.tx++; | ||
150 | if (uart_circ_empty(xmit)) | ||
151 | break; | ||
152 | } while (!(readl(port->membase + UART_FR) & FR_TXFF)); | ||
153 | |||
154 | if (uart_circ_empty(xmit)) | ||
155 | netx_stop_tx(port); | ||
156 | } | ||
157 | |||
158 | static void netx_start_tx(struct uart_port *port) | ||
159 | { | ||
160 | writel( | ||
161 | readl(port->membase + UART_CR) | CR_TIE, port->membase + UART_CR); | ||
162 | |||
163 | if (!(readl(port->membase + UART_FR) & FR_TXFF)) | ||
164 | netx_transmit_buffer(port); | ||
165 | } | ||
166 | |||
167 | static unsigned int netx_tx_empty(struct uart_port *port) | ||
168 | { | ||
169 | return readl(port->membase + UART_FR) & FR_BUSY ? 0 : TIOCSER_TEMT; | ||
170 | } | ||
171 | |||
172 | static void netx_txint(struct uart_port *port) | ||
173 | { | ||
174 | struct circ_buf *xmit = &port->state->xmit; | ||
175 | |||
176 | if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { | ||
177 | netx_stop_tx(port); | ||
178 | return; | ||
179 | } | ||
180 | |||
181 | netx_transmit_buffer(port); | ||
182 | |||
183 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | ||
184 | uart_write_wakeup(port); | ||
185 | } | ||
186 | |||
187 | static void netx_rxint(struct uart_port *port, unsigned long *flags) | ||
188 | { | ||
189 | unsigned char rx, flg, status; | ||
190 | |||
191 | while (!(readl(port->membase + UART_FR) & FR_RXFE)) { | ||
192 | rx = readl(port->membase + UART_DR); | ||
193 | flg = TTY_NORMAL; | ||
194 | port->icount.rx++; | ||
195 | status = readl(port->membase + UART_SR); | ||
196 | if (status & SR_BE) { | ||
197 | writel(0, port->membase + UART_SR); | ||
198 | if (uart_handle_break(port)) | ||
199 | continue; | ||
200 | } | ||
201 | |||
202 | if (unlikely(status & (SR_FE | SR_PE | SR_OE))) { | ||
203 | |||
204 | if (status & SR_PE) | ||
205 | port->icount.parity++; | ||
206 | else if (status & SR_FE) | ||
207 | port->icount.frame++; | ||
208 | if (status & SR_OE) | ||
209 | port->icount.overrun++; | ||
210 | |||
211 | status &= port->read_status_mask; | ||
212 | |||
213 | if (status & SR_BE) | ||
214 | flg = TTY_BREAK; | ||
215 | else if (status & SR_PE) | ||
216 | flg = TTY_PARITY; | ||
217 | else if (status & SR_FE) | ||
218 | flg = TTY_FRAME; | ||
219 | } | ||
220 | |||
221 | if (uart_handle_sysrq_char(port, rx)) | ||
222 | continue; | ||
223 | |||
224 | uart_insert_char(port, status, SR_OE, rx, flg); | ||
225 | } | ||
226 | |||
227 | spin_unlock_irqrestore(&port->lock, *flags); | ||
228 | tty_flip_buffer_push(&port->state->port); | ||
229 | spin_lock_irqsave(&port->lock, *flags); | ||
230 | } | ||
231 | |||
232 | static irqreturn_t netx_int(int irq, void *dev_id) | ||
233 | { | ||
234 | struct uart_port *port = dev_id; | ||
235 | unsigned long flags; | ||
236 | unsigned char status; | ||
237 | |||
238 | spin_lock_irqsave(&port->lock,flags); | ||
239 | |||
240 | status = readl(port->membase + UART_IIR) & IIR_MASK; | ||
241 | while (status) { | ||
242 | if (status & IIR_RIS) | ||
243 | netx_rxint(port, &flags); | ||
244 | if (status & IIR_TIS) | ||
245 | netx_txint(port); | ||
246 | if (status & IIR_MIS) { | ||
247 | if (readl(port->membase + UART_FR) & FR_CTS) | ||
248 | uart_handle_cts_change(port, 1); | ||
249 | else | ||
250 | uart_handle_cts_change(port, 0); | ||
251 | } | ||
252 | writel(0, port->membase + UART_IIR); | ||
253 | status = readl(port->membase + UART_IIR) & IIR_MASK; | ||
254 | } | ||
255 | |||
256 | spin_unlock_irqrestore(&port->lock,flags); | ||
257 | return IRQ_HANDLED; | ||
258 | } | ||
259 | |||
260 | static unsigned int netx_get_mctrl(struct uart_port *port) | ||
261 | { | ||
262 | unsigned int ret = TIOCM_DSR | TIOCM_CAR; | ||
263 | |||
264 | if (readl(port->membase + UART_FR) & FR_CTS) | ||
265 | ret |= TIOCM_CTS; | ||
266 | |||
267 | return ret; | ||
268 | } | ||
269 | |||
270 | static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl) | ||
271 | { | ||
272 | unsigned int val; | ||
273 | |||
274 | /* FIXME: Locking needed ? */ | ||
275 | if (mctrl & TIOCM_RTS) { | ||
276 | val = readl(port->membase + UART_RTS_CR); | ||
277 | writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR); | ||
278 | } | ||
279 | } | ||
280 | |||
281 | static void netx_break_ctl(struct uart_port *port, int break_state) | ||
282 | { | ||
283 | unsigned int line_cr; | ||
284 | spin_lock_irq(&port->lock); | ||
285 | |||
286 | line_cr = readl(port->membase + UART_LINE_CR); | ||
287 | if (break_state != 0) | ||
288 | line_cr |= LINE_CR_BRK; | ||
289 | else | ||
290 | line_cr &= ~LINE_CR_BRK; | ||
291 | writel(line_cr, port->membase + UART_LINE_CR); | ||
292 | |||
293 | spin_unlock_irq(&port->lock); | ||
294 | } | ||
295 | |||
296 | static int netx_startup(struct uart_port *port) | ||
297 | { | ||
298 | int ret; | ||
299 | |||
300 | ret = request_irq(port->irq, netx_int, 0, | ||
301 | DRIVER_NAME, port); | ||
302 | if (ret) { | ||
303 | dev_err(port->dev, "unable to grab irq%d\n",port->irq); | ||
304 | goto exit; | ||
305 | } | ||
306 | |||
307 | writel(readl(port->membase + UART_LINE_CR) | LINE_CR_FEN, | ||
308 | port->membase + UART_LINE_CR); | ||
309 | |||
310 | writel(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE | CR_UART_EN, | ||
311 | port->membase + UART_CR); | ||
312 | |||
313 | exit: | ||
314 | return ret; | ||
315 | } | ||
316 | |||
317 | static void netx_shutdown(struct uart_port *port) | ||
318 | { | ||
319 | writel(0, port->membase + UART_CR) ; | ||
320 | |||
321 | free_irq(port->irq, port); | ||
322 | } | ||
323 | |||
/*
 * uart_ops->set_termios: program word length, stop bits, parity,
 * hardware flow control and the baud-rate divisor, then re-enable the
 * UART with the previously active control bits.
 */
static void
netx_set_termios(struct uart_port *port, struct ktermios *termios,
		   struct ktermios *old)
{
	unsigned int baud, quot;
	unsigned char old_cr;
	unsigned char line_cr = LINE_CR_FEN;	/* keep the FIFOs enabled */
	unsigned char rts_cr = 0;

	/* Map the requested character size onto the LINE_CR bit field. */
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		line_cr |= LINE_CR_5BIT;
		break;
	case CS6:
		line_cr |= LINE_CR_6BIT;
		break;
	case CS7:
		line_cr |= LINE_CR_7BIT;
		break;
	case CS8:
		line_cr |= LINE_CR_8BIT;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		line_cr |= LINE_CR_STP2;

	if (termios->c_cflag & PARENB) {
		line_cr |= LINE_CR_PEN;
		if (!(termios->c_cflag & PARODD))
			line_cr |= LINE_CR_EPS;
	}

	/* Hardware RTS/CTS flow control, fully handled by the UART. */
	if (termios->c_cflag & CRTSCTS)
		rts_cr = RTS_CR_AUTO | RTS_CR_CTS_CTR | RTS_CR_RTS_POL;

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	/*
	 * Divisor is baud * 2^20 / 10^8, computed stepwise
	 * (x4096 /1000 x256 /100000) to keep intermediate products
	 * inside 32 bits for common rates.
	 * NOTE(review): rates near the uartclk/16 maximum would still
	 * overflow the first multiplication -- confirm supported range.
	 */
	quot = baud * 4096;
	quot /= 1000;
	quot *= 256;
	quot /= 100000;

	spin_lock_irq(&port->lock);

	uart_update_timeout(port, termios->c_cflag, baud);

	old_cr = readl(port->membase + UART_CR);

	/* disable interrupts */
	writel(old_cr & ~(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE),
		port->membase + UART_CR);

	/* drain transmitter */
	while (readl(port->membase + UART_FR) & FR_BUSY);

	/* disable UART */
	writel(old_cr & ~CR_UART_EN, port->membase + UART_CR);

	/* modem status interrupts */
	old_cr &= ~CR_MSIE;
	if (UART_ENABLE_MS(port, termios->c_cflag))
		old_cr |= CR_MSIE;

	/* Program divisor and line settings while the UART is disabled. */
	writel((quot>>8) & 0xff, port->membase + UART_BAUDDIV_MSB);
	writel(quot & 0xff, port->membase + UART_BAUDDIV_LSB);
	writel(line_cr, port->membase + UART_LINE_CR);

	writel(rts_cr, port->membase + UART_RTS_CR);

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= SR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= SR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 *
		 * NOTE(review): this sets SR_PE a second time, which the
		 * IGNPAR check above already covers; the comment suggests
		 * an overrun status bit was intended here -- confirm
		 * against the status-register bit definitions.
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= SR_PE;
	}

	/* Status bits the receive path should report to the tty layer. */
	port->read_status_mask = 0;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= SR_BE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= SR_PE | SR_FE;

	/* Re-enable the UART with the (possibly updated) control bits. */
	writel(old_cr, port->membase + UART_CR);

	spin_unlock_irq(&port->lock);
}
419 | |||
420 | static const char *netx_type(struct uart_port *port) | ||
421 | { | ||
422 | return port->type == PORT_NETX ? "NETX" : NULL; | ||
423 | } | ||
424 | |||
425 | static void netx_release_port(struct uart_port *port) | ||
426 | { | ||
427 | release_mem_region(port->mapbase, UART_PORT_SIZE); | ||
428 | } | ||
429 | |||
430 | static int netx_request_port(struct uart_port *port) | ||
431 | { | ||
432 | return request_mem_region(port->mapbase, UART_PORT_SIZE, | ||
433 | DRIVER_NAME) != NULL ? 0 : -EBUSY; | ||
434 | } | ||
435 | |||
436 | static void netx_config_port(struct uart_port *port, int flags) | ||
437 | { | ||
438 | if (flags & UART_CONFIG_TYPE && netx_request_port(port) == 0) | ||
439 | port->type = PORT_NETX; | ||
440 | } | ||
441 | |||
442 | static int | ||
443 | netx_verify_port(struct uart_port *port, struct serial_struct *ser) | ||
444 | { | ||
445 | int ret = 0; | ||
446 | |||
447 | if (ser->type != PORT_UNKNOWN && ser->type != PORT_NETX) | ||
448 | ret = -EINVAL; | ||
449 | |||
450 | return ret; | ||
451 | } | ||
452 | |||
453 | static struct uart_ops netx_pops = { | ||
454 | .tx_empty = netx_tx_empty, | ||
455 | .set_mctrl = netx_set_mctrl, | ||
456 | .get_mctrl = netx_get_mctrl, | ||
457 | .stop_tx = netx_stop_tx, | ||
458 | .start_tx = netx_start_tx, | ||
459 | .stop_rx = netx_stop_rx, | ||
460 | .enable_ms = netx_enable_ms, | ||
461 | .break_ctl = netx_break_ctl, | ||
462 | .startup = netx_startup, | ||
463 | .shutdown = netx_shutdown, | ||
464 | .set_termios = netx_set_termios, | ||
465 | .type = netx_type, | ||
466 | .release_port = netx_release_port, | ||
467 | .request_port = netx_request_port, | ||
468 | .config_port = netx_config_port, | ||
469 | .verify_port = netx_verify_port, | ||
470 | }; | ||
471 | |||
/*
 * Static description of the three on-chip UARTs.  membase is a fixed
 * virtual mapping (io_p2v) of the physical register block, so probe
 * never needs to ioremap.  All ports share a 100 MHz UART clock and
 * 16-byte FIFOs.
 */
static struct netx_port netx_ports[] = {
	{
		.port = {
			.type = PORT_NETX,
			.iotype = UPIO_MEM,
			.membase = (char __iomem *)io_p2v(NETX_PA_UART0),
			.mapbase = NETX_PA_UART0,
			.irq = NETX_IRQ_UART0,
			.uartclk = 100000000,
			.fifosize = 16,
			.flags = UPF_BOOT_AUTOCONF,
			.ops = &netx_pops,
			.line = 0,
		},
	}, {
		.port = {
			.type = PORT_NETX,
			.iotype = UPIO_MEM,
			.membase = (char __iomem *)io_p2v(NETX_PA_UART1),
			.mapbase = NETX_PA_UART1,
			.irq = NETX_IRQ_UART1,
			.uartclk = 100000000,
			.fifosize = 16,
			.flags = UPF_BOOT_AUTOCONF,
			.ops = &netx_pops,
			.line = 1,
		},
	}, {
		.port = {
			.type = PORT_NETX,
			.iotype = UPIO_MEM,
			.membase = (char __iomem *)io_p2v(NETX_PA_UART2),
			.mapbase = NETX_PA_UART2,
			.irq = NETX_IRQ_UART2,
			.uartclk = 100000000,
			.fifosize = 16,
			.flags = UPF_BOOT_AUTOCONF,
			.ops = &netx_pops,
			.line = 2,
		},
	}
};
514 | |||
515 | #ifdef CONFIG_SERIAL_NETX_CONSOLE | ||
516 | |||
517 | static void netx_console_putchar(struct uart_port *port, int ch) | ||
518 | { | ||
519 | while (readl(port->membase + UART_FR) & FR_BUSY); | ||
520 | writel(ch, port->membase + UART_DR); | ||
521 | } | ||
522 | |||
523 | static void | ||
524 | netx_console_write(struct console *co, const char *s, unsigned int count) | ||
525 | { | ||
526 | struct uart_port *port = &netx_ports[co->index].port; | ||
527 | unsigned char cr_save; | ||
528 | |||
529 | cr_save = readl(port->membase + UART_CR); | ||
530 | writel(cr_save | CR_UART_EN, port->membase + UART_CR); | ||
531 | |||
532 | uart_console_write(port, s, count, netx_console_putchar); | ||
533 | |||
534 | while (readl(port->membase + UART_FR) & FR_BUSY); | ||
535 | writel(cr_save, port->membase + UART_CR); | ||
536 | } | ||
537 | |||
/*
 * Read back the line settings the bootloader programmed so the console
 * can come up without reprogramming the hardware.  *flow is only
 * written when auto RTS is enabled; the caller's default is kept
 * otherwise.
 */
static void __init
netx_console_get_options(struct uart_port *port, int *baud,
			int *parity, int *bits, int *flow)
{
	unsigned char line_cr;

	/*
	 * Invert the stepwise divisor calculation done in
	 * netx_set_termios() (division order is load-bearing).
	 */
	*baud = (readl(port->membase + UART_BAUDDIV_MSB) << 8) |
		readl(port->membase + UART_BAUDDIV_LSB);
	*baud *= 1000;
	*baud /= 4096;
	*baud *= 1000;
	*baud /= 256;
	*baud *= 100;

	line_cr = readl(port->membase + UART_LINE_CR);
	*parity = 'n';
	if (line_cr & LINE_CR_PEN) {
		if (line_cr & LINE_CR_EPS)
			*parity = 'e';
		else
			*parity = 'o';
	}

	/* Decode the word-length field back into a bit count. */
	switch (line_cr & LINE_CR_BITS_MASK) {
	case LINE_CR_8BIT:
		*bits = 8;
		break;
	case LINE_CR_7BIT:
		*bits = 7;
		break;
	case LINE_CR_6BIT:
		*bits = 6;
		break;
	case LINE_CR_5BIT:
		*bits = 5;
		break;
	}

	if (readl(port->membase + UART_RTS_CR) & RTS_CR_AUTO)
		*flow = 'r';
}
579 | |||
580 | static int __init | ||
581 | netx_console_setup(struct console *co, char *options) | ||
582 | { | ||
583 | struct netx_port *sport; | ||
584 | int baud = 9600; | ||
585 | int bits = 8; | ||
586 | int parity = 'n'; | ||
587 | int flow = 'n'; | ||
588 | |||
589 | /* | ||
590 | * Check whether an invalid uart number has been specified, and | ||
591 | * if so, search for the first available port that does have | ||
592 | * console support. | ||
593 | */ | ||
594 | if (co->index == -1 || co->index >= ARRAY_SIZE(netx_ports)) | ||
595 | co->index = 0; | ||
596 | sport = &netx_ports[co->index]; | ||
597 | |||
598 | if (options) { | ||
599 | uart_parse_options(options, &baud, &parity, &bits, &flow); | ||
600 | } else { | ||
601 | /* if the UART is enabled, assume it has been correctly setup | ||
602 | * by the bootloader and get the options | ||
603 | */ | ||
604 | if (readl(sport->port.membase + UART_CR) & CR_UART_EN) { | ||
605 | netx_console_get_options(&sport->port, &baud, | ||
606 | &parity, &bits, &flow); | ||
607 | } | ||
608 | |||
609 | } | ||
610 | |||
611 | return uart_set_options(&sport->port, co, baud, parity, bits, flow); | ||
612 | } | ||
613 | |||
/* Forward declaration: the console refers to the uart_driver defined below. */
static struct uart_driver netx_reg;

/* Console attached to the NetX UART; index -1 means "use the default". */
static struct console netx_console = {
	.name		= "ttyNX",
	.write		= netx_console_write,
	.device		= uart_console_device,
	.setup		= netx_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &netx_reg,
};
624 | |||
/* Register the NetX console during the console initcall phase. */
static int __init netx_console_init(void)
{
	register_console(&netx_console);
	return 0;
}
console_initcall(netx_console_init);
631 | |||
632 | #define NETX_CONSOLE &netx_console | ||
633 | #else | ||
634 | #define NETX_CONSOLE NULL | ||
635 | #endif | ||
636 | |||
/*
 * serial core driver descriptor: /dev/ttyNX<n>, one line per entry in
 * netx_ports, console attached when CONFIG_SERIAL_NETX_CONSOLE is set.
 */
static struct uart_driver netx_reg = {
	.owner          = THIS_MODULE,
	.driver_name    = DRIVER_NAME,
	.dev_name       = "ttyNX",
	.major          = SERIAL_NX_MAJOR,
	.minor          = MINOR_START,
	.nr             = ARRAY_SIZE(netx_ports),
	.cons           = NETX_CONSOLE,
};
646 | |||
647 | static int serial_netx_suspend(struct platform_device *pdev, pm_message_t state) | ||
648 | { | ||
649 | struct netx_port *sport = platform_get_drvdata(pdev); | ||
650 | |||
651 | if (sport) | ||
652 | uart_suspend_port(&netx_reg, &sport->port); | ||
653 | |||
654 | return 0; | ||
655 | } | ||
656 | |||
657 | static int serial_netx_resume(struct platform_device *pdev) | ||
658 | { | ||
659 | struct netx_port *sport = platform_get_drvdata(pdev); | ||
660 | |||
661 | if (sport) | ||
662 | uart_resume_port(&netx_reg, &sport->port); | ||
663 | |||
664 | return 0; | ||
665 | } | ||
666 | |||
667 | static int serial_netx_probe(struct platform_device *pdev) | ||
668 | { | ||
669 | struct uart_port *port = &netx_ports[pdev->id].port; | ||
670 | |||
671 | dev_info(&pdev->dev, "initialising\n"); | ||
672 | |||
673 | port->dev = &pdev->dev; | ||
674 | |||
675 | writel(1, port->membase + UART_RXFIFO_IRQLEVEL); | ||
676 | uart_add_one_port(&netx_reg, &netx_ports[pdev->id].port); | ||
677 | platform_set_drvdata(pdev, &netx_ports[pdev->id]); | ||
678 | |||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | static int serial_netx_remove(struct platform_device *pdev) | ||
683 | { | ||
684 | struct netx_port *sport = platform_get_drvdata(pdev); | ||
685 | |||
686 | if (sport) | ||
687 | uart_remove_one_port(&netx_reg, &sport->port); | ||
688 | |||
689 | return 0; | ||
690 | } | ||
691 | |||
/*
 * Platform driver glue; uses the legacy suspend/resume callbacks
 * rather than dev_pm_ops.
 */
static struct platform_driver serial_netx_driver = {
	.probe          = serial_netx_probe,
	.remove         = serial_netx_remove,

	.suspend	= serial_netx_suspend,
	.resume		= serial_netx_resume,

	.driver		= {
		.name   = DRIVER_NAME,
	},
};
703 | |||
704 | static int __init netx_serial_init(void) | ||
705 | { | ||
706 | int ret; | ||
707 | |||
708 | printk(KERN_INFO "Serial: NetX driver\n"); | ||
709 | |||
710 | ret = uart_register_driver(&netx_reg); | ||
711 | if (ret) | ||
712 | return ret; | ||
713 | |||
714 | ret = platform_driver_register(&serial_netx_driver); | ||
715 | if (ret != 0) | ||
716 | uart_unregister_driver(&netx_reg); | ||
717 | |||
718 | return 0; | ||
719 | } | ||
720 | |||
721 | static void __exit netx_serial_exit(void) | ||
722 | { | ||
723 | platform_driver_unregister(&serial_netx_driver); | ||
724 | uart_unregister_driver(&netx_reg); | ||
725 | } | ||
726 | |||
727 | module_init(netx_serial_init); | ||
728 | module_exit(netx_serial_exit); | ||
729 | |||
730 | MODULE_AUTHOR("Sascha Hauer"); | ||
731 | MODULE_DESCRIPTION("NetX serial port driver"); | ||
732 | MODULE_LICENSE("GPL"); | ||
733 | MODULE_ALIAS("platform:" DRIVER_NAME); | ||
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c index 717292c1c0df..60ff236a3d63 100644 --- a/drivers/tty/tty_ldsem.c +++ b/drivers/tty/tty_ldsem.c | |||
@@ -93,8 +93,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem) | |||
93 | 93 | ||
94 | list_for_each_entry_safe(waiter, next, &sem->read_wait, list) { | 94 | list_for_each_entry_safe(waiter, next, &sem->read_wait, list) { |
95 | tsk = waiter->task; | 95 | tsk = waiter->task; |
96 | smp_mb(); | 96 | smp_store_release(&waiter->task, NULL); |
97 | waiter->task = NULL; | ||
98 | wake_up_process(tsk); | 97 | wake_up_process(tsk); |
99 | put_task_struct(tsk); | 98 | put_task_struct(tsk); |
100 | } | 99 | } |
@@ -194,7 +193,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout) | |||
194 | for (;;) { | 193 | for (;;) { |
195 | set_current_state(TASK_UNINTERRUPTIBLE); | 194 | set_current_state(TASK_UNINTERRUPTIBLE); |
196 | 195 | ||
197 | if (!waiter.task) | 196 | if (!smp_load_acquire(&waiter.task)) |
198 | break; | 197 | break; |
199 | if (!timeout) | 198 | if (!timeout) |
200 | break; | 199 | break; |
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index ec92f36ab5c4..34aa39d1aed9 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c | |||
@@ -3771,7 +3771,11 @@ static ssize_t show_bind(struct device *dev, struct device_attribute *attr, | |||
3771 | char *buf) | 3771 | char *buf) |
3772 | { | 3772 | { |
3773 | struct con_driver *con = dev_get_drvdata(dev); | 3773 | struct con_driver *con = dev_get_drvdata(dev); |
3774 | int bind = con_is_bound(con->con); | 3774 | int bind; |
3775 | |||
3776 | console_lock(); | ||
3777 | bind = con_is_bound(con->con); | ||
3778 | console_unlock(); | ||
3775 | 3779 | ||
3776 | return snprintf(buf, PAGE_SIZE, "%i\n", bind); | 3780 | return snprintf(buf, PAGE_SIZE, "%i\n", bind); |
3777 | } | 3781 | } |
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 88533938ce19..9320787ac2e6 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -3052,8 +3052,8 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, | |||
3052 | 3052 | ||
3053 | local_mem = devm_memremap(hcd->self.sysdev, phys_addr, | 3053 | local_mem = devm_memremap(hcd->self.sysdev, phys_addr, |
3054 | size, MEMREMAP_WC); | 3054 | size, MEMREMAP_WC); |
3055 | if (!local_mem) | 3055 | if (IS_ERR(local_mem)) |
3056 | return -ENOMEM; | 3056 | return PTR_ERR(local_mem); |
3057 | 3057 | ||
3058 | /* | 3058 | /* |
3059 | * Here we pass a dma_addr_t but the arg type is a phys_addr_t. | 3059 | * Here we pass a dma_addr_t but the arg type is a phys_addr_t. |
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index fe9422d3bcdc..b0882c13a1d1 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c | |||
@@ -149,7 +149,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd) | |||
149 | break; | 149 | break; |
150 | case PCI_VENDOR_ID_AMD: | 150 | case PCI_VENDOR_ID_AMD: |
151 | /* AMD PLL quirk */ | 151 | /* AMD PLL quirk */ |
152 | if (usb_amd_find_chipset_info()) | 152 | if (usb_amd_quirk_pll_check()) |
153 | ehci->amd_pll_fix = 1; | 153 | ehci->amd_pll_fix = 1; |
154 | /* AMD8111 EHCI doesn't work, according to AMD errata */ | 154 | /* AMD8111 EHCI doesn't work, according to AMD errata */ |
155 | if (pdev->device == 0x7463) { | 155 | if (pdev->device == 0x7463) { |
@@ -186,7 +186,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd) | |||
186 | break; | 186 | break; |
187 | case PCI_VENDOR_ID_ATI: | 187 | case PCI_VENDOR_ID_ATI: |
188 | /* AMD PLL quirk */ | 188 | /* AMD PLL quirk */ |
189 | if (usb_amd_find_chipset_info()) | 189 | if (usb_amd_quirk_pll_check()) |
190 | ehci->amd_pll_fix = 1; | 190 | ehci->amd_pll_fix = 1; |
191 | 191 | ||
192 | /* | 192 | /* |
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c index 09a8ebd95588..6968b9f2b76b 100644 --- a/drivers/usb/host/hwa-hc.c +++ b/drivers/usb/host/hwa-hc.c | |||
@@ -159,7 +159,7 @@ out: | |||
159 | return result; | 159 | return result; |
160 | 160 | ||
161 | error_set_cluster_id: | 161 | error_set_cluster_id: |
162 | wusb_cluster_id_put(wusbhc->cluster_id); | 162 | wusb_cluster_id_put(addr); |
163 | error_cluster_id_get: | 163 | error_cluster_id_get: |
164 | goto out; | 164 | goto out; |
165 | 165 | ||
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index a033f7d855e0..f4e13a3fddee 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c | |||
@@ -152,7 +152,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) | |||
152 | { | 152 | { |
153 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); | 153 | struct ohci_hcd *ohci = hcd_to_ohci(hcd); |
154 | 154 | ||
155 | if (usb_amd_find_chipset_info()) | 155 | if (usb_amd_quirk_pll_check()) |
156 | ohci->flags |= OHCI_QUIRK_AMD_PLL; | 156 | ohci->flags |= OHCI_QUIRK_AMD_PLL; |
157 | 157 | ||
158 | /* SB800 needs pre-fetch fix */ | 158 | /* SB800 needs pre-fetch fix */ |
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 3ce71cbfbb58..f6d04491df60 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c | |||
@@ -132,7 +132,7 @@ static struct amd_chipset_info { | |||
132 | struct amd_chipset_type sb_type; | 132 | struct amd_chipset_type sb_type; |
133 | int isoc_reqs; | 133 | int isoc_reqs; |
134 | int probe_count; | 134 | int probe_count; |
135 | int probe_result; | 135 | bool need_pll_quirk; |
136 | } amd_chipset; | 136 | } amd_chipset; |
137 | 137 | ||
138 | static DEFINE_SPINLOCK(amd_lock); | 138 | static DEFINE_SPINLOCK(amd_lock); |
@@ -201,11 +201,11 @@ void sb800_prefetch(struct device *dev, int on) | |||
201 | } | 201 | } |
202 | EXPORT_SYMBOL_GPL(sb800_prefetch); | 202 | EXPORT_SYMBOL_GPL(sb800_prefetch); |
203 | 203 | ||
204 | int usb_amd_find_chipset_info(void) | 204 | static void usb_amd_find_chipset_info(void) |
205 | { | 205 | { |
206 | unsigned long flags; | 206 | unsigned long flags; |
207 | struct amd_chipset_info info; | 207 | struct amd_chipset_info info; |
208 | int ret; | 208 | info.need_pll_quirk = 0; |
209 | 209 | ||
210 | spin_lock_irqsave(&amd_lock, flags); | 210 | spin_lock_irqsave(&amd_lock, flags); |
211 | 211 | ||
@@ -213,27 +213,34 @@ int usb_amd_find_chipset_info(void) | |||
213 | if (amd_chipset.probe_count > 0) { | 213 | if (amd_chipset.probe_count > 0) { |
214 | amd_chipset.probe_count++; | 214 | amd_chipset.probe_count++; |
215 | spin_unlock_irqrestore(&amd_lock, flags); | 215 | spin_unlock_irqrestore(&amd_lock, flags); |
216 | return amd_chipset.probe_result; | 216 | return; |
217 | } | 217 | } |
218 | memset(&info, 0, sizeof(info)); | 218 | memset(&info, 0, sizeof(info)); |
219 | spin_unlock_irqrestore(&amd_lock, flags); | 219 | spin_unlock_irqrestore(&amd_lock, flags); |
220 | 220 | ||
221 | if (!amd_chipset_sb_type_init(&info)) { | 221 | if (!amd_chipset_sb_type_init(&info)) { |
222 | ret = 0; | ||
223 | goto commit; | 222 | goto commit; |
224 | } | 223 | } |
225 | 224 | ||
226 | /* Below chipset generations needn't enable AMD PLL quirk */ | 225 | switch (info.sb_type.gen) { |
227 | if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN || | 226 | case AMD_CHIPSET_SB700: |
228 | info.sb_type.gen == AMD_CHIPSET_SB600 || | 227 | info.need_pll_quirk = info.sb_type.rev <= 0x3B; |
229 | info.sb_type.gen == AMD_CHIPSET_YANGTZE || | 228 | break; |
230 | (info.sb_type.gen == AMD_CHIPSET_SB700 && | 229 | case AMD_CHIPSET_SB800: |
231 | info.sb_type.rev > 0x3b)) { | 230 | case AMD_CHIPSET_HUDSON2: |
231 | case AMD_CHIPSET_BOLTON: | ||
232 | info.need_pll_quirk = 1; | ||
233 | break; | ||
234 | default: | ||
235 | info.need_pll_quirk = 0; | ||
236 | break; | ||
237 | } | ||
238 | |||
239 | if (!info.need_pll_quirk) { | ||
232 | if (info.smbus_dev) { | 240 | if (info.smbus_dev) { |
233 | pci_dev_put(info.smbus_dev); | 241 | pci_dev_put(info.smbus_dev); |
234 | info.smbus_dev = NULL; | 242 | info.smbus_dev = NULL; |
235 | } | 243 | } |
236 | ret = 0; | ||
237 | goto commit; | 244 | goto commit; |
238 | } | 245 | } |
239 | 246 | ||
@@ -252,7 +259,6 @@ int usb_amd_find_chipset_info(void) | |||
252 | } | 259 | } |
253 | } | 260 | } |
254 | 261 | ||
255 | ret = info.probe_result = 1; | ||
256 | printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); | 262 | printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); |
257 | 263 | ||
258 | commit: | 264 | commit: |
@@ -263,7 +269,6 @@ commit: | |||
263 | 269 | ||
264 | /* Mark that we where here */ | 270 | /* Mark that we where here */ |
265 | amd_chipset.probe_count++; | 271 | amd_chipset.probe_count++; |
266 | ret = amd_chipset.probe_result; | ||
267 | 272 | ||
268 | spin_unlock_irqrestore(&amd_lock, flags); | 273 | spin_unlock_irqrestore(&amd_lock, flags); |
269 | 274 | ||
@@ -276,10 +281,7 @@ commit: | |||
276 | amd_chipset = info; | 281 | amd_chipset = info; |
277 | spin_unlock_irqrestore(&amd_lock, flags); | 282 | spin_unlock_irqrestore(&amd_lock, flags); |
278 | } | 283 | } |
279 | |||
280 | return ret; | ||
281 | } | 284 | } |
282 | EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info); | ||
283 | 285 | ||
284 | int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev) | 286 | int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev) |
285 | { | 287 | { |
@@ -315,6 +317,13 @@ bool usb_amd_prefetch_quirk(void) | |||
315 | } | 317 | } |
316 | EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk); | 318 | EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk); |
317 | 319 | ||
320 | bool usb_amd_quirk_pll_check(void) | ||
321 | { | ||
322 | usb_amd_find_chipset_info(); | ||
323 | return amd_chipset.need_pll_quirk; | ||
324 | } | ||
325 | EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_check); | ||
326 | |||
318 | /* | 327 | /* |
319 | * The hardware normally enables the A-link power management feature, which | 328 | * The hardware normally enables the A-link power management feature, which |
320 | * lets the system lower the power consumption in idle states. | 329 | * lets the system lower the power consumption in idle states. |
@@ -520,7 +529,7 @@ void usb_amd_dev_put(void) | |||
520 | amd_chipset.nb_type = 0; | 529 | amd_chipset.nb_type = 0; |
521 | memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type)); | 530 | memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type)); |
522 | amd_chipset.isoc_reqs = 0; | 531 | amd_chipset.isoc_reqs = 0; |
523 | amd_chipset.probe_result = 0; | 532 | amd_chipset.need_pll_quirk = 0; |
524 | 533 | ||
525 | spin_unlock_irqrestore(&amd_lock, flags); | 534 | spin_unlock_irqrestore(&amd_lock, flags); |
526 | 535 | ||
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index 63c633077d9e..e729de21fad7 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h | |||
@@ -5,11 +5,11 @@ | |||
5 | #ifdef CONFIG_USB_PCI | 5 | #ifdef CONFIG_USB_PCI |
6 | void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); | 6 | void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); |
7 | int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); | 7 | int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); |
8 | int usb_amd_find_chipset_info(void); | ||
9 | int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev); | 8 | int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev); |
10 | bool usb_amd_hang_symptom_quirk(void); | 9 | bool usb_amd_hang_symptom_quirk(void); |
11 | bool usb_amd_prefetch_quirk(void); | 10 | bool usb_amd_prefetch_quirk(void); |
12 | void usb_amd_dev_put(void); | 11 | void usb_amd_dev_put(void); |
12 | bool usb_amd_quirk_pll_check(void); | ||
13 | void usb_amd_quirk_pll_disable(void); | 13 | void usb_amd_quirk_pll_disable(void); |
14 | void usb_amd_quirk_pll_enable(void); | 14 | void usb_amd_quirk_pll_enable(void); |
15 | void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev); | 15 | void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev); |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index c2fe218e051f..1e0236e90687 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -130,7 +130,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
130 | xhci->quirks |= XHCI_AMD_0x96_HOST; | 130 | xhci->quirks |= XHCI_AMD_0x96_HOST; |
131 | 131 | ||
132 | /* AMD PLL quirk */ | 132 | /* AMD PLL quirk */ |
133 | if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) | 133 | if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_quirk_pll_check()) |
134 | xhci->quirks |= XHCI_AMD_PLL_FIX; | 134 | xhci->quirks |= XHCI_AMD_PLL_FIX; |
135 | 135 | ||
136 | if (pdev->vendor == PCI_VENDOR_ID_AMD && | 136 | if (pdev->vendor == PCI_VENDOR_ID_AMD && |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 7a264962a1a9..f5c41448d067 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -2175,7 +2175,8 @@ static inline bool xhci_urb_suitable_for_idt(struct urb *urb) | |||
2175 | if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) && | 2175 | if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) && |
2176 | usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE && | 2176 | usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE && |
2177 | urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE && | 2177 | urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE && |
2178 | !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) | 2178 | !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) && |
2179 | !urb->num_sgs) | ||
2179 | return true; | 2180 | return true; |
2180 | 2181 | ||
2181 | return false; | 2182 | return false; |
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 4d6ae3795a88..6ca9111d150a 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c | |||
@@ -375,7 +375,8 @@ out_err: | |||
375 | 375 | ||
376 | #ifdef CONFIG_OF | 376 | #ifdef CONFIG_OF |
377 | static void usb251xb_get_ports_field(struct usb251xb *hub, | 377 | static void usb251xb_get_ports_field(struct usb251xb *hub, |
378 | const char *prop_name, u8 port_cnt, u8 *fld) | 378 | const char *prop_name, u8 port_cnt, |
379 | bool ds_only, u8 *fld) | ||
379 | { | 380 | { |
380 | struct device *dev = hub->dev; | 381 | struct device *dev = hub->dev; |
381 | struct property *prop; | 382 | struct property *prop; |
@@ -383,7 +384,7 @@ static void usb251xb_get_ports_field(struct usb251xb *hub, | |||
383 | u32 port; | 384 | u32 port; |
384 | 385 | ||
385 | of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) { | 386 | of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) { |
386 | if ((port >= 1) && (port <= port_cnt)) | 387 | if ((port >= ds_only ? 1 : 0) && (port <= port_cnt)) |
387 | *fld |= BIT(port); | 388 | *fld |= BIT(port); |
388 | else | 389 | else |
389 | dev_warn(dev, "port %u doesn't exist\n", port); | 390 | dev_warn(dev, "port %u doesn't exist\n", port); |
@@ -501,15 +502,15 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, | |||
501 | 502 | ||
502 | hub->non_rem_dev = USB251XB_DEF_NON_REMOVABLE_DEVICES; | 503 | hub->non_rem_dev = USB251XB_DEF_NON_REMOVABLE_DEVICES; |
503 | usb251xb_get_ports_field(hub, "non-removable-ports", data->port_cnt, | 504 | usb251xb_get_ports_field(hub, "non-removable-ports", data->port_cnt, |
504 | &hub->non_rem_dev); | 505 | true, &hub->non_rem_dev); |
505 | 506 | ||
506 | hub->port_disable_sp = USB251XB_DEF_PORT_DISABLE_SELF; | 507 | hub->port_disable_sp = USB251XB_DEF_PORT_DISABLE_SELF; |
507 | usb251xb_get_ports_field(hub, "sp-disabled-ports", data->port_cnt, | 508 | usb251xb_get_ports_field(hub, "sp-disabled-ports", data->port_cnt, |
508 | &hub->port_disable_sp); | 509 | true, &hub->port_disable_sp); |
509 | 510 | ||
510 | hub->port_disable_bp = USB251XB_DEF_PORT_DISABLE_BUS; | 511 | hub->port_disable_bp = USB251XB_DEF_PORT_DISABLE_BUS; |
511 | usb251xb_get_ports_field(hub, "bp-disabled-ports", data->port_cnt, | 512 | usb251xb_get_ports_field(hub, "bp-disabled-ports", data->port_cnt, |
512 | &hub->port_disable_bp); | 513 | true, &hub->port_disable_bp); |
513 | 514 | ||
514 | hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF; | 515 | hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF; |
515 | if (!of_property_read_u32(np, "sp-max-total-current-microamp", | 516 | if (!of_property_read_u32(np, "sp-max-total-current-microamp", |
@@ -573,9 +574,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, | |||
573 | */ | 574 | */ |
574 | hub->port_swap = USB251XB_DEF_PORT_SWAP; | 575 | hub->port_swap = USB251XB_DEF_PORT_SWAP; |
575 | usb251xb_get_ports_field(hub, "swap-dx-lanes", data->port_cnt, | 576 | usb251xb_get_ports_field(hub, "swap-dx-lanes", data->port_cnt, |
576 | &hub->port_swap); | 577 | false, &hub->port_swap); |
577 | if (of_get_property(np, "swap-us-lanes", NULL)) | ||
578 | hub->port_swap |= BIT(0); | ||
579 | 578 | ||
580 | /* The following parameters are currently not exposed to devicetree, but | 579 | /* The following parameters are currently not exposed to devicetree, but |
581 | * may be as soon as needed. | 580 | * may be as soon as needed. |
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 30790240aec6..05b80211290d 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c | |||
@@ -28,6 +28,8 @@ | |||
28 | * status of a command. | 28 | * status of a command. |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/blkdev.h> | ||
32 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/module.h> | 33 | #include <linux/module.h> |
32 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
33 | 35 | ||
@@ -99,6 +101,7 @@ static int slave_alloc (struct scsi_device *sdev) | |||
99 | static int slave_configure(struct scsi_device *sdev) | 101 | static int slave_configure(struct scsi_device *sdev) |
100 | { | 102 | { |
101 | struct us_data *us = host_to_us(sdev->host); | 103 | struct us_data *us = host_to_us(sdev->host); |
104 | struct device *dev = us->pusb_dev->bus->sysdev; | ||
102 | 105 | ||
103 | /* | 106 | /* |
104 | * Many devices have trouble transferring more than 32KB at a time, | 107 | * Many devices have trouble transferring more than 32KB at a time, |
@@ -129,6 +132,14 @@ static int slave_configure(struct scsi_device *sdev) | |||
129 | } | 132 | } |
130 | 133 | ||
131 | /* | 134 | /* |
135 | * The max_hw_sectors should be up to maximum size of a mapping for | ||
136 | * the device. Otherwise, a DMA API might fail on swiotlb environment. | ||
137 | */ | ||
138 | blk_queue_max_hw_sectors(sdev->request_queue, | ||
139 | min_t(size_t, queue_max_hw_sectors(sdev->request_queue), | ||
140 | dma_max_mapping_size(dev) >> SECTOR_SHIFT)); | ||
141 | |||
142 | /* | ||
132 | * Some USB host controllers can't do DMA; they have to use PIO. | 143 | * Some USB host controllers can't do DMA; they have to use PIO. |
133 | * They indicate this by setting their dma_mask to NULL. For | 144 | * They indicate this by setting their dma_mask to NULL. For |
134 | * such controllers we need to make sure the block layer sets | 145 | * such controllers we need to make sure the block layer sets |
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 819296332913..42a8c2a13ab1 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h | |||
@@ -96,7 +96,7 @@ struct vhost_uaddr { | |||
96 | }; | 96 | }; |
97 | 97 | ||
98 | #if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 | 98 | #if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 |
99 | #define VHOST_ARCH_CAN_ACCEL_UACCESS 1 | 99 | #define VHOST_ARCH_CAN_ACCEL_UACCESS 0 |
100 | #else | 100 | #else |
101 | #define VHOST_ARCH_CAN_ACCEL_UACCESS 0 | 101 | #define VHOST_ARCH_CAN_ACCEL_UACCESS 0 |
102 | #endif | 102 | #endif |
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 4c339c7e66e5..a446a7221e13 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -1143,7 +1143,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
1143 | goto out_put_map; | 1143 | goto out_put_map; |
1144 | 1144 | ||
1145 | if (!use_ptemod) { | 1145 | if (!use_ptemod) { |
1146 | err = vm_map_pages(vma, map->pages, map->count); | 1146 | err = vm_map_pages_zero(vma, map->pages, map->count); |
1147 | if (err) | 1147 | if (err) |
1148 | goto out_put_map; | 1148 | goto out_put_map; |
1149 | } else { | 1149 | } else { |
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 2f5ce7230a43..c6070e70dd73 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c | |||
@@ -724,25 +724,6 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata) | |||
724 | return 0; | 724 | return 0; |
725 | } | 725 | } |
726 | 726 | ||
727 | struct remap_pfn { | ||
728 | struct mm_struct *mm; | ||
729 | struct page **pages; | ||
730 | pgprot_t prot; | ||
731 | unsigned long i; | ||
732 | }; | ||
733 | |||
734 | static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data) | ||
735 | { | ||
736 | struct remap_pfn *r = data; | ||
737 | struct page *page = r->pages[r->i]; | ||
738 | pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot)); | ||
739 | |||
740 | set_pte_at(r->mm, addr, ptep, pte); | ||
741 | r->i++; | ||
742 | |||
743 | return 0; | ||
744 | } | ||
745 | |||
746 | static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) | 727 | static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) |
747 | { | 728 | { |
748 | struct privcmd_data *data = file->private_data; | 729 | struct privcmd_data *data = file->private_data; |
@@ -774,7 +755,8 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) | |||
774 | goto out; | 755 | goto out; |
775 | } | 756 | } |
776 | 757 | ||
777 | if (xen_feature(XENFEAT_auto_translated_physmap)) { | 758 | if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) && |
759 | xen_feature(XENFEAT_auto_translated_physmap)) { | ||
778 | unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE); | 760 | unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE); |
779 | struct page **pages; | 761 | struct page **pages; |
780 | unsigned int i; | 762 | unsigned int i; |
@@ -808,16 +790,9 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata) | |||
808 | if (rc) | 790 | if (rc) |
809 | goto out; | 791 | goto out; |
810 | 792 | ||
811 | if (xen_feature(XENFEAT_auto_translated_physmap)) { | 793 | if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) && |
812 | struct remap_pfn r = { | 794 | xen_feature(XENFEAT_auto_translated_physmap)) { |
813 | .mm = vma->vm_mm, | 795 | rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT); |
814 | .pages = vma->vm_private_data, | ||
815 | .prot = vma->vm_page_prot, | ||
816 | }; | ||
817 | |||
818 | rc = apply_to_page_range(r.mm, kdata.addr, | ||
819 | kdata.num << PAGE_SHIFT, | ||
820 | remap_pfn_fn, &r); | ||
821 | } else { | 796 | } else { |
822 | unsigned int domid = | 797 | unsigned int domid = |
823 | (xdata.flags & XENMEM_rsrc_acq_caller_owned) ? | 798 | (xdata.flags & XENMEM_rsrc_acq_caller_owned) ? |
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index cfbe46785a3b..ae1df496bf38 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c | |||
@@ -83,34 +83,18 @@ static inline dma_addr_t xen_virt_to_bus(void *address) | |||
83 | return xen_phys_to_bus(virt_to_phys(address)); | 83 | return xen_phys_to_bus(virt_to_phys(address)); |
84 | } | 84 | } |
85 | 85 | ||
86 | static int check_pages_physically_contiguous(unsigned long xen_pfn, | 86 | static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) |
87 | unsigned int offset, | ||
88 | size_t length) | ||
89 | { | 87 | { |
90 | unsigned long next_bfn; | 88 | unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p); |
91 | int i; | 89 | unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size); |
92 | int nr_pages; | ||
93 | 90 | ||
94 | next_bfn = pfn_to_bfn(xen_pfn); | 91 | next_bfn = pfn_to_bfn(xen_pfn); |
95 | nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT; | ||
96 | 92 | ||
97 | for (i = 1; i < nr_pages; i++) { | 93 | for (i = 1; i < nr_pages; i++) |
98 | if (pfn_to_bfn(++xen_pfn) != ++next_bfn) | 94 | if (pfn_to_bfn(++xen_pfn) != ++next_bfn) |
99 | return 0; | 95 | return 1; |
100 | } | ||
101 | return 1; | ||
102 | } | ||
103 | 96 | ||
104 | static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) | 97 | return 0; |
105 | { | ||
106 | unsigned long xen_pfn = XEN_PFN_DOWN(p); | ||
107 | unsigned int offset = p & ~XEN_PAGE_MASK; | ||
108 | |||
109 | if (offset + size <= XEN_PAGE_SIZE) | ||
110 | return 0; | ||
111 | if (check_pages_physically_contiguous(xen_pfn, offset, size)) | ||
112 | return 0; | ||
113 | return 1; | ||
114 | } | 98 | } |
115 | 99 | ||
116 | static int is_xen_swiotlb_buffer(dma_addr_t dma_addr) | 100 | static int is_xen_swiotlb_buffer(dma_addr_t dma_addr) |
@@ -338,6 +322,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, | |||
338 | xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs); | 322 | xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs); |
339 | return NULL; | 323 | return NULL; |
340 | } | 324 | } |
325 | SetPageXenRemapped(virt_to_page(ret)); | ||
341 | } | 326 | } |
342 | memset(ret, 0, size); | 327 | memset(ret, 0, size); |
343 | return ret; | 328 | return ret; |
@@ -361,8 +346,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, | |||
361 | /* Convert the size to actually allocated. */ | 346 | /* Convert the size to actually allocated. */ |
362 | size = 1UL << (order + XEN_PAGE_SHIFT); | 347 | size = 1UL << (order + XEN_PAGE_SHIFT); |
363 | 348 | ||
364 | if (((dev_addr + size - 1 <= dma_mask)) || | 349 | if (!WARN_ON((dev_addr + size - 1 > dma_mask) || |
365 | range_straddles_page_boundary(phys, size)) | 350 | range_straddles_page_boundary(phys, size)) && |
351 | TestClearPageXenRemapped(virt_to_page(vaddr))) | ||
366 | xen_destroy_contiguous_region(phys, order); | 352 | xen_destroy_contiguous_region(phys, order); |
367 | 353 | ||
368 | xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); | 354 | xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); |
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c index 73427d8e0116..e5694133ebe5 100644 --- a/drivers/xen/xen-pciback/conf_space_capability.c +++ b/drivers/xen/xen-pciback/conf_space_capability.c | |||
@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value, | |||
116 | { | 116 | { |
117 | int err; | 117 | int err; |
118 | u16 old_value; | 118 | u16 old_value; |
119 | pci_power_t new_state, old_state; | 119 | pci_power_t new_state; |
120 | 120 | ||
121 | err = pci_read_config_word(dev, offset, &old_value); | 121 | err = pci_read_config_word(dev, offset, &old_value); |
122 | if (err) | 122 | if (err) |
123 | goto out; | 123 | goto out; |
124 | 124 | ||
125 | old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK); | ||
126 | new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK); | 125 | new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK); |
127 | 126 | ||
128 | new_value &= PM_OK_BITS; | 127 | new_value &= PM_OK_BITS; |
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c index ba883a80b3c0..7b1077f0abcb 100644 --- a/drivers/xen/xlate_mmu.c +++ b/drivers/xen/xlate_mmu.c | |||
@@ -262,3 +262,35 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, | |||
262 | return 0; | 262 | return 0; |
263 | } | 263 | } |
264 | EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages); | 264 | EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages); |
265 | |||
266 | struct remap_pfn { | ||
267 | struct mm_struct *mm; | ||
268 | struct page **pages; | ||
269 | pgprot_t prot; | ||
270 | unsigned long i; | ||
271 | }; | ||
272 | |||
273 | static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data) | ||
274 | { | ||
275 | struct remap_pfn *r = data; | ||
276 | struct page *page = r->pages[r->i]; | ||
277 | pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot)); | ||
278 | |||
279 | set_pte_at(r->mm, addr, ptep, pte); | ||
280 | r->i++; | ||
281 | |||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | /* Used by the privcmd module, but has to be built-in on ARM */ | ||
286 | int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len) | ||
287 | { | ||
288 | struct remap_pfn r = { | ||
289 | .mm = vma->vm_mm, | ||
290 | .pages = vma->vm_private_data, | ||
291 | .prot = vma->vm_page_prot, | ||
292 | }; | ||
293 | |||
294 | return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r); | ||
295 | } | ||
296 | EXPORT_SYMBOL_GPL(xen_remap_vma_range); | ||