Diffstat (limited to 'drivers')
287 files changed, 13826 insertions, 2981 deletions
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index b58850389094..81bfc6197293 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -97,12 +97,12 @@ static ssize_t acpi_table_aml_read(struct config_item *cfg,
 
 CONFIGFS_BIN_ATTR(acpi_table_, aml, NULL, MAX_ACPI_TABLE_SIZE);
 
-struct configfs_bin_attribute *acpi_table_bin_attrs[] = {
+static struct configfs_bin_attribute *acpi_table_bin_attrs[] = {
	&acpi_table_attr_aml,
	NULL,
 };
 
-ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
 {
	struct acpi_table_header *h = get_header(cfg);
 
@@ -112,7 +112,7 @@ ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
	return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->signature);
 }
 
-ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
 {
	struct acpi_table_header *h = get_header(cfg);
 
@@ -122,7 +122,7 @@ ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
	return sprintf(str, "%d\n", h->length);
 }
 
-ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
 {
	struct acpi_table_header *h = get_header(cfg);
 
@@ -132,7 +132,7 @@ ssize_t acpi_table_revision_show(struct config_item *cfg, char *str)
	return sprintf(str, "%d\n", h->revision);
 }
 
-ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
 {
	struct acpi_table_header *h = get_header(cfg);
 
@@ -142,7 +142,7 @@ ssize_t acpi_table_oem_id_show(struct config_item *cfg, char *str)
	return sprintf(str, "%.*s\n", ACPI_OEM_ID_SIZE, h->oem_id);
 }
 
-ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
 {
	struct acpi_table_header *h = get_header(cfg);
 
@@ -152,7 +152,7 @@ ssize_t acpi_table_oem_table_id_show(struct config_item *cfg, char *str)
	return sprintf(str, "%.*s\n", ACPI_OEM_TABLE_ID_SIZE, h->oem_table_id);
 }
 
-ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
 {
	struct acpi_table_header *h = get_header(cfg);
 
@@ -162,7 +162,8 @@ ssize_t acpi_table_oem_revision_show(struct config_item *cfg, char *str)
	return sprintf(str, "%d\n", h->oem_revision);
 }
 
-ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg, char *str)
+static ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg,
+		char *str)
 {
	struct acpi_table_header *h = get_header(cfg);
 
@@ -172,8 +173,8 @@ ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg, char *str)
	return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->asl_compiler_id);
 }
 
-ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
-		char *str)
+static ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
+		char *str)
 {
	struct acpi_table_header *h = get_header(cfg);
 
@@ -192,7 +193,7 @@ CONFIGFS_ATTR_RO(acpi_table_, oem_revision);
 CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_id);
 CONFIGFS_ATTR_RO(acpi_table_, asl_compiler_revision);
 
-struct configfs_attribute *acpi_table_attrs[] = {
+static struct configfs_attribute *acpi_table_attrs[] = {
	&acpi_table_attr_signature,
	&acpi_table_attr_length,
	&acpi_table_attr_revision,
@@ -232,7 +233,7 @@ static void acpi_table_drop_item(struct config_group *group,
	acpi_tb_unload_table(table->index);
 }
 
-struct configfs_group_operations acpi_table_group_ops = {
+	.make_item = acpi_table_make_item,
+static struct configfs_group_operations acpi_table_group_ops = {
	.make_item = acpi_table_make_item,
	.drop_item = acpi_table_drop_item,
 };
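Review note: the change above only drops external linkage from symbols that are already wired up through a config_item_type in the same file. A minimal sketch of that pattern, with purely illustrative names (not from this driver):

    /* Illustrative sketch only. Attribute tables consumed solely via a
     * config_item_type in the same file can be declared static, which keeps
     * them out of the global namespace and silences sparse's
     * "symbol was not declared" warnings.
     */
    static struct configfs_attribute *example_attrs[] = {
    	&example_attr_name,	/* hypothetical CONFIGFS_ATTR_RO() attribute */
    	NULL,
    };

    static const struct config_item_type example_type = {
    	.ct_attrs	= example_attrs,
    	.ct_owner	= THIS_MODULE,
    };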
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 5f94c35d165f..1e2a10a06b9d 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -18,7 +18,7 @@
 #include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
-#include <linux/platform_data/clk-lpss.h>
+#include <linux/platform_data/x86/clk-lpss.h>
 #include <linux/platform_data/x86/pmc_atom.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 545e91420cde..8940054d6250 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
 {
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
	const union acpi_object *of_compatible, *obj;
+	acpi_status status;
	int len, count;
	int i, nval;
	char *c;
 
-	acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+	status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
	/* DT strings are all in lower case */
	for (c = buf.pointer; *c != '\0'; c++)
		*c = tolower(*c);
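The hunk above follows the usual ACPICA convention; a short sketch of the pattern under an assumed handle variable, for readers unfamiliar with acpi_buffer:

    /* Sketch of the pattern applied above: acpi_get_name() returns an
     * acpi_status, and buf.pointer is only valid when that call succeeded,
     * so it must be checked before the lowercasing loop dereferences it.
     */
    struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
    acpi_status status;

    status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buf);
    if (ACPI_FAILURE(status))
    	return -ENODEV;
    /* ... use buf.pointer, then free it when done ... */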
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index e18ade5d74e9..df8979008dd4 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -55,6 +55,10 @@ static bool no_init_ars;
 module_param(no_init_ars, bool, 0644);
 MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");
 
+static bool force_labels;
+module_param(force_labels, bool, 0444);
+MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");
+
 LIST_HEAD(acpi_descs);
 DEFINE_MUTEX(acpi_desc_lock);
 
@@ -415,7 +419,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
	if (call_pkg) {
		int i;
 
-		if (nfit_mem->family != call_pkg->nd_family)
+		if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;
 
		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
@@ -424,6 +428,10 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
		return call_pkg->nd_command;
	}
 
+	/* In the !call_pkg case, bus commands == bus functions */
+	if (!nfit_mem)
+		return cmd;
+
	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
		return cmd;
@@ -454,17 +462,18 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
	if (cmd_rc)
		*cmd_rc = -EINVAL;
 
+	if (cmd == ND_CMD_CALL)
+		call_pkg = buf;
+	func = cmd_to_func(nfit_mem, cmd, call_pkg);
+	if (func < 0)
+		return func;
+
	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;
 
		if (!adev)
			return -ENOTTY;
 
-		if (cmd == ND_CMD_CALL)
-			call_pkg = buf;
-		func = cmd_to_func(nfit_mem, cmd, call_pkg);
-		if (func < 0)
-			return func;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -475,12 +484,9 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);
 
-		func = cmd;
		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
-		dsm_mask = cmd_mask;
-		if (cmd == ND_CMD_CALL)
-			dsm_mask = nd_desc->bus_dsm_mask;
+		dsm_mask = nd_desc->bus_dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		guid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
@@ -554,6 +560,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		return -EINVAL;
	}
 
+	if (out_obj->type != ACPI_TYPE_BUFFER) {
+		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
+				dimm_name, cmd_name, out_obj->type);
+		rc = -EINVAL;
+		goto out;
+	}
+
	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
@@ -572,13 +585,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		return 0;
	}
 
-	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
-		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
-				dimm_name, cmd_name, out_obj->type);
-		rc = -EINVAL;
-		goto out;
-	}
-
	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
@@ -1317,19 +1323,30 @@ static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
 {
	struct nvdimm_bus_descriptor *nd_desc;
+	struct acpi_nfit_desc *acpi_desc;
	ssize_t rc = -ENXIO;
+	bool busy;
 
	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
-	if (nd_desc) {
-		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+	if (!nd_desc) {
+		device_unlock(dev);
+		return rc;
+	}
+	acpi_desc = to_acpi_desc(nd_desc);
 
	mutex_lock(&acpi_desc->init_mutex);
-	rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
-			acpi_desc->scrub_busy
-			&& !acpi_desc->cancel ? "+\n" : "\n");
-	mutex_unlock(&acpi_desc->init_mutex);
+	busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
+		&& !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
+	rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
+	/* Allow an admin to poll the busy state at a higher rate */
+	if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
+				&acpi_desc->scrub_flags)) {
+		acpi_desc->scrub_tmo = 1;
+		mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
	}
+
+	mutex_unlock(&acpi_desc->init_mutex);
	device_unlock(dev);
	return rc;
 }
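Review note: scrub_show() above, and the hunks that follow, replace the old scrub_busy/cancel bitfields with atomic bit operations on a single flags word. A minimal, detached illustration of that idiom (names are illustrative, not the driver's):

    /* Illustrative only: test_bit()/set_bit()/clear_bit() operate atomically
     * on an unsigned long bitmap, so independent flags such as ARS_BUSY and
     * ARS_CANCEL can be queried and updated without a dedicated lock for the
     * flag word itself.
     */
    enum example_flags { EX_BUSY, EX_CANCEL };
    static unsigned long example_state;

    static bool example_busy(void)
    {
    	return test_bit(EX_BUSY, &example_state) &&
    		!test_bit(EX_CANCEL, &example_state);
    }

As before, a read of the scrub attribute reports the completed-scrub count with a trailing "+" while an address-range scrub is still in flight, per the sprintf() above.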
@@ -1759,14 +1776,14 @@ static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
 
 __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
 {
+	struct device *dev = &nfit_mem->adev->dev;
	struct nd_intel_smart smart = { 0 };
	union acpi_object in_buf = {
-		.type = ACPI_TYPE_BUFFER,
-		.buffer.pointer = (char *) &smart,
-		.buffer.length = sizeof(smart),
+		.buffer.type = ACPI_TYPE_BUFFER,
+		.buffer.length = 0,
	};
	union acpi_object in_obj = {
-		.type = ACPI_TYPE_PACKAGE,
+		.package.type = ACPI_TYPE_PACKAGE,
		.package.count = 1,
		.package.elements = &in_buf,
	};
@@ -1781,8 +1798,15 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
		return;
 
	out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
-	if (!out_obj)
+	if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
+			|| out_obj->buffer.length < sizeof(smart)) {
+		dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
+				dev_name(dev));
+		ACPI_FREE(out_obj);
		return;
+	}
+	memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
+	ACPI_FREE(out_obj);
 
	if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
		if (smart.shutdown_state)
@@ -1793,7 +1817,6 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
		set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
		nfit_mem->dirty_shutdown = smart.shutdown_count;
	}
-	ACPI_FREE(out_obj);
 }
 
 static void populate_shutdown_status(struct nfit_mem *nfit_mem)
@@ -1861,9 +1884,17 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
	dev_set_drvdata(&adev_dimm->dev, nfit_mem);
 
	/*
-	 * Until standardization materializes we need to consider 4
-	 * different command sets. Note, that checking for function0 (bit0)
-	 * tells us if any commands are reachable through this GUID.
+	 * There are 4 "legacy" NVDIMM command sets
+	 * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
+	 * an EFI working group was established to constrain this
+	 * proliferation. The nfit driver probes for the supported command
+	 * set by GUID. Note, if you're a platform developer looking to add
+	 * a new command set to this probe, consider using an existing set,
+	 * or otherwise seek approval to publish the command set at
+	 * http://www.uefi.org/RFIC_LIST.
+	 *
+	 * Note, that checking for function0 (bit0) tells us if any commands
+	 * are reachable through this GUID.
	 */
	for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
@@ -1886,6 +1917,8 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
+	} else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) {
+		dsm_mask = 0x1f;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
@@ -1915,18 +1948,32 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		| 1 << ND_CMD_SET_CONFIG_DATA;
	if (family == NVDIMM_FAMILY_INTEL
			&& (dsm_mask & label_mask) == label_mask)
-		return 0;
+		/* skip _LS{I,R,W} enabling */;
+	else {
+		if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
+				&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
+			dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
+			set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
+		}
 
-	if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
-			&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
-		dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
-		set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
-	}
+		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
+				&& acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
+			dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
+			set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
+		}
 
-	if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
-			&& acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
-		dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
-		set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
+		/*
+		 * Quirk read-only label configurations to preserve
+		 * access to label-less namespaces by default.
+		 */
+		if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
+				&& !force_labels) {
+			dev_dbg(dev, "%s: No _LSW, disable labels\n",
+					dev_name(&adev_dimm->dev));
+			clear_bit(NFIT_MEM_LSR, &nfit_mem->flags);
+		} else
+			dev_dbg(dev, "%s: Force enable labels\n",
+					dev_name(&adev_dimm->dev));
	}
 
	populate_shutdown_status(nfit_mem);
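Review note: a compressed view of the label policy the restructured block above implements, as a boolean sketch rather than the literal driver code:

    /* Labels are honored when the DIMM can both read and write them, or when
     * the administrator opts in despite a missing _LSW method, e.g. by
     * loading the module with the force_labels parameter added earlier in
     * this patch (nfit.force_labels=1). has_method() is a hypothetical
     * stand-in for acpi_nvdimm_has_method().
     */
    bool lsr = has_method("_LSI") && has_method("_LSR");
    bool lsw = lsr && has_method("_LSW");
    bool use_labels = lsr && (lsw || force_labels);

Intel-family DIMMs whose DSM mask already covers the label commands skip this probing entirely, as the "skip _LS{I,R,W} enabling" branch notes.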
@@ -2027,6 +2074,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
			cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
		}
 
+		/* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
+		if (nfit_mem->family == NVDIMM_FAMILY_HYPERV)
+			set_bit(NDD_NOBLK, &flags);
+
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
			set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
			set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
@@ -2050,7 +2101,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;
 
-		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
+		dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
@@ -2641,7 +2692,10 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc,
 
	if (rc < 0)
		return rc;
-	return cmd_rc;
+	if (cmd_rc < 0)
+		return cmd_rc;
+	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
+	return 0;
 }
 
 static int ars_continue(struct acpi_nfit_desc *acpi_desc)
@@ -2651,11 +2705,11 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc)
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
 
-	memset(&ars_start, 0, sizeof(ars_start));
-	ars_start.address = ars_status->restart_address;
-	ars_start.length = ars_status->restart_length;
-	ars_start.type = ars_status->type;
-	ars_start.flags = acpi_desc->ars_start_flags;
+	ars_start = (struct nd_cmd_ars_start) {
+		.address = ars_status->restart_address,
+		.length = ars_status->restart_length,
+		.type = ars_status->type,
+	};
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
@@ -2734,6 +2788,17 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
	 */
	if (ars_status->out_length < 44)
		return 0;
+
+	/*
+	 * Ignore potentially stale results that are only refreshed
+	 * after a start-ARS event.
+	 */
+	if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
+		dev_dbg(acpi_desc->dev, "skip %d stale records\n",
+				ars_status->num_records);
+		return 0;
+	}
+
	for (i = 0; i < ars_status->num_records; i++) {
		/* only process full records */
		if (ars_status->out_length
@@ -3004,14 +3069,16 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
 {
	int rc;
 
-	if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
+	if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
		return acpi_nfit_register_region(acpi_desc, nfit_spa);
 
	set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
-	set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
+	if (!no_init_ars)
+		set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
 
	switch (acpi_nfit_query_poison(acpi_desc)) {
	case 0:
+	case -ENOSPC:
	case -EAGAIN:
		rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
		/* shouldn't happen, try again later */
@@ -3036,7 +3103,6 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc,
		break;
	case -EBUSY:
	case -ENOMEM:
-	case -ENOSPC:
		/*
		 * BIOS was using ARS, wait for it to complete (or
		 * resources to become available) and then perform our
@@ -3071,7 +3137,7 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
 
	lockdep_assert_held(&acpi_desc->init_mutex);
 
-	if (acpi_desc->cancel)
+	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
		return 0;
 
	if (query_rc == -EBUSY) {
@@ -3145,7 +3211,7 @@ static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
 {
	lockdep_assert_held(&acpi_desc->init_mutex);
 
-	acpi_desc->scrub_busy = 1;
+	set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
	/* note this should only be set from within the workqueue */
	if (tmo)
		acpi_desc->scrub_tmo = tmo;
@@ -3161,7 +3227,7 @@ static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
 {
	lockdep_assert_held(&acpi_desc->init_mutex);
 
-	acpi_desc->scrub_busy = 0;
+	clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
	acpi_desc->scrub_count++;
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
@@ -3182,6 +3248,7 @@ static void acpi_nfit_scrub(struct work_struct *work)
	else
		notify_ars_done(acpi_desc);
	memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
+	clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
	mutex_unlock(&acpi_desc->init_mutex);
 }
 
@@ -3216,6 +3283,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
	struct nfit_spa *nfit_spa;
	int rc;
 
+	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
@@ -3450,7 +3518,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
	struct nfit_spa *nfit_spa;
 
	mutex_lock(&acpi_desc->init_mutex);
-	if (acpi_desc->cancel) {
+	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}
@@ -3529,7 +3597,7 @@ void acpi_nfit_shutdown(void *data)
	mutex_unlock(&acpi_desc_lock);
 
	mutex_lock(&acpi_desc->init_mutex);
-	acpi_desc->cancel = 1;
+	set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
	cancel_delayed_work_sync(&acpi_desc->dwork);
	mutex_unlock(&acpi_desc->init_mutex);
 
@@ -3729,6 +3797,7 @@ static __init int nfit_init(void)
	guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
+	guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
 
	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 33691aecfcee..2f8cf2a11e3b 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -34,11 +34,14 @@
 /* https://msdn.microsoft.com/library/windows/hardware/mt604741 */
 #define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05"
 
+/* http://www.uefi.org/RFIC_LIST (see "Virtual NVDIMM 0x1901") */
+#define UUID_NFIT_DIMM_N_HYPERV "5746c5f2-a9a2-4264-ad0e-e4ddc9e09e80"
+
 #define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \
	| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
	| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
 
-#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_MSFT
+#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
 
 #define NVDIMM_STANDARD_CMDMASK \
 (1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \
@@ -94,6 +97,7 @@ enum nfit_uuids {
	NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1,
	NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
	NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT,
+	NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV,
	NFIT_SPA_VOLATILE,
	NFIT_SPA_PM,
	NFIT_SPA_DCR,
@@ -210,6 +214,13 @@ struct nfit_mem {
	int family;
 };
 
+enum scrub_flags {
+	ARS_BUSY,
+	ARS_CANCEL,
+	ARS_VALID,
+	ARS_POLL,
+};
+
 struct acpi_nfit_desc {
	struct nvdimm_bus_descriptor nd_desc;
	struct acpi_table_header acpi_header;
@@ -223,7 +234,6 @@ struct acpi_nfit_desc {
	struct list_head idts;
	struct nvdimm_bus *nvdimm_bus;
	struct device *dev;
-	u8 ars_start_flags;
	struct nd_cmd_ars_status *ars_status;
	struct nfit_spa *scrub_spa;
	struct delayed_work dwork;
@@ -232,8 +242,7 @@ struct acpi_nfit_desc {
	unsigned int max_ars;
	unsigned int scrub_count;
	unsigned int scrub_mode;
-	unsigned int scrub_busy:1;
-	unsigned int cancel:1;
+	unsigned long scrub_flags;
	unsigned long dimm_cmd_force_en;
	unsigned long bus_cmd_force_en;
	unsigned long bus_nfit_cmd_force_en;
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index ad31c50de3be..065c4fc245d1 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -209,6 +209,9 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
	struct acpi_pptt_processor *cpu_node;
	u32 proc_sz;
 
+	if (table_hdr->revision > 1)
+		return (node->flags & ACPI_PPTT_ACPI_LEAF_NODE);
+
	table_end = (unsigned long)table_hdr + table_hdr->length;
	node_entry = ACPI_PTR_DIFF(node, table_hdr);
	entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 41324f0b1bee..fa76f5e41b5c 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -648,26 +648,29 @@ static void acpi_global_event_handler(u32 event_type, acpi_handle device,
	}
 }
 
-static int get_status(u32 index, acpi_event_status *status,
+static int get_status(u32 index, acpi_event_status *ret,
		      acpi_handle *handle)
 {
-	int result;
+	acpi_status status;
 
	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		return -EINVAL;
 
	if (index < num_gpes) {
-		result = acpi_get_gpe_device(index, handle);
-		if (result) {
+		status = acpi_get_gpe_device(index, handle);
+		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
					"Invalid GPE 0x%x", index));
-			return result;
+			return -ENXIO;
		}
-		result = acpi_get_gpe_status(*handle, index, status);
-	} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
-		result = acpi_get_event_status(index - num_gpes, status);
+		status = acpi_get_gpe_status(*handle, index, ret);
+	} else {
+		status = acpi_get_event_status(index - num_gpes, ret);
+	}
+	if (ACPI_FAILURE(status))
+		return -EIO;
 
-	return result;
+	return 0;
 }
 
 static ssize_t counter_show(struct kobject *kobj,
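Review note: the rewrite above stops storing acpi_status values in an int that callers interpret as a negative errno. A small sketch of the convention it adopts (function name is illustrative):

    /* ACPICA calls return acpi_status codes (AE_*), which are positive and
     * must not be returned where -Exxx errno values are expected; check with
     * ACPI_FAILURE() and translate explicitly.
     */
    static int example_get_gpe(u32 index, acpi_handle *handle)
    {
    	acpi_status status = acpi_get_gpe_device(index, handle);

    	if (ACPI_FAILURE(status))
    		return -ENXIO;	/* translated, never "return status" */
    	return 0;
    }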
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 2c334c01fc43..76c9969b7124 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -6,6 +6,8 @@
 * This file is released under the GPLv2.
 */
 
+#define pr_fmt(fmt) "PM: " fmt
+
 #include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/io.h>
@@ -457,19 +459,19 @@ static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 
	time_start = ktime_get();
	ret = genpd->power_off(genpd);
-	if (ret == -EBUSY)
+	if (ret)
		return ret;
 
	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
-		return ret;
+		return 0;
 
	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -1657,8 +1659,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
 
	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
-		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
-			subdomain->name);
+		pr_warn("%s: unable to remove subdomain %s\n",
+			genpd->name, subdomain->name);
		ret = -EBUSY;
		goto out;
	}
@@ -1766,8 +1768,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
		ret = genpd_set_default_power_state(genpd);
		if (ret)
			return ret;
-	} else if (!gov) {
-		pr_warn("%s : no governor for states\n", genpd->name);
+	} else if (!gov && genpd->state_count > 1) {
+		pr_warn("%s: no governor for states\n", genpd->name);
	}
 
	device_initialize(&genpd->dev);
@@ -2514,7 +2516,7 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
					      &entry_latency);
	if (err) {
		pr_debug(" * %pOF missing entry-latency-us property\n",
-						state_node);
+			 state_node);
		return -EINVAL;
	}
 
@@ -2522,7 +2524,7 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
					     &exit_latency);
	if (err) {
		pr_debug(" * %pOF missing exit-latency-us property\n",
-						state_node);
+			 state_node);
		return -EINVAL;
	}
 
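Review note: the pr_fmt() definition added at the top of this file (and of main.c, trace.c, and wakeup.c below) is what lets the literal "PM: " prefixes be dropped from individual messages. A minimal sketch of the mechanism:

    /* Illustrative only: pr_fmt() must be defined before the printk helpers
     * are pulled in; every pr_warn()/pr_debug() in the file then receives
     * the prefix automatically.
     */
    #define pr_fmt(fmt) "PM: " fmt

    #include <linux/printk.h>

    static void example(void)
    {
    	pr_warn("no governor for states\n");	/* logs "PM: no governor for states" */
    }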
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 99896fbf18e4..4d07e38a8247 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -128,7 +128,6 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
	off_on_time_ns = genpd->states[state].power_off_latency_ns +
		genpd->states[state].power_on_latency_ns;
 
-
	min_off_time_ns = -1;
	/*
	 * Check if subdomains can be off for enough time.
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5a8149829ab3..f80d298de3fa 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -17,6 +17,8 @@
 * subsystem list maintains.
 */
 
+#define pr_fmt(fmt) "PM: " fmt
+
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/mutex.h>
@@ -128,7 +130,7 @@ void device_pm_add(struct device *dev)
	if (device_pm_not_required(dev))
		return;
 
-	pr_debug("PM: Adding info for %s:%s\n",
+	pr_debug("Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
@@ -149,7 +151,7 @@ void device_pm_remove(struct device *dev)
	if (device_pm_not_required(dev))
		return;
 
-	pr_debug("PM: Removing info for %s:%s\n",
+	pr_debug("Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
@@ -168,7 +170,7 @@ void device_pm_remove(struct device *dev)
 */
 void device_pm_move_before(struct device *deva, struct device *devb)
 {
-	pr_debug("PM: Moving %s:%s before %s:%s\n",
+	pr_debug("Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
@@ -182,7 +184,7 @@ void device_pm_move_before(struct device *deva, struct device *devb)
 */
 void device_pm_move_after(struct device *deva, struct device *devb)
 {
-	pr_debug("PM: Moving %s:%s after %s:%s\n",
+	pr_debug("Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
@@ -195,7 +197,7 @@ void device_pm_move_after(struct device *deva, struct device *devb)
 */
 void device_pm_move_last(struct device *dev)
 {
-	pr_debug("PM: Moving %s:%s to end of list\n",
+	pr_debug("Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
 }
@@ -418,8 +420,8 @@ static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
			int error)
 {
-	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
-		dev_name(dev), pm_verb(state.event), info, error);
+	pr_err("Device %s failed to %s%s: error %d\n",
+	       dev_name(dev), pm_verb(state.event), info, error);
 }
 
 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -2022,8 +2024,7 @@ int dpm_prepare(pm_message_t state)
				error = 0;
				continue;
			}
-			printk(KERN_INFO "PM: Device %s not prepared "
-				"for power transition: code %d\n",
+			pr_info("Device %s not prepared for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
@@ -2062,7 +2063,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start);
 void __suspend_report_result(const char *function, void *fn, int ret)
 {
	if (ret)
-		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
+		pr_err("%s(): %pF returns %d\n", function, fn, ret);
 }
 EXPORT_SYMBOL_GPL(__suspend_report_result);
 
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index c511def48b48..ec33fbdb919b 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -21,6 +21,7 @@ static inline void pm_runtime_early_init(struct device *dev)
 extern void pm_runtime_init(struct device *dev);
 extern void pm_runtime_reinit(struct device *dev);
 extern void pm_runtime_remove(struct device *dev);
+extern u64 pm_runtime_active_time(struct device *dev);
 
 #define WAKE_IRQ_DEDICATED_ALLOCATED	BIT(0)
 #define WAKE_IRQ_DEDICATED_MANAGED	BIT(1)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3382542b39b7..f80e402ef778 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -22,7 +22,7 @@
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
- * . The per-device constraints data struct ptr is tored into the device
+ * . The per-device constraints data struct ptr is stored into the device
 *    dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index a80dbf08a99c..977db40378b0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -64,7 +64,7 @@ static int rpm_suspend(struct device *dev, int rpmflags);
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
-void update_pm_runtime_accounting(struct device *dev)
+static void update_pm_runtime_accounting(struct device *dev)
 {
	u64 now, last, delta;
 
@@ -98,7 +98,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
	dev->power.runtime_status = status;
 }
 
-u64 pm_runtime_suspended_time(struct device *dev)
+static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
 {
	u64 time;
	unsigned long flags;
@@ -106,12 +106,22 @@ u64 pm_runtime_suspended_time(struct device *dev)
	spin_lock_irqsave(&dev->power.lock, flags);
 
	update_pm_runtime_accounting(dev);
-	time = dev->power.suspended_time;
+	time = suspended ? dev->power.suspended_time : dev->power.active_time;
 
	spin_unlock_irqrestore(&dev->power.lock, flags);
 
	return time;
 }
+
+u64 pm_runtime_active_time(struct device *dev)
+{
+	return rpm_get_accounted_time(dev, false);
+}
+
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+	return rpm_get_accounted_time(dev, true);
+}
 EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
 
 /**
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index c6bf76124184..1226e441ddfe 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -125,13 +125,9 @@ static ssize_t runtime_active_time_show(struct device *dev,
		struct device_attribute *attr, char *buf)
 {
	int ret;
-	u64 tmp;
-	spin_lock_irq(&dev->power.lock);
-	update_pm_runtime_accounting(dev);
-	tmp = dev->power.active_time;
+	u64 tmp = pm_runtime_active_time(dev);
	do_div(tmp, NSEC_PER_MSEC);
	ret = sprintf(buf, "%llu\n", tmp);
-	spin_unlock_irq(&dev->power.lock);
	return ret;
 }
 
@@ -141,13 +137,9 @@ static ssize_t runtime_suspended_time_show(struct device *dev,
		struct device_attribute *attr, char *buf)
 {
	int ret;
-	u64 tmp;
-	spin_lock_irq(&dev->power.lock);
-	update_pm_runtime_accounting(dev);
-	tmp = dev->power.suspended_time;
+	u64 tmp = pm_runtime_suspended_time(dev);
	do_div(tmp, NSEC_PER_MSEC);
	ret = sprintf(buf, "%llu\n", tmp);
-	spin_unlock_irq(&dev->power.lock);
	return ret;
 }
 
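Review note: the accounted times handled above are kept in nanoseconds and exported through sysfs in milliseconds; a sketch of the conversion these show() routines perform (variable name illustrative):

    /* do_div() divides a u64 in place and is the portable way to do 64-bit
     * division in the kernel.
     */
    u64 tmp = pm_runtime_active_time(dev);	/* nanoseconds */

    do_div(tmp, NSEC_PER_MSEC);			/* now milliseconds */
    sprintf(buf, "%llu\n", tmp);		/* what userspace reads from sysfs */

The values land in the device's power/runtime_active_time and power/runtime_suspended_time attributes; with this change the locking and accounting update happen inside the pm_runtime_*_time() helpers rather than in the show() functions.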
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index b11f47a1e819..2bd9d2c744ca 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -7,6 +7,8 @@
 * devices may be working.
 */
 
+#define pr_fmt(fmt) "PM: " fmt
+
 #include <linux/pm-trace.h>
 #include <linux/export.h>
 #include <linux/rtc.h>
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index f1fee72ed970..bb1ae175fae1 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
| @@ -6,6 +6,8 @@ | |||
| 6 | * This file is released under the GPLv2. | 6 | * This file is released under the GPLv2. |
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #define pr_fmt(fmt) "PM: " fmt | ||
| 10 | |||
| 9 | #include <linux/device.h> | 11 | #include <linux/device.h> |
| 10 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
| 11 | #include <linux/sched/signal.h> | 13 | #include <linux/sched/signal.h> |
| @@ -106,23 +108,6 @@ struct wakeup_source *wakeup_source_create(const char *name) | |||
| 106 | } | 108 | } |
| 107 | EXPORT_SYMBOL_GPL(wakeup_source_create); | 109 | EXPORT_SYMBOL_GPL(wakeup_source_create); |
| 108 | 110 | ||
| 109 | /** | ||
| 110 | * wakeup_source_drop - Prepare a struct wakeup_source object for destruction. | ||
| 111 | * @ws: Wakeup source to prepare for destruction. | ||
| 112 | * | ||
| 113 | * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never | ||
| 114 | * be run in parallel with this function for the same wakeup source object. | ||
| 115 | */ | ||
| 116 | void wakeup_source_drop(struct wakeup_source *ws) | ||
| 117 | { | ||
| 118 | if (!ws) | ||
| 119 | return; | ||
| 120 | |||
| 121 | del_timer_sync(&ws->timer); | ||
| 122 | __pm_relax(ws); | ||
| 123 | } | ||
| 124 | EXPORT_SYMBOL_GPL(wakeup_source_drop); | ||
| 125 | |||
| 126 | /* | 111 | /* |
| 127 | * Record wakeup_source statistics being deleted into a dummy wakeup_source. | 112 | * Record wakeup_source statistics being deleted into a dummy wakeup_source. |
| 128 | */ | 113 | */ |
| @@ -162,7 +147,7 @@ void wakeup_source_destroy(struct wakeup_source *ws) | |||
| 162 | if (!ws) | 147 | if (!ws) |
| 163 | return; | 148 | return; |
| 164 | 149 | ||
| 165 | wakeup_source_drop(ws); | 150 | __pm_relax(ws); |
| 166 | wakeup_source_record(ws); | 151 | wakeup_source_record(ws); |
| 167 | kfree_const(ws->name); | 152 | kfree_const(ws->name); |
| 168 | kfree(ws); | 153 | kfree(ws); |
| @@ -205,6 +190,13 @@ void wakeup_source_remove(struct wakeup_source *ws) | |||
| 205 | list_del_rcu(&ws->entry); | 190 | list_del_rcu(&ws->entry); |
| 206 | raw_spin_unlock_irqrestore(&events_lock, flags); | 191 | raw_spin_unlock_irqrestore(&events_lock, flags); |
| 207 | synchronize_srcu(&wakeup_srcu); | 192 | synchronize_srcu(&wakeup_srcu); |
| 193 | |||
| 194 | del_timer_sync(&ws->timer); | ||
| 195 | /* | ||
| 196 | * Clear timer.function to make wakeup_source_not_registered() treat | ||
| 197 | * this wakeup source as not registered. | ||
| 198 | */ | ||
| 199 | ws->timer.function = NULL; | ||
| 208 | } | 200 | } |
| 209 | EXPORT_SYMBOL_GPL(wakeup_source_remove); | 201 | EXPORT_SYMBOL_GPL(wakeup_source_remove); |
| 210 | 202 | ||
| @@ -853,7 +845,7 @@ bool pm_wakeup_pending(void) | |||
| 853 | raw_spin_unlock_irqrestore(&events_lock, flags); | 845 | raw_spin_unlock_irqrestore(&events_lock, flags); |
| 854 | 846 | ||
| 855 | if (ret) { | 847 | if (ret) { |
| 856 | pr_debug("PM: Wakeup pending, aborting suspend\n"); | 848 | pr_debug("Wakeup pending, aborting suspend\n"); |
| 857 | pm_print_active_wakeup_sources(); | 849 | pm_print_active_wakeup_sources(); |
| 858 | } | 850 | } |
| 859 | 851 | ||
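Editor's note (sketch for context, not part of the patch): moving del_timer_sync() into wakeup_source_remove() and then clearing timer.function relies on the registration check in this file, which, roughly as implemented in drivers/base/power/wakeup.c, treats a source as registered only while its timer handler is still installed:

static bool wakeup_source_not_registered(struct wakeup_source *ws)
{
	/*
	 * wakeup_source_add() installs pm_wakeup_timer_fn as the timer
	 * handler, so a NULL (or different) handler means "not registered".
	 */
	return ws->timer.function != pm_wakeup_timer_fn;
}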
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index d2f0bb5ba47e..e705aab9e38b 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig | |||
| @@ -290,6 +290,12 @@ config COMMON_CLK_BD718XX | |||
| 290 | This driver supports ROHM BD71837 and ROHM BD71847 | 290 | This driver supports ROHM BD71837 and ROHM BD71847 |
| 291 | PMICs clock gates. | 291 | PMICs clock gates. |
| 292 | 292 | ||
| 293 | config COMMON_CLK_FIXED_MMIO | ||
| 294 | bool "Clock driver for Memory Mapped Fixed values" | ||
| 295 | depends on COMMON_CLK && OF | ||
| 296 | help | ||
| 297 | Support for Memory Mapped IO Fixed clocks | ||
| 298 | |||
| 293 | source "drivers/clk/actions/Kconfig" | 299 | source "drivers/clk/actions/Kconfig" |
| 294 | source "drivers/clk/bcm/Kconfig" | 300 | source "drivers/clk/bcm/Kconfig" |
| 295 | source "drivers/clk/hisilicon/Kconfig" | 301 | source "drivers/clk/hisilicon/Kconfig" |
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 8a9440a97500..1db133652f0c 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile | |||
| @@ -27,6 +27,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o | |||
| 27 | obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o | 27 | obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o |
| 28 | obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o | 28 | obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o |
| 29 | obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o | 29 | obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o |
| 30 | obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o | ||
| 30 | obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o | 31 | obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o |
| 31 | obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o | 32 | obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o |
| 32 | obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o | 33 | obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o |
| @@ -78,7 +79,7 @@ obj-$(CONFIG_ARCH_K3) += keystone/ | |||
| 78 | obj-$(CONFIG_ARCH_KEYSTONE) += keystone/ | 79 | obj-$(CONFIG_ARCH_KEYSTONE) += keystone/ |
| 79 | obj-$(CONFIG_MACH_LOONGSON32) += loongson1/ | 80 | obj-$(CONFIG_MACH_LOONGSON32) += loongson1/ |
| 80 | obj-y += mediatek/ | 81 | obj-y += mediatek/ |
| 81 | obj-$(CONFIG_COMMON_CLK_AMLOGIC) += meson/ | 82 | obj-$(CONFIG_ARCH_MESON) += meson/ |
| 82 | obj-$(CONFIG_MACH_PIC32) += microchip/ | 83 | obj-$(CONFIG_MACH_PIC32) += microchip/ |
| 83 | ifeq ($(CONFIG_COMMON_CLK), y) | 84 | ifeq ($(CONFIG_COMMON_CLK), y) |
| 84 | obj-$(CONFIG_ARCH_MMP) += mmp/ | 85 | obj-$(CONFIG_ARCH_MMP) += mmp/ |
diff --git a/drivers/clk/actions/Kconfig b/drivers/clk/actions/Kconfig index 04f0a6355726..5b45ca35757e 100644 --- a/drivers/clk/actions/Kconfig +++ b/drivers/clk/actions/Kconfig | |||
| @@ -9,6 +9,11 @@ if CLK_ACTIONS | |||
| 9 | 9 | ||
| 10 | # SoC Drivers | 10 | # SoC Drivers |
| 11 | 11 | ||
| 12 | config CLK_OWL_S500 | ||
| 13 | bool "Support for the Actions Semi OWL S500 clocks" | ||
| 14 | depends on ARCH_ACTIONS || COMPILE_TEST | ||
| 15 | default ARCH_ACTIONS | ||
| 16 | |||
| 12 | config CLK_OWL_S700 | 17 | config CLK_OWL_S700 |
| 13 | bool "Support for the Actions Semi OWL S700 clocks" | 18 | bool "Support for the Actions Semi OWL S700 clocks" |
| 14 | depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST | 19 | depends on (ARM64 && ARCH_ACTIONS) || COMPILE_TEST |
diff --git a/drivers/clk/actions/Makefile b/drivers/clk/actions/Makefile index ccfdf9781cef..a2588e55c790 100644 --- a/drivers/clk/actions/Makefile +++ b/drivers/clk/actions/Makefile | |||
| @@ -10,5 +10,6 @@ clk-owl-y += owl-pll.o | |||
| 10 | clk-owl-y += owl-reset.o | 10 | clk-owl-y += owl-reset.o |
| 11 | 11 | ||
| 12 | # SoC support | 12 | # SoC support |
| 13 | obj-$(CONFIG_CLK_OWL_S500) += owl-s500.o | ||
| 13 | obj-$(CONFIG_CLK_OWL_S700) += owl-s700.o | 14 | obj-$(CONFIG_CLK_OWL_S700) += owl-s700.o |
| 14 | obj-$(CONFIG_CLK_OWL_S900) += owl-s900.o | 15 | obj-$(CONFIG_CLK_OWL_S900) += owl-s900.o |
diff --git a/drivers/clk/actions/owl-pll.c b/drivers/clk/actions/owl-pll.c index 058e06d7099f..02437bdedf4d 100644 --- a/drivers/clk/actions/owl-pll.c +++ b/drivers/clk/actions/owl-pll.c | |||
| @@ -179,7 +179,7 @@ static int owl_pll_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 179 | 179 | ||
| 180 | regmap_write(common->regmap, pll_hw->reg, reg); | 180 | regmap_write(common->regmap, pll_hw->reg, reg); |
| 181 | 181 | ||
| 182 | udelay(PLL_STABILITY_WAIT_US); | 182 | udelay(pll_hw->delay); |
| 183 | 183 | ||
| 184 | return 0; | 184 | return 0; |
| 185 | } | 185 | } |
diff --git a/drivers/clk/actions/owl-pll.h b/drivers/clk/actions/owl-pll.h index 0aae30abd5dc..6fb0d45bb088 100644 --- a/drivers/clk/actions/owl-pll.h +++ b/drivers/clk/actions/owl-pll.h | |||
| @@ -13,6 +13,8 @@ | |||
| 13 | 13 | ||
| 14 | #include "owl-common.h" | 14 | #include "owl-common.h" |
| 15 | 15 | ||
| 16 | #define OWL_PLL_DEF_DELAY 50 | ||
| 17 | |||
| 16 | /* last entry should have rate = 0 */ | 18 | /* last entry should have rate = 0 */ |
| 17 | struct clk_pll_table { | 19 | struct clk_pll_table { |
| 18 | unsigned int val; | 20 | unsigned int val; |
| @@ -27,6 +29,7 @@ struct owl_pll_hw { | |||
| 27 | u8 width; | 29 | u8 width; |
| 28 | u8 min_mul; | 30 | u8 min_mul; |
| 29 | u8 max_mul; | 31 | u8 max_mul; |
| 32 | u8 delay; | ||
| 30 | const struct clk_pll_table *table; | 33 | const struct clk_pll_table *table; |
| 31 | }; | 34 | }; |
| 32 | 35 | ||
| @@ -36,7 +39,7 @@ struct owl_pll { | |||
| 36 | }; | 39 | }; |
| 37 | 40 | ||
| 38 | #define OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \ | 41 | #define OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \ |
| 39 | _width, _min_mul, _max_mul, _table) \ | 42 | _width, _min_mul, _max_mul, _delay, _table) \ |
| 40 | { \ | 43 | { \ |
| 41 | .reg = _reg, \ | 44 | .reg = _reg, \ |
| 42 | .bfreq = _bfreq, \ | 45 | .bfreq = _bfreq, \ |
| @@ -45,6 +48,7 @@ struct owl_pll { | |||
| 45 | .width = _width, \ | 48 | .width = _width, \ |
| 46 | .min_mul = _min_mul, \ | 49 | .min_mul = _min_mul, \ |
| 47 | .max_mul = _max_mul, \ | 50 | .max_mul = _max_mul, \ |
| 51 | .delay = _delay, \ | ||
| 48 | .table = _table, \ | 52 | .table = _table, \ |
| 49 | } | 53 | } |
| 50 | 54 | ||
| @@ -52,8 +56,8 @@ struct owl_pll { | |||
| 52 | _shift, _width, _min_mul, _max_mul, _table, _flags) \ | 56 | _shift, _width, _min_mul, _max_mul, _table, _flags) \ |
| 53 | struct owl_pll _struct = { \ | 57 | struct owl_pll _struct = { \ |
| 54 | .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \ | 58 | .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \ |
| 55 | _width, _min_mul, \ | 59 | _width, _min_mul, _max_mul, \ |
| 56 | _max_mul, _table), \ | 60 | OWL_PLL_DEF_DELAY, _table), \ |
| 57 | .common = { \ | 61 | .common = { \ |
| 58 | .regmap = NULL, \ | 62 | .regmap = NULL, \ |
| 59 | .hw.init = CLK_HW_INIT(_name, \ | 63 | .hw.init = CLK_HW_INIT(_name, \ |
| @@ -67,8 +71,23 @@ struct owl_pll { | |||
| 67 | _shift, _width, _min_mul, _max_mul, _table, _flags) \ | 71 | _shift, _width, _min_mul, _max_mul, _table, _flags) \ |
| 68 | struct owl_pll _struct = { \ | 72 | struct owl_pll _struct = { \ |
| 69 | .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \ | 73 | .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \ |
| 70 | _width, _min_mul, \ | 74 | _width, _min_mul, _max_mul, \ |
| 71 | _max_mul, _table), \ | 75 | OWL_PLL_DEF_DELAY, _table), \ |
| 76 | .common = { \ | ||
| 77 | .regmap = NULL, \ | ||
| 78 | .hw.init = CLK_HW_INIT_NO_PARENT(_name, \ | ||
| 79 | &owl_pll_ops, \ | ||
| 80 | _flags), \ | ||
| 81 | }, \ | ||
| 82 | } | ||
| 83 | |||
| 84 | #define OWL_PLL_NO_PARENT_DELAY(_struct, _name, _reg, _bfreq, _bit_idx, \ | ||
| 85 | _shift, _width, _min_mul, _max_mul, _delay, _table, \ | ||
| 86 | _flags) \ | ||
| 87 | struct owl_pll _struct = { \ | ||
| 88 | .pll_hw = OWL_PLL_HW(_reg, _bfreq, _bit_idx, _shift, \ | ||
| 89 | _width, _min_mul, _max_mul, \ | ||
| 90 | _delay, _table), \ | ||
| 72 | .common = { \ | 91 | .common = { \ |
| 73 | .regmap = NULL, \ | 92 | .regmap = NULL, \ |
| 74 | .hw.init = CLK_HW_INIT_NO_PARENT(_name, \ | 93 | .hw.init = CLK_HW_INIT_NO_PARENT(_name, \ |
| @@ -78,7 +97,6 @@ struct owl_pll { | |||
| 78 | } | 97 | } |
| 79 | 98 | ||
| 80 | #define mul_mask(m) ((1 << ((m)->width)) - 1) | 99 | #define mul_mask(m) ((1 << ((m)->width)) - 1) |
| 81 | #define PLL_STABILITY_WAIT_US (50) | ||
| 82 | 100 | ||
| 83 | static inline struct owl_pll *hw_to_owl_pll(const struct clk_hw *hw) | 101 | static inline struct owl_pll *hw_to_owl_pll(const struct clk_hw *hw) |
| 84 | { | 102 | { |
diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c new file mode 100644 index 000000000000..e2007ac4d235 --- /dev/null +++ b/drivers/clk/actions/owl-s500.c | |||
| @@ -0,0 +1,525 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * Actions Semi Owl S500 SoC clock driver | ||
| 4 | * | ||
| 5 | * Copyright (c) 2014 Actions Semi Inc. | ||
| 6 | * Author: David Liu <liuwei@actions-semi.com> | ||
| 7 | * | ||
| 8 | * Copyright (c) 2018 Linaro Ltd. | ||
| 9 | * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> | ||
| 10 | * | ||
| 11 | * Copyright (c) 2018 LSI-TEC - Caninos Loucos | ||
| 12 | * Author: Edgar Bernardi Righi <edgar.righi@lsitec.org.br> | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/clk-provider.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | |||
| 18 | #include "owl-common.h" | ||
| 19 | #include "owl-composite.h" | ||
| 20 | #include "owl-divider.h" | ||
| 21 | #include "owl-factor.h" | ||
| 22 | #include "owl-fixed-factor.h" | ||
| 23 | #include "owl-gate.h" | ||
| 24 | #include "owl-mux.h" | ||
| 25 | #include "owl-pll.h" | ||
| 26 | |||
| 27 | #include <dt-bindings/clock/actions,s500-cmu.h> | ||
| 28 | |||
| 29 | #define CMU_COREPLL (0x0000) | ||
| 30 | #define CMU_DEVPLL (0x0004) | ||
| 31 | #define CMU_DDRPLL (0x0008) | ||
| 32 | #define CMU_NANDPLL (0x000C) | ||
| 33 | #define CMU_DISPLAYPLL (0x0010) | ||
| 34 | #define CMU_AUDIOPLL (0x0014) | ||
| 35 | #define CMU_TVOUTPLL (0x0018) | ||
| 36 | #define CMU_BUSCLK (0x001C) | ||
| 37 | #define CMU_SENSORCLK (0x0020) | ||
| 38 | #define CMU_LCDCLK (0x0024) | ||
| 39 | #define CMU_DSICLK (0x0028) | ||
| 40 | #define CMU_CSICLK (0x002C) | ||
| 41 | #define CMU_DECLK (0x0030) | ||
| 42 | #define CMU_BISPCLK (0x0034) | ||
| 43 | #define CMU_BUSCLK1 (0x0038) | ||
| 44 | #define CMU_VDECLK (0x0040) | ||
| 45 | #define CMU_VCECLK (0x0044) | ||
| 46 | #define CMU_NANDCCLK (0x004C) | ||
| 47 | #define CMU_SD0CLK (0x0050) | ||
| 48 | #define CMU_SD1CLK (0x0054) | ||
| 49 | #define CMU_SD2CLK (0x0058) | ||
| 50 | #define CMU_UART0CLK (0x005C) | ||
| 51 | #define CMU_UART1CLK (0x0060) | ||
| 52 | #define CMU_UART2CLK (0x0064) | ||
| 53 | #define CMU_PWM4CLK (0x0068) | ||
| 54 | #define CMU_PWM5CLK (0x006C) | ||
| 55 | #define CMU_PWM0CLK (0x0070) | ||
| 56 | #define CMU_PWM1CLK (0x0074) | ||
| 57 | #define CMU_PWM2CLK (0x0078) | ||
| 58 | #define CMU_PWM3CLK (0x007C) | ||
| 59 | #define CMU_USBPLL (0x0080) | ||
| 60 | #define CMU_ETHERNETPLL (0x0084) | ||
| 61 | #define CMU_CVBSPLL (0x0088) | ||
| 62 | #define CMU_LENSCLK (0x008C) | ||
| 63 | #define CMU_GPU3DCLK (0x0090) | ||
| 64 | #define CMU_CORECTL (0x009C) | ||
| 65 | #define CMU_DEVCLKEN0 (0x00A0) | ||
| 66 | #define CMU_DEVCLKEN1 (0x00A4) | ||
| 67 | #define CMU_DEVRST0 (0x00A8) | ||
| 68 | #define CMU_DEVRST1 (0x00AC) | ||
| 69 | #define CMU_UART3CLK (0x00B0) | ||
| 70 | #define CMU_UART4CLK (0x00B4) | ||
| 71 | #define CMU_UART5CLK (0x00B8) | ||
| 72 | #define CMU_UART6CLK (0x00BC) | ||
| 73 | #define CMU_SSCLK (0x00C0) | ||
| 74 | #define CMU_DIGITALDEBUG (0x00D0) | ||
| 75 | #define CMU_ANALOGDEBUG (0x00D4) | ||
| 76 | #define CMU_COREPLLDEBUG (0x00D8) | ||
| 77 | #define CMU_DEVPLLDEBUG (0x00DC) | ||
| 78 | #define CMU_DDRPLLDEBUG (0x00E0) | ||
| 79 | #define CMU_NANDPLLDEBUG (0x00E4) | ||
| 80 | #define CMU_DISPLAYPLLDEBUG (0x00E8) | ||
| 81 | #define CMU_TVOUTPLLDEBUG (0x00EC) | ||
| 82 | #define CMU_DEEPCOLORPLLDEBUG (0x00F4) | ||
| 83 | #define CMU_AUDIOPLL_ETHPLLDEBUG (0x00F8) | ||
| 84 | #define CMU_CVBSPLLDEBUG (0x00FC) | ||
| 85 | |||
| 86 | #define OWL_S500_COREPLL_DELAY (150) | ||
| 87 | #define OWL_S500_DDRPLL_DELAY (63) | ||
| 88 | #define OWL_S500_DEVPLL_DELAY (28) | ||
| 89 | #define OWL_S500_NANDPLL_DELAY (44) | ||
| 90 | #define OWL_S500_DISPLAYPLL_DELAY (57) | ||
| 91 | #define OWL_S500_ETHERNETPLL_DELAY (25) | ||
| 92 | #define OWL_S500_AUDIOPLL_DELAY (100) | ||
| 93 | |||
| 94 | static const struct clk_pll_table clk_audio_pll_table[] = { | ||
| 95 | { 0, 45158400 }, { 1, 49152000 }, | ||
| 96 | { 0, 0 }, | ||
| 97 | }; | ||
| 98 | |||
| 99 | /* pll clocks */ | ||
| 100 | static OWL_PLL_NO_PARENT_DELAY(ethernet_pll_clk, "ethernet_pll_clk", CMU_ETHERNETPLL, 500000000, 0, 0, 0, 0, 0, OWL_S500_ETHERNETPLL_DELAY, NULL, CLK_IGNORE_UNUSED); | ||
| 101 | static OWL_PLL_NO_PARENT_DELAY(core_pll_clk, "core_pll_clk", CMU_COREPLL, 12000000, 9, 0, 8, 4, 134, OWL_S500_COREPLL_DELAY, NULL, CLK_IGNORE_UNUSED); | ||
| 102 | static OWL_PLL_NO_PARENT_DELAY(ddr_pll_clk, "ddr_pll_clk", CMU_DDRPLL, 12000000, 8, 0, 8, 1, 67, OWL_S500_DDRPLL_DELAY, NULL, CLK_IGNORE_UNUSED); | ||
| 103 | static OWL_PLL_NO_PARENT_DELAY(nand_pll_clk, "nand_pll_clk", CMU_NANDPLL, 6000000, 8, 0, 7, 2, 86, OWL_S500_NANDPLL_DELAY, NULL, CLK_IGNORE_UNUSED); | ||
| 104 | static OWL_PLL_NO_PARENT_DELAY(display_pll_clk, "display_pll_clk", CMU_DISPLAYPLL, 6000000, 8, 0, 8, 2, 126, OWL_S500_DISPLAYPLL_DELAY, NULL, CLK_IGNORE_UNUSED); | ||
| 105 | static OWL_PLL_NO_PARENT_DELAY(dev_pll_clk, "dev_pll_clk", CMU_DEVPLL, 6000000, 8, 0, 7, 8, 126, OWL_S500_DEVPLL_DELAY, NULL, CLK_IGNORE_UNUSED); | ||
| 106 | static OWL_PLL_NO_PARENT_DELAY(audio_pll_clk, "audio_pll_clk", CMU_AUDIOPLL, 0, 4, 0, 1, 0, 0, OWL_S500_AUDIOPLL_DELAY, clk_audio_pll_table, CLK_IGNORE_UNUSED); | ||
| 107 | |||
| 108 | static const char * const dev_clk_mux_p[] = { "hosc", "dev_pll_clk" }; | ||
| 109 | static const char * const bisp_clk_mux_p[] = { "display_pll_clk", "dev_clk" }; | ||
| 110 | static const char * const sensor_clk_mux_p[] = { "hosc", "bisp_clk" }; | ||
| 111 | static const char * const sd_clk_mux_p[] = { "dev_clk", "nand_pll_clk" }; | ||
| 112 | static const char * const pwm_clk_mux_p[] = { "losc", "hosc" }; | ||
| 113 | static const char * const ahbprediv_clk_mux_p[] = { "dev_clk", "display_pll_clk", "nand_pll_clk", "ddr_pll_clk" }; | ||
| 114 | static const char * const uart_clk_mux_p[] = { "hosc", "dev_pll_clk" }; | ||
| 115 | static const char * const de_clk_mux_p[] = { "display_pll_clk", "dev_clk" }; | ||
| 116 | static const char * const i2s_clk_mux_p[] = { "audio_pll_clk" }; | ||
| 117 | static const char * const hde_clk_mux_p[] = { "dev_clk", "display_pll_clk", "nand_pll_clk", "ddr_pll_clk" }; | ||
| 118 | static const char * const nand_clk_mux_p[] = { "nand_pll_clk", "display_pll_clk", "dev_clk", "ddr_pll_clk" }; | ||
| 119 | |||
| 120 | static struct clk_factor_table sd_factor_table[] = { | ||
| 121 | /* bit0 ~ 4 */ | ||
| 122 | { 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 }, | ||
| 123 | { 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 }, | ||
| 124 | { 8, 1, 9 }, { 9, 1, 10 }, { 10, 1, 11 }, { 11, 1, 12 }, | ||
| 125 | { 12, 1, 13 }, { 13, 1, 14 }, { 14, 1, 15 }, { 15, 1, 16 }, | ||
| 126 | { 16, 1, 17 }, { 17, 1, 18 }, { 18, 1, 19 }, { 19, 1, 20 }, | ||
| 127 | { 20, 1, 21 }, { 21, 1, 22 }, { 22, 1, 23 }, { 23, 1, 24 }, | ||
| 128 | { 24, 1, 25 }, { 25, 1, 26 }, { 26, 1, 27 }, { 27, 1, 28 }, | ||
| 129 | { 28, 1, 29 }, { 29, 1, 30 }, { 30, 1, 31 }, { 31, 1, 32 }, | ||
| 130 | |||
| 131 | /* bit8: /128 */ | ||
| 132 | { 256, 1, 1 * 128 }, { 257, 1, 2 * 128 }, { 258, 1, 3 * 128 }, { 259, 1, 4 * 128 }, | ||
| 133 | { 260, 1, 5 * 128 }, { 261, 1, 6 * 128 }, { 262, 1, 7 * 128 }, { 263, 1, 8 * 128 }, | ||
| 134 | { 264, 1, 9 * 128 }, { 265, 1, 10 * 128 }, { 266, 1, 11 * 128 }, { 267, 1, 12 * 128 }, | ||
| 135 | { 268, 1, 13 * 128 }, { 269, 1, 14 * 128 }, { 270, 1, 15 * 128 }, { 271, 1, 16 * 128 }, | ||
| 136 | { 272, 1, 17 * 128 }, { 273, 1, 18 * 128 }, { 274, 1, 19 * 128 }, { 275, 1, 20 * 128 }, | ||
| 137 | { 276, 1, 21 * 128 }, { 277, 1, 22 * 128 }, { 278, 1, 23 * 128 }, { 279, 1, 24 * 128 }, | ||
| 138 | { 280, 1, 25 * 128 }, { 281, 1, 26 * 128 }, { 282, 1, 27 * 128 }, { 283, 1, 28 * 128 }, | ||
| 139 | { 284, 1, 29 * 128 }, { 285, 1, 30 * 128 }, { 286, 1, 31 * 128 }, { 287, 1, 32 * 128 }, | ||
| 140 | { 0, 0, 0 }, | ||
| 141 | }; | ||
| 142 | |||
| 143 | static struct clk_factor_table bisp_factor_table[] = { | ||
| 144 | { 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 }, | ||
| 145 | { 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 }, | ||
| 146 | { 0, 0, 0 }, | ||
| 147 | }; | ||
| 148 | |||
| 149 | static struct clk_factor_table ahb_factor_table[] = { | ||
| 150 | { 1, 1, 2 }, { 2, 1, 3 }, | ||
| 151 | { 0, 0, 0 }, | ||
| 152 | }; | ||
| 153 | |||
| 154 | static struct clk_div_table rmii_ref_div_table[] = { | ||
| 155 | { 0, 4 }, { 1, 10 }, | ||
| 156 | { 0, 0 }, | ||
| 157 | }; | ||
| 158 | |||
| 159 | static struct clk_div_table i2s_div_table[] = { | ||
| 160 | { 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 }, | ||
| 161 | { 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 }, | ||
| 162 | { 8, 24 }, | ||
| 163 | { 0, 0 }, | ||
| 164 | }; | ||
| 165 | |||
| 166 | static struct clk_div_table nand_div_table[] = { | ||
| 167 | { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 6 }, | ||
| 168 | { 4, 8 }, { 5, 10 }, { 6, 12 }, { 7, 14 }, | ||
| 169 | { 8, 16 }, { 9, 18 }, { 10, 20 }, { 11, 22 }, | ||
| 170 | { 0, 0 }, | ||
| 171 | }; | ||
| 172 | |||
| 173 | /* mux clock */ | ||
| 174 | static OWL_MUX(dev_clk, "dev_clk", dev_clk_mux_p, CMU_DEVPLL, 12, 1, CLK_SET_RATE_PARENT); | ||
| 175 | static OWL_MUX(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT); | ||
| 176 | |||
| 177 | /* gate clocks */ | ||
| 178 | static OWL_GATE(spi0_clk, "spi0_clk", "ahb_clk", CMU_DEVCLKEN1, 10, 0, CLK_IGNORE_UNUSED); | ||
| 179 | static OWL_GATE(spi1_clk, "spi1_clk", "ahb_clk", CMU_DEVCLKEN1, 11, 0, CLK_IGNORE_UNUSED); | ||
| 180 | static OWL_GATE(spi2_clk, "spi2_clk", "ahb_clk", CMU_DEVCLKEN1, 12, 0, CLK_IGNORE_UNUSED); | ||
| 181 | static OWL_GATE(spi3_clk, "spi3_clk", "ahb_clk", CMU_DEVCLKEN1, 13, 0, CLK_IGNORE_UNUSED); | ||
| 182 | static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0); | ||
| 183 | static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0); | ||
| 184 | |||
| 185 | /* divider clocks */ | ||
| 186 | static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0); | ||
| 187 | static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0); | ||
| 188 | |||
| 189 | /* factor clocks */ | ||
| 190 | static OWL_FACTOR(ahb_clk, "ahb_clk", "h_clk", CMU_BUSCLK1, 2, 2, ahb_factor_table, 0, 0); | ||
| 191 | static OWL_FACTOR(de1_clk, "de_clk1", "de_clk", CMU_DECLK, 0, 3, bisp_factor_table, 0, 0); | ||
| 192 | static OWL_FACTOR(de2_clk, "de_clk2", "de_clk", CMU_DECLK, 4, 3, bisp_factor_table, 0, 0); | ||
| 193 | |||
| 194 | /* composite clocks */ | ||
| 195 | static OWL_COMP_FACTOR(vce_clk, "vce_clk", hde_clk_mux_p, | ||
| 196 | OWL_MUX_HW(CMU_VCECLK, 4, 2), | ||
| 197 | OWL_GATE_HW(CMU_DEVCLKEN0, 26, 0), | ||
| 198 | OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, bisp_factor_table), | ||
| 199 | 0); | ||
| 200 | |||
| 201 | static OWL_COMP_FACTOR(vde_clk, "vde_clk", hde_clk_mux_p, | ||
| 202 | OWL_MUX_HW(CMU_VDECLK, 4, 2), | ||
| 203 | OWL_GATE_HW(CMU_DEVCLKEN0, 25, 0), | ||
| 204 | OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, bisp_factor_table), | ||
| 205 | 0); | ||
| 206 | |||
| 207 | static OWL_COMP_FACTOR(bisp_clk, "bisp_clk", bisp_clk_mux_p, | ||
| 208 | OWL_MUX_HW(CMU_BISPCLK, 4, 1), | ||
| 209 | OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0), | ||
| 210 | OWL_FACTOR_HW(CMU_BISPCLK, 0, 3, 0, bisp_factor_table), | ||
| 211 | 0); | ||
| 212 | |||
| 213 | static OWL_COMP_FACTOR(sensor0_clk, "sensor0_clk", sensor_clk_mux_p, | ||
| 214 | OWL_MUX_HW(CMU_SENSORCLK, 4, 1), | ||
| 215 | OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0), | ||
| 216 | OWL_FACTOR_HW(CMU_SENSORCLK, 0, 3, 0, bisp_factor_table), | ||
| 217 | CLK_IGNORE_UNUSED); | ||
| 218 | |||
| 219 | static OWL_COMP_FACTOR(sensor1_clk, "sensor1_clk", sensor_clk_mux_p, | ||
| 220 | OWL_MUX_HW(CMU_SENSORCLK, 4, 1), | ||
| 221 | OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0), | ||
| 222 | OWL_FACTOR_HW(CMU_SENSORCLK, 8, 3, 0, bisp_factor_table), | ||
| 223 | CLK_IGNORE_UNUSED); | ||
| 224 | |||
| 225 | static OWL_COMP_FACTOR(sd0_clk, "sd0_clk", sd_clk_mux_p, | ||
| 226 | OWL_MUX_HW(CMU_SD0CLK, 9, 1), | ||
| 227 | OWL_GATE_HW(CMU_DEVCLKEN0, 5, 0), | ||
| 228 | OWL_FACTOR_HW(CMU_SD0CLK, 0, 9, 0, sd_factor_table), | ||
| 229 | 0); | ||
| 230 | |||
| 231 | static OWL_COMP_FACTOR(sd1_clk, "sd1_clk", sd_clk_mux_p, | ||
| 232 | OWL_MUX_HW(CMU_SD1CLK, 9, 1), | ||
| 233 | OWL_GATE_HW(CMU_DEVCLKEN0, 6, 0), | ||
| 234 | OWL_FACTOR_HW(CMU_SD1CLK, 0, 9, 0, sd_factor_table), | ||
| 235 | 0); | ||
| 236 | |||
| 237 | static OWL_COMP_FACTOR(sd2_clk, "sd2_clk", sd_clk_mux_p, | ||
| 238 | OWL_MUX_HW(CMU_SD2CLK, 9, 1), | ||
| 239 | OWL_GATE_HW(CMU_DEVCLKEN0, 7, 0), | ||
| 240 | OWL_FACTOR_HW(CMU_SD2CLK, 0, 9, 0, sd_factor_table), | ||
| 241 | 0); | ||
| 242 | |||
| 243 | static OWL_COMP_DIV(pwm0_clk, "pwm0_clk", pwm_clk_mux_p, | ||
| 244 | OWL_MUX_HW(CMU_PWM0CLK, 12, 1), | ||
| 245 | OWL_GATE_HW(CMU_DEVCLKEN1, 23, 0), | ||
| 246 | OWL_DIVIDER_HW(CMU_PWM0CLK, 0, 10, 0, NULL), | ||
| 247 | 0); | ||
| 248 | |||
| 249 | static OWL_COMP_DIV(pwm1_clk, "pwm1_clk", pwm_clk_mux_p, | ||
| 250 | OWL_MUX_HW(CMU_PWM1CLK, 12, 1), | ||
| 251 | OWL_GATE_HW(CMU_DEVCLKEN1, 24, 0), | ||
| 252 | OWL_DIVIDER_HW(CMU_PWM1CLK, 0, 10, 0, NULL), | ||
| 253 | 0); | ||
| 254 | |||
| 255 | static OWL_COMP_DIV(pwm2_clk, "pwm2_clk", pwm_clk_mux_p, | ||
| 256 | OWL_MUX_HW(CMU_PWM2CLK, 12, 1), | ||
| 257 | OWL_GATE_HW(CMU_DEVCLKEN1, 25, 0), | ||
| 258 | OWL_DIVIDER_HW(CMU_PWM2CLK, 0, 10, 0, NULL), | ||
| 259 | 0); | ||
| 260 | |||
| 261 | static OWL_COMP_DIV(pwm3_clk, "pwm3_clk", pwm_clk_mux_p, | ||
| 262 | OWL_MUX_HW(CMU_PWM3CLK, 12, 1), | ||
| 263 | OWL_GATE_HW(CMU_DEVCLKEN1, 26, 0), | ||
| 264 | OWL_DIVIDER_HW(CMU_PWM3CLK, 0, 10, 0, NULL), | ||
| 265 | 0); | ||
| 266 | |||
| 267 | static OWL_COMP_DIV(pwm4_clk, "pwm4_clk", pwm_clk_mux_p, | ||
| 268 | OWL_MUX_HW(CMU_PWM4CLK, 12, 1), | ||
| 269 | OWL_GATE_HW(CMU_DEVCLKEN0, 11, 0), | ||
| 270 | OWL_DIVIDER_HW(CMU_PWM4CLK, 0, 10, 0, NULL), | ||
| 271 | 0); | ||
| 272 | |||
| 273 | static OWL_COMP_DIV(pwm5_clk, "pwm5_clk", pwm_clk_mux_p, | ||
| 274 | OWL_MUX_HW(CMU_PWM5CLK, 12, 1), | ||
| 275 | OWL_GATE_HW(CMU_DEVCLKEN0, 0, 0), | ||
| 276 | OWL_DIVIDER_HW(CMU_PWM5CLK, 0, 10, 0, NULL), | ||
| 277 | 0); | ||
| 278 | |||
| 279 | static OWL_COMP_PASS(de_clk, "de_clk", de_clk_mux_p, | ||
| 280 | OWL_MUX_HW(CMU_DECLK, 12, 1), | ||
| 281 | OWL_GATE_HW(CMU_DEVCLKEN0, 8, 0), | ||
| 282 | 0); | ||
| 283 | |||
| 284 | static OWL_COMP_FIXED_FACTOR(i2c0_clk, "i2c0_clk", "ethernet_pll_clk", | ||
| 285 | OWL_GATE_HW(CMU_DEVCLKEN1, 14, 0), | ||
| 286 | 1, 5, 0); | ||
| 287 | |||
| 288 | static OWL_COMP_FIXED_FACTOR(i2c1_clk, "i2c1_clk", "ethernet_pll_clk", | ||
| 289 | OWL_GATE_HW(CMU_DEVCLKEN1, 15, 0), | ||
| 290 | 1, 5, 0); | ||
| 291 | |||
| 292 | static OWL_COMP_FIXED_FACTOR(i2c2_clk, "i2c2_clk", "ethernet_pll_clk", | ||
| 293 | OWL_GATE_HW(CMU_DEVCLKEN1, 30, 0), | ||
| 294 | 1, 5, 0); | ||
| 295 | |||
| 296 | static OWL_COMP_FIXED_FACTOR(i2c3_clk, "i2c3_clk", "ethernet_pll_clk", | ||
| 297 | OWL_GATE_HW(CMU_DEVCLKEN1, 31, 0), | ||
| 298 | 1, 5, 0); | ||
| 299 | |||
| 300 | static OWL_COMP_DIV(uart0_clk, "uart0_clk", uart_clk_mux_p, | ||
| 301 | OWL_MUX_HW(CMU_UART0CLK, 16, 1), | ||
| 302 | OWL_GATE_HW(CMU_DEVCLKEN1, 6, 0), | ||
| 303 | OWL_DIVIDER_HW(CMU_UART0CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL), | ||
| 304 | CLK_IGNORE_UNUSED); | ||
| 305 | |||
| 306 | static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p, | ||
| 307 | OWL_MUX_HW(CMU_UART1CLK, 16, 1), | ||
| 308 | OWL_GATE_HW(CMU_DEVCLKEN1, 7, 0), | ||
| 309 | OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL), | ||
| 310 | CLK_IGNORE_UNUSED); | ||
| 311 | |||
| 312 | static OWL_COMP_DIV(uart2_clk, "uart2_clk", uart_clk_mux_p, | ||
| 313 | OWL_MUX_HW(CMU_UART2CLK, 16, 1), | ||
| 314 | OWL_GATE_HW(CMU_DEVCLKEN1, 8, 0), | ||
| 315 | OWL_DIVIDER_HW(CMU_UART2CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL), | ||
| 316 | CLK_IGNORE_UNUSED); | ||
| 317 | |||
| 318 | static OWL_COMP_DIV(uart3_clk, "uart3_clk", uart_clk_mux_p, | ||
| 319 | OWL_MUX_HW(CMU_UART3CLK, 16, 1), | ||
| 320 | OWL_GATE_HW(CMU_DEVCLKEN1, 19, 0), | ||
| 321 | OWL_DIVIDER_HW(CMU_UART3CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL), | ||
| 322 | CLK_IGNORE_UNUSED); | ||
| 323 | |||
| 324 | static OWL_COMP_DIV(uart4_clk, "uart4_clk", uart_clk_mux_p, | ||
| 325 | OWL_MUX_HW(CMU_UART4CLK, 16, 1), | ||
| 326 | OWL_GATE_HW(CMU_DEVCLKEN1, 20, 0), | ||
| 327 | OWL_DIVIDER_HW(CMU_UART4CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL), | ||
| 328 | CLK_IGNORE_UNUSED); | ||
| 329 | |||
| 330 | static OWL_COMP_DIV(uart5_clk, "uart5_clk", uart_clk_mux_p, | ||
| 331 | OWL_MUX_HW(CMU_UART5CLK, 16, 1), | ||
| 332 | OWL_GATE_HW(CMU_DEVCLKEN1, 21, 0), | ||
| 333 | OWL_DIVIDER_HW(CMU_UART5CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL), | ||
| 334 | CLK_IGNORE_UNUSED); | ||
| 335 | |||
| 336 | static OWL_COMP_DIV(uart6_clk, "uart6_clk", uart_clk_mux_p, | ||
| 337 | OWL_MUX_HW(CMU_UART6CLK, 16, 1), | ||
| 338 | OWL_GATE_HW(CMU_DEVCLKEN1, 18, 0), | ||
| 339 | OWL_DIVIDER_HW(CMU_UART6CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL), | ||
| 340 | CLK_IGNORE_UNUSED); | ||
| 341 | |||
| 342 | static OWL_COMP_DIV(i2srx_clk, "i2srx_clk", i2s_clk_mux_p, | ||
| 343 | OWL_MUX_HW(CMU_AUDIOPLL, 24, 1), | ||
| 344 | OWL_GATE_HW(CMU_DEVCLKEN0, 21, 0), | ||
| 345 | OWL_DIVIDER_HW(CMU_AUDIOPLL, 20, 4, 0, i2s_div_table), | ||
| 346 | 0); | ||
| 347 | |||
| 348 | static OWL_COMP_DIV(i2stx_clk, "i2stx_clk", i2s_clk_mux_p, | ||
| 349 | OWL_MUX_HW(CMU_AUDIOPLL, 24, 1), | ||
| 350 | OWL_GATE_HW(CMU_DEVCLKEN0, 20, 0), | ||
| 351 | OWL_DIVIDER_HW(CMU_AUDIOPLL, 16, 4, 0, i2s_div_table), | ||
| 352 | 0); | ||
| 353 | |||
| 354 | static OWL_COMP_DIV(hdmia_clk, "hdmia_clk", i2s_clk_mux_p, | ||
| 355 | OWL_MUX_HW(CMU_AUDIOPLL, 24, 1), | ||
| 356 | OWL_GATE_HW(CMU_DEVCLKEN0, 22, 0), | ||
| 357 | OWL_DIVIDER_HW(CMU_AUDIOPLL, 24, 4, 0, i2s_div_table), | ||
| 358 | 0); | ||
| 359 | |||
| 360 | static OWL_COMP_DIV(spdif_clk, "spdif_clk", i2s_clk_mux_p, | ||
| 361 | OWL_MUX_HW(CMU_AUDIOPLL, 24, 1), | ||
| 362 | OWL_GATE_HW(CMU_DEVCLKEN0, 23, 0), | ||
| 363 | OWL_DIVIDER_HW(CMU_AUDIOPLL, 28, 4, 0, i2s_div_table), | ||
| 364 | 0); | ||
| 365 | |||
| 366 | static OWL_COMP_DIV(nand_clk, "nand_clk", nand_clk_mux_p, | ||
| 367 | OWL_MUX_HW(CMU_NANDCCLK, 8, 2), | ||
| 368 | OWL_GATE_HW(CMU_DEVCLKEN0, 4, 0), | ||
| 369 | OWL_DIVIDER_HW(CMU_NANDCCLK, 0, 3, 0, nand_div_table), | ||
| 370 | CLK_SET_RATE_PARENT); | ||
| 371 | |||
| 372 | static OWL_COMP_DIV(ecc_clk, "ecc_clk", nand_clk_mux_p, | ||
| 373 | OWL_MUX_HW(CMU_NANDCCLK, 8, 2), | ||
| 374 | OWL_GATE_HW(CMU_DEVCLKEN0, 4, 0), | ||
| 375 | OWL_DIVIDER_HW(CMU_NANDCCLK, 4, 3, 0, nand_div_table), | ||
| 376 | CLK_SET_RATE_PARENT); | ||
| 377 | |||
| 378 | static struct owl_clk_common *s500_clks[] = { | ||
| 379 | &ethernet_pll_clk.common, | ||
| 380 | &core_pll_clk.common, | ||
| 381 | &ddr_pll_clk.common, | ||
| 382 | &dev_pll_clk.common, | ||
| 383 | &nand_pll_clk.common, | ||
| 384 | &audio_pll_clk.common, | ||
| 385 | &display_pll_clk.common, | ||
| 386 | &dev_clk.common, | ||
| 387 | &timer_clk.common, | ||
| 388 | &i2c0_clk.common, | ||
| 389 | &i2c1_clk.common, | ||
| 390 | &i2c2_clk.common, | ||
| 391 | &i2c3_clk.common, | ||
| 392 | &uart0_clk.common, | ||
| 393 | &uart1_clk.common, | ||
| 394 | &uart2_clk.common, | ||
| 395 | &uart3_clk.common, | ||
| 396 | &uart4_clk.common, | ||
| 397 | &uart5_clk.common, | ||
| 398 | &uart6_clk.common, | ||
| 399 | &pwm0_clk.common, | ||
| 400 | &pwm1_clk.common, | ||
| 401 | &pwm2_clk.common, | ||
| 402 | &pwm3_clk.common, | ||
| 403 | &pwm4_clk.common, | ||
| 404 | &pwm5_clk.common, | ||
| 405 | &sensor0_clk.common, | ||
| 406 | &sensor1_clk.common, | ||
| 407 | &sd0_clk.common, | ||
| 408 | &sd1_clk.common, | ||
| 409 | &sd2_clk.common, | ||
| 410 | &bisp_clk.common, | ||
| 411 | &ahb_clk.common, | ||
| 412 | &ahbprediv_clk.common, | ||
| 413 | &h_clk.common, | ||
| 414 | &spi0_clk.common, | ||
| 415 | &spi1_clk.common, | ||
| 416 | &spi2_clk.common, | ||
| 417 | &spi3_clk.common, | ||
| 418 | &rmii_ref_clk.common, | ||
| 419 | &de_clk.common, | ||
| 420 | &de1_clk.common, | ||
| 421 | &de2_clk.common, | ||
| 422 | &i2srx_clk.common, | ||
| 423 | &i2stx_clk.common, | ||
| 424 | &hdmia_clk.common, | ||
| 425 | &hdmi_clk.common, | ||
| 426 | &vce_clk.common, | ||
| 427 | &vde_clk.common, | ||
| 428 | &spdif_clk.common, | ||
| 429 | &nand_clk.common, | ||
| 430 | &ecc_clk.common, | ||
| 431 | }; | ||
| 432 | |||
| 433 | static struct clk_hw_onecell_data s500_hw_clks = { | ||
| 434 | .hws = { | ||
| 435 | [CLK_ETHERNET_PLL] = &ethernet_pll_clk.common.hw, | ||
| 436 | [CLK_CORE_PLL] = &core_pll_clk.common.hw, | ||
| 437 | [CLK_DDR_PLL] = &ddr_pll_clk.common.hw, | ||
| 438 | [CLK_NAND_PLL] = &nand_pll_clk.common.hw, | ||
| 439 | [CLK_DISPLAY_PLL] = &display_pll_clk.common.hw, | ||
| 440 | [CLK_DEV_PLL] = &dev_pll_clk.common.hw, | ||
| 441 | [CLK_AUDIO_PLL] = &audio_pll_clk.common.hw, | ||
| 442 | [CLK_TIMER] = &timer_clk.common.hw, | ||
| 443 | [CLK_DEV] = &dev_clk.common.hw, | ||
| 444 | [CLK_DE] = &de_clk.common.hw, | ||
| 445 | [CLK_DE1] = &de1_clk.common.hw, | ||
| 446 | [CLK_DE2] = &de2_clk.common.hw, | ||
| 447 | [CLK_I2C0] = &i2c0_clk.common.hw, | ||
| 448 | [CLK_I2C1] = &i2c1_clk.common.hw, | ||
| 449 | [CLK_I2C2] = &i2c2_clk.common.hw, | ||
| 450 | [CLK_I2C3] = &i2c3_clk.common.hw, | ||
| 451 | [CLK_I2SRX] = &i2srx_clk.common.hw, | ||
| 452 | [CLK_I2STX] = &i2stx_clk.common.hw, | ||
| 453 | [CLK_UART0] = &uart0_clk.common.hw, | ||
| 454 | [CLK_UART1] = &uart1_clk.common.hw, | ||
| 455 | [CLK_UART2] = &uart2_clk.common.hw, | ||
| 456 | [CLK_UART3] = &uart3_clk.common.hw, | ||
| 457 | [CLK_UART4] = &uart4_clk.common.hw, | ||
| 458 | [CLK_UART5] = &uart5_clk.common.hw, | ||
| 459 | [CLK_UART6] = &uart6_clk.common.hw, | ||
| 460 | [CLK_PWM0] = &pwm0_clk.common.hw, | ||
| 461 | [CLK_PWM1] = &pwm1_clk.common.hw, | ||
| 462 | [CLK_PWM2] = &pwm2_clk.common.hw, | ||
| 463 | [CLK_PWM3] = &pwm3_clk.common.hw, | ||
| 464 | [CLK_PWM4] = &pwm4_clk.common.hw, | ||
| 465 | [CLK_PWM5] = &pwm5_clk.common.hw, | ||
| 466 | [CLK_SENSOR0] = &sensor0_clk.common.hw, | ||
| 467 | [CLK_SENSOR1] = &sensor1_clk.common.hw, | ||
| 468 | [CLK_SD0] = &sd0_clk.common.hw, | ||
| 469 | [CLK_SD1] = &sd1_clk.common.hw, | ||
| 470 | [CLK_SD2] = &sd2_clk.common.hw, | ||
| 471 | [CLK_BISP] = &bisp_clk.common.hw, | ||
| 472 | [CLK_SPI0] = &spi0_clk.common.hw, | ||
| 473 | [CLK_SPI1] = &spi1_clk.common.hw, | ||
| 474 | [CLK_SPI2] = &spi2_clk.common.hw, | ||
| 475 | [CLK_SPI3] = &spi3_clk.common.hw, | ||
| 476 | [CLK_AHB] = &ahb_clk.common.hw, | ||
| 477 | [CLK_H] = &h_clk.common.hw, | ||
| 478 | [CLK_AHBPREDIV] = &ahbprediv_clk.common.hw, | ||
| 479 | [CLK_RMII_REF] = &rmii_ref_clk.common.hw, | ||
| 480 | [CLK_HDMI_AUDIO] = &hdmia_clk.common.hw, | ||
| 481 | [CLK_HDMI] = &hdmi_clk.common.hw, | ||
| 482 | [CLK_VDE] = &vde_clk.common.hw, | ||
| 483 | [CLK_VCE] = &vce_clk.common.hw, | ||
| 484 | [CLK_SPDIF] = &spdif_clk.common.hw, | ||
| 485 | [CLK_NAND] = &nand_clk.common.hw, | ||
| 486 | [CLK_ECC] = &ecc_clk.common.hw, | ||
| 487 | }, | ||
| 488 | .num = CLK_NR_CLKS, | ||
| 489 | }; | ||
| 490 | |||
| 491 | static struct owl_clk_desc s500_clk_desc = { | ||
| 492 | .clks = s500_clks, | ||
| 493 | .num_clks = ARRAY_SIZE(s500_clks), | ||
| 494 | |||
| 495 | .hw_clks = &s500_hw_clks, | ||
| 496 | }; | ||
| 497 | |||
| 498 | static int s500_clk_probe(struct platform_device *pdev) | ||
| 499 | { | ||
| 500 | struct owl_clk_desc *desc; | ||
| 501 | |||
| 502 | desc = &s500_clk_desc; | ||
| 503 | owl_clk_regmap_init(pdev, desc); | ||
| 504 | |||
| 505 | return owl_clk_probe(&pdev->dev, desc->hw_clks); | ||
| 506 | } | ||
| 507 | |||
| 508 | static const struct of_device_id s500_clk_of_match[] = { | ||
| 509 | { .compatible = "actions,s500-cmu", }, | ||
| 510 | { /* sentinel */ } | ||
| 511 | }; | ||
| 512 | |||
| 513 | static struct platform_driver s500_clk_driver = { | ||
| 514 | .probe = s500_clk_probe, | ||
| 515 | .driver = { | ||
| 516 | .name = "s500-cmu", | ||
| 517 | .of_match_table = s500_clk_of_match, | ||
| 518 | }, | ||
| 519 | }; | ||
| 520 | |||
| 521 | static int __init s500_clk_init(void) | ||
| 522 | { | ||
| 523 | return platform_driver_register(&s500_clk_driver); | ||
| 524 | } | ||
| 525 | core_initcall(s500_clk_init); | ||
diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c index 36d77146a3bd..3cc4a82f4e9f 100644 --- a/drivers/clk/at91/clk-audio-pll.c +++ b/drivers/clk/at91/clk-audio-pll.c | |||
| @@ -340,7 +340,12 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate, | |||
| 340 | pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__, | 340 | pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__, |
| 341 | rate, *parent_rate); | 341 | rate, *parent_rate); |
| 342 | 342 | ||
| 343 | for (div = 1; div <= AUDIO_PLL_QDPMC_MAX; div++) { | 343 | if (!rate) |
| 344 | return 0; | ||
| 345 | |||
| 346 | best_parent_rate = clk_round_rate(pclk->clk, 1); | ||
| 347 | div = max(best_parent_rate / rate, 1UL); | ||
| 348 | for (; div <= AUDIO_PLL_QDPMC_MAX; div++) { | ||
| 344 | best_parent_rate = clk_round_rate(pclk->clk, rate * div); | 349 | best_parent_rate = clk_round_rate(pclk->clk, rate * div); |
| 345 | tmp_rate = best_parent_rate / div; | 350 | tmp_rate = best_parent_rate / div; |
| 346 | tmp_diff = abs(rate - tmp_rate); | 351 | tmp_diff = abs(rate - tmp_rate); |
| @@ -350,6 +355,8 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate, | |||
| 350 | best_rate = tmp_rate; | 355 | best_rate = tmp_rate; |
| 351 | best_diff = tmp_diff; | 356 | best_diff = tmp_diff; |
| 352 | tmp_qd = div; | 357 | tmp_qd = div; |
| 358 | if (!best_diff) | ||
| 359 | break; /* got exact match */ | ||
| 353 | } | 360 | } |
| 354 | } | 361 | } |
| 355 | 362 | ||
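Editor's note: in the round_rate change above, clk_round_rate(pclk->clk, 1) effectively reports the lowest rate the parent PLL can deliver, so starting the divider scan at best_parent_rate / rate skips dividers that could only overshoot the request, and the new break exits as soon as an exact match is found. A minimal sketch of the lower bound, with a hypothetical helper name and assuming the kernel max() macro from <linux/minmax.h>:

static unsigned long example_first_div(unsigned long min_parent_rate,
				       unsigned long rate)
{
	/*
	 * A divider below min_parent_rate / rate would need a parent slower
	 * than the PLL can produce, so its output can only exceed "rate".
	 */
	return max(min_parent_rate / rate, 1UL);
}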
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c index 5bc68b9c5498..89d6f3736dbf 100644 --- a/drivers/clk/at91/clk-programmable.c +++ b/drivers/clk/at91/clk-programmable.c | |||
| @@ -132,11 +132,8 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 132 | struct clk_programmable *prog = to_clk_programmable(hw); | 132 | struct clk_programmable *prog = to_clk_programmable(hw); |
| 133 | const struct clk_programmable_layout *layout = prog->layout; | 133 | const struct clk_programmable_layout *layout = prog->layout; |
| 134 | unsigned long div = parent_rate / rate; | 134 | unsigned long div = parent_rate / rate; |
| 135 | unsigned int pckr; | ||
| 136 | int shift = 0; | 135 | int shift = 0; |
| 137 | 136 | ||
| 138 | regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr); | ||
| 139 | |||
| 140 | if (!div) | 137 | if (!div) |
| 141 | return -EINVAL; | 138 | return -EINVAL; |
| 142 | 139 | ||
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index cd0ef7274fdb..1f70cb164b06 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c | |||
| @@ -241,13 +241,14 @@ static void __init sama5d2_pmc_setup(struct device_node *np) | |||
| 241 | parent_names[2] = "plladivck"; | 241 | parent_names[2] = "plladivck"; |
| 242 | parent_names[3] = "utmick"; | 242 | parent_names[3] = "utmick"; |
| 243 | parent_names[4] = "masterck"; | 243 | parent_names[4] = "masterck"; |
| 244 | parent_names[5] = "audiopll_pmcck"; | ||
| 244 | for (i = 0; i < 3; i++) { | 245 | for (i = 0; i < 3; i++) { |
| 245 | char name[6]; | 246 | char name[6]; |
| 246 | 247 | ||
| 247 | snprintf(name, sizeof(name), "prog%d", i); | 248 | snprintf(name, sizeof(name), "prog%d", i); |
| 248 | 249 | ||
| 249 | hw = at91_clk_register_programmable(regmap, name, | 250 | hw = at91_clk_register_programmable(regmap, name, |
| 250 | parent_names, 5, i, | 251 | parent_names, 6, i, |
| 251 | &at91sam9x5_programmable_layout); | 252 | &at91sam9x5_programmable_layout); |
| 252 | if (IS_ERR(hw)) | 253 | if (IS_ERR(hw)) |
| 253 | goto err_free; | 254 | goto err_free; |
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c index 2c04396402ab..c36c47bdba02 100644 --- a/drivers/clk/clk-clps711x.c +++ b/drivers/clk/clk-clps711x.c | |||
| @@ -44,21 +44,21 @@ struct clps711x_clk { | |||
| 44 | struct clk_hw_onecell_data clk_data; | 44 | struct clk_hw_onecell_data clk_data; |
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base, | 47 | static void __init clps711x_clk_init_dt(struct device_node *np) |
| 48 | u32 fref) | ||
| 49 | { | 48 | { |
| 50 | u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi; | 49 | u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi, fref = 0; |
| 51 | struct clps711x_clk *clps711x_clk; | 50 | struct clps711x_clk *clps711x_clk; |
| 52 | unsigned i; | 51 | void __iomem *base; |
| 52 | |||
| 53 | WARN_ON(of_property_read_u32(np, "startup-frequency", &fref)); | ||
| 53 | 54 | ||
| 54 | if (!base) | 55 | base = of_iomap(np, 0); |
| 55 | return ERR_PTR(-ENOMEM); | 56 | BUG_ON(!base); |
| 56 | 57 | ||
| 57 | clps711x_clk = kzalloc(struct_size(clps711x_clk, clk_data.hws, | 58 | clps711x_clk = kzalloc(struct_size(clps711x_clk, clk_data.hws, |
| 58 | CLPS711X_CLK_MAX), | 59 | CLPS711X_CLK_MAX), |
| 59 | GFP_KERNEL); | 60 | GFP_KERNEL); |
| 60 | if (!clps711x_clk) | 61 | BUG_ON(!clps711x_clk); |
| 61 | return ERR_PTR(-ENOMEM); | ||
| 62 | 62 | ||
| 63 | spin_lock_init(&clps711x_clk->lock); | 63 | spin_lock_init(&clps711x_clk->lock); |
| 64 | 64 | ||
| @@ -137,52 +137,13 @@ static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base, | |||
| 137 | clk_hw_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10); | 137 | clk_hw_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10); |
| 138 | clps711x_clk->clk_data.hws[CLPS711X_CLK_TICK] = | 138 | clps711x_clk->clk_data.hws[CLPS711X_CLK_TICK] = |
| 139 | clk_hw_register_fixed_rate(NULL, "tick", NULL, 0, 64); | 139 | clk_hw_register_fixed_rate(NULL, "tick", NULL, 0, 64); |
| 140 | for (i = 0; i < CLPS711X_CLK_MAX; i++) | 140 | for (tmp = 0; tmp < CLPS711X_CLK_MAX; tmp++) |
| 141 | if (IS_ERR(clps711x_clk->clk_data.hws[i])) | 141 | if (IS_ERR(clps711x_clk->clk_data.hws[tmp])) |
| 142 | pr_err("clk %i: register failed with %ld\n", | 142 | pr_err("clk %i: register failed with %ld\n", |
| 143 | i, PTR_ERR(clps711x_clk->clk_data.hws[i])); | 143 | tmp, PTR_ERR(clps711x_clk->clk_data.hws[tmp])); |
| 144 | |||
| 145 | return clps711x_clk; | ||
| 146 | } | ||
| 147 | |||
| 148 | void __init clps711x_clk_init(void __iomem *base) | ||
| 149 | { | ||
| 150 | struct clps711x_clk *clps711x_clk; | ||
| 151 | |||
| 152 | clps711x_clk = _clps711x_clk_init(base, 73728000); | ||
| 153 | |||
| 154 | BUG_ON(IS_ERR(clps711x_clk)); | ||
| 155 | |||
| 156 | /* Clocksource */ | ||
| 157 | clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER1], | ||
| 158 | NULL, "clps711x-timer.0"); | ||
| 159 | clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_TIMER2], | ||
| 160 | NULL, "clps711x-timer.1"); | ||
| 161 | |||
| 162 | /* Drivers */ | ||
| 163 | clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_PWM], | ||
| 164 | NULL, "clps711x-pwm"); | ||
| 165 | clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART], | ||
| 166 | NULL, "clps711x-uart.0"); | ||
| 167 | clk_hw_register_clkdev(clps711x_clk->clk_data.hws[CLPS711X_CLK_UART], | ||
| 168 | NULL, "clps711x-uart.1"); | ||
| 169 | } | ||
| 170 | |||
| 171 | #ifdef CONFIG_OF | ||
| 172 | static void __init clps711x_clk_init_dt(struct device_node *np) | ||
| 173 | { | ||
| 174 | void __iomem *base = of_iomap(np, 0); | ||
| 175 | struct clps711x_clk *clps711x_clk; | ||
| 176 | u32 fref = 0; | ||
| 177 | |||
| 178 | WARN_ON(of_property_read_u32(np, "startup-frequency", &fref)); | ||
| 179 | |||
| 180 | clps711x_clk = _clps711x_clk_init(base, fref); | ||
| 181 | BUG_ON(IS_ERR(clps711x_clk)); | ||
| 182 | 144 | ||
| 183 | clps711x_clk->clk_data.num = CLPS711X_CLK_MAX; | 145 | clps711x_clk->clk_data.num = CLPS711X_CLK_MAX; |
| 184 | of_clk_add_hw_provider(np, of_clk_hw_onecell_get, | 146 | of_clk_add_hw_provider(np, of_clk_hw_onecell_get, |
| 185 | &clps711x_clk->clk_data); | 147 | &clps711x_clk->clk_data); |
| 186 | } | 148 | } |
| 187 | CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt); | 149 | CLK_OF_DECLARE(clps711x, "cirrus,ep7209-clk", clps711x_clk_init_dt); |
| 188 | #endif | ||
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c index c9a86156ced8..daa1fc8fba53 100644 --- a/drivers/clk/clk-devres.c +++ b/drivers/clk/clk-devres.c | |||
| @@ -29,6 +29,17 @@ struct clk *devm_clk_get(struct device *dev, const char *id) | |||
| 29 | } | 29 | } |
| 30 | EXPORT_SYMBOL(devm_clk_get); | 30 | EXPORT_SYMBOL(devm_clk_get); |
| 31 | 31 | ||
| 32 | struct clk *devm_clk_get_optional(struct device *dev, const char *id) | ||
| 33 | { | ||
| 34 | struct clk *clk = devm_clk_get(dev, id); | ||
| 35 | |||
| 36 | if (clk == ERR_PTR(-ENOENT)) | ||
| 37 | return NULL; | ||
| 38 | |||
| 39 | return clk; | ||
| 40 | } | ||
| 41 | EXPORT_SYMBOL(devm_clk_get_optional); | ||
| 42 | |||
| 32 | struct clk_bulk_devres { | 43 | struct clk_bulk_devres { |
| 33 | struct clk_bulk_data *clks; | 44 | struct clk_bulk_data *clks; |
| 34 | int num_clks; | 45 | int num_clks; |
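Editor's note (hypothetical consumer, not part of the patch): devm_clk_get_optional() maps the "clock not described" case (-ENOENT) to NULL while still propagating real errors, and a NULL clock is accepted by the clk API as a no-op, so an optional clock needs no special-casing in a driver. The "bus" clock name below is illustrative:

#include <linux/clk.h>
#include <linux/device.h>

static int example_enable_optional_clk(struct device *dev)
{
	struct clk *bus_clk;

	bus_clk = devm_clk_get_optional(dev, "bus");
	if (IS_ERR(bus_clk))
		return PTR_ERR(bus_clk);	/* genuine errors still fail the probe */

	/* clk_prepare_enable(NULL) is a no-op returning 0. */
	return clk_prepare_enable(bus_clk);
}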
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c new file mode 100644 index 000000000000..d1a97d971183 --- /dev/null +++ b/drivers/clk/clk-fixed-mmio.c | |||
| @@ -0,0 +1,101 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | |||
| 3 | /* | ||
| 4 | * Memory Mapped IO Fixed clock driver | ||
| 5 | * | ||
| 6 | * Copyright (C) 2018 Cadence Design Systems, Inc. | ||
| 7 | * | ||
| 8 | * Authors: | ||
| 9 | * Jan Kotas <jank@cadence.com> | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/clk-provider.h> | ||
| 13 | #include <linux/of_address.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/platform_device.h> | ||
| 16 | |||
| 17 | static struct clk_hw *fixed_mmio_clk_setup(struct device_node *node) | ||
| 18 | { | ||
| 19 | struct clk_hw *clk; | ||
| 20 | const char *clk_name = node->name; | ||
| 21 | void __iomem *base; | ||
| 22 | u32 freq; | ||
| 23 | int ret; | ||
| 24 | |||
| 25 | base = of_iomap(node, 0); | ||
| 26 | if (!base) { | ||
| 27 | pr_err("%pOFn: failed to map address\n", node); | ||
| 28 | return ERR_PTR(-EIO); | ||
| 29 | } | ||
| 30 | |||
| 31 | freq = readl(base); | ||
| 32 | iounmap(base); | ||
| 33 | of_property_read_string(node, "clock-output-names", &clk_name); | ||
| 34 | |||
| 35 | clk = clk_hw_register_fixed_rate(NULL, clk_name, NULL, 0, freq); | ||
| 36 | if (IS_ERR(clk)) { | ||
| 37 | pr_err("%pOFn: failed to register fixed rate clock\n", node); | ||
| 38 | return clk; | ||
| 39 | } | ||
| 40 | |||
| 41 | ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, clk); | ||
| 42 | if (ret) { | ||
| 43 | pr_err("%pOFn: failed to add clock provider\n", node); | ||
| 44 | clk_hw_unregister(clk); | ||
| 45 | clk = ERR_PTR(ret); | ||
| 46 | } | ||
| 47 | |||
| 48 | return clk; | ||
| 49 | } | ||
| 50 | |||
| 51 | static void __init of_fixed_mmio_clk_setup(struct device_node *node) | ||
| 52 | { | ||
| 53 | fixed_mmio_clk_setup(node); | ||
| 54 | } | ||
| 55 | CLK_OF_DECLARE(fixed_mmio_clk, "fixed-mmio-clock", of_fixed_mmio_clk_setup); | ||
| 56 | |||
| 57 | /* | ||
| 58 | * This is not executed when of_fixed_mmio_clk_setup() has succeeded. | ||
| 59 | */ | ||
| 60 | static int of_fixed_mmio_clk_probe(struct platform_device *pdev) | ||
| 61 | { | ||
| 62 | struct clk_hw *clk; | ||
| 63 | |||
| 64 | clk = fixed_mmio_clk_setup(pdev->dev.of_node); | ||
| 65 | if (IS_ERR(clk)) | ||
| 66 | return PTR_ERR(clk); | ||
| 67 | |||
| 68 | platform_set_drvdata(pdev, clk); | ||
| 69 | |||
| 70 | return 0; | ||
| 71 | } | ||
| 72 | |||
| 73 | static int of_fixed_mmio_clk_remove(struct platform_device *pdev) | ||
| 74 | { | ||
| 75 | struct clk_hw *clk = platform_get_drvdata(pdev); | ||
| 76 | |||
| 77 | of_clk_del_provider(pdev->dev.of_node); | ||
| 78 | clk_hw_unregister_fixed_rate(clk); | ||
| 79 | |||
| 80 | return 0; | ||
| 81 | } | ||
| 82 | |||
| 83 | static const struct of_device_id of_fixed_mmio_clk_ids[] = { | ||
| 84 | { .compatible = "fixed-mmio-clock" }, | ||
| 85 | { } | ||
| 86 | }; | ||
| 87 | MODULE_DEVICE_TABLE(of, of_fixed_mmio_clk_ids); | ||
| 88 | |||
| 89 | static struct platform_driver of_fixed_mmio_clk_driver = { | ||
| 90 | .driver = { | ||
| 91 | .name = "of_fixed_mmio_clk", | ||
| 92 | .of_match_table = of_fixed_mmio_clk_ids, | ||
| 93 | }, | ||
| 94 | .probe = of_fixed_mmio_clk_probe, | ||
| 95 | .remove = of_fixed_mmio_clk_remove, | ||
| 96 | }; | ||
| 97 | module_platform_driver(of_fixed_mmio_clk_driver); | ||
| 98 | |||
| 99 | MODULE_AUTHOR("Jan Kotas <jank@cadence.com>"); | ||
| 100 | MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver"); | ||
| 101 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c index 545dceec0bbf..fdfe2e423d15 100644 --- a/drivers/clk/clk-fractional-divider.c +++ b/drivers/clk/clk-fractional-divider.c | |||
| @@ -79,7 +79,7 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate, | |||
| 79 | unsigned long m, n; | 79 | unsigned long m, n; |
| 80 | u64 ret; | 80 | u64 ret; |
| 81 | 81 | ||
| 82 | if (!rate || rate >= *parent_rate) | 82 | if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate)) |
| 83 | return *parent_rate; | 83 | return *parent_rate; |
| 84 | 84 | ||
| 85 | if (fd->approximation) | 85 | if (fd->approximation) |
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c index 25eed3e0251f..c2f07f0d077c 100644 --- a/drivers/clk/clk-gpio.c +++ b/drivers/clk/clk-gpio.c | |||
| @@ -58,6 +58,35 @@ const struct clk_ops clk_gpio_gate_ops = { | |||
| 58 | }; | 58 | }; |
| 59 | EXPORT_SYMBOL_GPL(clk_gpio_gate_ops); | 59 | EXPORT_SYMBOL_GPL(clk_gpio_gate_ops); |
| 60 | 60 | ||
| 61 | static int clk_sleeping_gpio_gate_prepare(struct clk_hw *hw) | ||
| 62 | { | ||
| 63 | struct clk_gpio *clk = to_clk_gpio(hw); | ||
| 64 | |||
| 65 | gpiod_set_value_cansleep(clk->gpiod, 1); | ||
| 66 | |||
| 67 | return 0; | ||
| 68 | } | ||
| 69 | |||
| 70 | static void clk_sleeping_gpio_gate_unprepare(struct clk_hw *hw) | ||
| 71 | { | ||
| 72 | struct clk_gpio *clk = to_clk_gpio(hw); | ||
| 73 | |||
| 74 | gpiod_set_value_cansleep(clk->gpiod, 0); | ||
| 75 | } | ||
| 76 | |||
| 77 | static int clk_sleeping_gpio_gate_is_prepared(struct clk_hw *hw) | ||
| 78 | { | ||
| 79 | struct clk_gpio *clk = to_clk_gpio(hw); | ||
| 80 | |||
| 81 | return gpiod_get_value_cansleep(clk->gpiod); | ||
| 82 | } | ||
| 83 | |||
| 84 | static const struct clk_ops clk_sleeping_gpio_gate_ops = { | ||
| 85 | .prepare = clk_sleeping_gpio_gate_prepare, | ||
| 86 | .unprepare = clk_sleeping_gpio_gate_unprepare, | ||
| 87 | .is_prepared = clk_sleeping_gpio_gate_is_prepared, | ||
| 88 | }; | ||
| 89 | |||
| 61 | /** | 90 | /** |
| 62 | * DOC: basic clock multiplexer which can be controlled with a gpio output | 91 | * DOC: basic clock multiplexer which can be controlled with a gpio output |
| 63 | * Traits of this clock: | 92 | * Traits of this clock: |
| @@ -144,10 +173,16 @@ struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name, | |||
| 144 | const char *parent_name, struct gpio_desc *gpiod, | 173 | const char *parent_name, struct gpio_desc *gpiod, |
| 145 | unsigned long flags) | 174 | unsigned long flags) |
| 146 | { | 175 | { |
| 176 | const struct clk_ops *ops; | ||
| 177 | |||
| 178 | if (gpiod_cansleep(gpiod)) | ||
| 179 | ops = &clk_sleeping_gpio_gate_ops; | ||
| 180 | else | ||
| 181 | ops = &clk_gpio_gate_ops; | ||
| 182 | |||
| 147 | return clk_register_gpio(dev, name, | 183 | return clk_register_gpio(dev, name, |
| 148 | (parent_name ? &parent_name : NULL), | 184 | (parent_name ? &parent_name : NULL), |
| 149 | (parent_name ? 1 : 0), gpiod, flags, | 185 | (parent_name ? 1 : 0), gpiod, flags, ops); |
| 150 | &clk_gpio_gate_ops); | ||
| 151 | } | 186 | } |
| 152 | EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate); | 187 | EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate); |
| 153 | 188 | ||
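Editor's note (illustrative registration path, not from the patch): with this change, clk_hw_register_gpio_gate() checks gpiod_cansleep() and selects the prepare/unprepare ops when the gate GPIO sits behind a sleeping controller (an I2C expander, say), instead of unconditionally using the atomic enable/disable ops. A hypothetical caller, with placeholder clock and GPIO names:

#include <linux/clk-provider.h>
#include <linux/gpio/consumer.h>

static struct clk_hw *example_register_gate(struct device *dev)
{
	struct gpio_desc *gpiod;

	/* May come from an I2C GPIO expander, hence possibly sleeping. */
	gpiod = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(gpiod))
		return ERR_CAST(gpiod);

	return clk_hw_register_gpio_gate(dev, "example-gate", "parent-clk",
					 gpiod, 0);
}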
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c index 727ed8e1bb72..8e4581004695 100644 --- a/drivers/clk/clk-highbank.c +++ b/drivers/clk/clk-highbank.c | |||
| @@ -293,6 +293,7 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk | |||
| 293 | /* Map system registers */ | 293 | /* Map system registers */ |
| 294 | srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs"); | 294 | srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs"); |
| 295 | hb_clk->reg = of_iomap(srnp, 0); | 295 | hb_clk->reg = of_iomap(srnp, 0); |
| 296 | of_node_put(srnp); | ||
| 296 | BUG_ON(!hb_clk->reg); | 297 | BUG_ON(!hb_clk->reg); |
| 297 | hb_clk->reg += reg; | 298 | hb_clk->reg += reg; |
| 298 | 299 | ||
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c index 22c937644c93..3727d5472450 100644 --- a/drivers/clk/clk-max77686.c +++ b/drivers/clk/clk-max77686.c | |||
| @@ -235,8 +235,9 @@ static int max77686_clk_probe(struct platform_device *pdev) | |||
| 235 | return ret; | 235 | return ret; |
| 236 | } | 236 | } |
| 237 | 237 | ||
| 238 | ret = clk_hw_register_clkdev(&max_clk_data->hw, | 238 | ret = devm_clk_hw_register_clkdev(dev, &max_clk_data->hw, |
| 239 | max_clk_data->clk_idata.name, NULL); | 239 | max_clk_data->clk_idata.name, |
| 240 | NULL); | ||
| 240 | if (ret < 0) { | 241 | if (ret < 0) { |
| 241 | dev_err(dev, "Failed to clkdev register: %d\n", ret); | 242 | dev_err(dev, "Failed to clkdev register: %d\n", ret); |
| 242 | return ret; | 243 | return ret; |
| @@ -244,8 +245,8 @@ static int max77686_clk_probe(struct platform_device *pdev) | |||
| 244 | } | 245 | } |
| 245 | 246 | ||
| 246 | if (parent->of_node) { | 247 | if (parent->of_node) { |
| 247 | ret = of_clk_add_hw_provider(parent->of_node, of_clk_max77686_get, | 248 | ret = devm_of_clk_add_hw_provider(dev, of_clk_max77686_get, |
| 248 | drv_data); | 249 | drv_data); |
| 249 | 250 | ||
| 250 | if (ret < 0) { | 251 | if (ret < 0) { |
| 251 | dev_err(dev, "Failed to register OF clock provider: %d\n", | 252 | dev_err(dev, "Failed to register OF clock provider: %d\n", |
| @@ -261,27 +262,11 @@ static int max77686_clk_probe(struct platform_device *pdev) | |||
| 261 | 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT); | 262 | 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT); |
| 262 | if (ret < 0) { | 263 | if (ret < 0) { |
| 263 | dev_err(dev, "Failed to config low-jitter: %d\n", ret); | 264 | dev_err(dev, "Failed to config low-jitter: %d\n", ret); |
| 264 | goto remove_of_clk_provider; | 265 | return ret; |
| 265 | } | 266 | } |
| 266 | } | 267 | } |
| 267 | 268 | ||
| 268 | return 0; | 269 | return 0; |
| 269 | |||
| 270 | remove_of_clk_provider: | ||
| 271 | if (parent->of_node) | ||
| 272 | of_clk_del_provider(parent->of_node); | ||
| 273 | |||
| 274 | return ret; | ||
| 275 | } | ||
| 276 | |||
| 277 | static int max77686_clk_remove(struct platform_device *pdev) | ||
| 278 | { | ||
| 279 | struct device *parent = pdev->dev.parent; | ||
| 280 | |||
| 281 | if (parent->of_node) | ||
| 282 | of_clk_del_provider(parent->of_node); | ||
| 283 | |||
| 284 | return 0; | ||
| 285 | } | 270 | } |
| 286 | 271 | ||
| 287 | static const struct platform_device_id max77686_clk_id[] = { | 272 | static const struct platform_device_id max77686_clk_id[] = { |
| @@ -297,7 +282,6 @@ static struct platform_driver max77686_clk_driver = { | |||
| 297 | .name = "max77686-clk", | 282 | .name = "max77686-clk", |
| 298 | }, | 283 | }, |
| 299 | .probe = max77686_clk_probe, | 284 | .probe = max77686_clk_probe, |
| 300 | .remove = max77686_clk_remove, | ||
| 301 | .id_table = max77686_clk_id, | 285 | .id_table = max77686_clk_id, |
| 302 | }; | 286 | }; |
| 303 | 287 | ||
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 5baa9e051110..1212a9be7e80 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c | |||
| @@ -1148,8 +1148,8 @@ static void __init create_one_pll(struct clockgen *cg, int idx) | |||
| 1148 | pll->div[i].clk = clk; | 1148 | pll->div[i].clk = clk; |
| 1149 | ret = clk_register_clkdev(clk, pll->div[i].name, NULL); | 1149 | ret = clk_register_clkdev(clk, pll->div[i].name, NULL); |
| 1150 | if (ret != 0) | 1150 | if (ret != 0) |
| 1151 | pr_err("%s: %s: register to lookup table failed %ld\n", | 1151 | pr_err("%s: %s: register to lookup table failed %d\n", |
| 1152 | __func__, pll->div[i].name, PTR_ERR(clk)); | 1152 | __func__, pll->div[i].name, ret); |
| 1153 | 1153 | ||
| 1154 | } | 1154 | } |
| 1155 | } | 1155 | } |
| @@ -1389,6 +1389,7 @@ static void __init clockgen_init(struct device_node *np) | |||
| 1389 | pr_err("%s: Couldn't map %pOF regs\n", __func__, | 1389 | pr_err("%s: Couldn't map %pOF regs\n", __func__, |
| 1390 | guts); | 1390 | guts); |
| 1391 | } | 1391 | } |
| 1392 | of_node_put(guts); | ||
| 1392 | } | 1393 | } |
| 1393 | 1394 | ||
| 1394 | } | 1395 | } |
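Annotation: the added of_node_put() balances the reference implicitly taken by of_find_compatible_node(); the same leak fix appears in the i.MX6/i.MX7 hunks further down. A minimal sketch of the pattern, using a made-up compatible string:

#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *example_map_block(void)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, "vendor,example-block");
	base = of_iomap(np, 0);	/* the mapping does not pin the node */
	of_node_put(np);	/* drop the lookup's reference once np is no longer used */

	return base;		/* may be NULL if the node or its registers are missing */
}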
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c index 6a31f7f434ce..a0ae8dc16909 100644 --- a/drivers/clk/clk-stm32mp1.c +++ b/drivers/clk/clk-stm32mp1.c | |||
| @@ -121,7 +121,7 @@ static const char * const cpu_src[] = { | |||
| 121 | }; | 121 | }; |
| 122 | 122 | ||
| 123 | static const char * const axi_src[] = { | 123 | static const char * const axi_src[] = { |
| 124 | "ck_hsi", "ck_hse", "pll2_p", "pll3_p" | 124 | "ck_hsi", "ck_hse", "pll2_p" |
| 125 | }; | 125 | }; |
| 126 | 126 | ||
| 127 | static const char * const per_src[] = { | 127 | static const char * const per_src[] = { |
| @@ -225,19 +225,19 @@ static const char * const usart6_src[] = { | |||
| 225 | }; | 225 | }; |
| 226 | 226 | ||
| 227 | static const char * const fdcan_src[] = { | 227 | static const char * const fdcan_src[] = { |
| 228 | "ck_hse", "pll3_q", "pll4_q" | 228 | "ck_hse", "pll3_q", "pll4_q", "pll4_r" |
| 229 | }; | 229 | }; |
| 230 | 230 | ||
| 231 | static const char * const sai_src[] = { | 231 | static const char * const sai_src[] = { |
| 232 | "pll4_q", "pll3_q", "i2s_ckin", "ck_per" | 232 | "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "pll3_r" |
| 233 | }; | 233 | }; |
| 234 | 234 | ||
| 235 | static const char * const sai2_src[] = { | 235 | static const char * const sai2_src[] = { |
| 236 | "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb" | 236 | "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb", "pll3_r" |
| 237 | }; | 237 | }; |
| 238 | 238 | ||
| 239 | static const char * const adc12_src[] = { | 239 | static const char * const adc12_src[] = { |
| 240 | "pll4_q", "ck_per" | 240 | "pll4_r", "ck_per", "pll3_q" |
| 241 | }; | 241 | }; |
| 242 | 242 | ||
| 243 | static const char * const dsi_src[] = { | 243 | static const char * const dsi_src[] = { |
| @@ -269,7 +269,7 @@ static const struct clk_div_table axi_div_table[] = { | |||
| 269 | static const struct clk_div_table mcu_div_table[] = { | 269 | static const struct clk_div_table mcu_div_table[] = { |
| 270 | { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 }, | 270 | { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 }, |
| 271 | { 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 }, | 271 | { 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 }, |
| 272 | { 8, 512 }, { 9, 512 }, { 10, 512}, { 11, 512 }, | 272 | { 8, 256 }, { 9, 512 }, { 10, 512}, { 11, 512 }, |
| 273 | { 12, 512 }, { 13, 512 }, { 14, 512}, { 15, 512 }, | 273 | { 12, 512 }, { 13, 512 }, { 14, 512}, { 15, 512 }, |
| 274 | { 0 }, | 274 | { 0 }, |
| 275 | }; | 275 | }; |
| @@ -1286,10 +1286,11 @@ _clk_stm32_register_composite(struct device *dev, | |||
| 1286 | MGATE_MP1(_id, _name, _parent, _flags, _mgate) | 1286 | MGATE_MP1(_id, _name, _parent, _flags, _mgate) |
| 1287 | 1287 | ||
| 1288 | #define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\ | 1288 | #define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\ |
| 1289 | COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE | _flags,\ | 1289 | COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE |\ |
| 1290 | _MGATE_MP1(_mgate),\ | 1290 | CLK_SET_RATE_NO_REPARENT | _flags,\ |
| 1291 | _MMUX(_mmux),\ | 1291 | _MGATE_MP1(_mgate),\ |
| 1292 | _NO_DIV) | 1292 | _MMUX(_mmux),\ |
| 1293 | _NO_DIV) | ||
| 1293 | 1294 | ||
| 1294 | enum { | 1295 | enum { |
| 1295 | G_SAI1, | 1296 | G_SAI1, |
| @@ -1655,12 +1656,14 @@ static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = { | |||
| 1655 | 1656 | ||
| 1656 | static const struct clock_config stm32mp1_clock_cfg[] = { | 1657 | static const struct clock_config stm32mp1_clock_cfg[] = { |
| 1657 | /* Oscillator divider */ | 1658 | /* Oscillator divider */ |
| 1658 | DIV(NO_ID, "clk-hsi-div", "clk-hsi", 0, RCC_HSICFGR, 0, 2, | 1659 | DIV(NO_ID, "clk-hsi-div", "clk-hsi", CLK_DIVIDER_POWER_OF_TWO, |
| 1659 | CLK_DIVIDER_READ_ONLY), | 1660 | RCC_HSICFGR, 0, 2, CLK_DIVIDER_READ_ONLY), |
| 1660 | 1661 | ||
| 1661 | /* External / Internal Oscillators */ | 1662 | /* External / Internal Oscillators */ |
| 1662 | GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0), | 1663 | GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0), |
| 1663 | GATE_MP1(CK_CSI, "ck_csi", "clk-csi", 0, RCC_OCENSETR, 4, 0), | 1664 | /* ck_csi is used by IO compensation and should be critical */ |
| 1665 | GATE_MP1(CK_CSI, "ck_csi", "clk-csi", CLK_IS_CRITICAL, | ||
| 1666 | RCC_OCENSETR, 4, 0), | ||
| 1664 | GATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0), | 1667 | GATE_MP1(CK_HSI, "ck_hsi", "clk-hsi-div", 0, RCC_OCENSETR, 0, 0), |
| 1665 | GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0), | 1668 | GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0), |
| 1666 | GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0), | 1669 | GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0), |
| @@ -1952,14 +1955,14 @@ static const struct clock_config stm32mp1_clock_cfg[] = { | |||
| 1952 | MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU), | 1955 | MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU), |
| 1953 | MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12), | 1956 | MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12), |
| 1954 | 1957 | ||
| 1955 | COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE, | 1958 | COMPOSITE(ETHPTP_K, "ethptp_k", eth_src, CLK_OPS_PARENT_ENABLE | |
| 1959 | CLK_SET_RATE_NO_REPARENT, | ||
| 1956 | _NO_GATE, | 1960 | _NO_GATE, |
| 1957 | _MMUX(M_ETHCK), | 1961 | _MMUX(M_ETHCK), |
| 1958 | _DIV(RCC_ETHCKSELR, 4, 4, CLK_DIVIDER_ALLOW_ZERO, NULL)), | 1962 | _DIV(RCC_ETHCKSELR, 4, 4, 0, NULL)), |
| 1959 | 1963 | ||
| 1960 | /* RTC clock */ | 1964 | /* RTC clock */ |
| 1961 | DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 7, | 1965 | DIV(NO_ID, "ck_hse_rtc", "ck_hse", 0, RCC_RTCDIVR, 0, 6, 0), |
| 1962 | CLK_DIVIDER_ALLOW_ZERO), | ||
| 1963 | 1966 | ||
| 1964 | COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE | | 1967 | COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE | |
| 1965 | CLK_SET_RATE_PARENT, | 1968 | CLK_SET_RATE_PARENT, |
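Annotation: the "clk-hsi-div" change above switches the generic divider to CLK_DIVIDER_POWER_OF_TWO, so the 2-bit HSIDIV field is read as log2 of the divider rather than the framework's default "value + 1" encoding. A one-line illustration of the two interpretations (not code from the patch):

static unsigned int div_default(unsigned int field)      { return field + 1; }   /* 0..3 -> /1../4  */
static unsigned int div_power_of_two(unsigned int field) { return 1U << field; } /* 0..3 -> /1,/2,/4,/8 */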
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c index ea846f77750b..0cad5748bf0e 100644 --- a/drivers/clk/clk-twl6040.c +++ b/drivers/clk/clk-twl6040.c | |||
| @@ -41,6 +41,43 @@ static int twl6040_pdmclk_is_prepared(struct clk_hw *hw) | |||
| 41 | return pdmclk->enabled; | 41 | return pdmclk->enabled; |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | static int twl6040_pdmclk_reset_one_clock(struct twl6040_pdmclk *pdmclk, | ||
| 45 | unsigned int reg) | ||
| 46 | { | ||
| 47 | const u8 reset_mask = TWL6040_HPLLRST; /* Same for HPPLL and LPPLL */ | ||
| 48 | int ret; | ||
| 49 | |||
| 50 | ret = twl6040_set_bits(pdmclk->twl6040, reg, reset_mask); | ||
| 51 | if (ret < 0) | ||
| 52 | return ret; | ||
| 53 | |||
| 54 | ret = twl6040_clear_bits(pdmclk->twl6040, reg, reset_mask); | ||
| 55 | if (ret < 0) | ||
| 56 | return ret; | ||
| 57 | |||
| 58 | return 0; | ||
| 59 | } | ||
| 60 | |||
| 61 | /* | ||
| 62 | * TWL6040A2 Phoenix Audio IC erratum #6: "PDM Clock Generation Issue At | ||
| 63 | * Cold Temperature". This affects cold boot and deeper idle states it | ||
| 64 | * seems. The workaround consists of resetting HPPLL and LPPLL. | ||
| 65 | */ | ||
| 66 | static int twl6040_pdmclk_quirk_reset_clocks(struct twl6040_pdmclk *pdmclk) | ||
| 67 | { | ||
| 68 | int ret; | ||
| 69 | |||
| 70 | ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_HPPLLCTL); | ||
| 71 | if (ret) | ||
| 72 | return ret; | ||
| 73 | |||
| 74 | ret = twl6040_pdmclk_reset_one_clock(pdmclk, TWL6040_REG_LPPLLCTL); | ||
| 75 | if (ret) | ||
| 76 | return ret; | ||
| 77 | |||
| 78 | return 0; | ||
| 79 | } | ||
| 80 | |||
| 44 | static int twl6040_pdmclk_prepare(struct clk_hw *hw) | 81 | static int twl6040_pdmclk_prepare(struct clk_hw *hw) |
| 45 | { | 82 | { |
| 46 | struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk, | 83 | struct twl6040_pdmclk *pdmclk = container_of(hw, struct twl6040_pdmclk, |
| @@ -48,8 +85,20 @@ static int twl6040_pdmclk_prepare(struct clk_hw *hw) | |||
| 48 | int ret; | 85 | int ret; |
| 49 | 86 | ||
| 50 | ret = twl6040_power(pdmclk->twl6040, 1); | 87 | ret = twl6040_power(pdmclk->twl6040, 1); |
| 51 | if (!ret) | 88 | if (ret) |
| 52 | pdmclk->enabled = 1; | 89 | return ret; |
| 90 | |||
| 91 | ret = twl6040_pdmclk_quirk_reset_clocks(pdmclk); | ||
| 92 | if (ret) | ||
| 93 | goto out_err; | ||
| 94 | |||
| 95 | pdmclk->enabled = 1; | ||
| 96 | |||
| 97 | return 0; | ||
| 98 | |||
| 99 | out_err: | ||
| 100 | dev_err(pdmclk->dev, "%s: error %i\n", __func__, ret); | ||
| 101 | twl6040_power(pdmclk->twl6040, 0); | ||
| 53 | 102 | ||
| 54 | return ret; | 103 | return ret; |
| 55 | } | 104 | } |
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index d2477a5058ac..96053a96fe2f 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
| @@ -57,6 +57,7 @@ struct clk_core { | |||
| 57 | struct clk_core *new_child; | 57 | struct clk_core *new_child; |
| 58 | unsigned long flags; | 58 | unsigned long flags; |
| 59 | bool orphan; | 59 | bool orphan; |
| 60 | bool rpm_enabled; | ||
| 60 | unsigned int enable_count; | 61 | unsigned int enable_count; |
| 61 | unsigned int prepare_count; | 62 | unsigned int prepare_count; |
| 62 | unsigned int protect_count; | 63 | unsigned int protect_count; |
| @@ -81,6 +82,7 @@ struct clk_core { | |||
| 81 | 82 | ||
| 82 | struct clk { | 83 | struct clk { |
| 83 | struct clk_core *core; | 84 | struct clk_core *core; |
| 85 | struct device *dev; | ||
| 84 | const char *dev_id; | 86 | const char *dev_id; |
| 85 | const char *con_id; | 87 | const char *con_id; |
| 86 | unsigned long min_rate; | 88 | unsigned long min_rate; |
| @@ -92,9 +94,9 @@ struct clk { | |||
| 92 | /*** runtime pm ***/ | 94 | /*** runtime pm ***/ |
| 93 | static int clk_pm_runtime_get(struct clk_core *core) | 95 | static int clk_pm_runtime_get(struct clk_core *core) |
| 94 | { | 96 | { |
| 95 | int ret = 0; | 97 | int ret; |
| 96 | 98 | ||
| 97 | if (!core->dev) | 99 | if (!core->rpm_enabled) |
| 98 | return 0; | 100 | return 0; |
| 99 | 101 | ||
| 100 | ret = pm_runtime_get_sync(core->dev); | 102 | ret = pm_runtime_get_sync(core->dev); |
| @@ -103,7 +105,7 @@ static int clk_pm_runtime_get(struct clk_core *core) | |||
| 103 | 105 | ||
| 104 | static void clk_pm_runtime_put(struct clk_core *core) | 106 | static void clk_pm_runtime_put(struct clk_core *core) |
| 105 | { | 107 | { |
| 106 | if (!core->dev) | 108 | if (!core->rpm_enabled) |
| 107 | return; | 109 | return; |
| 108 | 110 | ||
| 109 | pm_runtime_put_sync(core->dev); | 111 | pm_runtime_put_sync(core->dev); |
| @@ -223,7 +225,7 @@ static bool clk_core_is_enabled(struct clk_core *core) | |||
| 223 | * taking enable spinlock, but the below check is needed if one tries | 225 | * taking enable spinlock, but the below check is needed if one tries |
| 224 | * to call it from other places. | 226 | * to call it from other places. |
| 225 | */ | 227 | */ |
| 226 | if (core->dev) { | 228 | if (core->rpm_enabled) { |
| 227 | pm_runtime_get_noresume(core->dev); | 229 | pm_runtime_get_noresume(core->dev); |
| 228 | if (!pm_runtime_active(core->dev)) { | 230 | if (!pm_runtime_active(core->dev)) { |
| 229 | ret = false; | 231 | ret = false; |
| @@ -233,7 +235,7 @@ static bool clk_core_is_enabled(struct clk_core *core) | |||
| 233 | 235 | ||
| 234 | ret = core->ops->is_enabled(core->hw); | 236 | ret = core->ops->is_enabled(core->hw); |
| 235 | done: | 237 | done: |
| 236 | if (core->dev) | 238 | if (core->rpm_enabled) |
| 237 | pm_runtime_put(core->dev); | 239 | pm_runtime_put(core->dev); |
| 238 | 240 | ||
| 239 | return ret; | 241 | return ret; |
| @@ -394,16 +396,19 @@ bool clk_hw_is_prepared(const struct clk_hw *hw) | |||
| 394 | { | 396 | { |
| 395 | return clk_core_is_prepared(hw->core); | 397 | return clk_core_is_prepared(hw->core); |
| 396 | } | 398 | } |
| 399 | EXPORT_SYMBOL_GPL(clk_hw_is_prepared); | ||
| 397 | 400 | ||
| 398 | bool clk_hw_rate_is_protected(const struct clk_hw *hw) | 401 | bool clk_hw_rate_is_protected(const struct clk_hw *hw) |
| 399 | { | 402 | { |
| 400 | return clk_core_rate_is_protected(hw->core); | 403 | return clk_core_rate_is_protected(hw->core); |
| 401 | } | 404 | } |
| 405 | EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected); | ||
| 402 | 406 | ||
| 403 | bool clk_hw_is_enabled(const struct clk_hw *hw) | 407 | bool clk_hw_is_enabled(const struct clk_hw *hw) |
| 404 | { | 408 | { |
| 405 | return clk_core_is_enabled(hw->core); | 409 | return clk_core_is_enabled(hw->core); |
| 406 | } | 410 | } |
| 411 | EXPORT_SYMBOL_GPL(clk_hw_is_enabled); | ||
| 407 | 412 | ||
| 408 | bool __clk_is_enabled(struct clk *clk) | 413 | bool __clk_is_enabled(struct clk *clk) |
| 409 | { | 414 | { |
| @@ -3209,43 +3214,106 @@ unlock: | |||
| 3209 | return ret; | 3214 | return ret; |
| 3210 | } | 3215 | } |
| 3211 | 3216 | ||
| 3212 | struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, | 3217 | /** |
| 3218 | * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core | ||
| 3219 | * @core: clk to add consumer to | ||
| 3220 | * @clk: consumer to link to a clk | ||
| 3221 | */ | ||
| 3222 | static void clk_core_link_consumer(struct clk_core *core, struct clk *clk) | ||
| 3223 | { | ||
| 3224 | clk_prepare_lock(); | ||
| 3225 | hlist_add_head(&clk->clks_node, &core->clks); | ||
| 3226 | clk_prepare_unlock(); | ||
| 3227 | } | ||
| 3228 | |||
| 3229 | /** | ||
| 3230 | * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core | ||
| 3231 | * @clk: consumer to unlink | ||
| 3232 | */ | ||
| 3233 | static void clk_core_unlink_consumer(struct clk *clk) | ||
| 3234 | { | ||
| 3235 | lockdep_assert_held(&prepare_lock); | ||
| 3236 | hlist_del(&clk->clks_node); | ||
| 3237 | } | ||
| 3238 | |||
| 3239 | /** | ||
| 3240 | * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core | ||
| 3241 | * @core: clk to allocate a consumer for | ||
| 3242 | * @dev_id: string describing device name | ||
| 3243 | * @con_id: connection ID string on device | ||
| 3244 | * | ||
| 3245 | * Returns: clk consumer left unlinked from the consumer list | ||
| 3246 | */ | ||
| 3247 | static struct clk *alloc_clk(struct clk_core *core, const char *dev_id, | ||
| 3213 | const char *con_id) | 3248 | const char *con_id) |
| 3214 | { | 3249 | { |
| 3215 | struct clk *clk; | 3250 | struct clk *clk; |
| 3216 | 3251 | ||
| 3217 | /* This is to allow this function to be chained to others */ | ||
| 3218 | if (IS_ERR_OR_NULL(hw)) | ||
| 3219 | return ERR_CAST(hw); | ||
| 3220 | |||
| 3221 | clk = kzalloc(sizeof(*clk), GFP_KERNEL); | 3252 | clk = kzalloc(sizeof(*clk), GFP_KERNEL); |
| 3222 | if (!clk) | 3253 | if (!clk) |
| 3223 | return ERR_PTR(-ENOMEM); | 3254 | return ERR_PTR(-ENOMEM); |
| 3224 | 3255 | ||
| 3225 | clk->core = hw->core; | 3256 | clk->core = core; |
| 3226 | clk->dev_id = dev_id; | 3257 | clk->dev_id = dev_id; |
| 3227 | clk->con_id = kstrdup_const(con_id, GFP_KERNEL); | 3258 | clk->con_id = kstrdup_const(con_id, GFP_KERNEL); |
| 3228 | clk->max_rate = ULONG_MAX; | 3259 | clk->max_rate = ULONG_MAX; |
| 3229 | 3260 | ||
| 3230 | clk_prepare_lock(); | ||
| 3231 | hlist_add_head(&clk->clks_node, &hw->core->clks); | ||
| 3232 | clk_prepare_unlock(); | ||
| 3233 | |||
| 3234 | return clk; | 3261 | return clk; |
| 3235 | } | 3262 | } |
| 3236 | 3263 | ||
| 3237 | /* keep in sync with __clk_put */ | 3264 | /** |
| 3238 | void __clk_free_clk(struct clk *clk) | 3265 | * free_clk - Free a clk consumer |
| 3266 | * @clk: clk consumer to free | ||
| 3267 | * | ||
| 3268 | * Note, this assumes the clk has been unlinked from the clk_core consumer | ||
| 3269 | * list. | ||
| 3270 | */ | ||
| 3271 | static void free_clk(struct clk *clk) | ||
| 3239 | { | 3272 | { |
| 3240 | clk_prepare_lock(); | ||
| 3241 | hlist_del(&clk->clks_node); | ||
| 3242 | clk_prepare_unlock(); | ||
| 3243 | |||
| 3244 | kfree_const(clk->con_id); | 3273 | kfree_const(clk->con_id); |
| 3245 | kfree(clk); | 3274 | kfree(clk); |
| 3246 | } | 3275 | } |
| 3247 | 3276 | ||
| 3248 | /** | 3277 | /** |
| 3278 | * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given | ||
| 3279 | * a clk_hw | ||
| 3280 | * @dev: clk consumer device | ||
| 3281 | * @hw: clk_hw associated with the clk being consumed | ||
| 3282 | * @dev_id: string describing device name | ||
| 3283 | * @con_id: connection ID string on device | ||
| 3284 | * | ||
| 3285 | * This is the main function used to create a clk pointer for use by clk | ||
| 3286 | * consumers. It connects a consumer to the clk_core and clk_hw structures | ||
| 3287 | * used by the framework and clk provider respectively. | ||
| 3288 | */ | ||
| 3289 | struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, | ||
| 3290 | const char *dev_id, const char *con_id) | ||
| 3291 | { | ||
| 3292 | struct clk *clk; | ||
| 3293 | struct clk_core *core; | ||
| 3294 | |||
| 3295 | /* This is to allow this function to be chained to others */ | ||
| 3296 | if (IS_ERR_OR_NULL(hw)) | ||
| 3297 | return ERR_CAST(hw); | ||
| 3298 | |||
| 3299 | core = hw->core; | ||
| 3300 | clk = alloc_clk(core, dev_id, con_id); | ||
| 3301 | if (IS_ERR(clk)) | ||
| 3302 | return clk; | ||
| 3303 | clk->dev = dev; | ||
| 3304 | |||
| 3305 | if (!try_module_get(core->owner)) { | ||
| 3306 | free_clk(clk); | ||
| 3307 | return ERR_PTR(-ENOENT); | ||
| 3308 | } | ||
| 3309 | |||
| 3310 | kref_get(&core->ref); | ||
| 3311 | clk_core_link_consumer(core, clk); | ||
| 3312 | |||
| 3313 | return clk; | ||
| 3314 | } | ||
| 3315 | |||
| 3316 | /** | ||
| 3249 | * clk_register - allocate a new clock, register it and return an opaque cookie | 3317 | * clk_register - allocate a new clock, register it and return an opaque cookie |
| 3250 | * @dev: device that is registering this clock | 3318 | * @dev: device that is registering this clock |
| 3251 | * @hw: link to hardware-specific clock data | 3319 | * @hw: link to hardware-specific clock data |
| @@ -3280,7 +3348,8 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw) | |||
| 3280 | core->ops = hw->init->ops; | 3348 | core->ops = hw->init->ops; |
| 3281 | 3349 | ||
| 3282 | if (dev && pm_runtime_enabled(dev)) | 3350 | if (dev && pm_runtime_enabled(dev)) |
| 3283 | core->dev = dev; | 3351 | core->rpm_enabled = true; |
| 3352 | core->dev = dev; | ||
| 3284 | if (dev && dev->driver) | 3353 | if (dev && dev->driver) |
| 3285 | core->owner = dev->driver->owner; | 3354 | core->owner = dev->driver->owner; |
| 3286 | core->hw = hw; | 3355 | core->hw = hw; |
| @@ -3320,17 +3389,27 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw) | |||
| 3320 | 3389 | ||
| 3321 | INIT_HLIST_HEAD(&core->clks); | 3390 | INIT_HLIST_HEAD(&core->clks); |
| 3322 | 3391 | ||
| 3323 | hw->clk = __clk_create_clk(hw, NULL, NULL); | 3392 | /* |
| 3393 | * Don't call clk_hw_create_clk() here because that would pin the | ||
| 3394 | * provider module to itself and prevent it from ever being removed. | ||
| 3395 | */ | ||
| 3396 | hw->clk = alloc_clk(core, NULL, NULL); | ||
| 3324 | if (IS_ERR(hw->clk)) { | 3397 | if (IS_ERR(hw->clk)) { |
| 3325 | ret = PTR_ERR(hw->clk); | 3398 | ret = PTR_ERR(hw->clk); |
| 3326 | goto fail_parents; | 3399 | goto fail_parents; |
| 3327 | } | 3400 | } |
| 3328 | 3401 | ||
| 3402 | clk_core_link_consumer(hw->core, hw->clk); | ||
| 3403 | |||
| 3329 | ret = __clk_core_init(core); | 3404 | ret = __clk_core_init(core); |
| 3330 | if (!ret) | 3405 | if (!ret) |
| 3331 | return hw->clk; | 3406 | return hw->clk; |
| 3332 | 3407 | ||
| 3333 | __clk_free_clk(hw->clk); | 3408 | clk_prepare_lock(); |
| 3409 | clk_core_unlink_consumer(hw->clk); | ||
| 3410 | clk_prepare_unlock(); | ||
| 3411 | |||
| 3412 | free_clk(hw->clk); | ||
| 3334 | hw->clk = NULL; | 3413 | hw->clk = NULL; |
| 3335 | 3414 | ||
| 3336 | fail_parents: | 3415 | fail_parents: |
| @@ -3601,20 +3680,7 @@ EXPORT_SYMBOL_GPL(devm_clk_hw_unregister); | |||
| 3601 | /* | 3680 | /* |
| 3602 | * clkdev helpers | 3681 | * clkdev helpers |
| 3603 | */ | 3682 | */ |
| 3604 | int __clk_get(struct clk *clk) | ||
| 3605 | { | ||
| 3606 | struct clk_core *core = !clk ? NULL : clk->core; | ||
| 3607 | |||
| 3608 | if (core) { | ||
| 3609 | if (!try_module_get(core->owner)) | ||
| 3610 | return 0; | ||
| 3611 | 3683 | ||
| 3612 | kref_get(&core->ref); | ||
| 3613 | } | ||
| 3614 | return 1; | ||
| 3615 | } | ||
| 3616 | |||
| 3617 | /* keep in sync with __clk_free_clk */ | ||
| 3618 | void __clk_put(struct clk *clk) | 3684 | void __clk_put(struct clk *clk) |
| 3619 | { | 3685 | { |
| 3620 | struct module *owner; | 3686 | struct module *owner; |
| @@ -3648,8 +3714,7 @@ void __clk_put(struct clk *clk) | |||
| 3648 | 3714 | ||
| 3649 | module_put(owner); | 3715 | module_put(owner); |
| 3650 | 3716 | ||
| 3651 | kfree_const(clk->con_id); | 3717 | free_clk(clk); |
| 3652 | kfree(clk); | ||
| 3653 | } | 3718 | } |
| 3654 | 3719 | ||
| 3655 | /*** clk rate change notifiers ***/ | 3720 | /*** clk rate change notifiers ***/ |
| @@ -4006,6 +4071,49 @@ void devm_of_clk_del_provider(struct device *dev) | |||
| 4006 | } | 4071 | } |
| 4007 | EXPORT_SYMBOL(devm_of_clk_del_provider); | 4072 | EXPORT_SYMBOL(devm_of_clk_del_provider); |
| 4008 | 4073 | ||
| 4074 | /* | ||
| 4075 | * Beware the return values when np is valid, but no clock provider is found. | ||
| 4076 | * If name == NULL, the function returns -ENOENT. | ||
| 4077 | * If name != NULL, the function returns -EINVAL. This is because | ||
| 4078 | * of_parse_phandle_with_args() is called even if of_property_match_string() | ||
| 4079 | * returns an error. | ||
| 4080 | */ | ||
| 4081 | static int of_parse_clkspec(const struct device_node *np, int index, | ||
| 4082 | const char *name, struct of_phandle_args *out_args) | ||
| 4083 | { | ||
| 4084 | int ret = -ENOENT; | ||
| 4085 | |||
| 4086 | /* Walk up the tree of devices looking for a clock property that matches */ | ||
| 4087 | while (np) { | ||
| 4088 | /* | ||
| 4089 | * For named clocks, first look up the name in the | ||
| 4090 | * "clock-names" property. If it cannot be found, then index | ||
| 4091 | * will be an error code and of_parse_phandle_with_args() will | ||
| 4092 | * return -EINVAL. | ||
| 4093 | */ | ||
| 4094 | if (name) | ||
| 4095 | index = of_property_match_string(np, "clock-names", name); | ||
| 4096 | ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells", | ||
| 4097 | index, out_args); | ||
| 4098 | if (!ret) | ||
| 4099 | break; | ||
| 4100 | if (name && index >= 0) | ||
| 4101 | break; | ||
| 4102 | |||
| 4103 | /* | ||
| 4104 | * No matching clock found on this node. If the parent node | ||
| 4105 | * has a "clock-ranges" property, then we can try one of its | ||
| 4106 | * clocks. | ||
| 4107 | */ | ||
| 4108 | np = np->parent; | ||
| 4109 | if (np && !of_get_property(np, "clock-ranges", NULL)) | ||
| 4110 | break; | ||
| 4111 | index = 0; | ||
| 4112 | } | ||
| 4113 | |||
| 4114 | return ret; | ||
| 4115 | } | ||
| 4116 | |||
| 4009 | static struct clk_hw * | 4117 | static struct clk_hw * |
| 4010 | __of_clk_get_hw_from_provider(struct of_clk_provider *provider, | 4118 | __of_clk_get_hw_from_provider(struct of_clk_provider *provider, |
| 4011 | struct of_phandle_args *clkspec) | 4119 | struct of_phandle_args *clkspec) |
| @@ -4021,36 +4129,26 @@ __of_clk_get_hw_from_provider(struct of_clk_provider *provider, | |||
| 4021 | return __clk_get_hw(clk); | 4129 | return __clk_get_hw(clk); |
| 4022 | } | 4130 | } |
| 4023 | 4131 | ||
| 4024 | struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec, | 4132 | static struct clk_hw * |
| 4025 | const char *dev_id, const char *con_id) | 4133 | of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) |
| 4026 | { | 4134 | { |
| 4027 | struct of_clk_provider *provider; | 4135 | struct of_clk_provider *provider; |
| 4028 | struct clk *clk = ERR_PTR(-EPROBE_DEFER); | 4136 | struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER); |
| 4029 | struct clk_hw *hw; | ||
| 4030 | 4137 | ||
| 4031 | if (!clkspec) | 4138 | if (!clkspec) |
| 4032 | return ERR_PTR(-EINVAL); | 4139 | return ERR_PTR(-EINVAL); |
| 4033 | 4140 | ||
| 4034 | /* Check if we have such a provider in our array */ | ||
| 4035 | mutex_lock(&of_clk_mutex); | 4141 | mutex_lock(&of_clk_mutex); |
| 4036 | list_for_each_entry(provider, &of_clk_providers, link) { | 4142 | list_for_each_entry(provider, &of_clk_providers, link) { |
| 4037 | if (provider->node == clkspec->np) { | 4143 | if (provider->node == clkspec->np) { |
| 4038 | hw = __of_clk_get_hw_from_provider(provider, clkspec); | 4144 | hw = __of_clk_get_hw_from_provider(provider, clkspec); |
| 4039 | clk = __clk_create_clk(hw, dev_id, con_id); | 4145 | if (!IS_ERR(hw)) |
| 4040 | } | 4146 | break; |
| 4041 | |||
| 4042 | if (!IS_ERR(clk)) { | ||
| 4043 | if (!__clk_get(clk)) { | ||
| 4044 | __clk_free_clk(clk); | ||
| 4045 | clk = ERR_PTR(-ENOENT); | ||
| 4046 | } | ||
| 4047 | |||
| 4048 | break; | ||
| 4049 | } | 4147 | } |
| 4050 | } | 4148 | } |
| 4051 | mutex_unlock(&of_clk_mutex); | 4149 | mutex_unlock(&of_clk_mutex); |
| 4052 | 4150 | ||
| 4053 | return clk; | 4151 | return hw; |
| 4054 | } | 4152 | } |
| 4055 | 4153 | ||
| 4056 | /** | 4154 | /** |
| @@ -4063,10 +4161,62 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec, | |||
| 4063 | */ | 4161 | */ |
| 4064 | struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) | 4162 | struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) |
| 4065 | { | 4163 | { |
| 4066 | return __of_clk_get_from_provider(clkspec, NULL, __func__); | 4164 | struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec); |
| 4165 | |||
| 4166 | return clk_hw_create_clk(NULL, hw, NULL, __func__); | ||
| 4067 | } | 4167 | } |
| 4068 | EXPORT_SYMBOL_GPL(of_clk_get_from_provider); | 4168 | EXPORT_SYMBOL_GPL(of_clk_get_from_provider); |
| 4069 | 4169 | ||
| 4170 | struct clk_hw *of_clk_get_hw(struct device_node *np, int index, | ||
| 4171 | const char *con_id) | ||
| 4172 | { | ||
| 4173 | int ret; | ||
| 4174 | struct clk_hw *hw; | ||
| 4175 | struct of_phandle_args clkspec; | ||
| 4176 | |||
| 4177 | ret = of_parse_clkspec(np, index, con_id, &clkspec); | ||
| 4178 | if (ret) | ||
| 4179 | return ERR_PTR(ret); | ||
| 4180 | |||
| 4181 | hw = of_clk_get_hw_from_clkspec(&clkspec); | ||
| 4182 | of_node_put(clkspec.np); | ||
| 4183 | |||
| 4184 | return hw; | ||
| 4185 | } | ||
| 4186 | |||
| 4187 | static struct clk *__of_clk_get(struct device_node *np, | ||
| 4188 | int index, const char *dev_id, | ||
| 4189 | const char *con_id) | ||
| 4190 | { | ||
| 4191 | struct clk_hw *hw = of_clk_get_hw(np, index, con_id); | ||
| 4192 | |||
| 4193 | return clk_hw_create_clk(NULL, hw, dev_id, con_id); | ||
| 4194 | } | ||
| 4195 | |||
| 4196 | struct clk *of_clk_get(struct device_node *np, int index) | ||
| 4197 | { | ||
| 4198 | return __of_clk_get(np, index, np->full_name, NULL); | ||
| 4199 | } | ||
| 4200 | EXPORT_SYMBOL(of_clk_get); | ||
| 4201 | |||
| 4202 | /** | ||
| 4203 | * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node | ||
| 4204 | * @np: pointer to clock consumer node | ||
| 4205 | * @name: name of consumer's clock input, or NULL for the first clock reference | ||
| 4206 | * | ||
| 4207 | * This function parses the clocks and clock-names properties, | ||
| 4208 | * and uses them to look up the struct clk from the registered list of clock | ||
| 4209 | * providers. | ||
| 4210 | */ | ||
| 4211 | struct clk *of_clk_get_by_name(struct device_node *np, const char *name) | ||
| 4212 | { | ||
| 4213 | if (!np) | ||
| 4214 | return ERR_PTR(-ENOENT); | ||
| 4215 | |||
| 4216 | return __of_clk_get(np, 0, np->full_name, name); | ||
| 4217 | } | ||
| 4218 | EXPORT_SYMBOL(of_clk_get_by_name); | ||
| 4219 | |||
| 4070 | /** | 4220 | /** |
| 4071 | * of_clk_get_parent_count() - Count the number of clocks a device node has | 4221 | * of_clk_get_parent_count() - Count the number of clocks a device node has |
| 4072 | * @np: device node to count | 4222 | * @np: device node to count |
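Annotation: with this change, of_clk_get() and of_clk_get_by_name() live in clk.c and resolve a phandle to a clk_hw first via of_clk_get_hw(), walking parent nodes through "clock-ranges" as described in the of_parse_clkspec() comment. A minimal consumer-side sketch, assuming the node has a "baud" entry in its clock-names property (the name is illustrative):

#include <linux/clk.h>
#include <linux/of.h>

static int example_enable_baud(struct device_node *np)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get_by_name(np, "baud");	/* may return -EPROBE_DEFER */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		clk_put(clk);

	return ret;
}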
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h index b02f5e604e69..553f531cc232 100644 --- a/drivers/clk/clk.h +++ b/drivers/clk/clk.h | |||
| @@ -5,31 +5,36 @@ | |||
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | struct clk_hw; | 7 | struct clk_hw; |
| 8 | struct device; | ||
| 9 | struct of_phandle_args; | ||
| 8 | 10 | ||
| 9 | #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) | 11 | #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) |
| 10 | struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec, | 12 | struct clk_hw *of_clk_get_hw(struct device_node *np, |
| 11 | const char *dev_id, const char *con_id); | 13 | int index, const char *con_id); |
| 14 | #else /* !CONFIG_COMMON_CLK || !CONFIG_OF */ | ||
| 15 | static inline struct clk_hw *of_clk_get_hw(struct device_node *np, | ||
| 16 | int index, const char *con_id) | ||
| 17 | { | ||
| 18 | return ERR_PTR(-ENOENT); | ||
| 19 | } | ||
| 12 | #endif | 20 | #endif |
| 13 | 21 | ||
| 14 | #ifdef CONFIG_COMMON_CLK | 22 | #ifdef CONFIG_COMMON_CLK |
| 15 | struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, | 23 | struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw, |
| 16 | const char *con_id); | 24 | const char *dev_id, const char *con_id); |
| 17 | void __clk_free_clk(struct clk *clk); | ||
| 18 | int __clk_get(struct clk *clk); | ||
| 19 | void __clk_put(struct clk *clk); | 25 | void __clk_put(struct clk *clk); |
| 20 | #else | 26 | #else |
| 21 | /* All these casts to avoid ifdefs in clkdev... */ | 27 | /* All these casts to avoid ifdefs in clkdev... */ |
| 22 | static inline struct clk * | 28 | static inline struct clk * |
| 23 | __clk_create_clk(struct clk_hw *hw, const char *dev_id, const char *con_id) | 29 | clk_hw_create_clk(struct device *dev, struct clk_hw *hw, const char *dev_id, |
| 30 | const char *con_id) | ||
| 24 | { | 31 | { |
| 25 | return (struct clk *)hw; | 32 | return (struct clk *)hw; |
| 26 | } | 33 | } |
| 27 | static inline void __clk_free_clk(struct clk *clk) { } | ||
| 28 | static struct clk_hw *__clk_get_hw(struct clk *clk) | 34 | static struct clk_hw *__clk_get_hw(struct clk *clk) |
| 29 | { | 35 | { |
| 30 | return (struct clk_hw *)clk; | 36 | return (struct clk_hw *)clk; |
| 31 | } | 37 | } |
| 32 | static inline int __clk_get(struct clk *clk) { return 1; } | ||
| 33 | static inline void __clk_put(struct clk *clk) { } | 38 | static inline void __clk_put(struct clk *clk) { } |
| 34 | 39 | ||
| 35 | #endif | 40 | #endif |
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 9ab3db8b3988..8c4435c53f09 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c | |||
| @@ -27,99 +27,6 @@ | |||
| 27 | static LIST_HEAD(clocks); | 27 | static LIST_HEAD(clocks); |
| 28 | static DEFINE_MUTEX(clocks_mutex); | 28 | static DEFINE_MUTEX(clocks_mutex); |
| 29 | 29 | ||
| 30 | #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) | ||
| 31 | static struct clk *__of_clk_get(struct device_node *np, int index, | ||
| 32 | const char *dev_id, const char *con_id) | ||
| 33 | { | ||
| 34 | struct of_phandle_args clkspec; | ||
| 35 | struct clk *clk; | ||
| 36 | int rc; | ||
| 37 | |||
| 38 | rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index, | ||
| 39 | &clkspec); | ||
| 40 | if (rc) | ||
| 41 | return ERR_PTR(rc); | ||
| 42 | |||
| 43 | clk = __of_clk_get_from_provider(&clkspec, dev_id, con_id); | ||
| 44 | of_node_put(clkspec.np); | ||
| 45 | |||
| 46 | return clk; | ||
| 47 | } | ||
| 48 | |||
| 49 | struct clk *of_clk_get(struct device_node *np, int index) | ||
| 50 | { | ||
| 51 | return __of_clk_get(np, index, np->full_name, NULL); | ||
| 52 | } | ||
| 53 | EXPORT_SYMBOL(of_clk_get); | ||
| 54 | |||
| 55 | static struct clk *__of_clk_get_by_name(struct device_node *np, | ||
| 56 | const char *dev_id, | ||
| 57 | const char *name) | ||
| 58 | { | ||
| 59 | struct clk *clk = ERR_PTR(-ENOENT); | ||
| 60 | |||
| 61 | /* Walk up the tree of devices looking for a clock that matches */ | ||
| 62 | while (np) { | ||
| 63 | int index = 0; | ||
| 64 | |||
| 65 | /* | ||
| 66 | * For named clocks, first look up the name in the | ||
| 67 | * "clock-names" property. If it cannot be found, then | ||
| 68 | * index will be an error code, and of_clk_get() will fail. | ||
| 69 | */ | ||
| 70 | if (name) | ||
| 71 | index = of_property_match_string(np, "clock-names", name); | ||
| 72 | clk = __of_clk_get(np, index, dev_id, name); | ||
| 73 | if (!IS_ERR(clk)) { | ||
| 74 | break; | ||
| 75 | } else if (name && index >= 0) { | ||
| 76 | if (PTR_ERR(clk) != -EPROBE_DEFER) | ||
| 77 | pr_err("ERROR: could not get clock %pOF:%s(%i)\n", | ||
| 78 | np, name ? name : "", index); | ||
| 79 | return clk; | ||
| 80 | } | ||
| 81 | |||
| 82 | /* | ||
| 83 | * No matching clock found on this node. If the parent node | ||
| 84 | * has a "clock-ranges" property, then we can try one of its | ||
| 85 | * clocks. | ||
| 86 | */ | ||
| 87 | np = np->parent; | ||
| 88 | if (np && !of_get_property(np, "clock-ranges", NULL)) | ||
| 89 | break; | ||
| 90 | } | ||
| 91 | |||
| 92 | return clk; | ||
| 93 | } | ||
| 94 | |||
| 95 | /** | ||
| 96 | * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node | ||
| 97 | * @np: pointer to clock consumer node | ||
| 98 | * @name: name of consumer's clock input, or NULL for the first clock reference | ||
| 99 | * | ||
| 100 | * This function parses the clocks and clock-names properties, | ||
| 101 | * and uses them to look up the struct clk from the registered list of clock | ||
| 102 | * providers. | ||
| 103 | */ | ||
| 104 | struct clk *of_clk_get_by_name(struct device_node *np, const char *name) | ||
| 105 | { | ||
| 106 | if (!np) | ||
| 107 | return ERR_PTR(-ENOENT); | ||
| 108 | |||
| 109 | return __of_clk_get_by_name(np, np->full_name, name); | ||
| 110 | } | ||
| 111 | EXPORT_SYMBOL(of_clk_get_by_name); | ||
| 112 | |||
| 113 | #else /* defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) */ | ||
| 114 | |||
| 115 | static struct clk *__of_clk_get_by_name(struct device_node *np, | ||
| 116 | const char *dev_id, | ||
| 117 | const char *name) | ||
| 118 | { | ||
| 119 | return ERR_PTR(-ENOENT); | ||
| 120 | } | ||
| 121 | #endif | ||
| 122 | |||
| 123 | /* | 30 | /* |
| 124 | * Find the correct struct clk for the device and connection ID. | 31 | * Find the correct struct clk for the device and connection ID. |
| 125 | * We do slightly fuzzy matching here: | 32 | * We do slightly fuzzy matching here: |
| @@ -163,7 +70,8 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id) | |||
| 163 | return cl; | 70 | return cl; |
| 164 | } | 71 | } |
| 165 | 72 | ||
| 166 | struct clk *clk_get_sys(const char *dev_id, const char *con_id) | 73 | static struct clk *__clk_get_sys(struct device *dev, const char *dev_id, |
| 74 | const char *con_id) | ||
| 167 | { | 75 | { |
| 168 | struct clk_lookup *cl; | 76 | struct clk_lookup *cl; |
| 169 | struct clk *clk = NULL; | 77 | struct clk *clk = NULL; |
| @@ -174,35 +82,33 @@ struct clk *clk_get_sys(const char *dev_id, const char *con_id) | |||
| 174 | if (!cl) | 82 | if (!cl) |
| 175 | goto out; | 83 | goto out; |
| 176 | 84 | ||
| 177 | clk = __clk_create_clk(cl->clk_hw, dev_id, con_id); | 85 | clk = clk_hw_create_clk(dev, cl->clk_hw, dev_id, con_id); |
| 178 | if (IS_ERR(clk)) | 86 | if (IS_ERR(clk)) |
| 179 | goto out; | ||
| 180 | |||
| 181 | if (!__clk_get(clk)) { | ||
| 182 | __clk_free_clk(clk); | ||
| 183 | cl = NULL; | 87 | cl = NULL; |
| 184 | goto out; | ||
| 185 | } | ||
| 186 | |||
| 187 | out: | 88 | out: |
| 188 | mutex_unlock(&clocks_mutex); | 89 | mutex_unlock(&clocks_mutex); |
| 189 | 90 | ||
| 190 | return cl ? clk : ERR_PTR(-ENOENT); | 91 | return cl ? clk : ERR_PTR(-ENOENT); |
| 191 | } | 92 | } |
| 93 | |||
| 94 | struct clk *clk_get_sys(const char *dev_id, const char *con_id) | ||
| 95 | { | ||
| 96 | return __clk_get_sys(NULL, dev_id, con_id); | ||
| 97 | } | ||
| 192 | EXPORT_SYMBOL(clk_get_sys); | 98 | EXPORT_SYMBOL(clk_get_sys); |
| 193 | 99 | ||
| 194 | struct clk *clk_get(struct device *dev, const char *con_id) | 100 | struct clk *clk_get(struct device *dev, const char *con_id) |
| 195 | { | 101 | { |
| 196 | const char *dev_id = dev ? dev_name(dev) : NULL; | 102 | const char *dev_id = dev ? dev_name(dev) : NULL; |
| 197 | struct clk *clk; | 103 | struct clk_hw *hw; |
| 198 | 104 | ||
| 199 | if (dev && dev->of_node) { | 105 | if (dev && dev->of_node) { |
| 200 | clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id); | 106 | hw = of_clk_get_hw(dev->of_node, 0, con_id); |
| 201 | if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER) | 107 | if (!IS_ERR(hw) || PTR_ERR(hw) == -EPROBE_DEFER) |
| 202 | return clk; | 108 | return clk_hw_create_clk(dev, hw, dev_id, con_id); |
| 203 | } | 109 | } |
| 204 | 110 | ||
| 205 | return clk_get_sys(dev_id, con_id); | 111 | return __clk_get_sys(dev, dev_id, con_id); |
| 206 | } | 112 | } |
| 207 | EXPORT_SYMBOL(clk_get); | 113 | EXPORT_SYMBOL(clk_get); |
| 208 | 114 | ||
| @@ -401,6 +307,23 @@ static struct clk_lookup *__clk_register_clkdev(struct clk_hw *hw, | |||
| 401 | return cl; | 307 | return cl; |
| 402 | } | 308 | } |
| 403 | 309 | ||
| 310 | static int do_clk_register_clkdev(struct clk_hw *hw, | ||
| 311 | struct clk_lookup **cl, const char *con_id, const char *dev_id) | ||
| 312 | { | ||
| 313 | if (IS_ERR(hw)) | ||
| 314 | return PTR_ERR(hw); | ||
| 315 | /* | ||
| 316 | * Since dev_id can be NULL, and NULL is handled specially, we must | ||
| 317 | * pass it as either a NULL format string, or with "%s". | ||
| 318 | */ | ||
| 319 | if (dev_id) | ||
| 320 | *cl = __clk_register_clkdev(hw, con_id, "%s", dev_id); | ||
| 321 | else | ||
| 322 | *cl = __clk_register_clkdev(hw, con_id, NULL); | ||
| 323 | |||
| 324 | return *cl ? 0 : -ENOMEM; | ||
| 325 | } | ||
| 326 | |||
| 404 | /** | 327 | /** |
| 405 | * clk_register_clkdev - register one clock lookup for a struct clk | 328 | * clk_register_clkdev - register one clock lookup for a struct clk |
| 406 | * @clk: struct clk to associate with all clk_lookups | 329 | * @clk: struct clk to associate with all clk_lookups |
| @@ -423,17 +346,8 @@ int clk_register_clkdev(struct clk *clk, const char *con_id, | |||
| 423 | if (IS_ERR(clk)) | 346 | if (IS_ERR(clk)) |
| 424 | return PTR_ERR(clk); | 347 | return PTR_ERR(clk); |
| 425 | 348 | ||
| 426 | /* | 349 | return do_clk_register_clkdev(__clk_get_hw(clk), &cl, con_id, |
| 427 | * Since dev_id can be NULL, and NULL is handled specially, we must | 350 | dev_id); |
| 428 | * pass it as either a NULL format string, or with "%s". | ||
| 429 | */ | ||
| 430 | if (dev_id) | ||
| 431 | cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, "%s", | ||
| 432 | dev_id); | ||
| 433 | else | ||
| 434 | cl = __clk_register_clkdev(__clk_get_hw(clk), con_id, NULL); | ||
| 435 | |||
| 436 | return cl ? 0 : -ENOMEM; | ||
| 437 | } | 351 | } |
| 438 | EXPORT_SYMBOL(clk_register_clkdev); | 352 | EXPORT_SYMBOL(clk_register_clkdev); |
| 439 | 353 | ||
| @@ -456,18 +370,75 @@ int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id, | |||
| 456 | { | 370 | { |
| 457 | struct clk_lookup *cl; | 371 | struct clk_lookup *cl; |
| 458 | 372 | ||
| 459 | if (IS_ERR(hw)) | 373 | return do_clk_register_clkdev(hw, &cl, con_id, dev_id); |
| 460 | return PTR_ERR(hw); | 374 | } |
| 375 | EXPORT_SYMBOL(clk_hw_register_clkdev); | ||
| 461 | 376 | ||
| 462 | /* | 377 | static void devm_clkdev_release(struct device *dev, void *res) |
| 463 | * Since dev_id can be NULL, and NULL is handled specially, we must | 378 | { |
| 464 | * pass it as either a NULL format string, or with "%s". | 379 | clkdev_drop(*(struct clk_lookup **)res); |
| 465 | */ | 380 | } |
| 466 | if (dev_id) | 381 | |
| 467 | cl = __clk_register_clkdev(hw, con_id, "%s", dev_id); | 382 | static int devm_clk_match_clkdev(struct device *dev, void *res, void *data) |
| 468 | else | 383 | { |
| 469 | cl = __clk_register_clkdev(hw, con_id, NULL); | 384 | struct clk_lookup **l = res; |
| 470 | 385 | ||
| 471 | return cl ? 0 : -ENOMEM; | 386 | return *l == data; |
| 472 | } | 387 | } |
| 473 | EXPORT_SYMBOL(clk_hw_register_clkdev); | 388 | |
| 389 | /** | ||
| 390 | * devm_clk_release_clkdev - Resource managed clkdev lookup release | ||
| 391 | * @dev: device this lookup is bound | ||
| 392 | * @con_id: connection ID string on device | ||
| 393 | * @dev_id: format string describing device name | ||
| 394 | * | ||
| 395 | * Drop the clkdev lookup created with devm_clk_hw_register_clkdev. | ||
| 396 | * Normally this function will not need to be called and the resource | ||
| 397 | * management code will ensure that the resource is freed. | ||
| 398 | */ | ||
| 399 | void devm_clk_release_clkdev(struct device *dev, const char *con_id, | ||
| 400 | const char *dev_id) | ||
| 401 | { | ||
| 402 | struct clk_lookup *cl; | ||
| 403 | int rval; | ||
| 404 | |||
| 405 | cl = clk_find(dev_id, con_id); | ||
| 406 | WARN_ON(!cl); | ||
| 407 | rval = devres_release(dev, devm_clkdev_release, | ||
| 408 | devm_clk_match_clkdev, cl); | ||
| 409 | WARN_ON(rval); | ||
| 410 | } | ||
| 411 | EXPORT_SYMBOL(devm_clk_release_clkdev); | ||
| 412 | |||
| 413 | /** | ||
| 414 | * devm_clk_hw_register_clkdev - managed clk lookup registration for clk_hw | ||
| 415 | * @dev: device this lookup is bound | ||
| 416 | * @hw: struct clk_hw to associate with all clk_lookups | ||
| 417 | * @con_id: connection ID string on device | ||
| 418 | * @dev_id: format string describing device name | ||
| 419 | * | ||
| 420 | * con_id or dev_id may be NULL as a wildcard, just as in the rest of | ||
| 421 | * clkdev. | ||
| 422 | * | ||
| 423 | * To make things easier for mass registration, we detect error clk_hws | ||
| 424 | * from a previous clk_hw_register_*() call, and return the error code for | ||
| 425 | * those. This is to permit this function to be called immediately | ||
| 426 | * after clk_hw_register_*(). | ||
| 427 | */ | ||
| 428 | int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw, | ||
| 429 | const char *con_id, const char *dev_id) | ||
| 430 | { | ||
| 431 | int rval = -ENOMEM; | ||
| 432 | struct clk_lookup **cl; | ||
| 433 | |||
| 434 | cl = devres_alloc(devm_clkdev_release, sizeof(*cl), GFP_KERNEL); | ||
| 435 | if (cl) { | ||
| 436 | rval = do_clk_register_clkdev(hw, cl, con_id, dev_id); | ||
| 437 | if (!rval) | ||
| 438 | devres_add(dev, cl); | ||
| 439 | else | ||
| 440 | devres_free(cl); | ||
| 441 | } | ||
| 442 | return rval; | ||
| 443 | } | ||
| 444 | EXPORT_SYMBOL(devm_clk_hw_register_clkdev); | ||
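Annotation: besides devm_clk_hw_register_clkdev(), the hunk adds devm_clk_release_clkdev() for the rare case where a driver needs to drop the managed lookup before it unbinds. A hedged one-liner, reusing the illustrative "example-clk" lookup from the sketch after the max77686 hunk:

	/* explicitly drop a lookup created by devm_clk_hw_register_clkdev() */
	devm_clk_release_clkdev(dev, "example-clk", NULL);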
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig index 4aae31a23449..0eaf41848280 100644 --- a/drivers/clk/imx/Kconfig +++ b/drivers/clk/imx/Kconfig | |||
| @@ -8,6 +8,12 @@ config MXC_CLK_SCU | |||
| 8 | bool | 8 | bool |
| 9 | depends on IMX_SCU | 9 | depends on IMX_SCU |
| 10 | 10 | ||
| 11 | config CLK_IMX8MM | ||
| 12 | bool "IMX8MM CCM Clock Driver" | ||
| 13 | depends on ARCH_MXC && ARM64 | ||
| 14 | help | ||
| 15 | Build the driver for i.MX8MM CCM Clock Driver | ||
| 16 | |||
| 11 | config CLK_IMX8MQ | 17 | config CLK_IMX8MQ |
| 12 | bool "IMX8MQ CCM Clock Driver" | 18 | bool "IMX8MQ CCM Clock Driver" |
| 13 | depends on ARCH_MXC && ARM64 | 19 | depends on ARCH_MXC && ARM64 |
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile index 73119fbfa547..0d5180fbe988 100644 --- a/drivers/clk/imx/Makefile +++ b/drivers/clk/imx/Makefile | |||
| @@ -18,12 +18,14 @@ obj-$(CONFIG_MXC_CLK) += \ | |||
| 18 | clk-pllv2.o \ | 18 | clk-pllv2.o \ |
| 19 | clk-pllv3.o \ | 19 | clk-pllv3.o \ |
| 20 | clk-pllv4.o \ | 20 | clk-pllv4.o \ |
| 21 | clk-sccg-pll.o | 21 | clk-sccg-pll.o \ |
| 22 | clk-pll14xx.o | ||
| 22 | 23 | ||
| 23 | obj-$(CONFIG_MXC_CLK_SCU) += \ | 24 | obj-$(CONFIG_MXC_CLK_SCU) += \ |
| 24 | clk-scu.o \ | 25 | clk-scu.o \ |
| 25 | clk-lpcg-scu.o | 26 | clk-lpcg-scu.o |
| 26 | 27 | ||
| 28 | obj-$(CONFIG_CLK_IMX8MM) += clk-imx8mm.o | ||
| 27 | obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o | 29 | obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o |
| 28 | obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o | 30 | obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o |
| 29 | 31 | ||
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c index 527ade1d6933..574fac1a169f 100644 --- a/drivers/clk/imx/clk-composite-8m.c +++ b/drivers/clk/imx/clk-composite-8m.c | |||
| @@ -123,7 +123,7 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = { | |||
| 123 | }; | 123 | }; |
| 124 | 124 | ||
| 125 | struct clk *imx8m_clk_composite_flags(const char *name, | 125 | struct clk *imx8m_clk_composite_flags(const char *name, |
| 126 | const char **parent_names, | 126 | const char * const *parent_names, |
| 127 | int num_parents, void __iomem *reg, | 127 | int num_parents, void __iomem *reg, |
| 128 | unsigned long flags) | 128 | unsigned long flags) |
| 129 | { | 129 | { |
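Annotation: the prototype change above only tightens const-ness; a "const char * const *" parameter promises not to modify the caller's array of parent-name pointers, so parent lists can be declared fully const, for example:

static const char * const example_parents[] = { "osc_24m", "sys_pll1_800m" };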
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c index fc8e782d817b..e91c826bce70 100644 --- a/drivers/clk/imx/clk-imx51-imx53.c +++ b/drivers/clk/imx/clk-imx51-imx53.c | |||
| @@ -428,6 +428,7 @@ static void __init mx51_clocks_init(struct device_node *np) | |||
| 428 | clk[IMX5_CLK_ESDHC4_PER_GATE] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14); | 428 | clk[IMX5_CLK_ESDHC4_PER_GATE] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14); |
| 429 | clk[IMX5_CLK_USB_PHY_GATE] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0); | 429 | clk[IMX5_CLK_USB_PHY_GATE] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0); |
| 430 | clk[IMX5_CLK_HSI2C_GATE] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22); | 430 | clk[IMX5_CLK_HSI2C_GATE] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22); |
| 431 | clk[IMX5_CLK_SCC2_IPG_GATE] = imx_clk_gate2("scc2_gate", "ipg", MXC_CCM_CCGR1, 30); | ||
| 431 | clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2_flags("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6, CLK_IS_CRITICAL); | 432 | clk[IMX5_CLK_MIPI_HSC1_GATE] = imx_clk_gate2_flags("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6, CLK_IS_CRITICAL); |
| 432 | clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2_flags("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8, CLK_IS_CRITICAL); | 433 | clk[IMX5_CLK_MIPI_HSC2_GATE] = imx_clk_gate2_flags("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8, CLK_IS_CRITICAL); |
| 433 | clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2_flags("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10, CLK_IS_CRITICAL); | 434 | clk[IMX5_CLK_MIPI_ESC_GATE] = imx_clk_gate2_flags("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10, CLK_IS_CRITICAL); |
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index 716eac3136b4..708e7c5590dd 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c | |||
| @@ -471,6 +471,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) | |||
| 471 | np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop"); | 471 | np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop"); |
| 472 | anatop_base = base = of_iomap(np, 0); | 472 | anatop_base = base = of_iomap(np, 0); |
| 473 | WARN_ON(!base); | 473 | WARN_ON(!base); |
| 474 | of_node_put(np); | ||
| 474 | 475 | ||
| 475 | /* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */ | 476 | /* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */ |
| 476 | if (clk_on_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) { | 477 | if (clk_on_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) { |
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c index 18527a335ace..91558b09bf9e 100644 --- a/drivers/clk/imx/clk-imx6sx.c +++ b/drivers/clk/imx/clk-imx6sx.c | |||
| @@ -151,6 +151,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node) | |||
| 151 | np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop"); | 151 | np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop"); |
| 152 | base = of_iomap(np, 0); | 152 | base = of_iomap(np, 0); |
| 153 | WARN_ON(!base); | 153 | WARN_ON(!base); |
| 154 | of_node_put(np); | ||
| 154 | 155 | ||
| 155 | clks[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); | 156 | clks[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); |
| 156 | clks[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); | 157 | clks[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); |
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index 06c105d580a4..cfbd8d4edb85 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c | |||
| @@ -404,6 +404,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) | |||
| 404 | np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop"); | 404 | np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop"); |
| 405 | base = of_iomap(np, 0); | 405 | base = of_iomap(np, 0); |
| 406 | WARN_ON(!base); | 406 | WARN_ON(!base); |
| 407 | of_node_put(np); | ||
| 407 | 408 | ||
| 408 | clks[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); | 409 | clks[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); |
| 409 | clks[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); | 410 | clks[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel)); |
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c index 4e18f629f823..ce306631e844 100644 --- a/drivers/clk/imx/clk-imx7ulp.c +++ b/drivers/clk/imx/clk-imx7ulp.c | |||
| @@ -48,8 +48,8 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np) | |||
| 48 | struct clk_hw **clks; | 48 | struct clk_hw **clks; |
| 49 | void __iomem *base; | 49 | void __iomem *base; |
| 50 | 50 | ||
| 51 | clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * | 51 | clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SCG1_END), |
| 52 | IMX7ULP_CLK_SCG1_END, GFP_KERNEL); | 52 | GFP_KERNEL); |
| 53 | if (!clk_data) | 53 | if (!clk_data) |
| 54 | return; | 54 | return; |
| 55 | 55 | ||
| @@ -136,8 +136,8 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np) | |||
| 136 | struct clk_hw **clks; | 136 | struct clk_hw **clks; |
| 137 | void __iomem *base; | 137 | void __iomem *base; |
| 138 | 138 | ||
| 139 | clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * | 139 | clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END), |
| 140 | IMX7ULP_CLK_PCC2_END, GFP_KERNEL); | 140 | GFP_KERNEL); |
| 141 | if (!clk_data) | 141 | if (!clk_data) |
| 142 | return; | 142 | return; |
| 143 | 143 | ||
| @@ -183,8 +183,8 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np) | |||
| 183 | struct clk_hw **clks; | 183 | struct clk_hw **clks; |
| 184 | void __iomem *base; | 184 | void __iomem *base; |
| 185 | 185 | ||
| 186 | clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * | 186 | clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END), |
| 187 | IMX7ULP_CLK_PCC3_END, GFP_KERNEL); | 187 | GFP_KERNEL); |
| 188 | if (!clk_data) | 188 | if (!clk_data) |
| 189 | return; | 189 | return; |
| 190 | 190 | ||
| @@ -228,8 +228,8 @@ static void __init imx7ulp_clk_smc1_init(struct device_node *np) | |||
| 228 | struct clk_hw **clks; | 228 | struct clk_hw **clks; |
| 229 | void __iomem *base; | 229 | void __iomem *base; |
| 230 | 230 | ||
| 231 | clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) * | 231 | clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_SMC1_END), |
| 232 | IMX7ULP_CLK_SMC1_END, GFP_KERNEL); | 232 | GFP_KERNEL); |
| 233 | if (!clk_data) | 233 | if (!clk_data) |
| 234 | return; | 234 | return; |
| 235 | 235 | ||
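Annotation: the imx7ulp hunks replace the open-coded "sizeof(header) + count * sizeof(element)" with struct_size(), which computes the same flexible-array allocation size but saturates instead of wrapping on overflow. A minimal sketch with illustrative names:

#include <linux/clk-provider.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_clk_data {
	unsigned int num;
	struct clk_hw *hws[];		/* flexible array member */
};

static struct example_clk_data *example_alloc(unsigned int count)
{
	struct example_clk_data *d;

	/* equivalent to kzalloc(sizeof(*d) + count * sizeof(d->hws[0]), ...) */
	d = kzalloc(struct_size(d, hws, count), GFP_KERNEL);
	if (d)
		d->num = count;

	return d;
}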
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c new file mode 100644 index 000000000000..1ef8438e3d6d --- /dev/null +++ b/drivers/clk/imx/clk-imx8mm.c | |||
| @@ -0,0 +1,675 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright 2017-2018 NXP. | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <dt-bindings/clock/imx8mm-clock.h> | ||
| 7 | #include <linux/clk.h> | ||
| 8 | #include <linux/err.h> | ||
| 9 | #include <linux/init.h> | ||
| 10 | #include <linux/io.h> | ||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/of.h> | ||
| 13 | #include <linux/of_address.h> | ||
| 14 | #include <linux/platform_device.h> | ||
| 15 | #include <linux/types.h> | ||
| 16 | |||
| 17 | #include "clk.h" | ||
| 18 | |||
| 19 | static u32 share_count_sai1; | ||
| 20 | static u32 share_count_sai2; | ||
| 21 | static u32 share_count_sai3; | ||
| 22 | static u32 share_count_sai4; | ||
| 23 | static u32 share_count_sai5; | ||
| 24 | static u32 share_count_sai6; | ||
| 25 | static u32 share_count_dcss; | ||
| 26 | static u32 share_count_pdm; | ||
| 27 | static u32 share_count_nand; | ||
| 28 | |||
| 29 | #define PLL_1416X_RATE(_rate, _m, _p, _s) \ | ||
| 30 | { \ | ||
| 31 | .rate = (_rate), \ | ||
| 32 | .mdiv = (_m), \ | ||
| 33 | .pdiv = (_p), \ | ||
| 34 | .sdiv = (_s), \ | ||
| 35 | } | ||
| 36 | |||
| 37 | #define PLL_1443X_RATE(_rate, _m, _p, _s, _k) \ | ||
| 38 | { \ | ||
| 39 | .rate = (_rate), \ | ||
| 40 | .mdiv = (_m), \ | ||
| 41 | .pdiv = (_p), \ | ||
| 42 | .sdiv = (_s), \ | ||
| 43 | .kdiv = (_k), \ | ||
| 44 | } | ||
| 45 | |||
| 46 | static const struct imx_pll14xx_rate_table imx8mm_pll1416x_tbl[] = { | ||
| 47 | PLL_1416X_RATE(1800000000U, 225, 3, 0), | ||
| 48 | PLL_1416X_RATE(1600000000U, 200, 3, 0), | ||
| 49 | PLL_1416X_RATE(1200000000U, 300, 3, 1), | ||
| 50 | PLL_1416X_RATE(1000000000U, 250, 3, 1), | ||
| 51 | PLL_1416X_RATE(800000000U, 200, 3, 1), | ||
| 52 | PLL_1416X_RATE(750000000U, 250, 2, 2), | ||
| 53 | PLL_1416X_RATE(700000000U, 350, 3, 2), | ||
| 54 | PLL_1416X_RATE(600000000U, 300, 3, 2), | ||
| 55 | }; | ||
| 56 | |||
| 57 | static const struct imx_pll14xx_rate_table imx8mm_audiopll_tbl[] = { | ||
| 58 | PLL_1443X_RATE(786432000U, 655, 5, 2, 23593), | ||
| 59 | PLL_1443X_RATE(722534400U, 301, 5, 1, 3670), | ||
| 60 | }; | ||
| 61 | |||
| 62 | static const struct imx_pll14xx_rate_table imx8mm_videopll_tbl[] = { | ||
| 63 | PLL_1443X_RATE(650000000U, 325, 3, 2, 0), | ||
| 64 | PLL_1443X_RATE(594000000U, 198, 2, 2, 0), | ||
| 65 | }; | ||
| 66 | |||
| 67 | static const struct imx_pll14xx_rate_table imx8mm_drampll_tbl[] = { | ||
| 68 | PLL_1443X_RATE(650000000U, 325, 3, 2, 0), | ||
| 69 | }; | ||
| 70 | |||
| 71 | static struct imx_pll14xx_clk imx8mm_audio_pll __initdata = { | ||
| 72 | .type = PLL_1443X, | ||
| 73 | .rate_table = imx8mm_audiopll_tbl, | ||
| 74 | .rate_count = ARRAY_SIZE(imx8mm_audiopll_tbl), | ||
| 75 | }; | ||
| 76 | |||
| 77 | static struct imx_pll14xx_clk imx8mm_video_pll __initdata = { | ||
| 78 | .type = PLL_1443X, | ||
| 79 | .rate_table = imx8mm_videopll_tbl, | ||
| 80 | .rate_count = ARRAY_SIZE(imx8mm_videopll_tbl), | ||
| 81 | }; | ||
| 82 | |||
| 83 | static struct imx_pll14xx_clk imx8mm_dram_pll __initdata = { | ||
| 84 | .type = PLL_1443X, | ||
| 85 | .rate_table = imx8mm_drampll_tbl, | ||
| 86 | .rate_count = ARRAY_SIZE(imx8mm_drampll_tbl), | ||
| 87 | }; | ||
| 88 | |||
| 89 | static struct imx_pll14xx_clk imx8mm_arm_pll __initdata = { | ||
| 90 | .type = PLL_1416X, | ||
| 91 | .rate_table = imx8mm_pll1416x_tbl, | ||
| 92 | .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl), | ||
| 93 | }; | ||
| 94 | |||
| 95 | static struct imx_pll14xx_clk imx8mm_gpu_pll __initdata = { | ||
| 96 | .type = PLL_1416X, | ||
| 97 | .rate_table = imx8mm_pll1416x_tbl, | ||
| 98 | .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl), | ||
| 99 | }; | ||
| 100 | |||
| 101 | static struct imx_pll14xx_clk imx8mm_vpu_pll __initdata = { | ||
| 102 | .type = PLL_1416X, | ||
| 103 | .rate_table = imx8mm_pll1416x_tbl, | ||
| 104 | .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl), | ||
| 105 | }; | ||
| 106 | |||
| 107 | static struct imx_pll14xx_clk imx8mm_sys_pll __initdata = { | ||
| 108 | .type = PLL_1416X, | ||
| 109 | .rate_table = imx8mm_pll1416x_tbl, | ||
| 110 | .rate_count = ARRAY_SIZE(imx8mm_pll1416x_tbl), | ||
| 111 | }; | ||
| 112 | |||
| 113 | static const char *pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", }; | ||
| 114 | static const char *audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", }; | ||
| 115 | static const char *audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", }; | ||
| 116 | static const char *video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", }; | ||
| 117 | static const char *dram_pll_bypass_sels[] = {"dram_pll", "dram_pll_ref_sel", }; | ||
| 118 | static const char *gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", }; | ||
| 119 | static const char *vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", }; | ||
| 120 | static const char *arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", }; | ||
| 121 | static const char *sys_pll1_bypass_sels[] = {"sys_pll1", "sys_pll1_ref_sel", }; | ||
| 122 | static const char *sys_pll2_bypass_sels[] = {"sys_pll2", "sys_pll2_ref_sel", }; | ||
| 123 | static const char *sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", }; | ||
| 124 | |||
| 125 | /* CCM ROOT */ | ||
| 126 | static const char *imx8mm_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m", "sys_pll2_1000m", | ||
| 127 | "sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "sys_pll3_out", }; | ||
| 128 | |||
| 129 | static const char *imx8mm_m4_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "sys_pll1_266m", | ||
| 130 | "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", }; | ||
| 131 | |||
| 132 | static const char *imx8mm_vpu_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m", "sys_pll2_1000m", | ||
| 133 | "sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "vpu_pll_out", }; | ||
| 134 | |||
| 135 | static const char *imx8mm_gpu3d_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out", | ||
| 136 | "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; | ||
| 137 | |||
| 138 | static const char *imx8mm_gpu2d_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out", | ||
| 139 | "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; | ||
| 140 | |||
| 141 | static const char *imx8mm_main_axi_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll1_800m", "sys_pll2_250m", | ||
| 142 | "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "sys_pll1_100m",}; | ||
| 143 | |||
| 144 | static const char *imx8mm_enet_axi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_250m", | ||
| 145 | "sys_pll2_200m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", }; | ||
| 146 | |||
| 147 | static const char *imx8mm_nand_usdhc_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll1_800m", "sys_pll2_200m", | ||
| 148 | "sys_pll1_133m", "sys_pll3_out", "sys_pll2_250m", "audio_pll1_out", }; | ||
| 149 | |||
| 150 | static const char *imx8mm_vpu_bus_sels[] = {"osc_24m", "sys_pll1_800m", "vpu_pll_out", "audio_pll2_out", | ||
| 151 | "sys_pll3_out", "sys_pll2_1000m", "sys_pll2_200m", "sys_pll1_100m", }; | ||
| 152 | |||
| 153 | static const char *imx8mm_disp_axi_sels[] = {"osc_24m", "sys_pll2_1000m", "sys_pll1_800m", "sys_pll3_out", | ||
| 154 | "sys_pll1_40m", "audio_pll2_out", "clk_ext1", "clk_ext4", }; | ||
| 155 | |||
| 156 | static const char *imx8mm_disp_apb_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll1_800m", "sys_pll3_out", | ||
| 157 | "sys_pll1_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", }; | ||
| 158 | |||
| 159 | static const char *imx8mm_disp_rtrm_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll2_200m", "sys_pll2_1000m", | ||
| 160 | "audio_pll1_out", "video_pll1_out", "clk_ext2", "clk_ext3", }; | ||
| 161 | |||
| 162 | static const char *imx8mm_usb_bus_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m", "sys_pll2_100m", | ||
| 163 | "sys_pll2_200m", "clk_ext2", "clk_ext4", "audio_pll2_out", }; | ||
| 164 | |||
| 165 | static const char *imx8mm_gpu_axi_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out", "sys_pll2_1000m", | ||
| 166 | "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; | ||
| 167 | |||
| 168 | static const char *imx8mm_gpu_ahb_sels[] = {"osc_24m", "sys_pll1_800m", "gpu_pll_out", "sys_pll3_out", "sys_pll2_1000m", | ||
| 169 | "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; | ||
| 170 | |||
| 171 | static const char *imx8mm_noc_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_1000m", "sys_pll2_500m", | ||
| 172 | "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; | ||
| 173 | |||
| 174 | static const char *imx8mm_noc_apb_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll3_out", "sys_pll2_333m", "sys_pll2_200m", | ||
| 175 | "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", }; | ||
| 176 | |||
| 177 | static const char *imx8mm_ahb_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_800m", "sys_pll1_400m", | ||
| 178 | "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out", }; | ||
| 179 | |||
| 180 | static const char *imx8mm_audio_ahb_sels[] = {"osc_24m", "sys_pll2_500m", "sys_pll1_800m", "sys_pll2_1000m", | ||
| 181 | "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out", }; | ||
| 182 | |||
| 183 | static const char *imx8mm_dram_alt_sels[] = {"osc_24m", "sys_pll1_800m", "sys_pll1_100m", "sys_pll2_500m", | ||
| 184 | "sys_pll2_1000m", "sys_pll3_out", "audio_pll1_out", "sys_pll1_266m", }; | ||
| 185 | |||
| 186 | static const char *imx8mm_dram_apb_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m", | ||
| 187 | "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", }; | ||
| 188 | |||
| 189 | static const char *imx8mm_vpu_g1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m", | ||
| 190 | "sys_pll1_100m", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", }; | ||
| 191 | |||
| 192 | static const char *imx8mm_vpu_g2_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m", | ||
| 193 | "sys_pll1_100m", "sys_pll2_125m", "sys_pll3_out", "audio_pll1_out", }; | ||
| 194 | |||
| 195 | static const char *imx8mm_disp_dtrc_sels[] = {"osc_24m", "video_pll2_out", "sys_pll1_800m", "sys_pll2_1000m", | ||
| 196 | "sys_pll1_160m", "video_pll1_out", "sys_pll3_out", "audio_pll2_out", }; | ||
| 197 | |||
| 198 | static const char *imx8mm_disp_dc8000_sels[] = {"osc_24m", "video_pll2_out", "sys_pll1_800m", "sys_pll2_1000m", | ||
| 199 | "sys_pll1_160m", "video_pll1_out", "sys_pll3_out", "audio_pll2_out", }; | ||
| 200 | |||
| 201 | static const char *imx8mm_pcie1_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m", "sys_pll1_266m", | ||
| 202 | "sys_pll1_800m", "sys_pll2_500m", "sys_pll2_333m", "sys_pll3_out", }; | ||
| 203 | |||
| 204 | static const char *imx8mm_pcie1_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m", "clk_ext1", "clk_ext2", | ||
| 205 | "clk_ext3", "clk_ext4", "sys_pll1_400m", }; | ||
| 206 | |||
| 207 | static const char *imx8mm_pcie1_aux_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m", "sys_pll3_out", | ||
| 208 | "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_160m", "sys_pll1_200m", }; | ||
| 209 | |||
| 210 | static const char *imx8mm_dc_pixel_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", | ||
| 211 | "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", }; | ||
| 212 | |||
| 213 | static const char *imx8mm_lcdif_pixel_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", | ||
| 214 | "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", }; | ||
| 215 | |||
| 216 | static const char *imx8mm_sai1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", | ||
| 217 | "sys_pll1_133m", "osc_hdmi", "clk_ext1", "clk_ext2", }; | ||
| 218 | |||
| 219 | static const char *imx8mm_sai2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", | ||
| 220 | "sys_pll1_133m", "osc_hdmi", "clk_ext2", "clk_ext3", }; | ||
| 221 | |||
| 222 | static const char *imx8mm_sai3_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", | ||
| 223 | "sys_pll1_133m", "osc_hdmi", "clk_ext3", "clk_ext4", }; | ||
| 224 | |||
| 225 | static const char *imx8mm_sai4_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", | ||
| 226 | "sys_pll1_133m", "osc_hdmi", "clk_ext1", "clk_ext2", }; | ||
| 227 | |||
| 228 | static const char *imx8mm_sai5_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", | ||
| 229 | "sys_pll1_133m", "osc_hdmi", "clk_ext2", "clk_ext3", }; | ||
| 230 | |||
| 231 | static const char *imx8mm_sai6_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", | ||
| 232 | "sys_pll1_133m", "osc_hdmi", "clk_ext3", "clk_ext4", }; | ||
| 233 | |||
| 234 | static const char *imx8mm_spdif1_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", | ||
| 235 | "sys_pll1_133m", "osc_hdmi", "clk_ext2", "clk_ext3", }; | ||
| 236 | |||
| 237 | static const char *imx8mm_spdif2_sels[] = {"osc_24m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", | ||
| 238 | "sys_pll1_133m", "osc_hdmi", "clk_ext3", "clk_ext4", }; | ||
| 239 | |||
| 240 | static const char *imx8mm_enet_ref_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_50m", "sys_pll2_100m", | ||
| 241 | "sys_pll1_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4", }; | ||
| 242 | |||
| 243 | static const char *imx8mm_enet_timer_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out", "clk_ext1", "clk_ext2", | ||
| 244 | "clk_ext3", "clk_ext4", "video_pll1_out", }; | ||
| 245 | |||
| 246 | static const char *imx8mm_enet_phy_sels[] = {"osc_24m", "sys_pll2_50m", "sys_pll2_125m", "sys_pll2_200m", | ||
| 247 | "sys_pll2_500m", "video_pll1_out", "audio_pll2_out", }; | ||
| 248 | |||
| 249 | static const char *imx8mm_nand_sels[] = {"osc_24m", "sys_pll2_500m", "audio_pll1_out", "sys_pll1_400m", | ||
| 250 | "audio_pll2_out", "sys_pll3_out", "sys_pll2_250m", "video_pll1_out", }; | ||
| 251 | |||
| 252 | static const char *imx8mm_qspi_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m", | ||
| 253 | "audio_pll2_out", "sys_pll1_266m", "sys_pll3_out", "sys_pll1_100m", }; | ||
| 254 | |||
| 255 | static const char *imx8mm_usdhc1_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m", | ||
| 256 | "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", }; | ||
| 257 | |||
| 258 | static const char *imx8mm_usdhc2_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m", | ||
| 259 | "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", }; | ||
| 260 | |||
| 261 | static const char *imx8mm_i2c1_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out", | ||
| 262 | "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", }; | ||
| 263 | |||
| 264 | static const char *imx8mm_i2c2_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out", | ||
| 265 | "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", }; | ||
| 266 | |||
| 267 | static const char *imx8mm_i2c3_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out", | ||
| 268 | "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", }; | ||
| 269 | |||
| 270 | static const char *imx8mm_i2c4_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out", | ||
| 271 | "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", }; | ||
| 272 | |||
| 273 | static const char *imx8mm_uart1_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m", | ||
| 274 | "sys_pll3_out", "clk_ext2", "clk_ext4", "audio_pll2_out", }; | ||
| 275 | |||
| 276 | static const char *imx8mm_uart2_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m", | ||
| 277 | "sys_pll3_out", "clk_ext2", "clk_ext3", "audio_pll2_out", }; | ||
| 278 | |||
| 279 | static const char *imx8mm_uart3_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m", | ||
| 280 | "sys_pll3_out", "clk_ext2", "clk_ext4", "audio_pll2_out", }; | ||
| 281 | |||
| 282 | static const char *imx8mm_uart4_sels[] = {"osc_24m", "sys_pll1_80m", "sys_pll2_200m", "sys_pll2_100m", | ||
| 283 | "sys_pll3_out", "clk_ext2", "clk_ext3", "audio_pll2_out", }; | ||
| 284 | |||
| 285 | static const char *imx8mm_usb_core_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m", "sys_pll2_100m", | ||
| 286 | "sys_pll2_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", }; | ||
| 287 | |||
| 288 | static const char *imx8mm_usb_phy_sels[] = {"osc_24m", "sys_pll1_100m", "sys_pll1_40m", "sys_pll2_100m", | ||
| 289 | "sys_pll2_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", }; | ||
| 290 | |||
| 291 | static const char *imx8mm_ecspi1_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m", | ||
| 292 | "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", }; | ||
| 293 | |||
| 294 | static const char *imx8mm_ecspi2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m", | ||
| 295 | "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", }; | ||
| 296 | |||
| 297 | static const char *imx8mm_pwm1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m", | ||
| 298 | "sys_pll3_out", "clk_ext1", "sys_pll1_80m", "video_pll1_out", }; | ||
| 299 | |||
| 300 | static const char *imx8mm_pwm2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m", | ||
| 301 | "sys_pll3_out", "clk_ext1", "sys_pll1_80m", "video_pll1_out", }; | ||
| 302 | |||
| 303 | static const char *imx8mm_pwm3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m", | ||
| 304 | "sys3_pll2_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", }; | ||
| 305 | |||
| 306 | static const char *imx8mm_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_160m", "sys_pll1_40m", | ||
| 307 | "sys_pll3_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", }; | ||
| 308 | |||
| 309 | static const char *imx8mm_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", "sys_pll1_40m", | ||
| 310 | "video_pll1_out", "sys_pll1_800m", "audio_pll1_out", "clk_ext1" }; | ||
| 311 | |||
| 312 | static const char *imx8mm_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m", "vpu_pll_out", | ||
| 313 | "sys_pll2_125m", "sys_pll3_out", "sys_pll1_80m", "sys_pll2_166m", }; | ||
| 314 | |||
| 315 | static const char *imx8mm_wrclk_sels[] = {"osc_24m", "sys_pll1_40m", "vpu_pll_out", "sys_pll3_out", "sys_pll2_200m", | ||
| 316 | "sys_pll1_266m", "sys_pll2_500m", "sys_pll1_100m", }; | ||
| 317 | |||
| 318 | static const char *imx8mm_dsi_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m", | ||
| 319 | "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", }; | ||
| 320 | |||
| 321 | static const char *imx8mm_dsi_phy_sels[] = {"osc_24m", "sys_pll2_125m", "sys_pll2_100m", "sys_pll1_800m", | ||
| 322 | "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; | ||
| 323 | |||
| 324 | static const char *imx8mm_dsi_dbi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_100m", "sys_pll1_800m", | ||
| 325 | "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", }; | ||
| 326 | |||
| 327 | static const char *imx8mm_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m", | ||
| 328 | "sys_pll3_out", "sys_pll1_266m", "audio_pll2_clk", "sys_pll1_100m", }; | ||
| 329 | |||
| 330 | static const char *imx8mm_csi1_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m", | ||
| 331 | "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", }; | ||
| 332 | |||
| 333 | static const char *imx8mm_csi1_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m", "sys_pll1_800m", | ||
| 334 | "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; | ||
| 335 | |||
| 336 | static const char *imx8mm_csi1_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_800m", | ||
| 337 | "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", }; | ||
| 338 | |||
| 339 | static const char *imx8mm_csi2_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m", | ||
| 340 | "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", }; | ||
| 341 | |||
| 342 | static const char *imx8mm_csi2_phy_sels[] = {"osc_24m", "sys_pll2_333m", "sys_pll2_100m", "sys_pll1_800m", | ||
| 343 | "sys_pll2_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; | ||
| 344 | |||
| 345 | static const char *imx8mm_csi2_esc_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_800m", | ||
| 346 | "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", }; | ||
| 347 | |||
| 348 | static const char *imx8mm_pcie2_ctrl_sels[] = {"osc_24m", "sys_pll2_250m", "sys_pll2_200m", "sys_pll1_266m", | ||
| 349 | "sys_pll1_800m", "sys_pll2_500m", "sys_pll2_333m", "sys_pll3_out", }; | ||
| 350 | |||
| 351 | static const char *imx8mm_pcie2_phy_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll2_500m", "clk_ext1", | ||
| 352 | "clk_ext2", "clk_ext3", "clk_ext4", "sys_pll1_400m", }; | ||
| 353 | |||
| 354 | static const char *imx8mm_pcie2_aux_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_50m", "sys_pll3_out", | ||
| 355 | "sys_pll2_100m", "sys_pll1_80m", "sys_pll1_160m", "sys_pll1_200m", }; | ||
| 356 | |||
| 357 | static const char *imx8mm_ecspi3_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_40m", "sys_pll1_160m", | ||
| 358 | "sys_pll1_800m", "sys_pll3_out", "sys_pll2_250m", "audio_pll2_out", }; | ||
| 359 | |||
| 360 | static const char *imx8mm_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_out", "sys_pll1_800m", | ||
| 361 | "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", }; | ||
| 362 | |||
| 363 | static const char *imx8mm_vpu_h1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m", | ||
| 364 | "audio_pll2_clk", "sys_pll2_125m", "sys_pll3_clk", "audio_pll1_out", }; | ||
| 365 | |||
| 366 | static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; | ||
| 367 | |||
| 368 | static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out", | ||
| 369 | "vpu_pll", "sys_pll1_80m", }; | ||
| 370 | |||
| 371 | static struct clk *clks[IMX8MM_CLK_END]; | ||
| 372 | static struct clk_onecell_data clk_data; | ||
| 373 | |||
| 374 | static struct clk ** const uart_clks[] __initconst = { | ||
| 375 | &clks[IMX8MM_CLK_UART1_ROOT], | ||
| 376 | &clks[IMX8MM_CLK_UART2_ROOT], | ||
| 377 | &clks[IMX8MM_CLK_UART3_ROOT], | ||
| 378 | &clks[IMX8MM_CLK_UART4_ROOT], | ||
| 379 | NULL | ||
| 380 | }; | ||
| 381 | |||
| 382 | static int __init imx8mm_clocks_init(struct device_node *ccm_node) | ||
| 383 | { | ||
| 384 | struct device_node *np; | ||
| 385 | void __iomem *base; | ||
| 386 | int ret; | ||
| 387 | |||
| 388 | clks[IMX8MM_CLK_DUMMY] = imx_clk_fixed("dummy", 0); | ||
| 389 | clks[IMX8MM_CLK_24M] = of_clk_get_by_name(ccm_node, "osc_24m"); | ||
| 390 | clks[IMX8MM_CLK_32K] = of_clk_get_by_name(ccm_node, "osc_32k"); | ||
| 391 | clks[IMX8MM_CLK_EXT1] = of_clk_get_by_name(ccm_node, "clk_ext1"); | ||
| 392 | clks[IMX8MM_CLK_EXT2] = of_clk_get_by_name(ccm_node, "clk_ext2"); | ||
| 393 | clks[IMX8MM_CLK_EXT3] = of_clk_get_by_name(ccm_node, "clk_ext3"); | ||
| 394 | clks[IMX8MM_CLK_EXT4] = of_clk_get_by_name(ccm_node, "clk_ext4"); | ||
| 395 | |||
| 396 | np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop"); | ||
| 397 | base = of_iomap(np, 0); | ||
| 398 | if (WARN_ON(!base)) | ||
| 399 | return -ENOMEM; | ||
| 400 | |||
| 401 | clks[IMX8MM_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); | ||
| 402 | clks[IMX8MM_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x14, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); | ||
| 403 | clks[IMX8MM_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x28, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); | ||
| 404 | clks[IMX8MM_DRAM_PLL_REF_SEL] = imx_clk_mux("dram_pll_ref_sel", base + 0x50, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); | ||
| 405 | clks[IMX8MM_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x64, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); | ||
| 406 | clks[IMX8MM_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x74, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); | ||
| 407 | clks[IMX8MM_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x84, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); | ||
| 408 | clks[IMX8MM_SYS_PLL1_REF_SEL] = imx_clk_mux("sys_pll1_ref_sel", base + 0x94, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); | ||
| 409 | clks[IMX8MM_SYS_PLL2_REF_SEL] = imx_clk_mux("sys_pll2_ref_sel", base + 0x104, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); | ||
| 410 | clks[IMX8MM_SYS_PLL3_REF_SEL] = imx_clk_mux("sys_pll3_ref_sel", base + 0x114, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels)); | ||
| 411 | |||
| 412 | clks[IMX8MM_AUDIO_PLL1] = imx_clk_pll14xx("audio_pll1", "audio_pll1_ref_sel", base, &imx8mm_audio_pll); | ||
| 413 | clks[IMX8MM_AUDIO_PLL2] = imx_clk_pll14xx("audio_pll2", "audio_pll2_ref_sel", base + 0x14, &imx8mm_audio_pll); | ||
| 414 | clks[IMX8MM_VIDEO_PLL1] = imx_clk_pll14xx("video_pll1", "video_pll1_ref_sel", base + 0x28, &imx8mm_video_pll); | ||
| 415 | clks[IMX8MM_DRAM_PLL] = imx_clk_pll14xx("dram_pll", "dram_pll_ref_sel", base + 0x50, &imx8mm_dram_pll); | ||
| 416 | clks[IMX8MM_GPU_PLL] = imx_clk_pll14xx("gpu_pll", "gpu_pll_ref_sel", base + 0x64, &imx8mm_gpu_pll); | ||
| 417 | clks[IMX8MM_VPU_PLL] = imx_clk_pll14xx("vpu_pll", "vpu_pll_ref_sel", base + 0x74, &imx8mm_vpu_pll); | ||
| 418 | clks[IMX8MM_ARM_PLL] = imx_clk_pll14xx("arm_pll", "arm_pll_ref_sel", base + 0x84, &imx8mm_arm_pll); | ||
| 419 | clks[IMX8MM_SYS_PLL1] = imx_clk_pll14xx("sys_pll1", "sys_pll1_ref_sel", base + 0x94, &imx8mm_sys_pll); | ||
| 420 | clks[IMX8MM_SYS_PLL2] = imx_clk_pll14xx("sys_pll2", "sys_pll2_ref_sel", base + 0x104, &imx8mm_sys_pll); | ||
| 421 | clks[IMX8MM_SYS_PLL3] = imx_clk_pll14xx("sys_pll3", "sys_pll3_ref_sel", base + 0x114, &imx8mm_sys_pll); | ||
| 422 | |||
| 423 | /* PLL bypass out */ | ||
| 424 | clks[IMX8MM_AUDIO_PLL1_BYPASS] = imx_clk_mux_flags("audio_pll1_bypass", base, 4, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels), CLK_SET_RATE_PARENT); | ||
| 425 | clks[IMX8MM_AUDIO_PLL2_BYPASS] = imx_clk_mux_flags("audio_pll2_bypass", base + 0x14, 4, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels), CLK_SET_RATE_PARENT); | ||
| 426 | clks[IMX8MM_VIDEO_PLL1_BYPASS] = imx_clk_mux_flags("video_pll1_bypass", base + 0x28, 4, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels), CLK_SET_RATE_PARENT); | ||
| 427 | clks[IMX8MM_DRAM_PLL_BYPASS] = imx_clk_mux_flags("dram_pll_bypass", base + 0x50, 4, 1, dram_pll_bypass_sels, ARRAY_SIZE(dram_pll_bypass_sels), CLK_SET_RATE_PARENT); | ||
| 428 | clks[IMX8MM_GPU_PLL_BYPASS] = imx_clk_mux_flags("gpu_pll_bypass", base + 0x64, 4, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels), CLK_SET_RATE_PARENT); | ||
| 429 | clks[IMX8MM_VPU_PLL_BYPASS] = imx_clk_mux_flags("vpu_pll_bypass", base + 0x74, 4, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels), CLK_SET_RATE_PARENT); | ||
| 430 | clks[IMX8MM_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x84, 4, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT); | ||
| 431 | clks[IMX8MM_SYS_PLL1_BYPASS] = imx_clk_mux_flags("sys_pll1_bypass", base + 0x94, 4, 1, sys_pll1_bypass_sels, ARRAY_SIZE(sys_pll1_bypass_sels), CLK_SET_RATE_PARENT); | ||
| 432 | clks[IMX8MM_SYS_PLL2_BYPASS] = imx_clk_mux_flags("sys_pll2_bypass", base + 0x104, 4, 1, sys_pll2_bypass_sels, ARRAY_SIZE(sys_pll2_bypass_sels), CLK_SET_RATE_PARENT); | ||
| 433 | clks[IMX8MM_SYS_PLL3_BYPASS] = imx_clk_mux_flags("sys_pll3_bypass", base + 0x114, 4, 1, sys_pll3_bypass_sels, ARRAY_SIZE(sys_pll3_bypass_sels), CLK_SET_RATE_PARENT); | ||
| 434 | |||
| 435 | /* unbypass all the plls */ | ||
| 436 | clk_set_parent(clks[IMX8MM_AUDIO_PLL1_BYPASS], clks[IMX8MM_AUDIO_PLL1]); | ||
| 437 | clk_set_parent(clks[IMX8MM_AUDIO_PLL2_BYPASS], clks[IMX8MM_AUDIO_PLL2]); | ||
| 438 | clk_set_parent(clks[IMX8MM_VIDEO_PLL1_BYPASS], clks[IMX8MM_VIDEO_PLL1]); | ||
| 439 | clk_set_parent(clks[IMX8MM_DRAM_PLL_BYPASS], clks[IMX8MM_DRAM_PLL]); | ||
| 440 | clk_set_parent(clks[IMX8MM_GPU_PLL_BYPASS], clks[IMX8MM_GPU_PLL]); | ||
| 441 | clk_set_parent(clks[IMX8MM_VPU_PLL_BYPASS], clks[IMX8MM_VPU_PLL]); | ||
| 442 | clk_set_parent(clks[IMX8MM_ARM_PLL_BYPASS], clks[IMX8MM_ARM_PLL]); | ||
| 443 | clk_set_parent(clks[IMX8MM_SYS_PLL1_BYPASS], clks[IMX8MM_SYS_PLL1]); | ||
| 444 | clk_set_parent(clks[IMX8MM_SYS_PLL2_BYPASS], clks[IMX8MM_SYS_PLL2]); | ||
| 445 | clk_set_parent(clks[IMX8MM_SYS_PLL3_BYPASS], clks[IMX8MM_SYS_PLL3]); | ||
| 446 | |||
| 447 | /* PLL out gate */ | ||
| 448 | clks[IMX8MM_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base, 13); | ||
| 449 | clks[IMX8MM_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x14, 13); | ||
| 450 | clks[IMX8MM_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x28, 13); | ||
| 451 | clks[IMX8MM_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll_bypass", base + 0x50, 13); | ||
| 452 | clks[IMX8MM_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x64, 13); | ||
| 453 | clks[IMX8MM_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x74, 13); | ||
| 454 | clks[IMX8MM_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x84, 13); | ||
| 455 | clks[IMX8MM_SYS_PLL1_OUT] = imx_clk_gate("sys_pll1_out", "sys_pll1_bypass", base + 0x94, 13); | ||
| 456 | clks[IMX8MM_SYS_PLL2_OUT] = imx_clk_gate("sys_pll2_out", "sys_pll2_bypass", base + 0x104, 13); | ||
| 457 | clks[IMX8MM_SYS_PLL3_OUT] = imx_clk_gate("sys_pll3_out", "sys_pll3_bypass", base + 0x114, 13); | ||
| 458 | |||
| 459 | /* SYS PLL fixed output */ | ||
| 460 | clks[IMX8MM_SYS_PLL1_40M] = imx_clk_fixed_factor("sys_pll1_40m", "sys_pll1_out", 1, 20); | ||
| 461 | clks[IMX8MM_SYS_PLL1_80M] = imx_clk_fixed_factor("sys_pll1_80m", "sys_pll1_out", 1, 10); | ||
| 462 | clks[IMX8MM_SYS_PLL1_100M] = imx_clk_fixed_factor("sys_pll1_100m", "sys_pll1_out", 1, 8); | ||
| 463 | clks[IMX8MM_SYS_PLL1_133M] = imx_clk_fixed_factor("sys_pll1_133m", "sys_pll1_out", 1, 6); | ||
| 464 | clks[IMX8MM_SYS_PLL1_160M] = imx_clk_fixed_factor("sys_pll1_160m", "sys_pll1_out", 1, 5); | ||
| 465 | clks[IMX8MM_SYS_PLL1_200M] = imx_clk_fixed_factor("sys_pll1_200m", "sys_pll1_out", 1, 4); | ||
| 466 | clks[IMX8MM_SYS_PLL1_266M] = imx_clk_fixed_factor("sys_pll1_266m", "sys_pll1_out", 1, 3); | ||
| 467 | clks[IMX8MM_SYS_PLL1_400M] = imx_clk_fixed_factor("sys_pll1_400m", "sys_pll1_out", 1, 2); | ||
| 468 | clks[IMX8MM_SYS_PLL1_800M] = imx_clk_fixed_factor("sys_pll1_800m", "sys_pll1_out", 1, 1); | ||
| 469 | |||
| 470 | clks[IMX8MM_SYS_PLL2_50M] = imx_clk_fixed_factor("sys_pll2_50m", "sys_pll2_out", 1, 20); | ||
| 471 | clks[IMX8MM_SYS_PLL2_100M] = imx_clk_fixed_factor("sys_pll2_100m", "sys_pll2_out", 1, 10); | ||
| 472 | clks[IMX8MM_SYS_PLL2_125M] = imx_clk_fixed_factor("sys_pll2_125m", "sys_pll2_out", 1, 8); | ||
| 473 | clks[IMX8MM_SYS_PLL2_166M] = imx_clk_fixed_factor("sys_pll2_166m", "sys_pll2_out", 1, 6); | ||
| 474 | clks[IMX8MM_SYS_PLL2_200M] = imx_clk_fixed_factor("sys_pll2_200m", "sys_pll2_out", 1, 5); | ||
| 475 | clks[IMX8MM_SYS_PLL2_250M] = imx_clk_fixed_factor("sys_pll2_250m", "sys_pll2_out", 1, 4); | ||
| 476 | clks[IMX8MM_SYS_PLL2_333M] = imx_clk_fixed_factor("sys_pll2_333m", "sys_pll2_out", 1, 3); | ||
| 477 | clks[IMX8MM_SYS_PLL2_500M] = imx_clk_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2); | ||
| 478 | clks[IMX8MM_SYS_PLL2_1000M] = imx_clk_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1); | ||
| 479 | |||
| 480 | np = ccm_node; | ||
| 481 | base = of_iomap(np, 0); | ||
| 482 | if (WARN_ON(!base)) | ||
| 483 | return -ENOMEM; | ||
| 484 | |||
| 485 | /* Core Slice */ | ||
| 486 | clks[IMX8MM_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels)); | ||
| 487 | clks[IMX8MM_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels)); | ||
| 488 | clks[IMX8MM_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels)); | ||
| 489 | clks[IMX8MM_CLK_GPU3D_SRC] = imx_clk_mux2("gpu3d_src", base + 0x8180, 24, 3, imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels)); | ||
| 490 | clks[IMX8MM_CLK_GPU2D_SRC] = imx_clk_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels, ARRAY_SIZE(imx8mm_gpu2d_sels)); | ||
| 491 | clks[IMX8MM_CLK_A53_CG] = imx_clk_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28); | ||
| 492 | clks[IMX8MM_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28); | ||
| 493 | clks[IMX8MM_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28); | ||
| 494 | clks[IMX8MM_CLK_GPU3D_CG] = imx_clk_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28); | ||
| 495 | clks[IMX8MM_CLK_GPU2D_CG] = imx_clk_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28); | ||
| 496 | clks[IMX8MM_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3); | ||
| 497 | clks[IMX8MM_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3); | ||
| 498 | clks[IMX8MM_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3); | ||
| 499 | clks[IMX8MM_CLK_GPU3D_DIV] = imx_clk_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3); | ||
| 500 | clks[IMX8MM_CLK_GPU2D_DIV] = imx_clk_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3); | ||
| 501 | |||
| 502 | /* BUS */ | ||
| 503 | clks[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800); | ||
| 504 | clks[IMX8MM_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mm_enet_axi_sels, base + 0x8880); | ||
| 505 | clks[IMX8MM_CLK_NAND_USDHC_BUS] = imx8m_clk_composite_critical("nand_usdhc_bus", imx8mm_nand_usdhc_sels, base + 0x8900); | ||
| 506 | clks[IMX8MM_CLK_VPU_BUS] = imx8m_clk_composite("vpu_bus", imx8mm_vpu_bus_sels, base + 0x8980); | ||
| 507 | clks[IMX8MM_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mm_disp_axi_sels, base + 0x8a00); | ||
| 508 | clks[IMX8MM_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mm_disp_apb_sels, base + 0x8a80); | ||
| 509 | clks[IMX8MM_CLK_DISP_RTRM] = imx8m_clk_composite("disp_rtrm", imx8mm_disp_rtrm_sels, base + 0x8b00); | ||
| 510 | clks[IMX8MM_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mm_usb_bus_sels, base + 0x8b80); | ||
| 511 | clks[IMX8MM_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mm_gpu_axi_sels, base + 0x8c00); | ||
| 512 | clks[IMX8MM_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mm_gpu_ahb_sels, base + 0x8c80); | ||
| 513 | clks[IMX8MM_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mm_noc_sels, base + 0x8d00); | ||
| 514 | clks[IMX8MM_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mm_noc_apb_sels, base + 0x8d80); | ||
| 515 | |||
| 516 | /* AHB */ | ||
| 517 | clks[IMX8MM_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mm_ahb_sels, base + 0x9000); | ||
| 518 | clks[IMX8MM_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mm_audio_ahb_sels, base + 0x9100); | ||
| 519 | |||
| 520 | /* IPG */ | ||
| 521 | clks[IMX8MM_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1); | ||
| 522 | clks[IMX8MM_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1); | ||
| 523 | |||
| 524 | /* IP */ | ||
| 525 | clks[IMX8MM_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mm_dram_alt_sels, base + 0xa000); | ||
| 526 | clks[IMX8MM_CLK_DRAM_APB] = imx8m_clk_composite("dram_apb", imx8mm_dram_apb_sels, base + 0xa080); | ||
| 527 | clks[IMX8MM_CLK_VPU_G1] = imx8m_clk_composite("vpu_g1", imx8mm_vpu_g1_sels, base + 0xa100); | ||
| 528 | clks[IMX8MM_CLK_VPU_G2] = imx8m_clk_composite("vpu_g2", imx8mm_vpu_g2_sels, base + 0xa180); | ||
| 529 | clks[IMX8MM_CLK_DISP_DTRC] = imx8m_clk_composite("disp_dtrc", imx8mm_disp_dtrc_sels, base + 0xa200); | ||
| 530 | clks[IMX8MM_CLK_DISP_DC8000] = imx8m_clk_composite("disp_dc8000", imx8mm_disp_dc8000_sels, base + 0xa280); | ||
| 531 | clks[IMX8MM_CLK_PCIE1_CTRL] = imx8m_clk_composite("pcie1_ctrl", imx8mm_pcie1_ctrl_sels, base + 0xa300); | ||
| 532 | clks[IMX8MM_CLK_PCIE1_PHY] = imx8m_clk_composite("pcie1_phy", imx8mm_pcie1_phy_sels, base + 0xa380); | ||
| 533 | clks[IMX8MM_CLK_PCIE1_AUX] = imx8m_clk_composite("pcie1_aux", imx8mm_pcie1_aux_sels, base + 0xa400); | ||
| 534 | clks[IMX8MM_CLK_DC_PIXEL] = imx8m_clk_composite("dc_pixel", imx8mm_dc_pixel_sels, base + 0xa480); | ||
| 535 | clks[IMX8MM_CLK_LCDIF_PIXEL] = imx8m_clk_composite("lcdif_pixel", imx8mm_lcdif_pixel_sels, base + 0xa500); | ||
| 536 | clks[IMX8MM_CLK_SAI1] = imx8m_clk_composite("sai1", imx8mm_sai1_sels, base + 0xa580); | ||
| 537 | clks[IMX8MM_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mm_sai2_sels, base + 0xa600); | ||
| 538 | clks[IMX8MM_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mm_sai3_sels, base + 0xa680); | ||
| 539 | clks[IMX8MM_CLK_SAI4] = imx8m_clk_composite("sai4", imx8mm_sai4_sels, base + 0xa700); | ||
| 540 | clks[IMX8MM_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mm_sai5_sels, base + 0xa780); | ||
| 541 | clks[IMX8MM_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mm_sai6_sels, base + 0xa800); | ||
| 542 | clks[IMX8MM_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mm_spdif1_sels, base + 0xa880); | ||
| 543 | clks[IMX8MM_CLK_SPDIF2] = imx8m_clk_composite("spdif2", imx8mm_spdif2_sels, base + 0xa900); | ||
| 544 | clks[IMX8MM_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mm_enet_ref_sels, base + 0xa980); | ||
| 545 | clks[IMX8MM_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mm_enet_timer_sels, base + 0xaa00); | ||
| 546 | clks[IMX8MM_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mm_enet_phy_sels, base + 0xaa80); | ||
| 547 | clks[IMX8MM_CLK_NAND] = imx8m_clk_composite("nand", imx8mm_nand_sels, base + 0xab00); | ||
| 548 | clks[IMX8MM_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mm_qspi_sels, base + 0xab80); | ||
| 549 | clks[IMX8MM_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mm_usdhc1_sels, base + 0xac00); | ||
| 550 | clks[IMX8MM_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mm_usdhc2_sels, base + 0xac80); | ||
| 551 | clks[IMX8MM_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mm_i2c1_sels, base + 0xad00); | ||
| 552 | clks[IMX8MM_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mm_i2c2_sels, base + 0xad80); | ||
| 553 | clks[IMX8MM_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mm_i2c3_sels, base + 0xae00); | ||
| 554 | clks[IMX8MM_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mm_i2c4_sels, base + 0xae80); | ||
| 555 | clks[IMX8MM_CLK_UART1] = imx8m_clk_composite("uart1", imx8mm_uart1_sels, base + 0xaf00); | ||
| 556 | clks[IMX8MM_CLK_UART2] = imx8m_clk_composite("uart2", imx8mm_uart2_sels, base + 0xaf80); | ||
| 557 | clks[IMX8MM_CLK_UART3] = imx8m_clk_composite("uart3", imx8mm_uart3_sels, base + 0xb000); | ||
| 558 | clks[IMX8MM_CLK_UART4] = imx8m_clk_composite("uart4", imx8mm_uart4_sels, base + 0xb080); | ||
| 559 | clks[IMX8MM_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mm_usb_core_sels, base + 0xb100); | ||
| 560 | clks[IMX8MM_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mm_usb_phy_sels, base + 0xb180); | ||
| 561 | clks[IMX8MM_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mm_ecspi1_sels, base + 0xb280); | ||
| 562 | clks[IMX8MM_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mm_ecspi2_sels, base + 0xb300); | ||
| 563 | clks[IMX8MM_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mm_pwm1_sels, base + 0xb380); | ||
| 564 | clks[IMX8MM_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mm_pwm2_sels, base + 0xb400); | ||
| 565 | clks[IMX8MM_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mm_pwm3_sels, base + 0xb480); | ||
| 566 | clks[IMX8MM_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mm_pwm4_sels, base + 0xb500); | ||
| 567 | clks[IMX8MM_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mm_gpt1_sels, base + 0xb580); | ||
| 568 | clks[IMX8MM_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mm_wdog_sels, base + 0xb900); | ||
| 569 | clks[IMX8MM_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980); | ||
| 570 | clks[IMX8MM_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mm_clko1_sels, base + 0xba00); | ||
| 571 | clks[IMX8MM_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00); | ||
| 572 | clks[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80); | ||
| 573 | clks[IMX8MM_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00); | ||
| 574 | clks[IMX8MM_CLK_USDHC3] = imx8m_clk_composite("usdhc3", imx8mm_usdhc3_sels, base + 0xbc80); | ||
| 575 | clks[IMX8MM_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mm_csi1_core_sels, base + 0xbd00); | ||
| 576 | clks[IMX8MM_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mm_csi1_phy_sels, base + 0xbd80); | ||
| 577 | clks[IMX8MM_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mm_csi1_esc_sels, base + 0xbe00); | ||
| 578 | clks[IMX8MM_CLK_CSI2_CORE] = imx8m_clk_composite("csi2_core", imx8mm_csi2_core_sels, base + 0xbe80); | ||
| 579 | clks[IMX8MM_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mm_csi2_phy_sels, base + 0xbf00); | ||
| 580 | clks[IMX8MM_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mm_csi2_esc_sels, base + 0xbf80); | ||
| 581 | clks[IMX8MM_CLK_PCIE2_CTRL] = imx8m_clk_composite("pcie2_ctrl", imx8mm_pcie2_ctrl_sels, base + 0xc000); | ||
| 582 | clks[IMX8MM_CLK_PCIE2_PHY] = imx8m_clk_composite("pcie2_phy", imx8mm_pcie2_phy_sels, base + 0xc080); | ||
| 583 | clks[IMX8MM_CLK_PCIE2_AUX] = imx8m_clk_composite("pcie2_aux", imx8mm_pcie2_aux_sels, base + 0xc100); | ||
| 584 | clks[IMX8MM_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mm_ecspi3_sels, base + 0xc180); | ||
| 585 | clks[IMX8MM_CLK_PDM] = imx8m_clk_composite("pdm", imx8mm_pdm_sels, base + 0xc200); | ||
| 586 | clks[IMX8MM_CLK_VPU_H1] = imx8m_clk_composite("vpu_h1", imx8mm_vpu_h1_sels, base + 0xc280); | ||
| 587 | |||
| 588 | /* CCGR */ | ||
| 589 | clks[IMX8MM_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0); | ||
| 590 | clks[IMX8MM_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0); | ||
| 591 | clks[IMX8MM_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0); | ||
| 592 | clks[IMX8MM_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0); | ||
| 593 | clks[IMX8MM_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0); | ||
| 594 | clks[IMX8MM_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0); | ||
| 595 | clks[IMX8MM_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0); | ||
| 596 | clks[IMX8MM_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0); | ||
| 597 | clks[IMX8MM_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0); | ||
| 598 | clks[IMX8MM_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0); | ||
| 599 | clks[IMX8MM_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0); | ||
| 600 | clks[IMX8MM_CLK_PCIE1_ROOT] = imx_clk_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0); | ||
| 601 | clks[IMX8MM_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0); | ||
| 602 | clks[IMX8MM_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0); | ||
| 603 | clks[IMX8MM_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0); | ||
| 604 | clks[IMX8MM_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0); | ||
| 605 | clks[IMX8MM_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0); | ||
| 606 | clks[IMX8MM_CLK_NAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand); | ||
| 607 | clks[IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand); | ||
| 608 | clks[IMX8MM_CLK_SAI1_ROOT] = imx_clk_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1); | ||
| 609 | clks[IMX8MM_CLK_SAI1_IPG] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1); | ||
| 610 | clks[IMX8MM_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2); | ||
| 611 | clks[IMX8MM_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_audio_root", base + 0x4340, 0, &share_count_sai2); | ||
| 612 | clks[IMX8MM_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3); | ||
| 613 | clks[IMX8MM_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_audio_root", base + 0x4350, 0, &share_count_sai3); | ||
| 614 | clks[IMX8MM_CLK_SAI4_ROOT] = imx_clk_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4); | ||
| 615 | clks[IMX8MM_CLK_SAI4_IPG] = imx_clk_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4); | ||
| 616 | clks[IMX8MM_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5); | ||
| 617 | clks[IMX8MM_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5); | ||
| 618 | clks[IMX8MM_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6); | ||
| 619 | clks[IMX8MM_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6); | ||
| 620 | clks[IMX8MM_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0); | ||
| 621 | clks[IMX8MM_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0); | ||
| 622 | clks[IMX8MM_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0); | ||
| 623 | clks[IMX8MM_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0); | ||
| 624 | clks[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0); | ||
| 625 | clks[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0); | ||
| 626 | clks[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0); | ||
| 627 | clks[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0); | ||
| 628 | clks[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0); | ||
| 629 | clks[IMX8MM_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0); | ||
| 630 | clks[IMX8MM_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0); | ||
| 631 | clks[IMX8MM_CLK_VPU_G1_ROOT] = imx_clk_gate4("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0); | ||
| 632 | clks[IMX8MM_CLK_GPU_BUS_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_axi", base + 0x4570, 0); | ||
| 633 | clks[IMX8MM_CLK_VPU_H1_ROOT] = imx_clk_gate4("vpu_h1_root_clk", "vpu_h1", base + 0x4590, 0); | ||
| 634 | clks[IMX8MM_CLK_VPU_G2_ROOT] = imx_clk_gate4("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0); | ||
| 635 | clks[IMX8MM_CLK_PDM_ROOT] = imx_clk_gate2_shared2("pdm_root_clk", "pdm", base + 0x45b0, 0, &share_count_pdm); | ||
| 636 | clks[IMX8MM_CLK_PDM_IPG] = imx_clk_gate2_shared2("pdm_ipg_clk", "ipg_audio_root", base + 0x45b0, 0, &share_count_pdm); | ||
| 637 | clks[IMX8MM_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss); | ||
| 638 | clks[IMX8MM_CLK_DISP_AXI_ROOT] = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss); | ||
| 639 | clks[IMX8MM_CLK_DISP_APB_ROOT] = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss); | ||
| 640 | clks[IMX8MM_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss); | ||
| 641 | clks[IMX8MM_CLK_USDHC3_ROOT] = imx_clk_gate4("usdhc3_root_clk", "usdhc3", base + 0x45e0, 0); | ||
| 642 | clks[IMX8MM_CLK_TMU_ROOT] = imx_clk_gate4("tmu_root_clk", "ipg_root", base + 0x4620, 0); | ||
| 643 | clks[IMX8MM_CLK_VPU_DEC_ROOT] = imx_clk_gate4("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0); | ||
| 644 | clks[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0); | ||
| 645 | clks[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0); | ||
| 646 | clks[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0); | ||
| 647 | clks[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0); | ||
| 648 | clks[IMX8MM_CLK_CSI1_ROOT] = imx_clk_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0); | ||
| 649 | |||
| 650 | clks[IMX8MM_CLK_GPT_3M] = imx_clk_fixed_factor("gpt_3m", "osc_24m", 1, 8); | ||
| 651 | |||
| 652 | clks[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4); | ||
| 653 | clks[IMX8MM_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL); | ||
| 654 | |||
| 655 | clks[IMX8MM_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div", | ||
| 656 | clks[IMX8MM_CLK_A53_DIV], | ||
| 657 | clks[IMX8MM_CLK_A53_SRC], | ||
| 658 | clks[IMX8MM_ARM_PLL_OUT], | ||
| 659 | clks[IMX8MM_CLK_24M]); | ||
| 660 | |||
| 661 | imx_check_clocks(clks, ARRAY_SIZE(clks)); | ||
| 662 | |||
| 663 | clk_data.clks = clks; | ||
| 664 | clk_data.clk_num = ARRAY_SIZE(clks); | ||
| 665 | ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); | ||
| 666 | if (ret < 0) { | ||
| 667 | pr_err("failed to register clks for i.MX8MM\n"); | ||
| 668 | return -EINVAL; | ||
| 669 | } | ||
| 670 | |||
| 671 | imx_register_uart_clocks(uart_clks); | ||
| 672 | |||
| 673 | return 0; | ||
| 674 | } | ||
| 675 | CLK_OF_DECLARE_DRIVER(imx8mm, "fsl,imx8mm-ccm", imx8mm_clocks_init); | ||
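
Side note on the PLL_1416X_RATE()/PLL_1443X_RATE() tables near the top of the new clk-imx8mm.c: each entry encodes the divider settings for one target frequency, and the entries are consistent with the usual pll14xx relation fout = fref * (mdiv + kdiv/65536) / (pdiv * 2^sdiv), where kdiv is simply 0 for the integer-only 1416x type. The sketch below is a hypothetical, stand-alone helper (pll14xx_rate() is not part of the driver; the relation is an assumption checked against the table entries) that recomputes two table entries from the 24 MHz reference, e.g. PLL_1443X_RATE(786432000U, 655, 5, 2, 23593) comes out to 786432000 Hz.

#include <stdint.h>
#include <stdio.h>

/* fout = fref * (mdiv + kdiv/65536) / (pdiv * 2^sdiv), done in 64-bit integer math */
static int64_t pll14xx_rate(int64_t fref, int mdiv, int pdiv, int sdiv, int kdiv)
{
	return (fref * (mdiv * 65536LL + kdiv)) / ((pdiv * 65536LL) << sdiv);
}

int main(void)
{
	/* PLL_1416X_RATE(1800000000U, 225, 3, 0): integer PLL, so kdiv = 0 */
	printf("%lld\n", (long long)pll14xx_rate(24000000, 225, 3, 0, 0));
	/* PLL_1443X_RATE(786432000U, 655, 5, 2, 23593): fractional audio rate */
	printf("%lld\n", (long long)pll14xx_rate(24000000, 655, 5, 2, 23593));
	return 0;	/* prints 1800000000 and 786432000 */
}
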
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c index 26b57f43ccc3..a9b3888aef0c 100644 --- a/drivers/clk/imx/clk-imx8mq.c +++ b/drivers/clk/imx/clk-imx8mq.c | |||
| @@ -26,246 +26,246 @@ static u32 share_count_nand; | |||
| 26 | 26 | ||
| 27 | static struct clk *clks[IMX8MQ_CLK_END]; | 27 | static struct clk *clks[IMX8MQ_CLK_END]; |
| 28 | 28 | ||
| 29 | static const char *pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", }; | 29 | static const char * const pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", }; |
| 30 | static const char *arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", }; | 30 | static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", }; |
| 31 | static const char *gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", }; | 31 | static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", }; |
| 32 | static const char *vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", }; | 32 | static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", }; |
| 33 | static const char *audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", }; | 33 | static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", }; |
| 34 | static const char *audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", }; | 34 | static const char * const audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", }; |
| 35 | static const char *video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", }; | 35 | static const char * const video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", }; |
| 36 | 36 | ||
| 37 | static const char *sys1_pll1_out_sels[] = {"sys1_pll1", "sys1_pll1_ref_sel", }; | 37 | static const char * const sys1_pll_out_sels[] = {"sys1_pll1_ref_sel", }; |
| 38 | static const char *sys2_pll1_out_sels[] = {"sys2_pll1", "sys1_pll1_ref_sel", }; | 38 | static const char * const sys2_pll_out_sels[] = {"sys1_pll1_ref_sel", "sys2_pll1_ref_sel", }; |
| 39 | static const char *sys3_pll1_out_sels[] = {"sys3_pll1", "sys3_pll1_ref_sel", }; | 39 | static const char * const sys3_pll_out_sels[] = {"sys3_pll1_ref_sel", "sys2_pll1_ref_sel", }; |
| 40 | static const char *dram_pll1_out_sels[] = {"dram_pll1", "dram_pll1_ref_sel", }; | 40 | static const char * const dram_pll_out_sels[] = {"dram_pll1_ref_sel", }; |
| 41 | |||
| 42 | static const char *sys1_pll2_out_sels[] = {"sys1_pll2_div", "sys1_pll1_ref_sel", }; | ||
| 43 | static const char *sys2_pll2_out_sels[] = {"sys2_pll2_div", "sys2_pll1_ref_sel", }; | ||
| 44 | static const char *sys3_pll2_out_sels[] = {"sys3_pll2_div", "sys2_pll1_ref_sel", }; | ||
| 45 | static const char *dram_pll2_out_sels[] = {"dram_pll2_div", "dram_pll1_ref_sel", }; | ||
| 46 | 41 | ||
| 47 | /* CCM ROOT */ | 42 | /* CCM ROOT */ |
| 48 | static const char *imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m", | 43 | static const char * const imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m", |
| 49 | "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "sys3_pll2_out", }; | 44 | "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "sys3_pll2_out", }; |
| 50 | 45 | ||
| 51 | static const char *imx8mq_vpu_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m", | 46 | static const char * const imx8mq_arm_m4_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_250m", "sys1_pll_266m", |
| 47 | "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", }; | ||
| 48 | |||
| 49 | static const char * const imx8mq_vpu_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m", | ||
| 52 | "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "vpu_pll_out", }; | 50 | "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "vpu_pll_out", }; |
| 53 | 51 | ||
| 54 | static const char *imx8mq_gpu_core_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out", | 52 | static const char * const imx8mq_gpu_core_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out", |
| 55 | "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; | 53 | "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; |
| 56 | 54 | ||
| 57 | static const char *imx8mq_gpu_shader_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out", | 55 | static const char * const imx8mq_gpu_shader_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out", |
| 58 | "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; | 56 | "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; |
| 59 | 57 | ||
| 60 | static const char *imx8mq_main_axi_sels[] = {"osc_25m", "sys2_pll_333m", "sys1_pll_800m", "sys2_pll_250m", | 58 | static const char * const imx8mq_main_axi_sels[] = {"osc_25m", "sys2_pll_333m", "sys1_pll_800m", "sys2_pll_250m", |
| 61 | "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "sys1_pll_100m",}; | 59 | "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "sys1_pll_100m",}; |
| 62 | 60 | ||
| 63 | static const char *imx8mq_enet_axi_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_250m", | 61 | static const char * const imx8mq_enet_axi_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_250m", |
| 64 | "sys2_pll_200m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", }; | 62 | "sys2_pll_200m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", }; |
| 65 | 63 | ||
| 66 | static const char *imx8mq_nand_usdhc_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_200m", | 64 | static const char * const imx8mq_nand_usdhc_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_200m", |
| 67 | "sys1_pll_133m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll1_out", }; | 65 | "sys1_pll_133m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll1_out", }; |
| 68 | 66 | ||
| 69 | static const char *imx8mq_vpu_bus_sels[] = {"osc_25m", "sys1_pll_800m", "vpu_pll_out", "audio_pll2_out", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_200m", "sys1_pll_100m", }; | 67 | static const char * const imx8mq_vpu_bus_sels[] = {"osc_25m", "sys1_pll_800m", "vpu_pll_out", "audio_pll2_out", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_200m", "sys1_pll_100m", }; |
| 70 | 68 | ||
| 71 | static const char *imx8mq_disp_axi_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", "sys1_pll_400m", "audio_pll2_out", "clk_ext1", "clk_ext4", }; | 69 | static const char * const imx8mq_disp_axi_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", "sys1_pll_400m", "audio_pll2_out", "clk_ext1", "clk_ext4", }; |
| 72 | 70 | ||
| 73 | static const char *imx8mq_disp_apb_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", | 71 | static const char * const imx8mq_disp_apb_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", |
| 74 | "sys1_pll_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", }; | 72 | "sys1_pll_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", }; |
| 75 | 73 | ||
| 76 | static const char *imx8mq_disp_rtrm_sels[] = {"osc_25m", "sys1_pll_800m", "sys2_pll_200m", "sys1_pll_400m", | 74 | static const char * const imx8mq_disp_rtrm_sels[] = {"osc_25m", "sys1_pll_800m", "sys2_pll_200m", "sys1_pll_400m", |
| 77 | "audio_pll1_out", "video_pll1_out", "clk_ext2", "clk_ext3", }; | 75 | "audio_pll1_out", "video_pll1_out", "clk_ext2", "clk_ext3", }; |
| 78 | 76 | ||
| 79 | static const char *imx8mq_usb_bus_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_100m", | 77 | static const char * const imx8mq_usb_bus_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_100m", |
| 80 | "sys2_pll_200m", "clk_ext2", "clk_ext4", "audio_pll2_out", }; | 78 | "sys2_pll_200m", "clk_ext2", "clk_ext4", "audio_pll2_out", }; |
| 81 | 79 | ||
| 82 | static const char *imx8mq_gpu_axi_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m", | 80 | static const char * const imx8mq_gpu_axi_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m", |
| 83 | "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; | 81 | "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; |
| 84 | 82 | ||
| 85 | static const char *imx8mq_gpu_ahb_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m", | 83 | static const char * const imx8mq_gpu_ahb_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m", |
| 86 | "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; | 84 | "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; |
| 87 | 85 | ||
| 88 | static const char *imx8mq_noc_sels[] = {"osc_25m", "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_500m", | 86 | static const char * const imx8mq_noc_sels[] = {"osc_25m", "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_500m", |
| 89 | "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; | 87 | "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; |
| 90 | 88 | ||
| 91 | static const char *imx8mq_noc_apb_sels[] = {"osc_25m", "sys1_pll_400m", "sys3_pll2_out", "sys2_pll_333m", "sys2_pll_200m", | 89 | static const char * const imx8mq_noc_apb_sels[] = {"osc_25m", "sys1_pll_400m", "sys3_pll2_out", "sys2_pll_333m", "sys2_pll_200m", |
| 92 | "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", }; | 90 | "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", }; |
| 93 | 91 | ||
| 94 | static const char *imx8mq_ahb_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_800m", "sys1_pll_400m", | 92 | static const char * const imx8mq_ahb_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_800m", "sys1_pll_400m", |
| 95 | "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", }; | 93 | "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", }; |
| 96 | 94 | ||
| 97 | static const char *imx8mq_audio_ahb_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_1000m", | 95 | static const char * const imx8mq_audio_ahb_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_1000m", |
| 98 | "sys2_pll_166m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", }; | 96 | "sys2_pll_166m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", }; |
| 99 | 97 | ||
| 100 | static const char *imx8mq_dsi_ahb_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", | 98 | static const char * const imx8mq_dsi_ahb_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", |
| 101 | "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out"}; | 99 | "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out"}; |
| 102 | 100 | ||
| 103 | static const char *imx8mq_dram_alt_sels[] = {"osc_25m", "sys1_pll_800m", "sys1_pll_100m", "sys2_pll_500m", | 101 | static const char * const imx8mq_dram_alt_sels[] = {"osc_25m", "sys1_pll_800m", "sys1_pll_100m", "sys2_pll_500m", |
| 104 | "sys2_pll_250m", "sys1_pll_400m", "audio_pll1_out", "sys1_pll_266m", }; | 102 | "sys2_pll_250m", "sys1_pll_400m", "audio_pll1_out", "sys1_pll_266m", }; |
| 105 | 103 | ||
| 106 | static const char *imx8mq_dram_apb_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", | 104 | static const char * const imx8mq_dram_apb_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", |
| 107 | "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; | 105 | "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; |
| 108 | 106 | ||
| 109 | static const char *imx8mq_vpu_g1_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", }; | 107 | static const char * const imx8mq_vpu_g1_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", }; |
| 110 | 108 | ||
| 111 | static const char *imx8mq_vpu_g2_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", }; | 109 | static const char * const imx8mq_vpu_g2_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", }; |
| 112 | 110 | ||
| 113 | static const char *imx8mq_disp_dtrc_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", }; | 111 | static const char * const imx8mq_disp_dtrc_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", }; |
| 114 | 112 | ||
| 115 | static const char *imx8mq_disp_dc8000_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", }; | 113 | static const char * const imx8mq_disp_dc8000_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", }; |
| 116 | 114 | ||
| 117 | static const char *imx8mq_pcie1_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m", | 115 | static const char * const imx8mq_pcie1_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m", |
| 118 | "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_250m", "sys3_pll2_out", }; | 116 | "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_250m", "sys3_pll2_out", }; |
| 119 | 117 | ||
| 120 | static const char *imx8mq_pcie1_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", "clk_ext2", | 118 | static const char * const imx8mq_pcie1_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", "clk_ext2", |
| 121 | "clk_ext3", "clk_ext4", }; | 119 | "clk_ext3", "clk_ext4", }; |
| 122 | 120 | ||
| 123 | static const char *imx8mq_pcie1_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_500m", "sys3_pll2_out", | 121 | static const char * const imx8mq_pcie1_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_500m", "sys3_pll2_out", |
| 124 | "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", }; | 122 | "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", }; |
| 125 | 123 | ||
| 126 | static const char *imx8mq_dc_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", }; | 124 | static const char * const imx8mq_dc_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", }; |
| 127 | 125 | ||
| 128 | static const char *imx8mq_lcdif_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", }; | 126 | static const char * const imx8mq_lcdif_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", }; |
| 129 | 127 | ||
| 130 | static const char *imx8mq_sai1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", }; | 128 | static const char * const imx8mq_sai1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", }; |
| 131 | 129 | ||
| 132 | static const char *imx8mq_sai2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", }; | 130 | static const char * const imx8mq_sai2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", }; |
| 133 | 131 | ||
| 134 | static const char *imx8mq_sai3_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", }; | 132 | static const char * const imx8mq_sai3_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", }; |
| 135 | 133 | ||
| 136 | static const char *imx8mq_sai4_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", }; | 134 | static const char * const imx8mq_sai4_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", }; |
| 137 | 135 | ||
| 138 | static const char *imx8mq_sai5_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", }; | 136 | static const char * const imx8mq_sai5_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", }; |
| 139 | 137 | ||
| 140 | static const char *imx8mq_sai6_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", }; | 138 | static const char * const imx8mq_sai6_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", }; |
| 141 | 139 | ||
| 142 | static const char *imx8mq_spdif1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", }; | 140 | static const char * const imx8mq_spdif1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", }; |
| 143 | 141 | ||
| 144 | static const char *imx8mq_spdif2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", }; | 142 | static const char * const imx8mq_spdif2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", }; |
| 145 | 143 | ||
| 146 | static const char *imx8mq_enet_ref_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_500m", "sys2_pll_100m", | 144 | static const char * const imx8mq_enet_ref_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_500m", "sys2_pll_100m", |
| 147 | "sys1_pll_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4", }; | 145 | "sys1_pll_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4", }; |
| 148 | 146 | ||
| 149 | static const char *imx8mq_enet_timer_sels[] = {"osc_25m", "sys2_pll_100m", "audio_pll1_out", "clk_ext1", "clk_ext2", | 147 | static const char * const imx8mq_enet_timer_sels[] = {"osc_25m", "sys2_pll_100m", "audio_pll1_out", "clk_ext1", "clk_ext2", |
| 150 | "clk_ext3", "clk_ext4", "video_pll1_out", }; | 148 | "clk_ext3", "clk_ext4", "video_pll1_out", }; |
| 151 | 149 | ||
| 152 | static const char *imx8mq_enet_phy_sels[] = {"osc_25m", "sys2_pll_50m", "sys2_pll_125m", "sys2_pll_500m", | 150 | static const char * const imx8mq_enet_phy_sels[] = {"osc_25m", "sys2_pll_50m", "sys2_pll_125m", "sys2_pll_500m", |
| 153 | "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; | 151 | "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; |
| 154 | 152 | ||
| 155 | static const char *imx8mq_nand_sels[] = {"osc_25m", "sys2_pll_500m", "audio_pll1_out", "sys1_pll_400m", | 153 | static const char * const imx8mq_nand_sels[] = {"osc_25m", "sys2_pll_500m", "audio_pll1_out", "sys1_pll_400m", |
| 156 | "audio_pll2_out", "sys3_pll2_out", "sys2_pll_250m", "video_pll1_out", }; | 154 | "audio_pll2_out", "sys3_pll2_out", "sys2_pll_250m", "video_pll1_out", }; |
| 157 | 155 | ||
| 158 | static const char *imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", | 156 | static const char * const imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", |
| 159 | "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", }; | 157 | "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", }; |
| 160 | 158 | ||
| 161 | static const char *imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", | 159 | static const char * const imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", |
| 162 | "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", }; | 160 | "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", }; |
| 163 | 161 | ||
| 164 | static const char *imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", | 162 | static const char * const imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m", |
| 165 | "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", }; | 163 | "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", }; |
| 166 | 164 | ||
| 167 | static const char *imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", | 165 | static const char * const imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", |
| 168 | "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; | 166 | "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; |
| 169 | 167 | ||
| 170 | static const char *imx8mq_i2c2_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", | 168 | static const char * const imx8mq_i2c2_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", |
| 171 | "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; | 169 | "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; |
| 172 | 170 | ||
| 173 | static const char *imx8mq_i2c3_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", | 171 | static const char * const imx8mq_i2c3_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", |
| 174 | "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; | 172 | "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; |
| 175 | 173 | ||
| 176 | static const char *imx8mq_i2c4_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", | 174 | static const char * const imx8mq_i2c4_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out", |
| 177 | "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; | 175 | "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", }; |
| 178 | 176 | ||
| 179 | static const char *imx8mq_uart1_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", | 177 | static const char * const imx8mq_uart1_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", |
| 180 | "sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", }; | 178 | "sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", }; |
| 181 | 179 | ||
| 182 | static const char *imx8mq_uart2_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", | 180 | static const char * const imx8mq_uart2_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", |
| 183 | "sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", }; | 181 | "sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", }; |
| 184 | 182 | ||
| 185 | static const char *imx8mq_uart3_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", | 183 | static const char * const imx8mq_uart3_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", |
| 186 | "sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", }; | 184 | "sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", }; |
| 187 | 185 | ||
| 188 | static const char *imx8mq_uart4_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", | 186 | static const char * const imx8mq_uart4_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m", |
| 189 | "sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", }; | 187 | "sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", }; |
| 190 | 188 | ||
| 191 | static const char *imx8mq_usb_core_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m", | 189 | static const char * const imx8mq_usb_core_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m", |
| 192 | "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", }; | 190 | "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", }; |
| 193 | 191 | ||
| 194 | static const char *imx8mq_usb_phy_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m", | 192 | static const char * const imx8mq_usb_phy_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m", |
| 195 | "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", }; | 193 | "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", }; |
| 196 | 194 | ||
| 197 | static const char *imx8mq_ecspi1_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", | 195 | static const char * const imx8mq_ecspi1_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", |
| 198 | "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; | 196 | "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; |
| 199 | 197 | ||
| 200 | static const char *imx8mq_ecspi2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", | 198 | static const char * const imx8mq_ecspi2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", |
| 201 | "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; | 199 | "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; |
| 202 | 200 | ||
| 203 | static const char *imx8mq_pwm1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", | 201 | static const char * const imx8mq_pwm1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", |
| 204 | "sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", }; | 202 | "sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", }; |
| 205 | 203 | ||
| 206 | static const char *imx8mq_pwm2_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", | 204 | static const char * const imx8mq_pwm2_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", |
| 207 | "sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", }; | 205 | "sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", }; |
| 208 | 206 | ||
| 209 | static const char *imx8mq_pwm3_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", | 207 | static const char * const imx8mq_pwm3_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", |
| 210 | "sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", }; | 208 | "sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", }; |
| 211 | 209 | ||
| 212 | static const char *imx8mq_pwm4_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", | 210 | static const char * const imx8mq_pwm4_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m", |
| 213 | "sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", }; | 211 | "sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", }; |
| 214 | 212 | ||
| 215 | static const char *imx8mq_gpt1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_400m", "sys1_pll_40m", | 213 | static const char * const imx8mq_gpt1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_400m", "sys1_pll_40m", |
| 216 | "sys1_pll_80m", "audio_pll1_out", "clk_ext1", }; | 214 | "sys1_pll_80m", "audio_pll1_out", "clk_ext1", }; |
| 217 | 215 | ||
| 218 | static const char *imx8mq_wdog_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_160m", "vpu_pll_out", | 216 | static const char * const imx8mq_wdog_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_160m", "vpu_pll_out", |
| 219 | "sys2_pll_125m", "sys3_pll2_out", "sys1_pll_80m", "sys2_pll_166m", }; | 217 | "sys2_pll_125m", "sys3_pll2_out", "sys1_pll_80m", "sys2_pll_166m", }; |
| 220 | 218 | ||
| 221 | static const char *imx8mq_wrclk_sels[] = {"osc_25m", "sys1_pll_40m", "vpu_pll_out", "sys3_pll2_out", "sys2_pll_200m", | 219 | static const char * const imx8mq_wrclk_sels[] = {"osc_25m", "sys1_pll_40m", "vpu_pll_out", "sys3_pll2_out", "sys2_pll_200m", |
| 222 | "sys1_pll_266m", "sys2_pll_500m", "sys1_pll_100m", }; | 220 | "sys1_pll_266m", "sys2_pll_500m", "sys1_pll_100m", }; |
| 223 | 221 | ||
| 224 | static const char *imx8mq_dsi_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m", | 222 | static const char * const imx8mq_dsi_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m", |
| 225 | "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; | 223 | "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; |
| 226 | 224 | ||
| 227 | static const char *imx8mq_dsi_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m", | 225 | static const char * const imx8mq_dsi_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m", |
| 228 | "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; | 226 | "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; |
| 229 | 227 | ||
| 230 | static const char *imx8mq_dsi_dbi_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_100m", "sys1_pll_800m", | 228 | static const char * const imx8mq_dsi_dbi_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_100m", "sys1_pll_800m", |
| 231 | "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; | 229 | "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; |
| 232 | 230 | ||
| 233 | static const char *imx8mq_dsi_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", | 231 | static const char * const imx8mq_dsi_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", |
| 234 | "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", }; | 232 | "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", }; |
| 235 | 233 | ||
| 236 | static const char *imx8mq_csi1_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m", | 234 | static const char * const imx8mq_csi1_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m", |
| 237 | "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; | 235 | "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; |
| 238 | 236 | ||
| 239 | static const char *imx8mq_csi1_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m", | 237 | static const char * const imx8mq_csi1_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m", |
| 240 | "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; | 238 | "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; |
| 241 | 239 | ||
| 242 | static const char *imx8mq_csi1_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", | 240 | static const char * const imx8mq_csi1_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", |
| 243 | "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", }; | 241 | "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", }; |
| 244 | 242 | ||
| 245 | static const char *imx8mq_csi2_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m", | 243 | static const char * const imx8mq_csi2_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m", |
| 246 | "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; | 244 | "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", }; |
| 247 | 245 | ||
| 248 | static const char *imx8mq_csi2_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m", | 246 | static const char * const imx8mq_csi2_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m", |
| 249 | "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; | 247 | "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", }; |
| 250 | 248 | ||
| 251 | static const char *imx8mq_csi2_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", | 249 | static const char * const imx8mq_csi2_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m", |
| 252 | "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", }; | 250 | "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", }; |
| 253 | 251 | ||
| 254 | static const char *imx8mq_pcie2_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m", | 252 | static const char * const imx8mq_pcie2_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m", |
| 255 | "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_333m", "sys3_pll2_out", }; | 253 | "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_333m", "sys3_pll2_out", }; |
| 256 | 254 | ||
| 257 | static const char *imx8mq_pcie2_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", | 255 | static const char * const imx8mq_pcie2_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", |
| 258 | "clk_ext2", "clk_ext3", "clk_ext4", "sys1_pll_400m", }; | 256 | "clk_ext2", "clk_ext3", "clk_ext4", "sys1_pll_400m", }; |
| 259 | 257 | ||
| 260 | static const char *imx8mq_pcie2_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_50m", "sys3_pll2_out", | 258 | static const char * const imx8mq_pcie2_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_50m", "sys3_pll2_out", |
| 261 | "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", }; | 259 | "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", }; |
| 262 | 260 | ||
| 263 | static const char *imx8mq_ecspi3_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", | 261 | static const char * const imx8mq_ecspi3_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m", |
| 264 | "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; | 262 | "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", }; |
| 265 | static const char *imx8mq_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; | 263 | static const char * const imx8mq_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; |
| 266 | 264 | ||
| 267 | static const char *imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m", "audio_pll1_out", | 265 | static const char * const imx8mq_clko1_sels[] = {"osc_25m", "sys1_pll_800m", "osc_27m", "sys1_pll_200m", |
| 268 | "video_pll1_out", "ckil", }; | 266 | "audio_pll2_out", "sys2_pll_500m", "vpu_pll_out", "sys1_pll_80m", }; |
| 267 | static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m", | ||
| 268 | "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", "ckil", }; | ||
| 269 | 269 | ||
| 270 | static struct clk_onecell_data clk_data; | 270 | static struct clk_onecell_data clk_data; |
| 271 | 271 | ||
| @@ -308,10 +308,6 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) | |||
| 308 | clks[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6); | 308 | clks[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6); |
| 309 | clks[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6); | 309 | clks[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6); |
| 310 | clks[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6); | 310 | clks[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6); |
| 311 | clks[IMX8MQ_SYS1_PLL1_REF_DIV] = imx_clk_divider("sys1_pll1_ref_div", "sys1_pll1_ref_sel", base + 0x38, 25, 3); | ||
| 312 | clks[IMX8MQ_SYS2_PLL1_REF_DIV] = imx_clk_divider("sys2_pll1_ref_div", "sys2_pll1_ref_sel", base + 0x44, 25, 3); | ||
| 313 | clks[IMX8MQ_SYS3_PLL1_REF_DIV] = imx_clk_divider("sys3_pll1_ref_div", "sys3_pll1_ref_sel", base + 0x50, 25, 3); | ||
| 314 | clks[IMX8MQ_DRAM_PLL1_REF_DIV] = imx_clk_divider("dram_pll1_ref_div", "dram_pll1_ref_sel", base + 0x68, 25, 3); | ||
| 315 | 311 | ||
| 316 | clks[IMX8MQ_ARM_PLL] = imx_clk_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28); | 312 | clks[IMX8MQ_ARM_PLL] = imx_clk_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28); |
| 317 | clks[IMX8MQ_GPU_PLL] = imx_clk_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18); | 313 | clks[IMX8MQ_GPU_PLL] = imx_clk_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18); |
| @@ -319,43 +315,15 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) | |||
| 319 | clks[IMX8MQ_AUDIO_PLL1] = imx_clk_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0); | 315 | clks[IMX8MQ_AUDIO_PLL1] = imx_clk_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0); |
| 320 | clks[IMX8MQ_AUDIO_PLL2] = imx_clk_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8); | 316 | clks[IMX8MQ_AUDIO_PLL2] = imx_clk_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8); |
| 321 | clks[IMX8MQ_VIDEO_PLL1] = imx_clk_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10); | 317 | clks[IMX8MQ_VIDEO_PLL1] = imx_clk_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10); |
| 322 | clks[IMX8MQ_SYS1_PLL1] = imx_clk_sccg_pll("sys1_pll1", "sys1_pll1_ref_div", base + 0x30, SCCG_PLL1); | ||
| 323 | clks[IMX8MQ_SYS2_PLL1] = imx_clk_sccg_pll("sys2_pll1", "sys2_pll1_ref_div", base + 0x3c, SCCG_PLL1); | ||
| 324 | clks[IMX8MQ_SYS3_PLL1] = imx_clk_sccg_pll("sys3_pll1", "sys3_pll1_ref_div", base + 0x48, SCCG_PLL1); | ||
| 325 | clks[IMX8MQ_DRAM_PLL1] = imx_clk_sccg_pll("dram_pll1", "dram_pll1_ref_div", base + 0x60, SCCG_PLL1); | ||
| 326 | |||
| 327 | clks[IMX8MQ_SYS1_PLL2] = imx_clk_sccg_pll("sys1_pll2", "sys1_pll1_out_div", base + 0x30, SCCG_PLL2); | ||
| 328 | clks[IMX8MQ_SYS2_PLL2] = imx_clk_sccg_pll("sys2_pll2", "sys2_pll1_out_div", base + 0x3c, SCCG_PLL2); | ||
| 329 | clks[IMX8MQ_SYS3_PLL2] = imx_clk_sccg_pll("sys3_pll2", "sys3_pll1_out_div", base + 0x48, SCCG_PLL2); | ||
| 330 | clks[IMX8MQ_DRAM_PLL2] = imx_clk_sccg_pll("dram_pll2", "dram_pll1_out_div", base + 0x60, SCCG_PLL2); | ||
| 331 | |||
| 332 | /* PLL divs */ | ||
| 333 | clks[IMX8MQ_SYS1_PLL1_OUT_DIV] = imx_clk_divider("sys1_pll1_out_div", "sys1_pll1_out", base + 0x38, 19, 6); | ||
| 334 | clks[IMX8MQ_SYS2_PLL1_OUT_DIV] = imx_clk_divider("sys2_pll1_out_div", "sys2_pll1_out", base + 0x44, 19, 6); | ||
| 335 | clks[IMX8MQ_SYS3_PLL1_OUT_DIV] = imx_clk_divider("sys3_pll1_out_div", "sys3_pll1_out", base + 0x50, 19, 6); | ||
| 336 | clks[IMX8MQ_DRAM_PLL1_OUT_DIV] = imx_clk_divider("dram_pll1_out_div", "dram_pll1_out", base + 0x68, 19, 6); | ||
| 337 | clks[IMX8MQ_SYS1_PLL2_DIV] = imx_clk_divider("sys1_pll2_div", "sys1_pll2", base + 0x38, 1, 6); | ||
| 338 | clks[IMX8MQ_SYS2_PLL2_DIV] = imx_clk_divider("sys2_pll2_div", "sys2_pll2", base + 0x44, 1, 6); | ||
| 339 | clks[IMX8MQ_SYS3_PLL2_DIV] = imx_clk_divider("sys3_pll2_div", "sys3_pll2", base + 0x50, 1, 6); | ||
| 340 | clks[IMX8MQ_DRAM_PLL2_DIV] = imx_clk_divider("dram_pll2_div", "dram_pll2", base + 0x68, 1, 6); | ||
| 341 | 318 | ||
| 342 | /* PLL bypass out */ | 319 | /* PLL bypass out */ |
| 343 | clks[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_mux("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels)); | 320 | clks[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_mux_flags("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels), CLK_SET_RATE_PARENT); |
| 344 | clks[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels)); | 321 | clks[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels)); |
| 345 | clks[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels)); | 322 | clks[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels)); |
| 346 | clks[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels)); | 323 | clks[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels)); |
| 347 | clks[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels)); | 324 | clks[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels)); |
| 348 | clks[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels)); | 325 | clks[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels)); |
| 349 | 326 | ||
| 350 | clks[IMX8MQ_SYS1_PLL1_OUT] = imx_clk_mux("sys1_pll1_out", base + 0x30, 5, 1, sys1_pll1_out_sels, ARRAY_SIZE(sys1_pll1_out_sels)); | ||
| 351 | clks[IMX8MQ_SYS2_PLL1_OUT] = imx_clk_mux("sys2_pll1_out", base + 0x3c, 5, 1, sys2_pll1_out_sels, ARRAY_SIZE(sys2_pll1_out_sels)); | ||
| 352 | clks[IMX8MQ_SYS3_PLL1_OUT] = imx_clk_mux("sys3_pll1_out", base + 0x48, 5, 1, sys3_pll1_out_sels, ARRAY_SIZE(sys3_pll1_out_sels)); | ||
| 353 | clks[IMX8MQ_DRAM_PLL1_OUT] = imx_clk_mux("dram_pll1_out", base + 0x60, 5, 1, dram_pll1_out_sels, ARRAY_SIZE(dram_pll1_out_sels)); | ||
| 354 | clks[IMX8MQ_SYS1_PLL2_OUT] = imx_clk_mux("sys1_pll2_out", base + 0x30, 4, 1, sys1_pll2_out_sels, ARRAY_SIZE(sys1_pll2_out_sels)); | ||
| 355 | clks[IMX8MQ_SYS2_PLL2_OUT] = imx_clk_mux("sys2_pll2_out", base + 0x3c, 4, 1, sys2_pll2_out_sels, ARRAY_SIZE(sys2_pll2_out_sels)); | ||
| 356 | clks[IMX8MQ_SYS3_PLL2_OUT] = imx_clk_mux("sys3_pll2_out", base + 0x48, 4, 1, sys3_pll2_out_sels, ARRAY_SIZE(sys3_pll2_out_sels)); | ||
| 357 | clks[IMX8MQ_DRAM_PLL2_OUT] = imx_clk_mux("dram_pll2_out", base + 0x60, 4, 1, dram_pll2_out_sels, ARRAY_SIZE(dram_pll2_out_sels)); | ||
| 358 | |||
| 359 | /* PLL OUT GATE */ | 327 | /* PLL OUT GATE */ |
| 360 | clks[IMX8MQ_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21); | 328 | clks[IMX8MQ_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21); |
| 361 | clks[IMX8MQ_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21); | 329 | clks[IMX8MQ_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21); |
| @@ -363,11 +331,11 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) | |||
| 363 | clks[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21); | 331 | clks[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21); |
| 364 | clks[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21); | 332 | clks[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21); |
| 365 | clks[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21); | 333 | clks[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21); |
| 366 | clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_gate("sys1_pll_out", "sys1_pll2_out", base + 0x30, 9); | ||
| 367 | clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_gate("sys2_pll_out", "sys2_pll2_out", base + 0x3c, 9); | ||
| 368 | clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_gate("sys3_pll_out", "sys3_pll2_out", base + 0x48, 9); | ||
| 369 | clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll2_out", base + 0x60, 9); | ||
| 370 | 334 | ||
| 335 | clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_sccg_pll("sys1_pll_out", sys1_pll_out_sels, ARRAY_SIZE(sys1_pll_out_sels), 0, 0, 0, base + 0x30, CLK_IS_CRITICAL); | ||
| 336 | clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_sccg_pll("sys2_pll_out", sys2_pll_out_sels, ARRAY_SIZE(sys2_pll_out_sels), 0, 0, 1, base + 0x3c, CLK_IS_CRITICAL); | ||
| 337 | clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_sccg_pll("sys3_pll_out", sys3_pll_out_sels, ARRAY_SIZE(sys3_pll_out_sels), 0, 0, 1, base + 0x48, CLK_IS_CRITICAL); | ||
| 338 | clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_sccg_pll("dram_pll_out", dram_pll_out_sels, ARRAY_SIZE(dram_pll_out_sels), 0, 0, 0, base + 0x60, CLK_IS_CRITICAL); | ||
| 371 | /* SYS PLL fixed output */ | 339 | /* SYS PLL fixed output */ |
| 372 | clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_out", 1, 20); | 340 | clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_out", 1, 20); |
| 373 | clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_out", 1, 10); | 341 | clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_out", 1, 10); |
| @@ -396,15 +364,19 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) | |||
| 396 | 364 | ||
| 397 | /* CORE */ | 365 | /* CORE */ |
| 398 | clks[IMX8MQ_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels)); | 366 | clks[IMX8MQ_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels)); |
| 367 | clks[IMX8MQ_CLK_M4_SRC] = imx_clk_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels)); | ||
| 399 | clks[IMX8MQ_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels)); | 368 | clks[IMX8MQ_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels)); |
| 400 | clks[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels)); | 369 | clks[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels)); |
| 401 | clks[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels, ARRAY_SIZE(imx8mq_gpu_shader_sels)); | 370 | clks[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels, ARRAY_SIZE(imx8mq_gpu_shader_sels)); |
| 371 | |||
| 402 | clks[IMX8MQ_CLK_A53_CG] = imx_clk_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL); | 372 | clks[IMX8MQ_CLK_A53_CG] = imx_clk_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL); |
| 373 | clks[IMX8MQ_CLK_M4_CG] = imx_clk_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28); | ||
| 403 | clks[IMX8MQ_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28); | 374 | clks[IMX8MQ_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28); |
| 404 | clks[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28); | 375 | clks[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28); |
| 405 | clks[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28); | 376 | clks[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28); |
| 406 | 377 | ||
| 407 | clks[IMX8MQ_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3); | 378 | clks[IMX8MQ_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3); |
| 379 | clks[IMX8MQ_CLK_M4_DIV] = imx_clk_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3); | ||
| 408 | clks[IMX8MQ_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3); | 380 | clks[IMX8MQ_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3); |
| 409 | clks[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3); | 381 | clks[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3); |
| 410 | clks[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3); | 382 | clks[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3); |
| @@ -479,6 +451,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) | |||
| 479 | clks[IMX8MQ_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580); | 451 | clks[IMX8MQ_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580); |
| 480 | clks[IMX8MQ_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mq_wdog_sels, base + 0xb900); | 452 | clks[IMX8MQ_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mq_wdog_sels, base + 0xb900); |
| 481 | clks[IMX8MQ_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980); | 453 | clks[IMX8MQ_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980); |
| 454 | clks[IMX8MQ_CLK_CLKO1] = imx8m_clk_composite("clko1", imx8mq_clko1_sels, base + 0xba00); | ||
| 482 | clks[IMX8MQ_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mq_clko2_sels, base + 0xba80); | 455 | clks[IMX8MQ_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mq_clko2_sels, base + 0xba80); |
| 483 | clks[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00); | 456 | clks[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00); |
| 484 | clks[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80); | 457 | clks[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80); |
| @@ -500,6 +473,11 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) | |||
| 500 | clks[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0); | 473 | clks[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0); |
| 501 | clks[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0); | 474 | clks[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0); |
| 502 | clks[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0); | 475 | clks[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0); |
| 476 | clks[IMX8MQ_CLK_GPIO1_ROOT] = imx_clk_gate4("gpio1_root_clk", "ipg_root", base + 0x40b0, 0); | ||
| 477 | clks[IMX8MQ_CLK_GPIO2_ROOT] = imx_clk_gate4("gpio2_root_clk", "ipg_root", base + 0x40c0, 0); | ||
| 478 | clks[IMX8MQ_CLK_GPIO3_ROOT] = imx_clk_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0); | ||
| 479 | clks[IMX8MQ_CLK_GPIO4_ROOT] = imx_clk_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0); | ||
| 480 | clks[IMX8MQ_CLK_GPIO5_ROOT] = imx_clk_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0); | ||
| 503 | clks[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0); | 481 | clks[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0); |
| 504 | clks[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0); | 482 | clks[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0); |
| 505 | clks[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0); | 483 | clks[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0); |
| @@ -558,6 +536,12 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) | |||
| 558 | clks[IMX8MQ_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc_25m", 1, 8); | 536 | clks[IMX8MQ_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc_25m", 1, 8); |
| 559 | clks[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4); | 537 | clks[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4); |
| 560 | 538 | ||
| 539 | clks[IMX8MQ_CLK_ARM] = imx_clk_cpu("arm", "arm_a53_div", | ||
| 540 | clks[IMX8MQ_CLK_A53_DIV], | ||
| 541 | clks[IMX8MQ_CLK_A53_SRC], | ||
| 542 | clks[IMX8MQ_ARM_PLL_OUT], | ||
| 543 | clks[IMX8MQ_SYS1_PLL_800M]); | ||
| 544 | |||
| 561 | for (i = 0; i < IMX8MQ_CLK_END; i++) | 545 | for (i = 0; i < IMX8MQ_CLK_END; i++) |
| 562 | if (IS_ERR(clks[i])) | 546 | if (IS_ERR(clks[i])) |
| 563 | pr_err("i.MX8mq clk %u register failed with %ld\n", | 547 | pr_err("i.MX8mq clk %u register failed with %ld\n", |
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c index 83e2ef96d81d..5e2903efc488 100644 --- a/drivers/clk/imx/clk-imx8qxp.c +++ b/drivers/clk/imx/clk-imx8qxp.c | |||
| @@ -138,6 +138,7 @@ static int imx8qxp_clk_probe(struct platform_device *pdev) | |||
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | static const struct of_device_id imx8qxp_match[] = { | 140 | static const struct of_device_id imx8qxp_match[] = { |
| 141 | { .compatible = "fsl,scu-clk", }, | ||
| 141 | { .compatible = "fsl,imx8qxp-clk", }, | 142 | { .compatible = "fsl,imx8qxp-clk", }, |
| 142 | { /* sentinel */ } | 143 | { /* sentinel */ } |
| 143 | }; | 144 | }; |
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c new file mode 100644 index 000000000000..1acfa3e3cfb4 --- /dev/null +++ b/drivers/clk/imx/clk-pll14xx.c | |||
| @@ -0,0 +1,392 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright 2017-2018 NXP. | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <linux/bitops.h> | ||
| 7 | #include <linux/clk-provider.h> | ||
| 8 | #include <linux/err.h> | ||
| 9 | #include <linux/io.h> | ||
| 10 | #include <linux/iopoll.h> | ||
| 11 | #include <linux/slab.h> | ||
| 12 | #include <linux/jiffies.h> | ||
| 13 | |||
| 14 | #include "clk.h" | ||
| 15 | |||
| 16 | #define GNRL_CTL 0x0 | ||
| 17 | #define DIV_CTL 0x4 | ||
| 18 | #define LOCK_STATUS BIT(31) | ||
| 19 | #define LOCK_SEL_MASK BIT(29) | ||
| 20 | #define CLKE_MASK BIT(11) | ||
| 21 | #define RST_MASK BIT(9) | ||
| 22 | #define BYPASS_MASK BIT(4) | ||
| 23 | #define MDIV_SHIFT 12 | ||
| 24 | #define MDIV_MASK GENMASK(21, 12) | ||
| 25 | #define PDIV_SHIFT 4 | ||
| 26 | #define PDIV_MASK GENMASK(9, 4) | ||
| 27 | #define SDIV_SHIFT 0 | ||
| 28 | #define SDIV_MASK GENMASK(2, 0) | ||
| 29 | #define KDIV_SHIFT 0 | ||
| 30 | #define KDIV_MASK GENMASK(15, 0) | ||
| 31 | |||
| 32 | #define LOCK_TIMEOUT_US 10000 | ||
| 33 | |||
| 34 | struct clk_pll14xx { | ||
| 35 | struct clk_hw hw; | ||
| 36 | void __iomem *base; | ||
| 37 | enum imx_pll14xx_type type; | ||
| 38 | const struct imx_pll14xx_rate_table *rate_table; | ||
| 39 | int rate_count; | ||
| 40 | }; | ||
| 41 | |||
| 42 | #define to_clk_pll14xx(_hw) container_of(_hw, struct clk_pll14xx, hw) | ||
| 43 | |||
| 44 | static const struct imx_pll14xx_rate_table *imx_get_pll_settings( | ||
| 45 | struct clk_pll14xx *pll, unsigned long rate) | ||
| 46 | { | ||
| 47 | const struct imx_pll14xx_rate_table *rate_table = pll->rate_table; | ||
| 48 | int i; | ||
| 49 | |||
| 50 | for (i = 0; i < pll->rate_count; i++) | ||
| 51 | if (rate == rate_table[i].rate) | ||
| 52 | return &rate_table[i]; | ||
| 53 | |||
| 54 | return NULL; | ||
| 55 | } | ||
| 56 | |||
| 57 | static long clk_pll14xx_round_rate(struct clk_hw *hw, unsigned long rate, | ||
| 58 | unsigned long *prate) | ||
| 59 | { | ||
| 60 | struct clk_pll14xx *pll = to_clk_pll14xx(hw); | ||
| 61 | const struct imx_pll14xx_rate_table *rate_table = pll->rate_table; | ||
| 62 | int i; | ||
| 63 | |||
| 64 | /* Assuming rate_table is in descending order */ | ||
| 65 | for (i = 0; i < pll->rate_count; i++) | ||
| 66 | if (rate >= rate_table[i].rate) | ||
| 67 | return rate_table[i].rate; | ||
| 68 | |||
| 69 | /* return minimum supported value */ | ||
| 70 | return rate_table[i - 1].rate; | ||
| 71 | } | ||
| 72 | |||
| 73 | static unsigned long clk_pll1416x_recalc_rate(struct clk_hw *hw, | ||
| 74 | unsigned long parent_rate) | ||
| 75 | { | ||
| 76 | struct clk_pll14xx *pll = to_clk_pll14xx(hw); | ||
| 77 | u32 mdiv, pdiv, sdiv, pll_gnrl, pll_div; | ||
| 78 | u64 fvco = parent_rate; | ||
| 79 | |||
| 80 | pll_gnrl = readl_relaxed(pll->base); | ||
| 81 | pll_div = readl_relaxed(pll->base + 4); | ||
| 82 | mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT; | ||
| 83 | pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT; | ||
| 84 | sdiv = (pll_div & SDIV_MASK) >> SDIV_SHIFT; | ||
| 85 | |||
| 86 | fvco *= mdiv; | ||
| 87 | do_div(fvco, pdiv << sdiv); | ||
| 88 | |||
| 89 | return fvco; | ||
| 90 | } | ||
| 91 | |||
| 92 | static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw, | ||
| 93 | unsigned long parent_rate) | ||
| 94 | { | ||
| 95 | struct clk_pll14xx *pll = to_clk_pll14xx(hw); | ||
| 96 | u32 mdiv, pdiv, sdiv, pll_gnrl, pll_div_ctl0, pll_div_ctl1; | ||
| 97 | short int kdiv; | ||
| 98 | u64 fvco = parent_rate; | ||
| 99 | |||
| 100 | pll_gnrl = readl_relaxed(pll->base); | ||
| 101 | pll_div_ctl0 = readl_relaxed(pll->base + 4); | ||
| 102 | pll_div_ctl1 = readl_relaxed(pll->base + 8); | ||
| 103 | mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT; | ||
| 104 | pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT; | ||
| 105 | sdiv = (pll_div_ctl0 & SDIV_MASK) >> SDIV_SHIFT; | ||
| 106 | kdiv = pll_div_ctl1 & KDIV_MASK; | ||
| 107 | |||
| 108 | /* fvco = (m * 65536 + k) * Fin / (p * 65536) */ | ||
| 109 | fvco *= (mdiv * 65536 + kdiv); | ||
| 110 | pdiv *= 65536; | ||
| 111 | |||
| 112 | do_div(fvco, pdiv << sdiv); | ||
| 113 | |||
| 114 | return fvco; | ||
| 115 | } | ||
| 116 | |||
| 117 | static inline bool clk_pll1416x_mp_change(const struct imx_pll14xx_rate_table *rate, | ||
| 118 | u32 pll_div) | ||
| 119 | { | ||
| 120 | u32 old_mdiv, old_pdiv; | ||
| 121 | |||
| 122 | old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT; | ||
| 123 | old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT; | ||
| 124 | |||
| 125 | return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv; | ||
| 126 | } | ||
| 127 | |||
| 128 | static inline bool clk_pll1443x_mpk_change(const struct imx_pll14xx_rate_table *rate, | ||
| 129 | u32 pll_div_ctl0, u32 pll_div_ctl1) | ||
| 130 | { | ||
| 131 | u32 old_mdiv, old_pdiv, old_kdiv; | ||
| 132 | |||
| 133 | old_mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT; | ||
| 134 | old_pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT; | ||
| 135 | old_kdiv = (pll_div_ctl1 & KDIV_MASK) >> KDIV_SHIFT; | ||
| 136 | |||
| 137 | return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv || | ||
| 138 | rate->kdiv != old_kdiv; | ||
| 139 | } | ||
| 140 | |||
| 141 | static inline bool clk_pll1443x_mp_change(const struct imx_pll14xx_rate_table *rate, | ||
| 142 | u32 pll_div_ctl0, u32 pll_div_ctl1) | ||
| 143 | { | ||
| 144 | u32 old_mdiv, old_pdiv, old_kdiv; | ||
| 145 | |||
| 146 | old_mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT; | ||
| 147 | old_pdiv = (pll_div_ctl0 & PDIV_MASK) >> PDIV_SHIFT; | ||
| 148 | old_kdiv = (pll_div_ctl1 & KDIV_MASK) >> KDIV_SHIFT; | ||
| 149 | |||
| 150 | return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv || | ||
| 151 | rate->kdiv != old_kdiv; | ||
| 152 | } | ||
| 153 | |||
| 154 | static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll) | ||
| 155 | { | ||
| 156 | u32 val; | ||
| 157 | |||
| 158 | return readl_poll_timeout(pll->base, val, val & LOCK_STATUS, 0, | ||
| 159 | LOCK_TIMEOUT_US); | ||
| 160 | } | ||
| 161 | |||
| 162 | static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate, | ||
| 163 | unsigned long prate) | ||
| 164 | { | ||
| 165 | struct clk_pll14xx *pll = to_clk_pll14xx(hw); | ||
| 166 | const struct imx_pll14xx_rate_table *rate; | ||
| 167 | u32 tmp, div_val; | ||
| 168 | int ret; | ||
| 169 | |||
| 170 | rate = imx_get_pll_settings(pll, drate); | ||
| 171 | if (!rate) { | ||
| 172 | pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, | ||
| 173 | drate, clk_hw_get_name(hw)); | ||
| 174 | return -EINVAL; | ||
| 175 | } | ||
| 176 | |||
| 177 | tmp = readl_relaxed(pll->base + 4); | ||
| 178 | |||
| 179 | if (!clk_pll1416x_mp_change(rate, tmp)) { | ||
| 180 | tmp &= ~(SDIV_MASK << SDIV_SHIFT); | ||
| 181 | tmp |= rate->sdiv << SDIV_SHIFT; | ||
| 182 | writel_relaxed(tmp, pll->base + 4); | ||
| 183 | |||
| 184 | return 0; | ||
| 185 | } | ||
| 186 | |||
| 187 | /* Bypass clock and set lock to pll output lock */ | ||
| 188 | tmp = readl_relaxed(pll->base); | ||
| 189 | tmp |= LOCK_SEL_MASK; | ||
| 190 | writel_relaxed(tmp, pll->base); | ||
| 191 | |||
| 192 | /* Enable RST */ | ||
| 193 | tmp &= ~RST_MASK; | ||
| 194 | writel_relaxed(tmp, pll->base); | ||
| 195 | |||
| 196 | div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) | | ||
| 197 | (rate->sdiv << SDIV_SHIFT); | ||
| 198 | writel_relaxed(div_val, pll->base + 0x4); | ||
| 199 | |||
| 200 | /* | ||
| 201 | * According to the spec, t3 - t2 needs to be greater than | ||
| 202 | * both 1us and 1/FREF. | ||
| 203 | * FREF is FIN / prediv; prediv is in [1, 63], so choose | ||
| 204 | * 3us. | ||
| 205 | */ | ||
| 206 | udelay(3); | ||
| 207 | |||
| 208 | /* Disable RST */ | ||
| 209 | tmp |= RST_MASK; | ||
| 210 | writel_relaxed(tmp, pll->base); | ||
| 211 | |||
| 212 | /* Wait Lock */ | ||
| 213 | ret = clk_pll14xx_wait_lock(pll); | ||
| 214 | if (ret) | ||
| 215 | return ret; | ||
| 216 | |||
| 217 | /* Bypass */ | ||
| 218 | tmp &= ~BYPASS_MASK; | ||
| 219 | writel_relaxed(tmp, pll->base); | ||
| 220 | |||
| 221 | return 0; | ||
| 222 | } | ||
| 223 | |||
| 224 | static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate, | ||
| 225 | unsigned long prate) | ||
| 226 | { | ||
| 227 | struct clk_pll14xx *pll = to_clk_pll14xx(hw); | ||
| 228 | const struct imx_pll14xx_rate_table *rate; | ||
| 229 | u32 tmp, div_val; | ||
| 230 | int ret; | ||
| 231 | |||
| 232 | rate = imx_get_pll_settings(pll, drate); | ||
| 233 | if (!rate) { | ||
| 234 | pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, | ||
| 235 | drate, clk_hw_get_name(hw)); | ||
| 236 | return -EINVAL; | ||
| 237 | } | ||
| 238 | |||
| 239 | tmp = readl_relaxed(pll->base + 4); | ||
| 240 | div_val = readl_relaxed(pll->base + 8); | ||
| 241 | |||
| 242 | if (!clk_pll1443x_mpk_change(rate, tmp, div_val)) { | ||
| 243 | tmp &= ~(SDIV_MASK << SDIV_SHIFT); | ||
| 244 | tmp |= rate->sdiv << SDIV_SHIFT; | ||
| 245 | writel_relaxed(tmp, pll->base + 4); | ||
| 246 | |||
| 247 | return 0; | ||
| 248 | } | ||
| 249 | |||
| 250 | /* Enable RST */ | ||
| 251 | tmp = readl_relaxed(pll->base); | ||
| 252 | tmp &= ~RST_MASK; | ||
| 253 | writel_relaxed(tmp, pll->base); | ||
| 254 | |||
| 255 | div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) | | ||
| 256 | (rate->sdiv << SDIV_SHIFT); | ||
| 257 | writel_relaxed(div_val, pll->base + 0x4); | ||
| 258 | writel_relaxed(rate->kdiv << KDIV_SHIFT, pll->base + 0x8); | ||
| 259 | |||
| 260 | /* | ||
| 261 | * According to the spec, t3 - t2 needs to be greater than | ||
| 262 | * both 1us and 1/FREF. | ||
| 263 | * FREF is FIN / prediv; prediv is in [1, 63], so choose | ||
| 264 | * 3us. | ||
| 265 | */ | ||
| 266 | udelay(3); | ||
| 267 | |||
| 268 | /* Disable RST */ | ||
| 269 | tmp |= RST_MASK; | ||
| 270 | writel_relaxed(tmp, pll->base); | ||
| 271 | |||
| 272 | /* Wait Lock */ | ||
| 273 | ret = clk_pll14xx_wait_lock(pll); | ||
| 274 | if (ret) | ||
| 275 | return ret; | ||
| 276 | |||
| 277 | /* Bypass */ | ||
| 278 | tmp &= ~BYPASS_MASK; | ||
| 279 | writel_relaxed(tmp, pll->base); | ||
| 280 | |||
| 281 | return 0; | ||
| 282 | } | ||
| 283 | |||
| 284 | static int clk_pll14xx_prepare(struct clk_hw *hw) | ||
| 285 | { | ||
| 286 | struct clk_pll14xx *pll = to_clk_pll14xx(hw); | ||
| 287 | u32 val; | ||
| 288 | |||
| 289 | /* | ||
| 290 | * RESETB = 1 from 0, PLL starts its normal | ||
| 291 | * operation after lock time | ||
| 292 | */ | ||
| 293 | val = readl_relaxed(pll->base + GNRL_CTL); | ||
| 294 | val |= RST_MASK; | ||
| 295 | writel_relaxed(val, pll->base + GNRL_CTL); | ||
| 296 | |||
| 297 | return clk_pll14xx_wait_lock(pll); | ||
| 298 | } | ||
| 299 | |||
| 300 | static int clk_pll14xx_is_prepared(struct clk_hw *hw) | ||
| 301 | { | ||
| 302 | struct clk_pll14xx *pll = to_clk_pll14xx(hw); | ||
| 303 | u32 val; | ||
| 304 | |||
| 305 | val = readl_relaxed(pll->base + GNRL_CTL); | ||
| 306 | |||
| 307 | return (val & RST_MASK) ? 1 : 0; | ||
| 308 | } | ||
| 309 | |||
| 310 | static void clk_pll14xx_unprepare(struct clk_hw *hw) | ||
| 311 | { | ||
| 312 | struct clk_pll14xx *pll = to_clk_pll14xx(hw); | ||
| 313 | u32 val; | ||
| 314 | |||
| 315 | /* | ||
| 316 | * Set RST to 0, power down mode is enabled and | ||
| 317 | * every digital block is reset | ||
| 318 | */ | ||
| 319 | val = readl_relaxed(pll->base + GNRL_CTL); | ||
| 320 | val &= ~RST_MASK; | ||
| 321 | writel_relaxed(val, pll->base + GNRL_CTL); | ||
| 322 | } | ||
| 323 | |||
| 324 | static const struct clk_ops clk_pll1416x_ops = { | ||
| 325 | .prepare = clk_pll14xx_prepare, | ||
| 326 | .unprepare = clk_pll14xx_unprepare, | ||
| 327 | .is_prepared = clk_pll14xx_is_prepared, | ||
| 328 | .recalc_rate = clk_pll1416x_recalc_rate, | ||
| 329 | .round_rate = clk_pll14xx_round_rate, | ||
| 330 | .set_rate = clk_pll1416x_set_rate, | ||
| 331 | }; | ||
| 332 | |||
| 333 | static const struct clk_ops clk_pll1416x_min_ops = { | ||
| 334 | .recalc_rate = clk_pll1416x_recalc_rate, | ||
| 335 | }; | ||
| 336 | |||
| 337 | static const struct clk_ops clk_pll1443x_ops = { | ||
| 338 | .prepare = clk_pll14xx_prepare, | ||
| 339 | .unprepare = clk_pll14xx_unprepare, | ||
| 340 | .is_prepared = clk_pll14xx_is_prepared, | ||
| 341 | .recalc_rate = clk_pll1443x_recalc_rate, | ||
| 342 | .round_rate = clk_pll14xx_round_rate, | ||
| 343 | .set_rate = clk_pll1443x_set_rate, | ||
| 344 | }; | ||
| 345 | |||
| 346 | struct clk *imx_clk_pll14xx(const char *name, const char *parent_name, | ||
| 347 | void __iomem *base, | ||
| 348 | const struct imx_pll14xx_clk *pll_clk) | ||
| 349 | { | ||
| 350 | struct clk_pll14xx *pll; | ||
| 351 | struct clk *clk; | ||
| 352 | struct clk_init_data init; | ||
| 353 | |||
| 354 | pll = kzalloc(sizeof(*pll), GFP_KERNEL); | ||
| 355 | if (!pll) | ||
| 356 | return ERR_PTR(-ENOMEM); | ||
| 357 | |||
| 358 | init.name = name; | ||
| 359 | init.flags = pll_clk->flags; | ||
| 360 | init.parent_names = &parent_name; | ||
| 361 | init.num_parents = 1; | ||
| 362 | |||
| 363 | switch (pll_clk->type) { | ||
| 364 | case PLL_1416X: | ||
| 365 | if (!pll_clk->rate_table) | ||
| 366 | init.ops = &clk_pll1416x_min_ops; | ||
| 367 | else | ||
| 368 | init.ops = &clk_pll1416x_ops; | ||
| 369 | break; | ||
| 370 | case PLL_1443X: | ||
| 371 | init.ops = &clk_pll1443x_ops; | ||
| 372 | break; | ||
| 373 | default: | ||
| 374 | pr_err("%s: Unknown pll type for pll clk %s\n", | ||
| 375 | __func__, name); | ||
| 376 | } | ||
| 377 | |||
| 378 | pll->base = base; | ||
| 379 | pll->hw.init = &init; | ||
| 380 | pll->type = pll_clk->type; | ||
| 381 | pll->rate_table = pll_clk->rate_table; | ||
| 382 | pll->rate_count = pll_clk->rate_count; | ||
| 383 | |||
| 384 | clk = clk_register(NULL, &pll->hw); | ||
| 385 | if (IS_ERR(clk)) { | ||
| 386 | pr_err("%s: failed to register pll %s %lu\n", | ||
| 387 | __func__, name, PTR_ERR(clk)); | ||
| 388 | kfree(pll); | ||
| 389 | } | ||
| 390 | |||
| 391 | return clk; | ||
| 392 | } | ||
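As a usage illustration only (not taken from this patch): the sketch below registers a hypothetical PLL1416x instance through the new imx_clk_pll14xx() helper. The clock names, the register offset and the rate-table entries are placeholders; the table is merely kept internally consistent and sorted in descending order, as the note in clk.h requires.

	/* Assumed relation Fout = ref * mdiv / (pdiv * 2^sdiv) with a 24 MHz ref;
	 * all values below are illustrative, not from any SoC support code.
	 */
	static const struct imx_pll14xx_rate_table example_pll1416x_tbl[] = {
		{ .rate = 1800000000U, .mdiv = 225, .pdiv = 3, .sdiv = 0, .kdiv = 0 },
		{ .rate =  600000000U, .mdiv = 300, .pdiv = 3, .sdiv = 2, .kdiv = 0 },
	};

	static const struct imx_pll14xx_clk example_pll1416x = {
		.type = PLL_1416X,
		.rate_table = example_pll1416x_tbl,
		.rate_count = ARRAY_SIZE(example_pll1416x_tbl),
	};

	/* "arm_pll", "osc_24m" and the +0x84 offset are placeholders */
	clk = imx_clk_pll14xx("arm_pll", "osc_24m", base + 0x84, &example_pll1416x);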
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c index ee7752bace89..9dfd03a95557 100644 --- a/drivers/clk/imx/clk-sccg-pll.c +++ b/drivers/clk/imx/clk-sccg-pll.c | |||
| @@ -25,87 +25,292 @@ | |||
| 25 | #define PLL_DIVF2_MASK GENMASK(12, 7) | 25 | #define PLL_DIVF2_MASK GENMASK(12, 7) |
| 26 | #define PLL_DIVR1_MASK GENMASK(27, 25) | 26 | #define PLL_DIVR1_MASK GENMASK(27, 25) |
| 27 | #define PLL_DIVR2_MASK GENMASK(24, 19) | 27 | #define PLL_DIVR2_MASK GENMASK(24, 19) |
| 28 | #define PLL_DIVQ_MASK GENMASK(6, 1) | ||
| 28 | #define PLL_REF_MASK GENMASK(2, 0) | 29 | #define PLL_REF_MASK GENMASK(2, 0) |
| 29 | 30 | ||
| 30 | #define PLL_LOCK_MASK BIT(31) | 31 | #define PLL_LOCK_MASK BIT(31) |
| 31 | #define PLL_PD_MASK BIT(7) | 32 | #define PLL_PD_MASK BIT(7) |
| 32 | 33 | ||
| 33 | #define OSC_25M 25000000 | 34 | /* These are the specification limits for the SSCG PLL */ |
| 34 | #define OSC_27M 27000000 | 35 | #define PLL_REF_MIN_FREQ 25000000UL |
| 36 | #define PLL_REF_MAX_FREQ 235000000UL | ||
| 35 | 37 | ||
| 36 | #define PLL_SCCG_LOCK_TIMEOUT 70 | 38 | #define PLL_STAGE1_MIN_FREQ 1600000000UL |
| 39 | #define PLL_STAGE1_MAX_FREQ 2400000000UL | ||
| 40 | |||
| 41 | #define PLL_STAGE1_REF_MIN_FREQ 25000000UL | ||
| 42 | #define PLL_STAGE1_REF_MAX_FREQ 54000000UL | ||
| 43 | |||
| 44 | #define PLL_STAGE2_MIN_FREQ 1200000000UL | ||
| 45 | #define PLL_STAGE2_MAX_FREQ 2400000000UL | ||
| 46 | |||
| 47 | #define PLL_STAGE2_REF_MIN_FREQ 54000000UL | ||
| 48 | #define PLL_STAGE2_REF_MAX_FREQ 75000000UL | ||
| 49 | |||
| 50 | #define PLL_OUT_MIN_FREQ 20000000UL | ||
| 51 | #define PLL_OUT_MAX_FREQ 1200000000UL | ||
| 52 | |||
| 53 | #define PLL_DIVR1_MAX 7 | ||
| 54 | #define PLL_DIVR2_MAX 63 | ||
| 55 | #define PLL_DIVF1_MAX 63 | ||
| 56 | #define PLL_DIVF2_MAX 63 | ||
| 57 | #define PLL_DIVQ_MAX 63 | ||
| 58 | |||
| 59 | #define PLL_BYPASS_NONE 0x0 | ||
| 60 | #define PLL_BYPASS1 0x2 | ||
| 61 | #define PLL_BYPASS2 0x1 | ||
| 62 | |||
| 63 | #define SSCG_PLL_BYPASS1_MASK BIT(5) | ||
| 64 | #define SSCG_PLL_BYPASS2_MASK BIT(4) | ||
| 65 | #define SSCG_PLL_BYPASS_MASK GENMASK(5, 4) | ||
| 66 | |||
| 67 | #define PLL_SCCG_LOCK_TIMEOUT 70 | ||
| 68 | |||
| 69 | struct clk_sccg_pll_setup { | ||
| 70 | int divr1, divf1; | ||
| 71 | int divr2, divf2; | ||
| 72 | int divq; | ||
| 73 | int bypass; | ||
| 74 | |||
| 75 | uint64_t vco1; | ||
| 76 | uint64_t vco2; | ||
| 77 | uint64_t fout; | ||
| 78 | uint64_t ref; | ||
| 79 | uint64_t ref_div1; | ||
| 80 | uint64_t ref_div2; | ||
| 81 | uint64_t fout_request; | ||
| 82 | int fout_error; | ||
| 83 | }; | ||
| 37 | 84 | ||
| 38 | struct clk_sccg_pll { | 85 | struct clk_sccg_pll { |
| 39 | struct clk_hw hw; | 86 | struct clk_hw hw; |
| 40 | void __iomem *base; | 87 | const struct clk_ops ops; |
| 88 | |||
| 89 | void __iomem *base; | ||
| 90 | |||
| 91 | struct clk_sccg_pll_setup setup; | ||
| 92 | |||
| 93 | u8 parent; | ||
| 94 | u8 bypass1; | ||
| 95 | u8 bypass2; | ||
| 41 | }; | 96 | }; |
| 42 | 97 | ||
| 43 | #define to_clk_sccg_pll(_hw) container_of(_hw, struct clk_sccg_pll, hw) | 98 | #define to_clk_sccg_pll(_hw) container_of(_hw, struct clk_sccg_pll, hw) |
| 44 | 99 | ||
| 45 | static int clk_pll_wait_lock(struct clk_sccg_pll *pll) | 100 | static int clk_sccg_pll_wait_lock(struct clk_sccg_pll *pll) |
| 46 | { | 101 | { |
| 47 | u32 val; | 102 | u32 val; |
| 48 | 103 | ||
| 49 | return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK, 0, | 104 | val = readl_relaxed(pll->base + PLL_CFG0); |
| 50 | PLL_SCCG_LOCK_TIMEOUT); | 105 | |
| 106 | /* don't wait for lock if all plls are bypassed */ | ||
| 107 | if (!(val & SSCG_PLL_BYPASS2_MASK)) | ||
| 108 | return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK, | ||
| 109 | 0, PLL_SCCG_LOCK_TIMEOUT); | ||
| 110 | |||
| 111 | return 0; | ||
| 51 | } | 112 | } |
| 52 | 113 | ||
| 53 | static int clk_pll1_is_prepared(struct clk_hw *hw) | 114 | static int clk_sccg_pll2_check_match(struct clk_sccg_pll_setup *setup, |
| 115 | struct clk_sccg_pll_setup *temp_setup) | ||
| 54 | { | 116 | { |
| 55 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); | 117 | int new_diff = temp_setup->fout - temp_setup->fout_request; |
| 56 | u32 val; | 118 | int diff = temp_setup->fout_error; |
| 57 | 119 | ||
| 58 | val = readl_relaxed(pll->base + PLL_CFG0); | 120 | if (abs(diff) > abs(new_diff)) { |
| 59 | return (val & PLL_PD_MASK) ? 0 : 1; | 121 | temp_setup->fout_error = new_diff; |
| 122 | memcpy(setup, temp_setup, sizeof(struct clk_sccg_pll_setup)); | ||
| 123 | |||
| 124 | if (temp_setup->fout_request == temp_setup->fout) | ||
| 125 | return 0; | ||
| 126 | } | ||
| 127 | return -1; | ||
| 60 | } | 128 | } |
| 61 | 129 | ||
| 62 | static unsigned long clk_pll1_recalc_rate(struct clk_hw *hw, | 130 | static int clk_sccg_divq_lookup(struct clk_sccg_pll_setup *setup, |
| 63 | unsigned long parent_rate) | 131 | struct clk_sccg_pll_setup *temp_setup) |
| 64 | { | 132 | { |
| 65 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); | 133 | int ret = -EINVAL; |
| 66 | u32 val, divf; | 134 | |
| 135 | for (temp_setup->divq = 0; temp_setup->divq <= PLL_DIVQ_MAX; | ||
| 136 | temp_setup->divq++) { | ||
| 137 | temp_setup->vco2 = temp_setup->vco1; | ||
| 138 | do_div(temp_setup->vco2, temp_setup->divr2 + 1); | ||
| 139 | temp_setup->vco2 *= 2; | ||
| 140 | temp_setup->vco2 *= temp_setup->divf2 + 1; | ||
| 141 | if (temp_setup->vco2 >= PLL_STAGE2_MIN_FREQ && | ||
| 142 | temp_setup->vco2 <= PLL_STAGE2_MAX_FREQ) { | ||
| 143 | temp_setup->fout = temp_setup->vco2; | ||
| 144 | do_div(temp_setup->fout, 2 * (temp_setup->divq + 1)); | ||
| 145 | |||
| 146 | ret = clk_sccg_pll2_check_match(setup, temp_setup); | ||
| 147 | if (!ret) { | ||
| 148 | temp_setup->bypass = PLL_BYPASS1; | ||
| 149 | return ret; | ||
| 150 | } | ||
| 151 | } | ||
| 152 | } | ||
| 67 | 153 | ||
| 68 | val = readl_relaxed(pll->base + PLL_CFG2); | 154 | return ret; |
| 69 | divf = FIELD_GET(PLL_DIVF1_MASK, val); | 155 | } |
| 156 | |||
| 157 | static int clk_sccg_divf2_lookup(struct clk_sccg_pll_setup *setup, | ||
| 158 | struct clk_sccg_pll_setup *temp_setup) | ||
| 159 | { | ||
| 160 | int ret = -EINVAL; | ||
| 161 | |||
| 162 | for (temp_setup->divf2 = 0; temp_setup->divf2 <= PLL_DIVF2_MAX; | ||
| 163 | temp_setup->divf2++) { | ||
| 164 | ret = clk_sccg_divq_lookup(setup, temp_setup); | ||
| 165 | if (!ret) | ||
| 166 | return ret; | ||
| 167 | } | ||
| 70 | 168 | ||
| 71 | return parent_rate * 2 * (divf + 1); | 169 | return ret; |
| 72 | } | 170 | } |
| 73 | 171 | ||
| 74 | static long clk_pll1_round_rate(struct clk_hw *hw, unsigned long rate, | 172 | static int clk_sccg_divr2_lookup(struct clk_sccg_pll_setup *setup, |
| 75 | unsigned long *prate) | 173 | struct clk_sccg_pll_setup *temp_setup) |
| 76 | { | 174 | { |
| 77 | unsigned long parent_rate = *prate; | 175 | int ret = -EINVAL; |
| 78 | u32 div; | 176 | |
| 177 | for (temp_setup->divr2 = 0; temp_setup->divr2 <= PLL_DIVR2_MAX; | ||
| 178 | temp_setup->divr2++) { | ||
| 179 | temp_setup->ref_div2 = temp_setup->vco1; | ||
| 180 | do_div(temp_setup->ref_div2, temp_setup->divr2 + 1); | ||
| 181 | if (temp_setup->ref_div2 >= PLL_STAGE2_REF_MIN_FREQ && | ||
| 182 | temp_setup->ref_div2 <= PLL_STAGE2_REF_MAX_FREQ) { | ||
| 183 | ret = clk_sccg_divf2_lookup(setup, temp_setup); | ||
| 184 | if (!ret) | ||
| 185 | return ret; | ||
| 186 | } | ||
| 187 | } | ||
| 188 | |||
| 189 | return ret; | ||
| 190 | } | ||
| 191 | |||
| 192 | static int clk_sccg_pll2_find_setup(struct clk_sccg_pll_setup *setup, | ||
| 193 | struct clk_sccg_pll_setup *temp_setup, | ||
| 194 | uint64_t ref) | ||
| 195 | { | ||
| 196 | |||
| 197 | int ret = -EINVAL; | ||
| 79 | 198 | ||
| 80 | if (!parent_rate) | 199 | if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ) |
| 81 | return 0; | 200 | return ret; |
| 82 | 201 | ||
| 83 | div = rate / (parent_rate * 2); | 202 | temp_setup->vco1 = ref; |
| 84 | 203 | ||
| 85 | return parent_rate * div * 2; | 204 | ret = clk_sccg_divr2_lookup(setup, temp_setup); |
| 205 | return ret; | ||
| 86 | } | 206 | } |
| 87 | 207 | ||
| 88 | static int clk_pll1_set_rate(struct clk_hw *hw, unsigned long rate, | 208 | static int clk_sccg_divf1_lookup(struct clk_sccg_pll_setup *setup, |
| 89 | unsigned long parent_rate) | 209 | struct clk_sccg_pll_setup *temp_setup) |
| 90 | { | 210 | { |
| 91 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); | 211 | int ret = -EINVAL; |
| 92 | u32 val; | ||
| 93 | u32 divf; | ||
| 94 | 212 | ||
| 95 | if (!parent_rate) | 213 | for (temp_setup->divf1 = 0; temp_setup->divf1 <= PLL_DIVF1_MAX; |
| 96 | return -EINVAL; | 214 | temp_setup->divf1++) { |
| 215 | uint64_t vco1 = temp_setup->ref; | ||
| 97 | 216 | ||
| 98 | divf = rate / (parent_rate * 2); | 217 | do_div(vco1, temp_setup->divr1 + 1); |
| 218 | vco1 *= 2; | ||
| 219 | vco1 *= temp_setup->divf1 + 1; | ||
| 99 | 220 | ||
| 100 | val = readl_relaxed(pll->base + PLL_CFG2); | 221 | ret = clk_sccg_pll2_find_setup(setup, temp_setup, vco1); |
| 101 | val &= ~PLL_DIVF1_MASK; | 222 | if (!ret) { |
| 102 | val |= FIELD_PREP(PLL_DIVF1_MASK, divf - 1); | 223 | temp_setup->bypass = PLL_BYPASS_NONE; |
| 103 | writel_relaxed(val, pll->base + PLL_CFG2); | 224 | return ret; |
| 225 | } | ||
| 226 | } | ||
| 227 | |||
| 228 | return ret; | ||
| 229 | } | ||
| 230 | |||
| 231 | static int clk_sccg_divr1_lookup(struct clk_sccg_pll_setup *setup, | ||
| 232 | struct clk_sccg_pll_setup *temp_setup) | ||
| 233 | { | ||
| 234 | int ret = -EINVAL; | ||
| 235 | |||
| 236 | for (temp_setup->divr1 = 0; temp_setup->divr1 <= PLL_DIVR1_MAX; | ||
| 237 | temp_setup->divr1++) { | ||
| 238 | temp_setup->ref_div1 = temp_setup->ref; | ||
| 239 | do_div(temp_setup->ref_div1, temp_setup->divr1 + 1); | ||
| 240 | if (temp_setup->ref_div1 >= PLL_STAGE1_REF_MIN_FREQ && | ||
| 241 | temp_setup->ref_div1 <= PLL_STAGE1_REF_MAX_FREQ) { | ||
| 242 | ret = clk_sccg_divf1_lookup(setup, temp_setup); | ||
| 243 | if (!ret) | ||
| 244 | return ret; | ||
| 245 | } | ||
| 246 | } | ||
| 247 | |||
| 248 | return ret; | ||
| 249 | } | ||
| 250 | |||
| 251 | static int clk_sccg_pll1_find_setup(struct clk_sccg_pll_setup *setup, | ||
| 252 | struct clk_sccg_pll_setup *temp_setup, | ||
| 253 | uint64_t ref) | ||
| 254 | { | ||
| 255 | |||
| 256 | int ret = -EINVAL; | ||
| 257 | |||
| 258 | if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ) | ||
| 259 | return ret; | ||
| 260 | |||
| 261 | temp_setup->ref = ref; | ||
| 262 | |||
| 263 | ret = clk_sccg_divr1_lookup(setup, temp_setup); | ||
| 264 | |||
| 265 | return ret; | ||
| 266 | } | ||
| 267 | |||
| 268 | static int clk_sccg_pll_find_setup(struct clk_sccg_pll_setup *setup, | ||
| 269 | uint64_t prate, | ||
| 270 | uint64_t rate, int try_bypass) | ||
| 271 | { | ||
| 272 | struct clk_sccg_pll_setup temp_setup; | ||
| 273 | int ret = -EINVAL; | ||
| 274 | |||
| 275 | memset(&temp_setup, 0, sizeof(struct clk_sccg_pll_setup)); | ||
| 276 | memset(setup, 0, sizeof(struct clk_sccg_pll_setup)); | ||
| 277 | |||
| 278 | temp_setup.fout_error = PLL_OUT_MAX_FREQ; | ||
| 279 | temp_setup.fout_request = rate; | ||
| 280 | |||
| 281 | switch (try_bypass) { | ||
| 104 | 282 | ||
| 105 | return clk_pll_wait_lock(pll); | 283 | case PLL_BYPASS2: |
| 284 | if (prate == rate) { | ||
| 285 | setup->bypass = PLL_BYPASS2; | ||
| 286 | setup->fout = rate; | ||
| 287 | ret = 0; | ||
| 288 | } | ||
| 289 | break; | ||
| 290 | |||
| 291 | case PLL_BYPASS1: | ||
| 292 | ret = clk_sccg_pll2_find_setup(setup, &temp_setup, prate); | ||
| 293 | break; | ||
| 294 | |||
| 295 | case PLL_BYPASS_NONE: | ||
| 296 | ret = clk_sccg_pll1_find_setup(setup, &temp_setup, prate); | ||
| 297 | break; | ||
| 298 | } | ||
| 299 | |||
| 300 | return ret; | ||
| 301 | } | ||
| 302 | |||
| 303 | |||
| 304 | static int clk_sccg_pll_is_prepared(struct clk_hw *hw) | ||
| 305 | { | ||
| 306 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); | ||
| 307 | |||
| 308 | u32 val = readl_relaxed(pll->base + PLL_CFG0); | ||
| 309 | |||
| 310 | return (val & PLL_PD_MASK) ? 0 : 1; | ||
| 106 | } | 311 | } |
| 107 | 312 | ||
| 108 | static int clk_pll1_prepare(struct clk_hw *hw) | 313 | static int clk_sccg_pll_prepare(struct clk_hw *hw) |
| 109 | { | 314 | { |
| 110 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); | 315 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); |
| 111 | u32 val; | 316 | u32 val; |
| @@ -114,10 +319,10 @@ static int clk_pll1_prepare(struct clk_hw *hw) | |||
| 114 | val &= ~PLL_PD_MASK; | 319 | val &= ~PLL_PD_MASK; |
| 115 | writel_relaxed(val, pll->base + PLL_CFG0); | 320 | writel_relaxed(val, pll->base + PLL_CFG0); |
| 116 | 321 | ||
| 117 | return clk_pll_wait_lock(pll); | 322 | return clk_sccg_pll_wait_lock(pll); |
| 118 | } | 323 | } |
| 119 | 324 | ||
| 120 | static void clk_pll1_unprepare(struct clk_hw *hw) | 325 | static void clk_sccg_pll_unprepare(struct clk_hw *hw) |
| 121 | { | 326 | { |
| 122 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); | 327 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); |
| 123 | u32 val; | 328 | u32 val; |
| @@ -125,121 +330,208 @@ static void clk_pll1_unprepare(struct clk_hw *hw) | |||
| 125 | val = readl_relaxed(pll->base + PLL_CFG0); | 330 | val = readl_relaxed(pll->base + PLL_CFG0); |
| 126 | val |= PLL_PD_MASK; | 331 | val |= PLL_PD_MASK; |
| 127 | writel_relaxed(val, pll->base + PLL_CFG0); | 332 | writel_relaxed(val, pll->base + PLL_CFG0); |
| 128 | |||
| 129 | } | 333 | } |
| 130 | 334 | ||
| 131 | static unsigned long clk_pll2_recalc_rate(struct clk_hw *hw, | 335 | static unsigned long clk_sccg_pll_recalc_rate(struct clk_hw *hw, |
| 132 | unsigned long parent_rate) | 336 | unsigned long parent_rate) |
| 133 | { | 337 | { |
| 134 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); | 338 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); |
| 135 | u32 val, ref, divr1, divf1, divr2, divf2; | 339 | u32 val, divr1, divf1, divr2, divf2, divq; |
| 136 | u64 temp64; | 340 | u64 temp64; |
| 137 | 341 | ||
| 138 | val = readl_relaxed(pll->base + PLL_CFG0); | ||
| 139 | switch (FIELD_GET(PLL_REF_MASK, val)) { | ||
| 140 | case 0: | ||
| 141 | ref = OSC_25M; | ||
| 142 | break; | ||
| 143 | case 1: | ||
| 144 | ref = OSC_27M; | ||
| 145 | break; | ||
| 146 | default: | ||
| 147 | ref = OSC_25M; | ||
| 148 | break; | ||
| 149 | } | ||
| 150 | |||
| 151 | val = readl_relaxed(pll->base + PLL_CFG2); | 342 | val = readl_relaxed(pll->base + PLL_CFG2); |
| 152 | divr1 = FIELD_GET(PLL_DIVR1_MASK, val); | 343 | divr1 = FIELD_GET(PLL_DIVR1_MASK, val); |
| 153 | divr2 = FIELD_GET(PLL_DIVR2_MASK, val); | 344 | divr2 = FIELD_GET(PLL_DIVR2_MASK, val); |
| 154 | divf1 = FIELD_GET(PLL_DIVF1_MASK, val); | 345 | divf1 = FIELD_GET(PLL_DIVF1_MASK, val); |
| 155 | divf2 = FIELD_GET(PLL_DIVF2_MASK, val); | 346 | divf2 = FIELD_GET(PLL_DIVF2_MASK, val); |
| 156 | 347 | divq = FIELD_GET(PLL_DIVQ_MASK, val); | |
| 157 | temp64 = ref * 2; | 348 | |
| 158 | temp64 *= (divf1 + 1) * (divf2 + 1); | 349 | temp64 = parent_rate; |
| 159 | 350 | ||
| 160 | do_div(temp64, (divr1 + 1) * (divr2 + 1)); | 351 | val = clk_readl(pll->base + PLL_CFG0); |
| 352 | if (val & SSCG_PLL_BYPASS2_MASK) { | ||
| 353 | temp64 = parent_rate; | ||
| 354 | } else if (val & SSCG_PLL_BYPASS1_MASK) { | ||
| 355 | temp64 *= divf2; | ||
| 356 | do_div(temp64, (divr2 + 1) * (divq + 1)); | ||
| 357 | } else { | ||
| 358 | temp64 *= 2; | ||
| 359 | temp64 *= (divf1 + 1) * (divf2 + 1); | ||
| 360 | do_div(temp64, (divr1 + 1) * (divr2 + 1) * (divq + 1)); | ||
| 361 | } | ||
| 161 | 362 | ||
| 162 | return temp64; | 363 | return temp64; |
| 163 | } | 364 | } |
| 164 | 365 | ||
| 165 | static long clk_pll2_round_rate(struct clk_hw *hw, unsigned long rate, | 366 | static int clk_sccg_pll_set_rate(struct clk_hw *hw, unsigned long rate, |
| 166 | unsigned long *prate) | 367 | unsigned long parent_rate) |
| 167 | { | 368 | { |
| 168 | u32 div; | 369 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); |
| 169 | unsigned long parent_rate = *prate; | 370 | struct clk_sccg_pll_setup *setup = &pll->setup; |
| 371 | u32 val; | ||
| 170 | 372 | ||
| 171 | if (!parent_rate) | 373 | /* set bypass here too since the parent might be the same */ |
| 172 | return 0; | 374 | val = clk_readl(pll->base + PLL_CFG0); |
| 375 | val &= ~SSCG_PLL_BYPASS_MASK; | ||
| 376 | val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass); | ||
| 377 | clk_writel(val, pll->base + PLL_CFG0); | ||
| 173 | 378 | ||
| 174 | div = rate / parent_rate; | 379 | val = readl_relaxed(pll->base + PLL_CFG2); |
| 380 | val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK); | ||
| 381 | val &= ~(PLL_DIVR1_MASK | PLL_DIVR2_MASK | PLL_DIVQ_MASK); | ||
| 382 | val |= FIELD_PREP(PLL_DIVF1_MASK, setup->divf1); | ||
| 383 | val |= FIELD_PREP(PLL_DIVF2_MASK, setup->divf2); | ||
| 384 | val |= FIELD_PREP(PLL_DIVR1_MASK, setup->divr1); | ||
| 385 | val |= FIELD_PREP(PLL_DIVR2_MASK, setup->divr2); | ||
| 386 | val |= FIELD_PREP(PLL_DIVQ_MASK, setup->divq); | ||
| 387 | writel_relaxed(val, pll->base + PLL_CFG2); | ||
| 175 | 388 | ||
| 176 | return parent_rate * div; | 389 | return clk_sccg_pll_wait_lock(pll); |
| 177 | } | 390 | } |
| 178 | 391 | ||
| 179 | static int clk_pll2_set_rate(struct clk_hw *hw, unsigned long rate, | 392 | static u8 clk_sccg_pll_get_parent(struct clk_hw *hw) |
| 180 | unsigned long parent_rate) | ||
| 181 | { | 393 | { |
| 394 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); | ||
| 182 | u32 val; | 395 | u32 val; |
| 183 | u32 divf; | 396 | u8 ret = pll->parent; |
| 397 | |||
| 398 | val = clk_readl(pll->base + PLL_CFG0); | ||
| 399 | if (val & SSCG_PLL_BYPASS2_MASK) | ||
| 400 | ret = pll->bypass2; | ||
| 401 | else if (val & SSCG_PLL_BYPASS1_MASK) | ||
| 402 | ret = pll->bypass1; | ||
| 403 | return ret; | ||
| 404 | } | ||
| 405 | |||
| 406 | static int clk_sccg_pll_set_parent(struct clk_hw *hw, u8 index) | ||
| 407 | { | ||
| 184 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); | 408 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); |
| 409 | u32 val; | ||
| 185 | 410 | ||
| 186 | if (!parent_rate) | 411 | val = clk_readl(pll->base + PLL_CFG0); |
| 187 | return -EINVAL; | 412 | val &= ~SSCG_PLL_BYPASS_MASK; |
| 413 | val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass); | ||
| 414 | clk_writel(val, pll->base + PLL_CFG0); | ||
| 188 | 415 | ||
| 189 | divf = rate / parent_rate; | 416 | return clk_sccg_pll_wait_lock(pll); |
| 417 | } | ||
| 190 | 418 | ||
| 191 | val = readl_relaxed(pll->base + PLL_CFG2); | 419 | static int __clk_sccg_pll_determine_rate(struct clk_hw *hw, |
| 192 | val &= ~PLL_DIVF2_MASK; | 420 | struct clk_rate_request *req, |
| 193 | val |= FIELD_PREP(PLL_DIVF2_MASK, divf - 1); | 421 | uint64_t min, |
| 194 | writel_relaxed(val, pll->base + PLL_CFG2); | 422 | uint64_t max, |
| 423 | uint64_t rate, | ||
| 424 | int bypass) | ||
| 425 | { | ||
| 426 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); | ||
| 427 | struct clk_sccg_pll_setup *setup = &pll->setup; | ||
| 428 | struct clk_hw *parent_hw = NULL; | ||
| 429 | int bypass_parent_index; | ||
| 430 | int ret = -EINVAL; | ||
| 431 | |||
| 432 | req->max_rate = max; | ||
| 433 | req->min_rate = min; | ||
| 434 | |||
| 435 | switch (bypass) { | ||
| 436 | case PLL_BYPASS2: | ||
| 437 | bypass_parent_index = pll->bypass2; | ||
| 438 | break; | ||
| 439 | case PLL_BYPASS1: | ||
| 440 | bypass_parent_index = pll->bypass1; | ||
| 441 | break; | ||
| 442 | default: | ||
| 443 | bypass_parent_index = pll->parent; | ||
| 444 | break; | ||
| 445 | } | ||
| 446 | |||
| 447 | parent_hw = clk_hw_get_parent_by_index(hw, bypass_parent_index); | ||
| 448 | ret = __clk_determine_rate(parent_hw, req); | ||
| 449 | if (!ret) { | ||
| 450 | ret = clk_sccg_pll_find_setup(setup, req->rate, | ||
| 451 | rate, bypass); | ||
| 452 | } | ||
| 453 | |||
| 454 | req->best_parent_hw = parent_hw; | ||
| 455 | req->best_parent_rate = req->rate; | ||
| 456 | req->rate = setup->fout; | ||
| 195 | 457 | ||
| 196 | return clk_pll_wait_lock(pll); | 458 | return ret; |
| 197 | } | 459 | } |
| 198 | 460 | ||
| 199 | static const struct clk_ops clk_sccg_pll1_ops = { | 461 | static int clk_sccg_pll_determine_rate(struct clk_hw *hw, |
| 200 | .is_prepared = clk_pll1_is_prepared, | 462 | struct clk_rate_request *req) |
| 201 | .recalc_rate = clk_pll1_recalc_rate, | 463 | { |
| 202 | .round_rate = clk_pll1_round_rate, | 464 | struct clk_sccg_pll *pll = to_clk_sccg_pll(hw); |
| 203 | .set_rate = clk_pll1_set_rate, | 465 | struct clk_sccg_pll_setup *setup = &pll->setup; |
| 204 | }; | 466 | uint64_t rate = req->rate; |
| 467 | uint64_t min = req->min_rate; | ||
| 468 | uint64_t max = req->max_rate; | ||
| 469 | int ret = -EINVAL; | ||
| 470 | |||
| 471 | if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ) | ||
| 472 | return ret; | ||
| 473 | |||
| 474 | ret = __clk_sccg_pll_determine_rate(hw, req, req->rate, req->rate, | ||
| 475 | rate, PLL_BYPASS2); | ||
| 476 | if (!ret) | ||
| 477 | return ret; | ||
| 478 | |||
| 479 | ret = __clk_sccg_pll_determine_rate(hw, req, PLL_STAGE1_REF_MIN_FREQ, | ||
| 480 | PLL_STAGE1_REF_MAX_FREQ, rate, | ||
| 481 | PLL_BYPASS1); | ||
| 482 | if (!ret) | ||
| 483 | return ret; | ||
| 484 | |||
| 485 | ret = __clk_sccg_pll_determine_rate(hw, req, PLL_REF_MIN_FREQ, | ||
| 486 | PLL_REF_MAX_FREQ, rate, | ||
| 487 | PLL_BYPASS_NONE); | ||
| 488 | if (!ret) | ||
| 489 | return ret; | ||
| 490 | |||
| 491 | if (setup->fout >= min && setup->fout <= max) | ||
| 492 | ret = 0; | ||
| 493 | |||
| 494 | return ret; | ||
| 495 | } | ||
| 205 | 496 | ||
| 206 | static const struct clk_ops clk_sccg_pll2_ops = { | 497 | static const struct clk_ops clk_sccg_pll_ops = { |
| 207 | .prepare = clk_pll1_prepare, | 498 | .prepare = clk_sccg_pll_prepare, |
| 208 | .unprepare = clk_pll1_unprepare, | 499 | .unprepare = clk_sccg_pll_unprepare, |
| 209 | .recalc_rate = clk_pll2_recalc_rate, | 500 | .is_prepared = clk_sccg_pll_is_prepared, |
| 210 | .round_rate = clk_pll2_round_rate, | 501 | .recalc_rate = clk_sccg_pll_recalc_rate, |
| 211 | .set_rate = clk_pll2_set_rate, | 502 | .set_rate = clk_sccg_pll_set_rate, |
| 503 | .set_parent = clk_sccg_pll_set_parent, | ||
| 504 | .get_parent = clk_sccg_pll_get_parent, | ||
| 505 | .determine_rate = clk_sccg_pll_determine_rate, | ||
| 212 | }; | 506 | }; |
| 213 | 507 | ||
| 214 | struct clk *imx_clk_sccg_pll(const char *name, | 508 | struct clk *imx_clk_sccg_pll(const char *name, |
| 215 | const char *parent_name, | 509 | const char * const *parent_names, |
| 510 | u8 num_parents, | ||
| 511 | u8 parent, u8 bypass1, u8 bypass2, | ||
| 216 | void __iomem *base, | 512 | void __iomem *base, |
| 217 | enum imx_sccg_pll_type pll_type) | 513 | unsigned long flags) |
| 218 | { | 514 | { |
| 219 | struct clk_sccg_pll *pll; | 515 | struct clk_sccg_pll *pll; |
| 220 | struct clk_init_data init; | 516 | struct clk_init_data init; |
| 221 | struct clk_hw *hw; | 517 | struct clk_hw *hw; |
| 222 | int ret; | 518 | int ret; |
| 223 | 519 | ||
| 224 | switch (pll_type) { | ||
| 225 | case SCCG_PLL1: | ||
| 226 | init.ops = &clk_sccg_pll1_ops; | ||
| 227 | break; | ||
| 228 | case SCCG_PLL2: | ||
| 229 | init.ops = &clk_sccg_pll2_ops; | ||
| 230 | break; | ||
| 231 | default: | ||
| 232 | return ERR_PTR(-EINVAL); | ||
| 233 | } | ||
| 234 | |||
| 235 | pll = kzalloc(sizeof(*pll), GFP_KERNEL); | 520 | pll = kzalloc(sizeof(*pll), GFP_KERNEL); |
| 236 | if (!pll) | 521 | if (!pll) |
| 237 | return ERR_PTR(-ENOMEM); | 522 | return ERR_PTR(-ENOMEM); |
| 238 | 523 | ||
| 524 | pll->parent = parent; | ||
| 525 | pll->bypass1 = bypass1; | ||
| 526 | pll->bypass2 = bypass2; | ||
| 527 | |||
| 528 | pll->base = base; | ||
| 239 | init.name = name; | 529 | init.name = name; |
| 240 | init.flags = 0; | 530 | init.ops = &clk_sccg_pll_ops; |
| 241 | init.parent_names = &parent_name; | 531 | |
| 242 | init.num_parents = 1; | 532 | init.flags = flags; |
| 533 | init.parent_names = parent_names; | ||
| 534 | init.num_parents = num_parents; | ||
| 243 | 535 | ||
| 244 | pll->base = base; | 536 | pll->base = base; |
| 245 | pll->hw.init = &init; | 537 | pll->hw.init = &init; |
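For reference, a worked example of the non-bypassed output calculation performed by clk_sccg_pll_recalc_rate() above; the divider values are chosen purely for illustration but stay within the stage limits defined at the top of the file.

	/* ref 25 MHz, divr1 = 0, divf1 = 35  ->  vco1 = 25 MHz * 2 * 36 / 1    = 1800 MHz
	 * divr2 = 29, divf2 = 9              ->  vco2 = 1800 MHz * 2 * 10 / 30 = 1200 MHz
	 * divq = 0                           ->  fout = 1200 MHz / (2 * 1)     =  600 MHz
	 */
	u64 rate = 25000000ULL * 2 * (35 + 1) * (9 + 1);
	do_div(rate, (0 + 1) * (29 + 1) * (0 + 1));	/* rate is now 600000000 */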
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c index 7ccf7edfe11c..fbef740704d0 100644 --- a/drivers/clk/imx/clk-scu.c +++ b/drivers/clk/imx/clk-scu.c | |||
| @@ -4,12 +4,17 @@ | |||
| 4 | * Dong Aisheng <aisheng.dong@nxp.com> | 4 | * Dong Aisheng <aisheng.dong@nxp.com> |
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #include <dt-bindings/firmware/imx/rsrc.h> | ||
| 8 | #include <linux/arm-smccc.h> | ||
| 7 | #include <linux/clk-provider.h> | 9 | #include <linux/clk-provider.h> |
| 8 | #include <linux/err.h> | 10 | #include <linux/err.h> |
| 9 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
| 10 | 12 | ||
| 11 | #include "clk-scu.h" | 13 | #include "clk-scu.h" |
| 12 | 14 | ||
| 15 | #define IMX_SIP_CPUFREQ 0xC2000001 | ||
| 16 | #define IMX_SIP_SET_CPUFREQ 0x00 | ||
| 17 | |||
| 13 | static struct imx_sc_ipc *ccm_ipc_handle; | 18 | static struct imx_sc_ipc *ccm_ipc_handle; |
| 14 | 19 | ||
| 15 | /* | 20 | /* |
| @@ -66,6 +71,41 @@ struct imx_sc_msg_get_clock_rate { | |||
| 66 | }; | 71 | }; |
| 67 | 72 | ||
| 68 | /* | 73 | /* |
| 74 | * struct imx_sc_msg_get_clock_parent - clock get parent protocol | ||
| 75 | * @hdr: SCU protocol header | ||
| 76 | * @req: get parent request protocol | ||
| 77 | * @resp: get parent response protocol | ||
| 78 | * | ||
| 79 | * This structure describes the SCU protocol of clock get parent | ||
| 80 | */ | ||
| 81 | struct imx_sc_msg_get_clock_parent { | ||
| 82 | struct imx_sc_rpc_msg hdr; | ||
| 83 | union { | ||
| 84 | struct req_get_clock_parent { | ||
| 85 | __le16 resource; | ||
| 86 | u8 clk; | ||
| 87 | } __packed req; | ||
| 88 | struct resp_get_clock_parent { | ||
| 89 | u8 parent; | ||
| 90 | } resp; | ||
| 91 | } data; | ||
| 92 | }; | ||
| 93 | |||
| 94 | /* | ||
| 95 | * struct imx_sc_msg_set_clock_parent - clock set parent protocol | ||
| 96 | * @hdr: SCU protocol header | ||
| 97 | * @req: set parent request protocol | ||
| 98 | * | ||
| 99 | * This structure describes the SCU protocol of clock set parent | ||
| 100 | */ | ||
| 101 | struct imx_sc_msg_set_clock_parent { | ||
| 102 | struct imx_sc_rpc_msg hdr; | ||
| 103 | __le16 resource; | ||
| 104 | u8 clk; | ||
| 105 | u8 parent; | ||
| 106 | } __packed; | ||
| 107 | |||
| 108 | /* | ||
| 69 | * struct imx_sc_msg_req_clock_enable - clock gate protocol | 109 | * struct imx_sc_msg_req_clock_enable - clock gate protocol |
| 70 | * @hdr: SCU protocol header | 110 | * @hdr: SCU protocol header |
| 71 | * @resource: clock resource to gate | 111 | * @resource: clock resource to gate |
| @@ -145,6 +185,25 @@ static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate, | |||
| 145 | return rate; | 185 | return rate; |
| 146 | } | 186 | } |
| 147 | 187 | ||
| 188 | static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate, | ||
| 189 | unsigned long parent_rate) | ||
| 190 | { | ||
| 191 | struct clk_scu *clk = to_clk_scu(hw); | ||
| 192 | struct arm_smccc_res res; | ||
| 193 | unsigned long cluster_id; | ||
| 194 | |||
| 195 | if (clk->rsrc_id == IMX_SC_R_A35) | ||
| 196 | cluster_id = 0; | ||
| 197 | else | ||
| 198 | return -EINVAL; | ||
| 199 | |||
| 200 | /* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */ | ||
| 201 | arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ, | ||
| 202 | cluster_id, rate, 0, 0, 0, 0, &res); | ||
| 203 | |||
| 204 | return 0; | ||
| 205 | } | ||
| 206 | |||
| 148 | /* | 207 | /* |
| 149 | * clk_scu_set_rate - Set rate for a SCU clock | 208 | * clk_scu_set_rate - Set rate for a SCU clock |
| 150 | * @hw: clock to change rate for | 209 | * @hw: clock to change rate for |
| @@ -173,6 +232,49 @@ static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 173 | return imx_scu_call_rpc(ccm_ipc_handle, &msg, true); | 232 | return imx_scu_call_rpc(ccm_ipc_handle, &msg, true); |
| 174 | } | 233 | } |
| 175 | 234 | ||
| 235 | static u8 clk_scu_get_parent(struct clk_hw *hw) | ||
| 236 | { | ||
| 237 | struct clk_scu *clk = to_clk_scu(hw); | ||
| 238 | struct imx_sc_msg_get_clock_parent msg; | ||
| 239 | struct imx_sc_rpc_msg *hdr = &msg.hdr; | ||
| 240 | int ret; | ||
| 241 | |||
| 242 | hdr->ver = IMX_SC_RPC_VERSION; | ||
| 243 | hdr->svc = IMX_SC_RPC_SVC_PM; | ||
| 244 | hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT; | ||
| 245 | hdr->size = 2; | ||
| 246 | |||
| 247 | msg.data.req.resource = cpu_to_le16(clk->rsrc_id); | ||
| 248 | msg.data.req.clk = clk->clk_type; | ||
| 249 | |||
| 250 | ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true); | ||
| 251 | if (ret) { | ||
| 252 | pr_err("%s: failed to get clock parent %d\n", | ||
| 253 | clk_hw_get_name(hw), ret); | ||
| 254 | return 0; | ||
| 255 | } | ||
| 256 | |||
| 257 | return msg.data.resp.parent; | ||
| 258 | } | ||
| 259 | |||
| 260 | static int clk_scu_set_parent(struct clk_hw *hw, u8 index) | ||
| 261 | { | ||
| 262 | struct clk_scu *clk = to_clk_scu(hw); | ||
| 263 | struct imx_sc_msg_set_clock_parent msg; | ||
| 264 | struct imx_sc_rpc_msg *hdr = &msg.hdr; | ||
| 265 | |||
| 266 | hdr->ver = IMX_SC_RPC_VERSION; | ||
| 267 | hdr->svc = IMX_SC_RPC_SVC_PM; | ||
| 268 | hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT; | ||
| 269 | hdr->size = 2; | ||
| 270 | |||
| 271 | msg.resource = cpu_to_le16(clk->rsrc_id); | ||
| 272 | msg.clk = clk->clk_type; | ||
| 273 | msg.parent = index; | ||
| 274 | |||
| 275 | return imx_scu_call_rpc(ccm_ipc_handle, &msg, true); | ||
| 276 | } | ||
| 277 | |||
| 176 | static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource, | 278 | static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource, |
| 177 | u8 clk, bool enable, bool autog) | 279 | u8 clk, bool enable, bool autog) |
| 178 | { | 280 | { |
| @@ -228,11 +330,22 @@ static const struct clk_ops clk_scu_ops = { | |||
| 228 | .recalc_rate = clk_scu_recalc_rate, | 330 | .recalc_rate = clk_scu_recalc_rate, |
| 229 | .round_rate = clk_scu_round_rate, | 331 | .round_rate = clk_scu_round_rate, |
| 230 | .set_rate = clk_scu_set_rate, | 332 | .set_rate = clk_scu_set_rate, |
| 333 | .get_parent = clk_scu_get_parent, | ||
| 334 | .set_parent = clk_scu_set_parent, | ||
| 335 | .prepare = clk_scu_prepare, | ||
| 336 | .unprepare = clk_scu_unprepare, | ||
| 337 | }; | ||
| 338 | |||
| 339 | static const struct clk_ops clk_scu_cpu_ops = { | ||
| 340 | .recalc_rate = clk_scu_recalc_rate, | ||
| 341 | .round_rate = clk_scu_round_rate, | ||
| 342 | .set_rate = clk_scu_atf_set_cpu_rate, | ||
| 231 | .prepare = clk_scu_prepare, | 343 | .prepare = clk_scu_prepare, |
| 232 | .unprepare = clk_scu_unprepare, | 344 | .unprepare = clk_scu_unprepare, |
| 233 | }; | 345 | }; |
| 234 | 346 | ||
| 235 | struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type) | 347 | struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents, |
| 348 | int num_parents, u32 rsrc_id, u8 clk_type) | ||
| 236 | { | 349 | { |
| 237 | struct clk_init_data init; | 350 | struct clk_init_data init; |
| 238 | struct clk_scu *clk; | 351 | struct clk_scu *clk; |
| @@ -248,7 +361,13 @@ struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type) | |||
| 248 | 361 | ||
| 249 | init.name = name; | 362 | init.name = name; |
| 250 | init.ops = &clk_scu_ops; | 363 | init.ops = &clk_scu_ops; |
| 251 | init.num_parents = 0; | 364 | if (rsrc_id == IMX_SC_R_A35) |
| 365 | init.ops = &clk_scu_cpu_ops; | ||
| 366 | else | ||
| 367 | init.ops = &clk_scu_ops; | ||
| 368 | init.parent_names = parents; | ||
| 369 | init.num_parents = num_parents; | ||
| 370 | |||
| 252 | /* | 371 | /* |
| 253 | * Note on MX8, the clocks are tightly coupled with power domain | 372 | * Note on MX8, the clocks are tightly coupled with power domain |
| 254 | * that once the power domain is off, the clock status may be | 373 | * that once the power domain is off, the clock status may be |
diff --git a/drivers/clk/imx/clk-scu.h b/drivers/clk/imx/clk-scu.h index 52c1746ec988..2bcfaf06a458 100644 --- a/drivers/clk/imx/clk-scu.h +++ b/drivers/clk/imx/clk-scu.h | |||
| @@ -10,7 +10,21 @@ | |||
| 10 | #include <linux/firmware/imx/sci.h> | 10 | #include <linux/firmware/imx/sci.h> |
| 11 | 11 | ||
| 12 | int imx_clk_scu_init(void); | 12 | int imx_clk_scu_init(void); |
| 13 | struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type); | 13 | |
| 14 | struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents, | ||
| 15 | int num_parents, u32 rsrc_id, u8 clk_type); | ||
| 16 | |||
| 17 | static inline struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, | ||
| 18 | u8 clk_type) | ||
| 19 | { | ||
| 20 | return __imx_clk_scu(name, NULL, 0, rsrc_id, clk_type); | ||
| 21 | } | ||
| 22 | |||
| 23 | static inline struct clk_hw *imx_clk_scu2(const char *name, const char * const *parents, | ||
| 24 | int num_parents, u32 rsrc_id, u8 clk_type) | ||
| 25 | { | ||
| 26 | return __imx_clk_scu(name, parents, num_parents, rsrc_id, clk_type); | ||
| 27 | } | ||
| 14 | 28 | ||
| 15 | struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name, | 29 | struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name, |
| 16 | unsigned long flags, void __iomem *reg, | 30 | unsigned long flags, void __iomem *reg, |
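A minimal sketch of how an SoC clock driver might use the new imx_clk_scu2() wrapper; the clock name, parent names and the clock-type constant are placeholders, while IMX_SC_R_A35 is the resource id that the clk-scu.c change routes through the ARM Trusted Firmware SIP call.

	/* "a35_sel", its parents and EXAMPLE_CLK_TYPE_CPU are placeholders */
	static const char * const a35_sels[] = { "osc", "a35_pll", };

	hw = imx_clk_scu2("a35_sel", a35_sels, ARRAY_SIZE(a35_sels),
			  IMX_SC_R_A35, EXAMPLE_CLK_TYPE_CPU);

Because the resource is IMX_SC_R_A35, __imx_clk_scu() selects clk_scu_cpu_ops, so a clk_set_rate() on this clock lands in clk_scu_atf_set_cpu_rate() instead of the generic SCU rate message.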
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c index 6dae54325a91..a334667c450a 100644 --- a/drivers/clk/imx/clk-vf610.c +++ b/drivers/clk/imx/clk-vf610.c | |||
| @@ -203,6 +203,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) | |||
| 203 | np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop"); | 203 | np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop"); |
| 204 | anatop_base = of_iomap(np, 0); | 204 | anatop_base = of_iomap(np, 0); |
| 205 | BUG_ON(!anatop_base); | 205 | BUG_ON(!anatop_base); |
| 206 | of_node_put(np); | ||
| 206 | 207 | ||
| 207 | np = ccm_node; | 208 | np = ccm_node; |
| 208 | ccm_base = of_iomap(np, 0); | 209 | ccm_base = of_iomap(np, 0); |
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index 028312de21b8..5748ec8673e4 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h | |||
| @@ -27,6 +27,30 @@ enum imx_sccg_pll_type { | |||
| 27 | SCCG_PLL2, | 27 | SCCG_PLL2, |
| 28 | }; | 28 | }; |
| 29 | 29 | ||
| 30 | enum imx_pll14xx_type { | ||
| 31 | PLL_1416X, | ||
| 32 | PLL_1443X, | ||
| 33 | }; | ||
| 34 | |||
| 35 | /* NOTE: Rate table should be kept sorted in descending order. */ | ||
| 36 | struct imx_pll14xx_rate_table { | ||
| 37 | unsigned int rate; | ||
| 38 | unsigned int pdiv; | ||
| 39 | unsigned int mdiv; | ||
| 40 | unsigned int sdiv; | ||
| 41 | unsigned int kdiv; | ||
| 42 | }; | ||
| 43 | |||
| 44 | struct imx_pll14xx_clk { | ||
| 45 | enum imx_pll14xx_type type; | ||
| 46 | const struct imx_pll14xx_rate_table *rate_table; | ||
| 47 | int rate_count; | ||
| 48 | int flags; | ||
| 49 | }; | ||
| 50 | |||
| 51 | struct clk *imx_clk_pll14xx(const char *name, const char *parent_name, | ||
| 52 | void __iomem *base, const struct imx_pll14xx_clk *pll_clk); | ||
| 53 | |||
| 30 | struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name, | 54 | struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name, |
| 31 | const char *parent, void __iomem *base); | 55 | const char *parent, void __iomem *base); |
| 32 | 56 | ||
| @@ -36,9 +60,12 @@ struct clk *imx_clk_pllv2(const char *name, const char *parent, | |||
| 36 | struct clk *imx_clk_frac_pll(const char *name, const char *parent_name, | 60 | struct clk *imx_clk_frac_pll(const char *name, const char *parent_name, |
| 37 | void __iomem *base); | 61 | void __iomem *base); |
| 38 | 62 | ||
| 39 | struct clk *imx_clk_sccg_pll(const char *name, const char *parent_name, | 63 | struct clk *imx_clk_sccg_pll(const char *name, |
| 40 | void __iomem *base, | 64 | const char * const *parent_names, |
| 41 | enum imx_sccg_pll_type pll_type); | 65 | u8 num_parents, |
| 66 | u8 parent, u8 bypass1, u8 bypass2, | ||
| 67 | void __iomem *base, | ||
| 68 | unsigned long flags); | ||
| 42 | 69 | ||
| 43 | enum imx_pllv3_type { | 70 | enum imx_pllv3_type { |
| 44 | IMX_PLLV3_GENERIC, | 71 | IMX_PLLV3_GENERIC, |
| @@ -329,7 +356,8 @@ static inline struct clk *imx_clk_mux_flags(const char *name, | |||
| 329 | } | 356 | } |
| 330 | 357 | ||
| 331 | static inline struct clk *imx_clk_mux2_flags(const char *name, | 358 | static inline struct clk *imx_clk_mux2_flags(const char *name, |
| 332 | void __iomem *reg, u8 shift, u8 width, const char **parents, | 359 | void __iomem *reg, u8 shift, u8 width, |
| 360 | const char * const *parents, | ||
| 333 | int num_parents, unsigned long flags) | 361 | int num_parents, unsigned long flags) |
| 334 | { | 362 | { |
| 335 | return clk_register_mux(NULL, name, parents, num_parents, | 363 | return clk_register_mux(NULL, name, parents, num_parents, |
| @@ -354,7 +382,7 @@ struct clk *imx_clk_cpu(const char *name, const char *parent_name, | |||
| 354 | struct clk *step); | 382 | struct clk *step); |
| 355 | 383 | ||
| 356 | struct clk *imx8m_clk_composite_flags(const char *name, | 384 | struct clk *imx8m_clk_composite_flags(const char *name, |
| 357 | const char **parent_names, | 385 | const char * const *parent_names, |
| 358 | int num_parents, void __iomem *reg, | 386 | int num_parents, void __iomem *reg, |
| 359 | unsigned long flags); | 387 | unsigned long flags); |
| 360 | 388 | ||
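To show the widened imx_clk_sccg_pll() prototype in use (all names, indices and the base pointer are placeholders): parent, bypass1 and bypass2 are indices into parent_names, naming the parent seen in normal operation, with BYPASS1 set, and with BYPASS2 set, respectively.

	static const char * const example_pll_out_sels[] = {
		"pll_ref", "bypass1_src", "bypass2_src",
	};

	clk = imx_clk_sccg_pll("example_pll_out", example_pll_out_sels,
			       ARRAY_SIZE(example_pll_out_sels),
			       0, 1, 2,		/* parent, bypass1, bypass2 indices */
			       pll_base,	/* placeholder mapped register base */
			       0);		/* flags */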
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c index 5ef7d9ba2195..510b685212d3 100644 --- a/drivers/clk/ingenic/cgu.c +++ b/drivers/clk/ingenic/cgu.c | |||
| @@ -83,7 +83,7 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) | |||
| 83 | const struct ingenic_cgu_clk_info *clk_info; | 83 | const struct ingenic_cgu_clk_info *clk_info; |
| 84 | const struct ingenic_cgu_pll_info *pll_info; | 84 | const struct ingenic_cgu_pll_info *pll_info; |
| 85 | unsigned m, n, od_enc, od; | 85 | unsigned m, n, od_enc, od; |
| 86 | bool bypass, enable; | 86 | bool bypass; |
| 87 | unsigned long flags; | 87 | unsigned long flags; |
| 88 | u32 ctl; | 88 | u32 ctl; |
| 89 | 89 | ||
| @@ -103,7 +103,6 @@ ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) | |||
| 103 | od_enc &= GENMASK(pll_info->od_bits - 1, 0); | 103 | od_enc &= GENMASK(pll_info->od_bits - 1, 0); |
| 104 | bypass = !pll_info->no_bypass_bit && | 104 | bypass = !pll_info->no_bypass_bit && |
| 105 | !!(ctl & BIT(pll_info->bypass_bit)); | 105 | !!(ctl & BIT(pll_info->bypass_bit)); |
| 106 | enable = !!(ctl & BIT(pll_info->enable_bit)); | ||
| 107 | 106 | ||
| 108 | if (bypass) | 107 | if (bypass) |
| 109 | return parent_rate; | 108 | return parent_rate; |
| @@ -426,16 +425,16 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate, | |||
| 426 | struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); | 425 | struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); |
| 427 | struct ingenic_cgu *cgu = ingenic_clk->cgu; | 426 | struct ingenic_cgu *cgu = ingenic_clk->cgu; |
| 428 | const struct ingenic_cgu_clk_info *clk_info; | 427 | const struct ingenic_cgu_clk_info *clk_info; |
| 429 | long rate = *parent_rate; | 428 | unsigned int div = 1; |
| 430 | 429 | ||
| 431 | clk_info = &cgu->clock_info[ingenic_clk->idx]; | 430 | clk_info = &cgu->clock_info[ingenic_clk->idx]; |
| 432 | 431 | ||
| 433 | if (clk_info->type & CGU_CLK_DIV) | 432 | if (clk_info->type & CGU_CLK_DIV) |
| 434 | rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate); | 433 | div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate); |
| 435 | else if (clk_info->type & CGU_CLK_FIXDIV) | 434 | else if (clk_info->type & CGU_CLK_FIXDIV) |
| 436 | rate /= clk_info->fixdiv.div; | 435 | div = clk_info->fixdiv.div; |
| 437 | 436 | ||
| 438 | return rate; | 437 | return DIV_ROUND_UP(*parent_rate, div); |
| 439 | } | 438 | } |
| 440 | 439 | ||
| 441 | static int | 440 | static int |
| @@ -455,7 +454,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate, | |||
| 455 | 454 | ||
| 456 | if (clk_info->type & CGU_CLK_DIV) { | 455 | if (clk_info->type & CGU_CLK_DIV) { |
| 457 | div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate); | 456 | div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate); |
| 458 | rate = parent_rate / div; | 457 | rate = DIV_ROUND_UP(parent_rate, div); |
| 459 | 458 | ||
| 460 | if (rate != req_rate) | 459 | if (rate != req_rate) |
| 461 | return -EINVAL; | 460 | return -EINVAL; |
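A quick numeric check of the DIV_ROUND_UP change above, with hypothetical values: for a 48 MHz parent and a divider of 7, plain integer division truncates, while the new code rounds up, and set_rate() now compares the requested rate against the same rounded value that round_rate() reported.

	unsigned long truncated = 48000000 / 7;			/* 6857142 Hz */
	unsigned long rounded   = DIV_ROUND_UP(48000000, 7);	/* 6857143 Hz */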
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h index 502bcbb61b04..e12716d8ce3c 100644 --- a/drivers/clk/ingenic/cgu.h +++ b/drivers/clk/ingenic/cgu.h | |||
| @@ -80,7 +80,7 @@ struct ingenic_cgu_mux_info { | |||
| 80 | * @reg: offset of the divider control register within the CGU | 80 | * @reg: offset of the divider control register within the CGU |
| 81 | * @shift: number of bits to left shift the divide value by (ie. the index of | 81 | * @shift: number of bits to left shift the divide value by (ie. the index of |
| 82 | * the lowest bit of the divide value within its control register) | 82 | * the lowest bit of the divide value within its control register) |
| 83 | * @div: number of bits to divide the divider value by (i.e. if the | 83 | * @div: number to divide the divider value by (i.e. if the |
| 84 | * effective divider value is the value written to the register | 84 | * effective divider value is the value written to the register |
| 85 | * multiplied by some constant) | 85 | * multiplied by some constant) |
| 86 | * @bits: the size of the divide value in bits | 86 | * @bits: the size of the divide value in bits |
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c index 4479c102e899..b86edd328249 100644 --- a/drivers/clk/ingenic/jz4740-cgu.c +++ b/drivers/clk/ingenic/jz4740-cgu.c | |||
| @@ -165,7 +165,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = { | |||
| 165 | .parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 }, | 165 | .parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 }, |
| 166 | .mux = { CGU_REG_CPCCR, 29, 1 }, | 166 | .mux = { CGU_REG_CPCCR, 29, 1 }, |
| 167 | .div = { CGU_REG_CPCCR, 23, 1, 6, -1, -1, -1 }, | 167 | .div = { CGU_REG_CPCCR, 23, 1, 6, -1, -1, -1 }, |
| 168 | .gate = { CGU_REG_SCR, 6 }, | 168 | .gate = { CGU_REG_SCR, 6, true }, |
| 169 | }, | 169 | }, |
| 170 | 170 | ||
| 171 | /* Gate-only clocks */ | 171 | /* Gate-only clocks */ |
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c index 934bf0e45e26..9628d4e7690b 100644 --- a/drivers/clk/mediatek/clk-gate.c +++ b/drivers/clk/mediatek/clk-gate.c | |||
| @@ -157,7 +157,8 @@ struct clk *mtk_clk_register_gate( | |||
| 157 | int clr_ofs, | 157 | int clr_ofs, |
| 158 | int sta_ofs, | 158 | int sta_ofs, |
| 159 | u8 bit, | 159 | u8 bit, |
| 160 | const struct clk_ops *ops) | 160 | const struct clk_ops *ops, |
| 161 | unsigned long flags) | ||
| 161 | { | 162 | { |
| 162 | struct mtk_clk_gate *cg; | 163 | struct mtk_clk_gate *cg; |
| 163 | struct clk *clk; | 164 | struct clk *clk; |
| @@ -172,6 +173,7 @@ struct clk *mtk_clk_register_gate( | |||
| 172 | init.parent_names = parent_name ? &parent_name : NULL; | 173 | init.parent_names = parent_name ? &parent_name : NULL; |
| 173 | init.num_parents = parent_name ? 1 : 0; | 174 | init.num_parents = parent_name ? 1 : 0; |
| 174 | init.ops = ops; | 175 | init.ops = ops; |
| 176 | init.flags = flags; | ||
| 175 | 177 | ||
| 176 | cg->regmap = regmap; | 178 | cg->regmap = regmap; |
| 177 | cg->set_ofs = set_ofs; | 179 | cg->set_ofs = set_ofs; |
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h index 72ef89b3ad7b..9f766dfe1d57 100644 --- a/drivers/clk/mediatek/clk-gate.h +++ b/drivers/clk/mediatek/clk-gate.h | |||
| @@ -47,6 +47,7 @@ struct clk *mtk_clk_register_gate( | |||
| 47 | int clr_ofs, | 47 | int clr_ofs, |
| 48 | int sta_ofs, | 48 | int sta_ofs, |
| 49 | u8 bit, | 49 | u8 bit, |
| 50 | const struct clk_ops *ops); | 50 | const struct clk_ops *ops, |
| 51 | unsigned long flags); | ||
| 51 | 52 | ||
| 52 | #endif /* __DRV_CLK_GATE_H */ | 53 | #endif /* __DRV_CLK_GATE_H */ |
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c index ab6ab07f53e6..905a2316f6a7 100644 --- a/drivers/clk/mediatek/clk-mt2701.c +++ b/drivers/clk/mediatek/clk-mt2701.c | |||
| @@ -535,8 +535,8 @@ static const struct mtk_composite top_muxes[] = { | |||
| 535 | 0x0080, 8, 2, 15), | 535 | 0x0080, 8, 2, 15), |
| 536 | MUX_GATE(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, | 536 | MUX_GATE(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, |
| 537 | 0x0080, 16, 3, 23), | 537 | 0x0080, 16, 3, 23), |
| 538 | MUX_GATE(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents, | 538 | MUX_GATE_FLAGS_2(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents, |
| 539 | 0x0080, 24, 2, 31), | 539 | 0x0080, 24, 2, 31, 0, CLK_MUX_ROUND_CLOSEST), |
| 540 | 540 | ||
| 541 | MUX_GATE(CLK_TOP_TVE_SEL, "tve_sel", tve_parents, | 541 | MUX_GATE(CLK_TOP_TVE_SEL, "tve_sel", tve_parents, |
| 542 | 0x0090, 0, 3, 7), | 542 | 0x0090, 0, 3, 7), |
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c index 991d4093726e..b09cb3d99f66 100644 --- a/drivers/clk/mediatek/clk-mt2712.c +++ b/drivers/clk/mediatek/clk-mt2712.c | |||
| @@ -223,6 +223,8 @@ static const struct mtk_fixed_factor top_divs[] = { | |||
| 223 | 4), | 223 | 4), |
| 224 | FACTOR(CLK_TOP_APLL1_D3, "apll1_d3", "apll1_ck", 1, | 224 | FACTOR(CLK_TOP_APLL1_D3, "apll1_d3", "apll1_ck", 1, |
| 225 | 3), | 225 | 3), |
| 226 | FACTOR(CLK_TOP_APLL2_D3, "apll2_d3", "apll2_ck", 1, | ||
| 227 | 3), | ||
| 226 | }; | 228 | }; |
| 227 | 229 | ||
| 228 | static const char * const axi_parents[] = { | 230 | static const char * const axi_parents[] = { |
| @@ -594,7 +596,8 @@ static const char * const a1sys_hp_parents[] = { | |||
| 594 | "apll1_ck", | 596 | "apll1_ck", |
| 595 | "apll1_d2", | 597 | "apll1_d2", |
| 596 | "apll1_d4", | 598 | "apll1_d4", |
| 597 | "apll1_d8" | 599 | "apll1_d8", |
| 600 | "apll1_d3" | ||
| 598 | }; | 601 | }; |
| 599 | 602 | ||
| 600 | static const char * const a2sys_hp_parents[] = { | 603 | static const char * const a2sys_hp_parents[] = { |
| @@ -602,7 +605,8 @@ static const char * const a2sys_hp_parents[] = { | |||
| 602 | "apll2_ck", | 605 | "apll2_ck", |
| 603 | "apll2_d2", | 606 | "apll2_d2", |
| 604 | "apll2_d4", | 607 | "apll2_d4", |
| 605 | "apll2_d8" | 608 | "apll2_d8", |
| 609 | "apll2_d3" | ||
| 606 | }; | 610 | }; |
| 607 | 611 | ||
| 608 | static const char * const asm_l_parents[] = { | 612 | static const char * const asm_l_parents[] = { |
| @@ -1463,7 +1467,6 @@ static struct platform_driver clk_mt2712_drv = { | |||
| 1463 | .probe = clk_mt2712_probe, | 1467 | .probe = clk_mt2712_probe, |
| 1464 | .driver = { | 1468 | .driver = { |
| 1465 | .name = "clk-mt2712", | 1469 | .name = "clk-mt2712", |
| 1466 | .owner = THIS_MODULE, | ||
| 1467 | .of_match_table = of_match_clk_mt2712, | 1470 | .of_match_table = of_match_clk_mt2712, |
| 1468 | }, | 1471 | }, |
| 1469 | }; | 1472 | }; |
diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c index 5702bc974ed9..c2b46b184b9a 100644 --- a/drivers/clk/mediatek/clk-mt6797.c +++ b/drivers/clk/mediatek/clk-mt6797.c | |||
| @@ -324,6 +324,10 @@ static const char * const anc_md32_parents[] = { | |||
| 324 | "univpll_d5", | 324 | "univpll_d5", |
| 325 | }; | 325 | }; |
| 326 | 326 | ||
| 327 | /* | ||
| 328 | * Clock mux ddrphycfg is needed by the DRAM controller. We mark it as | ||
| 329 | * critical as otherwise the system will hang after boot. | ||
| 330 | */ | ||
| 327 | static const struct mtk_composite top_muxes[] = { | 331 | static const struct mtk_composite top_muxes[] = { |
| 328 | MUX(CLK_TOP_MUX_ULPOSC_AXI_CK_MUX_PRE, "ulposc_axi_ck_mux_pre", | 332 | MUX(CLK_TOP_MUX_ULPOSC_AXI_CK_MUX_PRE, "ulposc_axi_ck_mux_pre", |
| 329 | ulposc_axi_ck_mux_pre_parents, 0x0040, 3, 1), | 333 | ulposc_axi_ck_mux_pre_parents, 0x0040, 3, 1), |
| @@ -331,8 +335,8 @@ static const struct mtk_composite top_muxes[] = { | |||
| 331 | ulposc_axi_ck_mux_parents, 0x0040, 2, 1), | 335 | ulposc_axi_ck_mux_parents, 0x0040, 2, 1), |
| 332 | MUX(CLK_TOP_MUX_AXI, "axi_sel", axi_parents, | 336 | MUX(CLK_TOP_MUX_AXI, "axi_sel", axi_parents, |
| 333 | 0x0040, 0, 2), | 337 | 0x0040, 0, 2), |
| 334 | MUX(CLK_TOP_MUX_DDRPHYCFG, "ddrphycfg_sel", ddrphycfg_parents, | 338 | MUX_FLAGS(CLK_TOP_MUX_DDRPHYCFG, "ddrphycfg_sel", ddrphycfg_parents, |
| 335 | 0x0040, 16, 2), | 339 | 0x0040, 16, 2, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT), |
| 336 | MUX(CLK_TOP_MUX_MM, "mm_sel", mm_parents, | 340 | MUX(CLK_TOP_MUX_MM, "mm_sel", mm_parents, |
| 337 | 0x0040, 24, 2), | 341 | 0x0040, 24, 2), |
| 338 | MUX_GATE(CLK_TOP_MUX_PWM, "pwm_sel", pwm_parents, 0x0050, 0, 3, 7), | 342 | MUX_GATE(CLK_TOP_MUX_PWM, "pwm_sel", pwm_parents, 0x0050, 0, 3, 7), |
| @@ -424,33 +428,45 @@ static const struct mtk_gate_regs infra2_cg_regs = { | |||
| 424 | .sta_ofs = 0x00b0, | 428 | .sta_ofs = 0x00b0, |
| 425 | }; | 429 | }; |
| 426 | 430 | ||
| 427 | #define GATE_ICG0(_id, _name, _parent, _shift) { \ | 431 | #define GATE_ICG0(_id, _name, _parent, _shift) { \ |
| 428 | .id = _id, \ | 432 | .id = _id, \ |
| 429 | .name = _name, \ | 433 | .name = _name, \ |
| 430 | .parent_name = _parent, \ | 434 | .parent_name = _parent, \ |
| 431 | .regs = &infra0_cg_regs, \ | 435 | .regs = &infra0_cg_regs, \ |
| 432 | .shift = _shift, \ | 436 | .shift = _shift, \ |
| 433 | .ops = &mtk_clk_gate_ops_setclr, \ | 437 | .ops = &mtk_clk_gate_ops_setclr, \ |
| 434 | } | 438 | } |
| 435 | 439 | ||
| 436 | #define GATE_ICG1(_id, _name, _parent, _shift) { \ | 440 | #define GATE_ICG1(_id, _name, _parent, _shift) \ |
| 437 | .id = _id, \ | 441 | GATE_ICG1_FLAGS(_id, _name, _parent, _shift, 0) |
| 438 | .name = _name, \ | 442 | |
| 439 | .parent_name = _parent, \ | 443 | #define GATE_ICG1_FLAGS(_id, _name, _parent, _shift, _flags) { \ |
| 440 | .regs = &infra1_cg_regs, \ | 444 | .id = _id, \ |
| 441 | .shift = _shift, \ | 445 | .name = _name, \ |
| 442 | .ops = &mtk_clk_gate_ops_setclr, \ | 446 | .parent_name = _parent, \ |
| 447 | .regs = &infra1_cg_regs, \ | ||
| 448 | .shift = _shift, \ | ||
| 449 | .ops = &mtk_clk_gate_ops_setclr, \ | ||
| 450 | .flags = _flags, \ | ||
| 443 | } | 451 | } |
| 444 | 452 | ||
| 445 | #define GATE_ICG2(_id, _name, _parent, _shift) { \ | 453 | #define GATE_ICG2(_id, _name, _parent, _shift) \ |
| 446 | .id = _id, \ | 454 | GATE_ICG2_FLAGS(_id, _name, _parent, _shift, 0) |
| 447 | .name = _name, \ | 455 | |
| 448 | .parent_name = _parent, \ | 456 | #define GATE_ICG2_FLAGS(_id, _name, _parent, _shift, _flags) { \ |
| 449 | .regs = &infra2_cg_regs, \ | 457 | .id = _id, \ |
| 450 | .shift = _shift, \ | 458 | .name = _name, \ |
| 451 | .ops = &mtk_clk_gate_ops_setclr, \ | 459 | .parent_name = _parent, \ |
| 460 | .regs = &infra2_cg_regs, \ | ||
| 461 | .shift = _shift, \ | ||
| 462 | .ops = &mtk_clk_gate_ops_setclr, \ | ||
| 463 | .flags = _flags, \ | ||
| 452 | } | 464 | } |
| 453 | 465 | ||
| 466 | /* | ||
| 467 | * Clock gates dramc and dramc_b are needed by the DRAM controller. | ||
| 468 | * We mark them as critical as otherwise the system will hang after boot. | ||
| 469 | */ | ||
| 454 | static const struct mtk_gate infra_clks[] = { | 470 | static const struct mtk_gate infra_clks[] = { |
| 455 | GATE_ICG0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", "ulposc", 0), | 471 | GATE_ICG0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr", "ulposc", 0), |
| 456 | GATE_ICG0(CLK_INFRA_PMIC_AP, "infra_pmic_ap", "pmicspi_sel", 1), | 472 | GATE_ICG0(CLK_INFRA_PMIC_AP, "infra_pmic_ap", "pmicspi_sel", 1), |
| @@ -505,7 +521,8 @@ static const struct mtk_gate infra_clks[] = { | |||
| 505 | GATE_ICG1(CLK_INFRA_CCIF_AP, "infra_ccif_ap", "axi_sel", 23), | 521 | GATE_ICG1(CLK_INFRA_CCIF_AP, "infra_ccif_ap", "axi_sel", 23), |
| 506 | GATE_ICG1(CLK_INFRA_AUDIO, "infra_audio", "axi_sel", 25), | 522 | GATE_ICG1(CLK_INFRA_AUDIO, "infra_audio", "axi_sel", 25), |
| 507 | GATE_ICG1(CLK_INFRA_CCIF_MD, "infra_ccif_md", "axi_sel", 26), | 523 | GATE_ICG1(CLK_INFRA_CCIF_MD, "infra_ccif_md", "axi_sel", 26), |
| 508 | GATE_ICG1(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m", "clk26m", 31), | 524 | GATE_ICG1_FLAGS(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m", |
| 525 | "clk26m", 31, CLK_IS_CRITICAL), | ||
| 509 | GATE_ICG2(CLK_INFRA_I2C4, "infra_i2c4", "axi_sel", 0), | 526 | GATE_ICG2(CLK_INFRA_I2C4, "infra_i2c4", "axi_sel", 0), |
| 510 | GATE_ICG2(CLK_INFRA_I2C_APPM, "infra_i2c_appm", "axi_sel", 1), | 527 | GATE_ICG2(CLK_INFRA_I2C_APPM, "infra_i2c_appm", "axi_sel", 1), |
| 511 | GATE_ICG2(CLK_INFRA_I2C_GPUPM, "infra_i2c_gpupm", "axi_sel", 2), | 528 | GATE_ICG2(CLK_INFRA_I2C_GPUPM, "infra_i2c_gpupm", "axi_sel", 2), |
| @@ -516,7 +533,8 @@ static const struct mtk_gate infra_clks[] = { | |||
| 516 | GATE_ICG2(CLK_INFRA_I2C5, "infra_i2c5", "axi_sel", 7), | 533 | GATE_ICG2(CLK_INFRA_I2C5, "infra_i2c5", "axi_sel", 7), |
| 517 | GATE_ICG2(CLK_INFRA_SYS_CIRQ, "infra_sys_cirq", "axi_sel", 8), | 534 | GATE_ICG2(CLK_INFRA_SYS_CIRQ, "infra_sys_cirq", "axi_sel", 8), |
| 518 | GATE_ICG2(CLK_INFRA_SPI1, "infra_spi1", "spi_sel", 10), | 535 | GATE_ICG2(CLK_INFRA_SPI1, "infra_spi1", "spi_sel", 10), |
| 519 | GATE_ICG2(CLK_INFRA_DRAMC_B_F26M, "infra_dramc_b_f26m", "clk26m", 11), | 536 | GATE_ICG2_FLAGS(CLK_INFRA_DRAMC_B_F26M, "infra_dramc_b_f26m", |
| 537 | "clk26m", 11, CLK_IS_CRITICAL), | ||
| 520 | GATE_ICG2(CLK_INFRA_ANC_MD32, "infra_anc_md32", "anc_md32_sel", 12), | 538 | GATE_ICG2(CLK_INFRA_ANC_MD32, "infra_anc_md32", "anc_md32_sel", 12), |
| 521 | GATE_ICG2(CLK_INFRA_ANC_MD32_32K, "infra_anc_md32_32k", "clk26m", 13), | 539 | GATE_ICG2(CLK_INFRA_ANC_MD32_32K, "infra_anc_md32_32k", "clk26m", 13), |
| 522 | GATE_ICG2(CLK_INFRA_DVFS_SPM1, "infra_dvfs_spm1", "axi_sel", 15), | 540 | GATE_ICG2(CLK_INFRA_DVFS_SPM1, "infra_dvfs_spm1", "axi_sel", 15), |
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c index 96c292c3e440..deedeb3ea33b 100644 --- a/drivers/clk/mediatek/clk-mt8173.c +++ b/drivers/clk/mediatek/clk-mt8173.c | |||
| @@ -533,7 +533,7 @@ static const char * const ca53_parents[] __initconst = { | |||
| 533 | "univpll" | 533 | "univpll" |
| 534 | }; | 534 | }; |
| 535 | 535 | ||
| 536 | static const char * const ca57_parents[] __initconst = { | 536 | static const char * const ca72_parents[] __initconst = { |
| 537 | "clk26m", | 537 | "clk26m", |
| 538 | "armca15pll", | 538 | "armca15pll", |
| 539 | "mainpll", | 539 | "mainpll", |
| @@ -542,7 +542,7 @@ static const char * const ca57_parents[] __initconst = { | |||
| 542 | 542 | ||
| 543 | static const struct mtk_composite cpu_muxes[] __initconst = { | 543 | static const struct mtk_composite cpu_muxes[] __initconst = { |
| 544 | MUX(CLK_INFRA_CA53SEL, "infra_ca53_sel", ca53_parents, 0x0000, 0, 2), | 544 | MUX(CLK_INFRA_CA53SEL, "infra_ca53_sel", ca53_parents, 0x0000, 0, 2), |
| 545 | MUX(CLK_INFRA_CA57SEL, "infra_ca57_sel", ca57_parents, 0x0000, 2, 2), | 545 | MUX(CLK_INFRA_CA72SEL, "infra_ca72_sel", ca72_parents, 0x0000, 2, 2), |
| 546 | }; | 546 | }; |
| 547 | 547 | ||
| 548 | static const struct mtk_composite top_muxes[] __initconst = { | 548 | static const struct mtk_composite top_muxes[] __initconst = { |
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c index 9c0ae4278a94..5531dd2e496d 100644 --- a/drivers/clk/mediatek/clk-mtk.c +++ b/drivers/clk/mediatek/clk-mtk.c | |||
| @@ -130,7 +130,7 @@ int mtk_clk_register_gates(struct device_node *node, | |||
| 130 | gate->regs->set_ofs, | 130 | gate->regs->set_ofs, |
| 131 | gate->regs->clr_ofs, | 131 | gate->regs->clr_ofs, |
| 132 | gate->regs->sta_ofs, | 132 | gate->regs->sta_ofs, |
| 133 | gate->shift, gate->ops); | 133 | gate->shift, gate->ops, gate->flags); |
| 134 | 134 | ||
| 135 | if (IS_ERR(clk)) { | 135 | if (IS_ERR(clk)) { |
| 136 | pr_err("Failed to register clk %s: %ld\n", | 136 | pr_err("Failed to register clk %s: %ld\n", |
| @@ -167,7 +167,7 @@ struct clk *mtk_clk_register_composite(const struct mtk_composite *mc, | |||
| 167 | mux->mask = BIT(mc->mux_width) - 1; | 167 | mux->mask = BIT(mc->mux_width) - 1; |
| 168 | mux->shift = mc->mux_shift; | 168 | mux->shift = mc->mux_shift; |
| 169 | mux->lock = lock; | 169 | mux->lock = lock; |
| 170 | 170 | mux->flags = mc->mux_flags; | |
| 171 | mux_hw = &mux->hw; | 171 | mux_hw = &mux->hw; |
| 172 | mux_ops = &clk_mux_ops; | 172 | mux_ops = &clk_mux_ops; |
| 173 | 173 | ||
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h index f83c2bbb677e..fb27b5bf30d9 100644 --- a/drivers/clk/mediatek/clk-mtk.h +++ b/drivers/clk/mediatek/clk-mtk.h | |||
| @@ -81,15 +81,13 @@ struct mtk_composite { | |||
| 81 | signed char divider_shift; | 81 | signed char divider_shift; |
| 82 | signed char divider_width; | 82 | signed char divider_width; |
| 83 | 83 | ||
| 84 | u8 mux_flags; | ||
| 85 | |||
| 84 | signed char num_parents; | 86 | signed char num_parents; |
| 85 | }; | 87 | }; |
| 86 | 88 | ||
| 87 | /* | 89 | #define MUX_GATE_FLAGS_2(_id, _name, _parents, _reg, _shift, \ |
| 88 | * In case the rate change propagation to parent clocks is undesirable, | 90 | _width, _gate, _flags, _muxflags) { \ |
| 89 | * this macro allows to specify the clock flags manually. | ||
| 90 | */ | ||
| 91 | #define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \ | ||
| 92 | _gate, _flags) { \ | ||
| 93 | .id = _id, \ | 91 | .id = _id, \ |
| 94 | .name = _name, \ | 92 | .name = _name, \ |
| 95 | .mux_reg = _reg, \ | 93 | .mux_reg = _reg, \ |
| @@ -101,9 +99,19 @@ struct mtk_composite { | |||
| 101 | .parent_names = _parents, \ | 99 | .parent_names = _parents, \ |
| 102 | .num_parents = ARRAY_SIZE(_parents), \ | 100 | .num_parents = ARRAY_SIZE(_parents), \ |
| 103 | .flags = _flags, \ | 101 | .flags = _flags, \ |
| 102 | .mux_flags = _muxflags, \ | ||
| 104 | } | 103 | } |
| 105 | 104 | ||
| 106 | /* | 105 | /* |
| 106 | * In case the rate change propagation to parent clocks is undesirable, | ||
| 107 | * this macro allows to specify the clock flags manually. | ||
| 108 | */ | ||
| 109 | #define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \ | ||
| 110 | _gate, _flags) \ | ||
| 111 | MUX_GATE_FLAGS_2(_id, _name, _parents, _reg, \ | ||
| 112 | _shift, _width, _gate, _flags, 0) | ||
| 113 | |||
| 114 | /* | ||
| 107 | * Unless necessary, all MUX_GATE clocks propagate rate changes to their | 115 | * Unless necessary, all MUX_GATE clocks propagate rate changes to their |
| 108 | * parent clock by default. | 116 | * parent clock by default. |
| 109 | */ | 117 | */ |
| @@ -111,7 +119,11 @@ struct mtk_composite { | |||
| 111 | MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \ | 119 | MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, \ |
| 112 | _gate, CLK_SET_RATE_PARENT) | 120 | _gate, CLK_SET_RATE_PARENT) |
| 113 | 121 | ||
| 114 | #define MUX(_id, _name, _parents, _reg, _shift, _width) { \ | 122 | #define MUX(_id, _name, _parents, _reg, _shift, _width) \ |
| 123 | MUX_FLAGS(_id, _name, _parents, _reg, \ | ||
| 124 | _shift, _width, CLK_SET_RATE_PARENT) | ||
| 125 | |||
| 126 | #define MUX_FLAGS(_id, _name, _parents, _reg, _shift, _width, _flags) { \ | ||
| 115 | .id = _id, \ | 127 | .id = _id, \ |
| 116 | .name = _name, \ | 128 | .name = _name, \ |
| 117 | .mux_reg = _reg, \ | 129 | .mux_reg = _reg, \ |
| @@ -121,7 +133,7 @@ struct mtk_composite { | |||
| 121 | .divider_shift = -1, \ | 133 | .divider_shift = -1, \ |
| 122 | .parent_names = _parents, \ | 134 | .parent_names = _parents, \ |
| 123 | .num_parents = ARRAY_SIZE(_parents), \ | 135 | .num_parents = ARRAY_SIZE(_parents), \ |
| 124 | .flags = CLK_SET_RATE_PARENT, \ | 136 | .flags = _flags, \ |
| 125 | } | 137 | } |
| 126 | 138 | ||
| 127 | #define DIV_GATE(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, \ | 139 | #define DIV_GATE(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, \ |
| @@ -158,6 +170,7 @@ struct mtk_gate { | |||
| 158 | const struct mtk_gate_regs *regs; | 170 | const struct mtk_gate_regs *regs; |
| 159 | int shift; | 171 | int shift; |
| 160 | const struct clk_ops *ops; | 172 | const struct clk_ops *ops; |
| 173 | unsigned long flags; | ||
| 161 | }; | 174 | }; |
| 162 | 175 | ||
| 163 | int mtk_clk_register_gates(struct device_node *node, | 176 | int mtk_clk_register_gates(struct device_node *node, |
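With the macro changes above, a SoC clock table can now pass both common clock framework flags and mux-specific flags. A short usage sketch follows; the clock IDs, parent names and register offsets are made up for illustration only:

	static const char * const example_parents[] __initconst = {
		"clk26m", "mainpll_d2"		/* hypothetical parents */
	};

	static const struct mtk_composite example_muxes[] __initconst = {
		/* gate-less mux with explicit clock flags */
		MUX_FLAGS(CLK_TOP_EXAMPLE0_SEL, "example0_sel", example_parents,
			  0x040, 0, 1, CLK_SET_RATE_PARENT),
		/* gated mux that also carries a clk_mux flag */
		MUX_GATE_FLAGS_2(CLK_TOP_EXAMPLE1_SEL, "example1_sel", example_parents,
				 0x040, 8, 1, 15, CLK_SET_RATE_PARENT,
				 CLK_MUX_ROUND_CLOSEST),
	};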
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig index efaa70f682b4..3858747f5438 100644 --- a/drivers/clk/meson/Kconfig +++ b/drivers/clk/meson/Kconfig | |||
| @@ -1,27 +1,52 @@ | |||
| 1 | config COMMON_CLK_AMLOGIC | 1 | config COMMON_CLK_MESON_INPUT |
| 2 | bool | 2 | tristate |
| 3 | depends on ARCH_MESON || COMPILE_TEST | ||
| 4 | select COMMON_CLK_REGMAP_MESON | ||
| 5 | 3 | ||
| 6 | config COMMON_CLK_AMLOGIC_AUDIO | 4 | config COMMON_CLK_MESON_REGMAP |
| 7 | bool | 5 | tristate |
| 8 | depends on ARCH_MESON || COMPILE_TEST | 6 | select REGMAP |
| 9 | select COMMON_CLK_AMLOGIC | ||
| 10 | 7 | ||
| 11 | config COMMON_CLK_MESON_AO | 8 | config COMMON_CLK_MESON_DUALDIV |
| 12 | bool | 9 | tristate |
| 13 | depends on OF | 10 | select COMMON_CLK_MESON_REGMAP |
| 14 | depends on ARCH_MESON || COMPILE_TEST | 11 | |
| 15 | select COMMON_CLK_REGMAP_MESON | 12 | config COMMON_CLK_MESON_MPLL |
| 13 | tristate | ||
| 14 | select COMMON_CLK_MESON_REGMAP | ||
| 15 | |||
| 16 | config COMMON_CLK_MESON_PHASE | ||
| 17 | tristate | ||
| 18 | select COMMON_CLK_MESON_REGMAP | ||
| 19 | |||
| 20 | config COMMON_CLK_MESON_PLL | ||
| 21 | tristate | ||
| 22 | select COMMON_CLK_MESON_REGMAP | ||
| 23 | |||
| 24 | config COMMON_CLK_MESON_SCLK_DIV | ||
| 25 | tristate | ||
| 26 | select COMMON_CLK_MESON_REGMAP | ||
| 27 | |||
| 28 | config COMMON_CLK_MESON_VID_PLL_DIV | ||
| 29 | tristate | ||
| 30 | select COMMON_CLK_MESON_REGMAP | ||
| 31 | |||
| 32 | config COMMON_CLK_MESON_AO_CLKC | ||
| 33 | tristate | ||
| 34 | select COMMON_CLK_MESON_REGMAP | ||
| 35 | select COMMON_CLK_MESON_INPUT | ||
| 16 | select RESET_CONTROLLER | 36 | select RESET_CONTROLLER |
| 17 | 37 | ||
| 18 | config COMMON_CLK_REGMAP_MESON | 38 | config COMMON_CLK_MESON_EE_CLKC |
| 19 | bool | 39 | tristate |
| 20 | select REGMAP | 40 | select COMMON_CLK_MESON_REGMAP |
| 41 | select COMMON_CLK_MESON_INPUT | ||
| 21 | 42 | ||
| 22 | config COMMON_CLK_MESON8B | 43 | config COMMON_CLK_MESON8B |
| 23 | bool | 44 | bool |
| 24 | select COMMON_CLK_AMLOGIC | 45 | depends on ARCH_MESON |
| 46 | select COMMON_CLK_MESON_REGMAP | ||
| 47 | select COMMON_CLK_MESON_MPLL | ||
| 48 | select COMMON_CLK_MESON_PLL | ||
| 49 | select MFD_SYSCON | ||
| 25 | select RESET_CONTROLLER | 50 | select RESET_CONTROLLER |
| 26 | help | 51 | help |
| 27 | Support for the clock controller on AmLogic S802 (Meson8), | 52 | Support for the clock controller on AmLogic S802 (Meson8), |
| @@ -30,8 +55,14 @@ config COMMON_CLK_MESON8B | |||
| 30 | 55 | ||
| 31 | config COMMON_CLK_GXBB | 56 | config COMMON_CLK_GXBB |
| 32 | bool | 57 | bool |
| 33 | select COMMON_CLK_AMLOGIC | 58 | depends on ARCH_MESON |
| 34 | select COMMON_CLK_MESON_AO | 59 | select COMMON_CLK_MESON_REGMAP |
| 60 | select COMMON_CLK_MESON_DUALDIV | ||
| 61 | select COMMON_CLK_MESON_VID_PLL_DIV | ||
| 62 | select COMMON_CLK_MESON_MPLL | ||
| 63 | select COMMON_CLK_MESON_PLL | ||
| 64 | select COMMON_CLK_MESON_AO_CLKC | ||
| 65 | select COMMON_CLK_MESON_EE_CLKC | ||
| 35 | select MFD_SYSCON | 66 | select MFD_SYSCON |
| 36 | help | 67 | help |
| 37 | Support for the clock controller on AmLogic S905 devices, aka gxbb. | 68 | Support for the clock controller on AmLogic S905 devices, aka gxbb. |
| @@ -39,8 +70,13 @@ config COMMON_CLK_GXBB | |||
| 39 | 70 | ||
| 40 | config COMMON_CLK_AXG | 71 | config COMMON_CLK_AXG |
| 41 | bool | 72 | bool |
| 42 | select COMMON_CLK_AMLOGIC | 73 | depends on ARCH_MESON |
| 43 | select COMMON_CLK_MESON_AO | 74 | select COMMON_CLK_MESON_REGMAP |
| 75 | select COMMON_CLK_MESON_DUALDIV | ||
| 76 | select COMMON_CLK_MESON_MPLL | ||
| 77 | select COMMON_CLK_MESON_PLL | ||
| 78 | select COMMON_CLK_MESON_AO_CLKC | ||
| 79 | select COMMON_CLK_MESON_EE_CLKC | ||
| 44 | select MFD_SYSCON | 80 | select MFD_SYSCON |
| 45 | help | 81 | help |
| 46 | Support for the clock controller on AmLogic A113D devices, aka axg. | 82 | Support for the clock controller on AmLogic A113D devices, aka axg. |
| @@ -48,9 +84,26 @@ config COMMON_CLK_AXG | |||
| 48 | 84 | ||
| 49 | config COMMON_CLK_AXG_AUDIO | 85 | config COMMON_CLK_AXG_AUDIO |
| 50 | tristate "Meson AXG Audio Clock Controller Driver" | 86 | tristate "Meson AXG Audio Clock Controller Driver" |
| 51 | depends on COMMON_CLK_AXG | 87 | depends on ARCH_MESON |
| 52 | select COMMON_CLK_AMLOGIC_AUDIO | 88 | select COMMON_CLK_MESON_INPUT |
| 53 | select MFD_SYSCON | 89 | select COMMON_CLK_MESON_REGMAP |
| 90 | select COMMON_CLK_MESON_PHASE | ||
| 91 | select COMMON_CLK_MESON_SCLK_DIV | ||
| 92 | select REGMAP_MMIO | ||
| 54 | help | 93 | help |
| 55 | Support for the audio clock controller on AmLogic A113D devices, | 94 | Support for the audio clock controller on AmLogic A113D devices, |
| 56 | aka axg. Say Y if you want the audio subsystem to work. | 95 | aka axg. Say Y if you want the audio subsystem to work. |
| 96 | |||
| 97 | config COMMON_CLK_G12A | ||
| 98 | bool | ||
| 99 | depends on ARCH_MESON | ||
| 100 | select COMMON_CLK_MESON_REGMAP | ||
| 101 | select COMMON_CLK_MESON_DUALDIV | ||
| 102 | select COMMON_CLK_MESON_MPLL | ||
| 103 | select COMMON_CLK_MESON_PLL | ||
| 104 | select COMMON_CLK_MESON_AO_CLKC | ||
| 105 | select COMMON_CLK_MESON_EE_CLKC | ||
| 106 | select MFD_SYSCON | ||
| 107 | help | ||
| 108 | Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2 | ||
| 109 | devices, aka g12a. Say Y if you want peripherals to work. | ||
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile index a849aa809825..021fc290e749 100644 --- a/drivers/clk/meson/Makefile +++ b/drivers/clk/meson/Makefile | |||
| @@ -1,13 +1,20 @@ | |||
| 1 | # | 1 | # Amlogic clock drivers |
| 2 | # Makefile for Meson specific clk | ||
| 3 | # | ||
| 4 | 2 | ||
| 5 | obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-phase.o vid-pll-div.o | 3 | obj-$(CONFIG_COMMON_CLK_MESON_AO_CLKC) += meson-aoclk.o |
| 6 | obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-input.o | 4 | obj-$(CONFIG_COMMON_CLK_MESON_DUALDIV) += clk-dualdiv.o |
| 7 | obj-$(CONFIG_COMMON_CLK_AMLOGIC_AUDIO) += clk-triphase.o sclk-div.o | 5 | obj-$(CONFIG_COMMON_CLK_MESON_EE_CLKC) += meson-eeclk.o |
| 8 | obj-$(CONFIG_COMMON_CLK_MESON_AO) += meson-aoclk.o | 6 | obj-$(CONFIG_COMMON_CLK_MESON_INPUT) += clk-input.o |
| 7 | obj-$(CONFIG_COMMON_CLK_MESON_MPLL) += clk-mpll.o | ||
| 8 | obj-$(CONFIG_COMMON_CLK_MESON_PHASE) += clk-phase.o | ||
| 9 | obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o | ||
| 10 | obj-$(CONFIG_COMMON_CLK_MESON_REGMAP) += clk-regmap.o | ||
| 11 | obj-$(CONFIG_COMMON_CLK_MESON_SCLK_DIV) += sclk-div.o | ||
| 12 | obj-$(CONFIG_COMMON_CLK_MESON_VID_PLL_DIV) += vid-pll-div.o | ||
| 13 | |||
| 14 | # Amlogic Clock controllers | ||
| 15 | |||
| 16 | obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o | ||
| 17 | obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o | ||
| 18 | obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o | ||
| 19 | obj-$(CONFIG_COMMON_CLK_G12A) += g12a.o g12a-aoclk.o | ||
| 9 | obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o | 20 | obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o |
| 10 | obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-32k.o | ||
| 11 | obj-$(CONFIG_COMMON_CLK_AXG) += axg.o axg-aoclk.o | ||
| 12 | obj-$(CONFIG_COMMON_CLK_AXG_AUDIO) += axg-audio.o | ||
| 13 | obj-$(CONFIG_COMMON_CLK_REGMAP_MESON) += clk-regmap.o | ||
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c index 29e088542387..0086f31288eb 100644 --- a/drivers/clk/meson/axg-aoclk.c +++ b/drivers/clk/meson/axg-aoclk.c | |||
| @@ -12,10 +12,27 @@ | |||
| 12 | #include <linux/platform_device.h> | 12 | #include <linux/platform_device.h> |
| 13 | #include <linux/reset-controller.h> | 13 | #include <linux/reset-controller.h> |
| 14 | #include <linux/mfd/syscon.h> | 14 | #include <linux/mfd/syscon.h> |
| 15 | #include "clk-regmap.h" | ||
| 16 | #include "meson-aoclk.h" | 15 | #include "meson-aoclk.h" |
| 17 | #include "axg-aoclk.h" | 16 | #include "axg-aoclk.h" |
| 18 | 17 | ||
| 18 | #include "clk-regmap.h" | ||
| 19 | #include "clk-dualdiv.h" | ||
| 20 | |||
| 21 | #define IN_PREFIX "ao-in-" | ||
| 22 | |||
| 23 | /* | ||
| 24 | * AO Configuration Clock register offsets | ||
| 25 | * Register offsets from the data sheet must be multiplied by 4. | ||
| 26 | */ | ||
| 27 | #define AO_RTI_PWR_CNTL_REG1 0x0C | ||
| 28 | #define AO_RTI_PWR_CNTL_REG0 0x10 | ||
| 29 | #define AO_RTI_GEN_CNTL_REG0 0x40 | ||
| 30 | #define AO_OSCIN_CNTL 0x58 | ||
| 31 | #define AO_CRT_CLK_CNTL1 0x68 | ||
| 32 | #define AO_SAR_CLK 0x90 | ||
| 33 | #define AO_RTC_ALT_CLK_CNTL0 0x94 | ||
| 34 | #define AO_RTC_ALT_CLK_CNTL1 0x98 | ||
| 35 | |||
| 19 | #define AXG_AO_GATE(_name, _bit) \ | 36 | #define AXG_AO_GATE(_name, _bit) \ |
| 20 | static struct clk_regmap axg_aoclk_##_name = { \ | 37 | static struct clk_regmap axg_aoclk_##_name = { \ |
| 21 | .data = &(struct clk_regmap_gate_data) { \ | 38 | .data = &(struct clk_regmap_gate_data) { \ |
| @@ -25,7 +42,7 @@ static struct clk_regmap axg_aoclk_##_name = { \ | |||
| 25 | .hw.init = &(struct clk_init_data) { \ | 42 | .hw.init = &(struct clk_init_data) { \ |
| 26 | .name = "axg_ao_" #_name, \ | 43 | .name = "axg_ao_" #_name, \ |
| 27 | .ops = &clk_regmap_gate_ops, \ | 44 | .ops = &clk_regmap_gate_ops, \ |
| 28 | .parent_names = (const char *[]){ "clk81" }, \ | 45 | .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \ |
| 29 | .num_parents = 1, \ | 46 | .num_parents = 1, \ |
| 30 | .flags = CLK_IGNORE_UNUSED, \ | 47 | .flags = CLK_IGNORE_UNUSED, \ |
| 31 | }, \ | 48 | }, \ |
| @@ -39,17 +56,141 @@ AXG_AO_GATE(uart2, 5); | |||
| 39 | AXG_AO_GATE(ir_blaster, 6); | 56 | AXG_AO_GATE(ir_blaster, 6); |
| 40 | AXG_AO_GATE(saradc, 7); | 57 | AXG_AO_GATE(saradc, 7); |
| 41 | 58 | ||
| 59 | static struct clk_regmap axg_aoclk_cts_oscin = { | ||
| 60 | .data = &(struct clk_regmap_gate_data){ | ||
| 61 | .offset = AO_RTI_PWR_CNTL_REG0, | ||
| 62 | .bit_idx = 14, | ||
| 63 | }, | ||
| 64 | .hw.init = &(struct clk_init_data){ | ||
| 65 | .name = "cts_oscin", | ||
| 66 | .ops = &clk_regmap_gate_ro_ops, | ||
| 67 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, | ||
| 68 | .num_parents = 1, | ||
| 69 | }, | ||
| 70 | }; | ||
| 71 | |||
| 72 | static struct clk_regmap axg_aoclk_32k_pre = { | ||
| 73 | .data = &(struct clk_regmap_gate_data){ | ||
| 74 | .offset = AO_RTC_ALT_CLK_CNTL0, | ||
| 75 | .bit_idx = 31, | ||
| 76 | }, | ||
| 77 | .hw.init = &(struct clk_init_data){ | ||
| 78 | .name = "axg_ao_32k_pre", | ||
| 79 | .ops = &clk_regmap_gate_ops, | ||
| 80 | .parent_names = (const char *[]){ "cts_oscin" }, | ||
| 81 | .num_parents = 1, | ||
| 82 | }, | ||
| 83 | }; | ||
| 84 | |||
| 85 | static const struct meson_clk_dualdiv_param axg_32k_div_table[] = { | ||
| 86 | { | ||
| 87 | .dual = 1, | ||
| 88 | .n1 = 733, | ||
| 89 | .m1 = 8, | ||
| 90 | .n2 = 732, | ||
| 91 | .m2 = 11, | ||
| 92 | }, {} | ||
| 93 | }; | ||
| 94 | |||
| 95 | static struct clk_regmap axg_aoclk_32k_div = { | ||
| 96 | .data = &(struct meson_clk_dualdiv_data){ | ||
| 97 | .n1 = { | ||
| 98 | .reg_off = AO_RTC_ALT_CLK_CNTL0, | ||
| 99 | .shift = 0, | ||
| 100 | .width = 12, | ||
| 101 | }, | ||
| 102 | .n2 = { | ||
| 103 | .reg_off = AO_RTC_ALT_CLK_CNTL0, | ||
| 104 | .shift = 12, | ||
| 105 | .width = 12, | ||
| 106 | }, | ||
| 107 | .m1 = { | ||
| 108 | .reg_off = AO_RTC_ALT_CLK_CNTL1, | ||
| 109 | .shift = 0, | ||
| 110 | .width = 12, | ||
| 111 | }, | ||
| 112 | .m2 = { | ||
| 113 | .reg_off = AO_RTC_ALT_CLK_CNTL1, | ||
| 114 | .shift = 12, | ||
| 115 | .width = 12, | ||
| 116 | }, | ||
| 117 | .dual = { | ||
| 118 | .reg_off = AO_RTC_ALT_CLK_CNTL0, | ||
| 119 | .shift = 28, | ||
| 120 | .width = 1, | ||
| 121 | }, | ||
| 122 | .table = axg_32k_div_table, | ||
| 123 | }, | ||
| 124 | .hw.init = &(struct clk_init_data){ | ||
| 125 | .name = "axg_ao_32k_div", | ||
| 126 | .ops = &meson_clk_dualdiv_ops, | ||
| 127 | .parent_names = (const char *[]){ "axg_ao_32k_pre" }, | ||
| 128 | .num_parents = 1, | ||
| 129 | }, | ||
| 130 | }; | ||
| 131 | |||
| 132 | static struct clk_regmap axg_aoclk_32k_sel = { | ||
| 133 | .data = &(struct clk_regmap_mux_data) { | ||
| 134 | .offset = AO_RTC_ALT_CLK_CNTL1, | ||
| 135 | .mask = 0x1, | ||
| 136 | .shift = 24, | ||
| 137 | .flags = CLK_MUX_ROUND_CLOSEST, | ||
| 138 | }, | ||
| 139 | .hw.init = &(struct clk_init_data){ | ||
| 140 | .name = "axg_ao_32k_sel", | ||
| 141 | .ops = &clk_regmap_mux_ops, | ||
| 142 | .parent_names = (const char *[]){ "axg_ao_32k_div", | ||
| 143 | "axg_ao_32k_pre" }, | ||
| 144 | .num_parents = 2, | ||
| 145 | .flags = CLK_SET_RATE_PARENT, | ||
| 146 | }, | ||
| 147 | }; | ||
| 148 | |||
| 149 | static struct clk_regmap axg_aoclk_32k = { | ||
| 150 | .data = &(struct clk_regmap_gate_data){ | ||
| 151 | .offset = AO_RTC_ALT_CLK_CNTL0, | ||
| 152 | .bit_idx = 30, | ||
| 153 | }, | ||
| 154 | .hw.init = &(struct clk_init_data){ | ||
| 155 | .name = "axg_ao_32k", | ||
| 156 | .ops = &clk_regmap_gate_ops, | ||
| 157 | .parent_names = (const char *[]){ "axg_ao_32k_sel" }, | ||
| 158 | .num_parents = 1, | ||
| 159 | .flags = CLK_SET_RATE_PARENT, | ||
| 160 | }, | ||
| 161 | }; | ||
| 162 | |||
| 163 | static struct clk_regmap axg_aoclk_cts_rtc_oscin = { | ||
| 164 | .data = &(struct clk_regmap_mux_data) { | ||
| 165 | .offset = AO_RTI_PWR_CNTL_REG0, | ||
| 166 | .mask = 0x1, | ||
| 167 | .shift = 10, | ||
| 168 | .flags = CLK_MUX_ROUND_CLOSEST, | ||
| 169 | }, | ||
| 170 | .hw.init = &(struct clk_init_data){ | ||
| 171 | .name = "axg_ao_cts_rtc_oscin", | ||
| 172 | .ops = &clk_regmap_mux_ops, | ||
| 173 | .parent_names = (const char *[]){ "axg_ao_32k", | ||
| 174 | IN_PREFIX "ext_32k-0" }, | ||
| 175 | .num_parents = 2, | ||
| 176 | .flags = CLK_SET_RATE_PARENT, | ||
| 177 | }, | ||
| 178 | }; | ||
| 179 | |||
| 42 | static struct clk_regmap axg_aoclk_clk81 = { | 180 | static struct clk_regmap axg_aoclk_clk81 = { |
| 43 | .data = &(struct clk_regmap_mux_data) { | 181 | .data = &(struct clk_regmap_mux_data) { |
| 44 | .offset = AO_RTI_PWR_CNTL_REG0, | 182 | .offset = AO_RTI_PWR_CNTL_REG0, |
| 45 | .mask = 0x1, | 183 | .mask = 0x1, |
| 46 | .shift = 8, | 184 | .shift = 8, |
| 185 | .flags = CLK_MUX_ROUND_CLOSEST, | ||
| 47 | }, | 186 | }, |
| 48 | .hw.init = &(struct clk_init_data){ | 187 | .hw.init = &(struct clk_init_data){ |
| 49 | .name = "axg_ao_clk81", | 188 | .name = "axg_ao_clk81", |
| 50 | .ops = &clk_regmap_mux_ro_ops, | 189 | .ops = &clk_regmap_mux_ro_ops, |
| 51 | .parent_names = (const char *[]){ "clk81", "ao_alt_xtal"}, | 190 | .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk", |
| 191 | "axg_ao_cts_rtc_oscin"}, | ||
| 52 | .num_parents = 2, | 192 | .num_parents = 2, |
| 193 | .flags = CLK_SET_RATE_PARENT, | ||
| 53 | }, | 194 | }, |
| 54 | }; | 195 | }; |
| 55 | 196 | ||
| @@ -62,7 +203,8 @@ static struct clk_regmap axg_aoclk_saradc_mux = { | |||
| 62 | .hw.init = &(struct clk_init_data){ | 203 | .hw.init = &(struct clk_init_data){ |
| 63 | .name = "axg_ao_saradc_mux", | 204 | .name = "axg_ao_saradc_mux", |
| 64 | .ops = &clk_regmap_mux_ops, | 205 | .ops = &clk_regmap_mux_ops, |
| 65 | .parent_names = (const char *[]){ "xtal", "axg_ao_clk81" }, | 206 | .parent_names = (const char *[]){ IN_PREFIX "xtal", |
| 207 | "axg_ao_clk81" }, | ||
| 66 | .num_parents = 2, | 208 | .num_parents = 2, |
| 67 | }, | 209 | }, |
| 68 | }; | 210 | }; |
| @@ -106,17 +248,23 @@ static const unsigned int axg_aoclk_reset[] = { | |||
| 106 | }; | 248 | }; |
| 107 | 249 | ||
| 108 | static struct clk_regmap *axg_aoclk_regmap[] = { | 250 | static struct clk_regmap *axg_aoclk_regmap[] = { |
| 109 | [CLKID_AO_REMOTE] = &axg_aoclk_remote, | 251 | &axg_aoclk_remote, |
| 110 | [CLKID_AO_I2C_MASTER] = &axg_aoclk_i2c_master, | 252 | &axg_aoclk_i2c_master, |
| 111 | [CLKID_AO_I2C_SLAVE] = &axg_aoclk_i2c_slave, | 253 | &axg_aoclk_i2c_slave, |
| 112 | [CLKID_AO_UART1] = &axg_aoclk_uart1, | 254 | &axg_aoclk_uart1, |
| 113 | [CLKID_AO_UART2] = &axg_aoclk_uart2, | 255 | &axg_aoclk_uart2, |
| 114 | [CLKID_AO_IR_BLASTER] = &axg_aoclk_ir_blaster, | 256 | &axg_aoclk_ir_blaster, |
| 115 | [CLKID_AO_SAR_ADC] = &axg_aoclk_saradc, | 257 | &axg_aoclk_saradc, |
| 116 | [CLKID_AO_CLK81] = &axg_aoclk_clk81, | 258 | &axg_aoclk_cts_oscin, |
| 117 | [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux, | 259 | &axg_aoclk_32k_pre, |
| 118 | [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div, | 260 | &axg_aoclk_32k_div, |
| 119 | [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate, | 261 | &axg_aoclk_32k_sel, |
| 262 | &axg_aoclk_32k, | ||
| 263 | &axg_aoclk_cts_rtc_oscin, | ||
| 264 | &axg_aoclk_clk81, | ||
| 265 | &axg_aoclk_saradc_mux, | ||
| 266 | &axg_aoclk_saradc_div, | ||
| 267 | &axg_aoclk_saradc_gate, | ||
| 120 | }; | 268 | }; |
| 121 | 269 | ||
| 122 | static const struct clk_hw_onecell_data axg_aoclk_onecell_data = { | 270 | static const struct clk_hw_onecell_data axg_aoclk_onecell_data = { |
| @@ -132,10 +280,22 @@ static const struct clk_hw_onecell_data axg_aoclk_onecell_data = { | |||
| 132 | [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw, | 280 | [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw, |
| 133 | [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw, | 281 | [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw, |
| 134 | [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw, | 282 | [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw, |
| 283 | [CLKID_AO_CTS_OSCIN] = &axg_aoclk_cts_oscin.hw, | ||
| 284 | [CLKID_AO_32K_PRE] = &axg_aoclk_32k_pre.hw, | ||
| 285 | [CLKID_AO_32K_DIV] = &axg_aoclk_32k_div.hw, | ||
| 286 | [CLKID_AO_32K_SEL] = &axg_aoclk_32k_sel.hw, | ||
| 287 | [CLKID_AO_32K] = &axg_aoclk_32k.hw, | ||
| 288 | [CLKID_AO_CTS_RTC_OSCIN] = &axg_aoclk_cts_rtc_oscin.hw, | ||
| 135 | }, | 289 | }, |
| 136 | .num = NR_CLKS, | 290 | .num = NR_CLKS, |
| 137 | }; | 291 | }; |
| 138 | 292 | ||
| 293 | static const struct meson_aoclk_input axg_aoclk_inputs[] = { | ||
| 294 | { .name = "xtal", .required = true }, | ||
| 295 | { .name = "mpeg-clk", .required = true }, | ||
| 296 | { .name = "ext-32k-0", .required = false }, | ||
| 297 | }; | ||
| 298 | |||
| 139 | static const struct meson_aoclk_data axg_aoclkc_data = { | 299 | static const struct meson_aoclk_data axg_aoclkc_data = { |
| 140 | .reset_reg = AO_RTI_GEN_CNTL_REG0, | 300 | .reset_reg = AO_RTI_GEN_CNTL_REG0, |
| 141 | .num_reset = ARRAY_SIZE(axg_aoclk_reset), | 301 | .num_reset = ARRAY_SIZE(axg_aoclk_reset), |
| @@ -143,6 +303,9 @@ static const struct meson_aoclk_data axg_aoclkc_data = { | |||
| 143 | .num_clks = ARRAY_SIZE(axg_aoclk_regmap), | 303 | .num_clks = ARRAY_SIZE(axg_aoclk_regmap), |
| 144 | .clks = axg_aoclk_regmap, | 304 | .clks = axg_aoclk_regmap, |
| 145 | .hw_data = &axg_aoclk_onecell_data, | 305 | .hw_data = &axg_aoclk_onecell_data, |
| 306 | .inputs = axg_aoclk_inputs, | ||
| 307 | .num_inputs = ARRAY_SIZE(axg_aoclk_inputs), | ||
| 308 | .input_prefix = IN_PREFIX, | ||
| 146 | }; | 309 | }; |
| 147 | 310 | ||
| 148 | static const struct of_device_id axg_aoclkc_match_table[] = { | 311 | static const struct of_device_id axg_aoclkc_match_table[] = { |
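For reference, the clocks added in this file form the following 32768 Hz generation path, reconstructed from the parent_names above (the ao-in-* clocks are the DT inputs registered through the shared meson-aoclk code):

	/*
	 * Reconstructed topology of the new AO 32k path:
	 *
	 *   ao-in-xtal -> cts_oscin -> axg_ao_32k_pre -> axg_ao_32k_div -> axg_ao_32k_sel -> axg_ao_32k
	 *   (axg_ao_32k_pre can also feed axg_ao_32k_sel directly, bypassing the divider)
	 *
	 *   axg_ao_32k or ao-in-ext_32k-0          -> axg_ao_cts_rtc_oscin (mux)
	 *   ao-in-mpeg-clk or axg_ao_cts_rtc_oscin -> axg_ao_clk81 (read-only mux)
	 */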
diff --git a/drivers/clk/meson/axg-aoclk.h b/drivers/clk/meson/axg-aoclk.h index 91384d8dd844..3cc27e85170f 100644 --- a/drivers/clk/meson/axg-aoclk.h +++ b/drivers/clk/meson/axg-aoclk.h | |||
| @@ -10,18 +10,7 @@ | |||
| 10 | #ifndef __AXG_AOCLKC_H | 10 | #ifndef __AXG_AOCLKC_H |
| 11 | #define __AXG_AOCLKC_H | 11 | #define __AXG_AOCLKC_H |
| 12 | 12 | ||
| 13 | #define NR_CLKS 11 | 13 | #define NR_CLKS 17 |
| 14 | /* AO Configuration Clock registers offsets | ||
| 15 | * Register offsets from the data sheet must be multiplied by 4. | ||
| 16 | */ | ||
| 17 | #define AO_RTI_PWR_CNTL_REG1 0x0C | ||
| 18 | #define AO_RTI_PWR_CNTL_REG0 0x10 | ||
| 19 | #define AO_RTI_GEN_CNTL_REG0 0x40 | ||
| 20 | #define AO_OSCIN_CNTL 0x58 | ||
| 21 | #define AO_CRT_CLK_CNTL1 0x68 | ||
| 22 | #define AO_SAR_CLK 0x90 | ||
| 23 | #define AO_RTC_ALT_CLK_CNTL0 0x94 | ||
| 24 | #define AO_RTC_ALT_CLK_CNTL1 0x98 | ||
| 25 | 14 | ||
| 26 | #include <dt-bindings/clock/axg-aoclkc.h> | 15 | #include <dt-bindings/clock/axg-aoclkc.h> |
| 27 | #include <dt-bindings/reset/axg-aoclkc.h> | 16 | #include <dt-bindings/reset/axg-aoclkc.h> |
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c index 8ac3a2295473..7ab200b6c3bf 100644 --- a/drivers/clk/meson/axg-audio.c +++ b/drivers/clk/meson/axg-audio.c | |||
| @@ -14,8 +14,11 @@ | |||
| 14 | #include <linux/reset.h> | 14 | #include <linux/reset.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | 16 | ||
| 17 | #include "clkc-audio.h" | ||
| 18 | #include "axg-audio.h" | 17 | #include "axg-audio.h" |
| 18 | #include "clk-input.h" | ||
| 19 | #include "clk-regmap.h" | ||
| 20 | #include "clk-phase.h" | ||
| 21 | #include "sclk-div.h" | ||
| 19 | 22 | ||
| 20 | #define AXG_MST_IN_COUNT 8 | 23 | #define AXG_MST_IN_COUNT 8 |
| 21 | #define AXG_SLV_SCLK_COUNT 10 | 24 | #define AXG_SLV_SCLK_COUNT 10 |
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c index 792735d7e46e..7a8ef80e5f2c 100644 --- a/drivers/clk/meson/axg.c +++ b/drivers/clk/meson/axg.c | |||
| @@ -9,16 +9,17 @@ | |||
| 9 | * Author: Qiufang Dai <qiufang.dai@amlogic.com> | 9 | * Author: Qiufang Dai <qiufang.dai@amlogic.com> |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/clk.h> | ||
| 13 | #include <linux/clk-provider.h> | 12 | #include <linux/clk-provider.h> |
| 14 | #include <linux/init.h> | 13 | #include <linux/init.h> |
| 15 | #include <linux/of_device.h> | 14 | #include <linux/of_device.h> |
| 16 | #include <linux/mfd/syscon.h> | ||
| 17 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
| 18 | #include <linux/regmap.h> | ||
| 19 | 16 | ||
| 20 | #include "clkc.h" | 17 | #include "clk-input.h" |
| 18 | #include "clk-regmap.h" | ||
| 19 | #include "clk-pll.h" | ||
| 20 | #include "clk-mpll.h" | ||
| 21 | #include "axg.h" | 21 | #include "axg.h" |
| 22 | #include "meson-eeclk.h" | ||
| 22 | 23 | ||
| 23 | static DEFINE_SPINLOCK(meson_clk_lock); | 24 | static DEFINE_SPINLOCK(meson_clk_lock); |
| 24 | 25 | ||
| @@ -58,7 +59,7 @@ static struct clk_regmap axg_fixed_pll_dco = { | |||
| 58 | .hw.init = &(struct clk_init_data){ | 59 | .hw.init = &(struct clk_init_data){ |
| 59 | .name = "fixed_pll_dco", | 60 | .name = "fixed_pll_dco", |
| 60 | .ops = &meson_clk_pll_ro_ops, | 61 | .ops = &meson_clk_pll_ro_ops, |
| 61 | .parent_names = (const char *[]){ "xtal" }, | 62 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, |
| 62 | .num_parents = 1, | 63 | .num_parents = 1, |
| 63 | }, | 64 | }, |
| 64 | }; | 65 | }; |
| @@ -113,7 +114,7 @@ static struct clk_regmap axg_sys_pll_dco = { | |||
| 113 | .hw.init = &(struct clk_init_data){ | 114 | .hw.init = &(struct clk_init_data){ |
| 114 | .name = "sys_pll_dco", | 115 | .name = "sys_pll_dco", |
| 115 | .ops = &meson_clk_pll_ro_ops, | 116 | .ops = &meson_clk_pll_ro_ops, |
| 116 | .parent_names = (const char *[]){ "xtal" }, | 117 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, |
| 117 | .num_parents = 1, | 118 | .num_parents = 1, |
| 118 | }, | 119 | }, |
| 119 | }; | 120 | }; |
| @@ -214,7 +215,7 @@ static struct clk_regmap axg_gp0_pll_dco = { | |||
| 214 | .hw.init = &(struct clk_init_data){ | 215 | .hw.init = &(struct clk_init_data){ |
| 215 | .name = "gp0_pll_dco", | 216 | .name = "gp0_pll_dco", |
| 216 | .ops = &meson_clk_pll_ops, | 217 | .ops = &meson_clk_pll_ops, |
| 217 | .parent_names = (const char *[]){ "xtal" }, | 218 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, |
| 218 | .num_parents = 1, | 219 | .num_parents = 1, |
| 219 | }, | 220 | }, |
| 220 | }; | 221 | }; |
| @@ -283,7 +284,7 @@ static struct clk_regmap axg_hifi_pll_dco = { | |||
| 283 | .hw.init = &(struct clk_init_data){ | 284 | .hw.init = &(struct clk_init_data){ |
| 284 | .name = "hifi_pll_dco", | 285 | .name = "hifi_pll_dco", |
| 285 | .ops = &meson_clk_pll_ops, | 286 | .ops = &meson_clk_pll_ops, |
| 286 | .parent_names = (const char *[]){ "xtal" }, | 287 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, |
| 287 | .num_parents = 1, | 288 | .num_parents = 1, |
| 288 | }, | 289 | }, |
| 289 | }; | 290 | }; |
| @@ -701,7 +702,7 @@ static struct clk_regmap axg_pcie_pll_dco = { | |||
| 701 | .hw.init = &(struct clk_init_data){ | 702 | .hw.init = &(struct clk_init_data){ |
| 702 | .name = "pcie_pll_dco", | 703 | .name = "pcie_pll_dco", |
| 703 | .ops = &meson_clk_pll_ops, | 704 | .ops = &meson_clk_pll_ops, |
| 704 | .parent_names = (const char *[]){ "xtal" }, | 705 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, |
| 705 | .num_parents = 1, | 706 | .num_parents = 1, |
| 706 | }, | 707 | }, |
| 707 | }; | 708 | }; |
| @@ -803,7 +804,7 @@ static struct clk_regmap axg_pcie_cml_en1 = { | |||
| 803 | 804 | ||
| 804 | static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 }; | 805 | static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 }; |
| 805 | static const char * const clk81_parent_names[] = { | 806 | static const char * const clk81_parent_names[] = { |
| 806 | "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4", | 807 | IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4", |
| 807 | "fclk_div3", "fclk_div5" | 808 | "fclk_div3", "fclk_div5" |
| 808 | }; | 809 | }; |
| 809 | 810 | ||
| @@ -852,7 +853,7 @@ static struct clk_regmap axg_clk81 = { | |||
| 852 | }; | 853 | }; |
| 853 | 854 | ||
| 854 | static const char * const axg_sd_emmc_clk0_parent_names[] = { | 855 | static const char * const axg_sd_emmc_clk0_parent_names[] = { |
| 855 | "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7", | 856 | IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7", |
| 856 | 857 | ||
| 857 | /* | 858 | /* |
| 858 | * Following these parent clocks, we should also have had mpll2, mpll3 | 859 | * Following these parent clocks, we should also have had mpll2, mpll3 |
| @@ -957,7 +958,7 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = { | |||
| 957 | static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8, | 958 | static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8, |
| 958 | 9, 10, 11, 13, 14, }; | 959 | 9, 10, 11, 13, 14, }; |
| 959 | static const char * const gen_clk_parent_names[] = { | 960 | static const char * const gen_clk_parent_names[] = { |
| 960 | "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3", | 961 | IN_PREFIX "xtal", "hifi_pll", "mpll0", "mpll1", "mpll2", "mpll3", |
| 961 | "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll", | 962 | "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll", |
| 962 | }; | 963 | }; |
| 963 | 964 | ||
| @@ -1255,46 +1256,20 @@ static struct clk_regmap *const axg_clk_regmaps[] = { | |||
| 1255 | &axg_pcie_pll_od, | 1256 | &axg_pcie_pll_od, |
| 1256 | }; | 1257 | }; |
| 1257 | 1258 | ||
| 1259 | static const struct meson_eeclkc_data axg_clkc_data = { | ||
| 1260 | .regmap_clks = axg_clk_regmaps, | ||
| 1261 | .regmap_clk_num = ARRAY_SIZE(axg_clk_regmaps), | ||
| 1262 | .hw_onecell_data = &axg_hw_onecell_data, | ||
| 1263 | }; | ||
| 1264 | |||
| 1265 | |||
| 1258 | static const struct of_device_id clkc_match_table[] = { | 1266 | static const struct of_device_id clkc_match_table[] = { |
| 1259 | { .compatible = "amlogic,axg-clkc" }, | 1267 | { .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data }, |
| 1260 | {} | 1268 | {} |
| 1261 | }; | 1269 | }; |
| 1262 | 1270 | ||
| 1263 | static int axg_clkc_probe(struct platform_device *pdev) | ||
| 1264 | { | ||
| 1265 | struct device *dev = &pdev->dev; | ||
| 1266 | struct regmap *map; | ||
| 1267 | int ret, i; | ||
| 1268 | |||
| 1269 | /* Get the hhi system controller node if available */ | ||
| 1270 | map = syscon_node_to_regmap(of_get_parent(dev->of_node)); | ||
| 1271 | if (IS_ERR(map)) { | ||
| 1272 | dev_err(dev, "failed to get HHI regmap\n"); | ||
| 1273 | return PTR_ERR(map); | ||
| 1274 | } | ||
| 1275 | |||
| 1276 | /* Populate regmap for the regmap backed clocks */ | ||
| 1277 | for (i = 0; i < ARRAY_SIZE(axg_clk_regmaps); i++) | ||
| 1278 | axg_clk_regmaps[i]->map = map; | ||
| 1279 | |||
| 1280 | for (i = 0; i < axg_hw_onecell_data.num; i++) { | ||
| 1281 | /* array might be sparse */ | ||
| 1282 | if (!axg_hw_onecell_data.hws[i]) | ||
| 1283 | continue; | ||
| 1284 | |||
| 1285 | ret = devm_clk_hw_register(dev, axg_hw_onecell_data.hws[i]); | ||
| 1286 | if (ret) { | ||
| 1287 | dev_err(dev, "Clock registration failed\n"); | ||
| 1288 | return ret; | ||
| 1289 | } | ||
| 1290 | } | ||
| 1291 | |||
| 1292 | return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, | ||
| 1293 | &axg_hw_onecell_data); | ||
| 1294 | } | ||
| 1295 | |||
| 1296 | static struct platform_driver axg_driver = { | 1271 | static struct platform_driver axg_driver = { |
| 1297 | .probe = axg_clkc_probe, | 1272 | .probe = meson_eeclkc_probe, |
| 1298 | .driver = { | 1273 | .driver = { |
| 1299 | .name = "axg-clkc", | 1274 | .name = "axg-clkc", |
| 1300 | .of_match_table = clkc_match_table, | 1275 | .of_match_table = clkc_match_table, |
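The probe code removed here is not dropped; it moves into the shared meson_eeclkc_probe helper, which the driver now reaches through axg_clkc_data. A sketch of what that shared probe is expected to do, reconstructed from the lines removed above and the meson_eeclkc_data fields visible in this hunk (spellings outside this diff are assumptions):

	static int meson_eeclkc_probe_sketch(struct platform_device *pdev)
	{
		const struct meson_eeclkc_data *data;
		struct device *dev = &pdev->dev;
		struct regmap *map;
		int ret, i;

		data = of_device_get_match_data(dev);
		if (!data)
			return -EINVAL;

		/* The EE clock controller lives in the HHI system controller */
		map = syscon_node_to_regmap(of_get_parent(dev->of_node));
		if (IS_ERR(map)) {
			dev_err(dev, "failed to get HHI regmap\n");
			return PTR_ERR(map);
		}

		/* Populate the regmap of every regmap-backed clock */
		for (i = 0; i < data->regmap_clk_num; i++)
			data->regmap_clks[i]->map = map;

		for (i = 0; i < data->hw_onecell_data->num; i++) {
			/* array might be sparse */
			if (!data->hw_onecell_data->hws[i])
				continue;

			ret = devm_clk_hw_register(dev, data->hw_onecell_data->hws[i]);
			if (ret)
				return ret;
		}

		return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
						   data->hw_onecell_data);
	}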
diff --git a/drivers/clk/meson/clk-dualdiv.c b/drivers/clk/meson/clk-dualdiv.c new file mode 100644 index 000000000000..c5ca23a5e3e8 --- /dev/null +++ b/drivers/clk/meson/clk-dualdiv.c | |||
| @@ -0,0 +1,138 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2017 BayLibre, SAS | ||
| 4 | * Author: Neil Armstrong <narmstrong@baylibre.com> | ||
| 5 | * Author: Jerome Brunet <jbrunet@baylibre.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | /* | ||
| 9 | * The AO Domain embeds a dual/divider to generate a more precise | ||
| 10 | * 32,768Hz clock for low-power suspend mode and CEC. | ||
| 11 | * ______ ______ | ||
| 12 | * | | | | | ||
| 13 | * | Div1 |-| Cnt1 | | ||
| 14 | * /|______| |______|\ | ||
| 15 | * -| ______ ______ X--> Out | ||
| 16 | * \| | | |/ | ||
| 17 | * | Div2 |-| Cnt2 | | ||
| 18 | * |______| |______| | ||
| 19 | * | ||
| 20 | * The dividing can be switched to single or dual, with a counter | ||
| 21 | * for each divider to set when the switching is done. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/clk-provider.h> | ||
| 25 | #include <linux/module.h> | ||
| 26 | |||
| 27 | #include "clk-regmap.h" | ||
| 28 | #include "clk-dualdiv.h" | ||
| 29 | |||
| 30 | static inline struct meson_clk_dualdiv_data * | ||
| 31 | meson_clk_dualdiv_data(struct clk_regmap *clk) | ||
| 32 | { | ||
| 33 | return (struct meson_clk_dualdiv_data *)clk->data; | ||
| 34 | } | ||
| 35 | |||
| 36 | static unsigned long | ||
| 37 | __dualdiv_param_to_rate(unsigned long parent_rate, | ||
| 38 | const struct meson_clk_dualdiv_param *p) | ||
| 39 | { | ||
| 40 | if (!p->dual) | ||
| 41 | return DIV_ROUND_CLOSEST(parent_rate, p->n1); | ||
| 42 | |||
| 43 | return DIV_ROUND_CLOSEST(parent_rate * (p->m1 + p->m2), | ||
| 44 | p->n1 * p->m1 + p->n2 * p->m2); | ||
| 45 | } | ||
| 46 | |||
| 47 | static unsigned long meson_clk_dualdiv_recalc_rate(struct clk_hw *hw, | ||
| 48 | unsigned long parent_rate) | ||
| 49 | { | ||
| 50 | struct clk_regmap *clk = to_clk_regmap(hw); | ||
| 51 | struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk); | ||
| 52 | struct meson_clk_dualdiv_param setting; | ||
| 53 | |||
| 54 | setting.dual = meson_parm_read(clk->map, &dualdiv->dual); | ||
| 55 | setting.n1 = meson_parm_read(clk->map, &dualdiv->n1) + 1; | ||
| 56 | setting.m1 = meson_parm_read(clk->map, &dualdiv->m1) + 1; | ||
| 57 | setting.n2 = meson_parm_read(clk->map, &dualdiv->n2) + 1; | ||
| 58 | setting.m2 = meson_parm_read(clk->map, &dualdiv->m2) + 1; | ||
| 59 | |||
| 60 | return __dualdiv_param_to_rate(parent_rate, &setting); | ||
| 61 | } | ||
| 62 | |||
| 63 | static const struct meson_clk_dualdiv_param * | ||
| 64 | __dualdiv_get_setting(unsigned long rate, unsigned long parent_rate, | ||
| 65 | struct meson_clk_dualdiv_data *dualdiv) | ||
| 66 | { | ||
| 67 | const struct meson_clk_dualdiv_param *table = dualdiv->table; | ||
| 68 | unsigned long best = 0, now = 0; | ||
| 69 | unsigned int i, best_i = 0; | ||
| 70 | |||
| 71 | if (!table) | ||
| 72 | return NULL; | ||
| 73 | |||
| 74 | for (i = 0; table[i].n1; i++) { | ||
| 75 | now = __dualdiv_param_to_rate(parent_rate, &table[i]); | ||
| 76 | |||
| 77 | /* If we get an exact match, don't bother any further */ | ||
| 78 | if (now == rate) { | ||
| 79 | return &table[i]; | ||
| 80 | } else if (abs(now - rate) < abs(best - rate)) { | ||
| 81 | best = now; | ||
| 82 | best_i = i; | ||
| 83 | } | ||
| 84 | } | ||
| 85 | |||
| 86 | return (struct meson_clk_dualdiv_param *)&table[best_i]; | ||
| 87 | } | ||
| 88 | |||
| 89 | static long meson_clk_dualdiv_round_rate(struct clk_hw *hw, unsigned long rate, | ||
| 90 | unsigned long *parent_rate) | ||
| 91 | { | ||
| 92 | struct clk_regmap *clk = to_clk_regmap(hw); | ||
| 93 | struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk); | ||
| 94 | const struct meson_clk_dualdiv_param *setting = | ||
| 95 | __dualdiv_get_setting(rate, *parent_rate, dualdiv); | ||
| 96 | |||
| 97 | if (!setting) | ||
| 98 | return meson_clk_dualdiv_recalc_rate(hw, *parent_rate); | ||
| 99 | |||
| 100 | return __dualdiv_param_to_rate(*parent_rate, setting); | ||
| 101 | } | ||
| 102 | |||
| 103 | static int meson_clk_dualdiv_set_rate(struct clk_hw *hw, unsigned long rate, | ||
| 104 | unsigned long parent_rate) | ||
| 105 | { | ||
| 106 | struct clk_regmap *clk = to_clk_regmap(hw); | ||
| 107 | struct meson_clk_dualdiv_data *dualdiv = meson_clk_dualdiv_data(clk); | ||
| 108 | const struct meson_clk_dualdiv_param *setting = | ||
| 109 | __dualdiv_get_setting(rate, parent_rate, dualdiv); | ||
| 110 | |||
| 111 | if (!setting) | ||
| 112 | return -EINVAL; | ||
| 113 | |||
| 114 | meson_parm_write(clk->map, &dualdiv->dual, setting->dual); | ||
| 115 | meson_parm_write(clk->map, &dualdiv->n1, setting->n1 - 1); | ||
| 116 | meson_parm_write(clk->map, &dualdiv->m1, setting->m1 - 1); | ||
| 117 | meson_parm_write(clk->map, &dualdiv->n2, setting->n2 - 1); | ||
| 118 | meson_parm_write(clk->map, &dualdiv->m2, setting->m2 - 1); | ||
| 119 | |||
| 120 | return 0; | ||
| 121 | } | ||
| 122 | |||
| 123 | const struct clk_ops meson_clk_dualdiv_ops = { | ||
| 124 | .recalc_rate = meson_clk_dualdiv_recalc_rate, | ||
| 125 | .round_rate = meson_clk_dualdiv_round_rate, | ||
| 126 | .set_rate = meson_clk_dualdiv_set_rate, | ||
| 127 | }; | ||
| 128 | EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ops); | ||
| 129 | |||
| 130 | const struct clk_ops meson_clk_dualdiv_ro_ops = { | ||
| 131 | .recalc_rate = meson_clk_dualdiv_recalc_rate, | ||
| 132 | }; | ||
| 133 | EXPORT_SYMBOL_GPL(meson_clk_dualdiv_ro_ops); | ||
| 134 | |||
| 135 | MODULE_DESCRIPTION("Amlogic dual divider driver"); | ||
| 136 | MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); | ||
| 137 | MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>"); | ||
| 138 | MODULE_LICENSE("GPL v2"); | ||
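A quick way to see what the table-driven dual divider buys: with the single AXG table entry above (dual=1, n1=733, m1=8, n2=732, m2=11) and the usual 24 MHz crystal (the parent rate is an assumption, it is not part of this hunk), the blended formula lands on 32768 Hz almost exactly, which a plain /732 or /733 divider cannot do:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long parent = 24000000ULL;		/* assumed xtal rate */
		unsigned long long n1 = 733, m1 = 8, n2 = 732, m2 = 11;	/* axg_32k_div_table[0] */

		/* rate = parent * (m1 + m2) / (n1 * m1 + n2 * m2), rounded to closest */
		unsigned long long num = parent * (m1 + m2);
		unsigned long long den = n1 * m1 + n2 * m2;

		printf("%llu Hz\n", (num + den / 2) / den);		/* prints 32768 Hz */
		return 0;
	}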
diff --git a/drivers/clk/meson/clk-dualdiv.h b/drivers/clk/meson/clk-dualdiv.h new file mode 100644 index 000000000000..4aa939018012 --- /dev/null +++ b/drivers/clk/meson/clk-dualdiv.h | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2019 BayLibre, SAS. | ||
| 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __MESON_CLK_DUALDIV_H | ||
| 8 | #define __MESON_CLK_DUALDIV_H | ||
| 9 | |||
| 10 | #include <linux/clk-provider.h> | ||
| 11 | #include "parm.h" | ||
| 12 | |||
| 13 | struct meson_clk_dualdiv_param { | ||
| 14 | unsigned int n1; | ||
| 15 | unsigned int n2; | ||
| 16 | unsigned int m1; | ||
| 17 | unsigned int m2; | ||
| 18 | unsigned int dual; | ||
| 19 | }; | ||
| 20 | |||
| 21 | struct meson_clk_dualdiv_data { | ||
| 22 | struct parm n1; | ||
| 23 | struct parm n2; | ||
| 24 | struct parm m1; | ||
| 25 | struct parm m2; | ||
| 26 | struct parm dual; | ||
| 27 | const struct meson_clk_dualdiv_param *table; | ||
| 28 | }; | ||
| 29 | |||
| 30 | extern const struct clk_ops meson_clk_dualdiv_ops; | ||
| 31 | extern const struct clk_ops meson_clk_dualdiv_ro_ops; | ||
| 32 | |||
| 33 | #endif /* __MESON_CLK_DUALDIV_H */ | ||
diff --git a/drivers/clk/meson/clk-input.c b/drivers/clk/meson/clk-input.c index 06b3e3bb6a66..086226e9dba6 100644 --- a/drivers/clk/meson/clk-input.c +++ b/drivers/clk/meson/clk-input.c | |||
| @@ -7,7 +7,8 @@ | |||
| 7 | #include <linux/clk.h> | 7 | #include <linux/clk.h> |
| 8 | #include <linux/clk-provider.h> | 8 | #include <linux/clk-provider.h> |
| 9 | #include <linux/device.h> | 9 | #include <linux/device.h> |
| 10 | #include "clkc.h" | 10 | #include <linux/module.h> |
| 11 | #include "clk-input.h" | ||
| 11 | 12 | ||
| 12 | static const struct clk_ops meson_clk_no_ops = {}; | 13 | static const struct clk_ops meson_clk_no_ops = {}; |
| 13 | 14 | ||
| @@ -42,3 +43,7 @@ struct clk_hw *meson_clk_hw_register_input(struct device *dev, | |||
| 42 | return ret ? ERR_PTR(ret) : hw; | 43 | return ret ? ERR_PTR(ret) : hw; |
| 43 | } | 44 | } |
| 44 | EXPORT_SYMBOL_GPL(meson_clk_hw_register_input); | 45 | EXPORT_SYMBOL_GPL(meson_clk_hw_register_input); |
| 46 | |||
| 47 | MODULE_DESCRIPTION("Amlogic clock input helper"); | ||
| 48 | MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>"); | ||
| 49 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/clk/meson/clk-input.h b/drivers/clk/meson/clk-input.h new file mode 100644 index 000000000000..4a541b9685a6 --- /dev/null +++ b/drivers/clk/meson/clk-input.h | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2019 BayLibre, SAS. | ||
| 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __MESON_CLK_INPUT_H | ||
| 8 | #define __MESON_CLK_INPUT_H | ||
| 9 | |||
| 10 | #include <linux/clk-provider.h> | ||
| 11 | |||
| 12 | struct device; | ||
| 13 | |||
| 14 | struct clk_hw *meson_clk_hw_register_input(struct device *dev, | ||
| 15 | const char *of_name, | ||
| 16 | const char *clk_name, | ||
| 17 | unsigned long flags); | ||
| 18 | |||
| 19 | #endif /* __MESON_CLK_INPUT_H */ | ||
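The clk-input helper, now built as its own module, simply wraps a parent clock taken from DT into a pass-through clk_hw so a controller's clocks can refer to it by a driver-local prefix (exactly what the IN_PREFIX parent names elsewhere in this series rely on). A minimal usage sketch, with a made-up prefix and device:

	/* Sketch: expose the DT "xtal" input as "example-in-xtal" */
	static int example_register_xtal(struct device *dev)
	{
		struct clk_hw *hw;

		hw = meson_clk_hw_register_input(dev, "xtal", "example-in-xtal", 0);
		if (IS_ERR(hw))
			return PTR_ERR(hw);

		return 0;
	}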
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c index 650f75cc15a9..f76850d99e59 100644 --- a/drivers/clk/meson/clk-mpll.c +++ b/drivers/clk/meson/clk-mpll.c | |||
| @@ -12,7 +12,11 @@ | |||
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #include <linux/clk-provider.h> | 14 | #include <linux/clk-provider.h> |
| 15 | #include "clkc.h" | 15 | #include <linux/module.h> |
| 16 | #include <linux/spinlock.h> | ||
| 17 | |||
| 18 | #include "clk-regmap.h" | ||
| 19 | #include "clk-mpll.h" | ||
| 16 | 20 | ||
| 17 | #define SDM_DEN 16384 | 21 | #define SDM_DEN 16384 |
| 18 | #define N2_MIN 4 | 22 | #define N2_MIN 4 |
| @@ -138,9 +142,15 @@ const struct clk_ops meson_clk_mpll_ro_ops = { | |||
| 138 | .recalc_rate = mpll_recalc_rate, | 142 | .recalc_rate = mpll_recalc_rate, |
| 139 | .round_rate = mpll_round_rate, | 143 | .round_rate = mpll_round_rate, |
| 140 | }; | 144 | }; |
| 145 | EXPORT_SYMBOL_GPL(meson_clk_mpll_ro_ops); | ||
| 141 | 146 | ||
| 142 | const struct clk_ops meson_clk_mpll_ops = { | 147 | const struct clk_ops meson_clk_mpll_ops = { |
| 143 | .recalc_rate = mpll_recalc_rate, | 148 | .recalc_rate = mpll_recalc_rate, |
| 144 | .round_rate = mpll_round_rate, | 149 | .round_rate = mpll_round_rate, |
| 145 | .set_rate = mpll_set_rate, | 150 | .set_rate = mpll_set_rate, |
| 146 | }; | 151 | }; |
| 152 | EXPORT_SYMBOL_GPL(meson_clk_mpll_ops); | ||
| 153 | |||
| 154 | MODULE_DESCRIPTION("Amlogic MPLL driver"); | ||
| 155 | MODULE_AUTHOR("Michael Turquette <mturquette@baylibre.com>"); | ||
| 156 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/clk/meson/clk-mpll.h b/drivers/clk/meson/clk-mpll.h new file mode 100644 index 000000000000..cf79340006dd --- /dev/null +++ b/drivers/clk/meson/clk-mpll.h | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2019 BayLibre, SAS. | ||
| 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __MESON_CLK_MPLL_H | ||
| 8 | #define __MESON_CLK_MPLL_H | ||
| 9 | |||
| 10 | #include <linux/clk-provider.h> | ||
| 11 | #include <linux/spinlock.h> | ||
| 12 | |||
| 13 | #include "parm.h" | ||
| 14 | |||
| 15 | struct meson_clk_mpll_data { | ||
| 16 | struct parm sdm; | ||
| 17 | struct parm sdm_en; | ||
| 18 | struct parm n2; | ||
| 19 | struct parm ssen; | ||
| 20 | struct parm misc; | ||
| 21 | spinlock_t *lock; | ||
| 22 | u8 flags; | ||
| 23 | }; | ||
| 24 | |||
| 25 | #define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0) | ||
| 26 | |||
| 27 | extern const struct clk_ops meson_clk_mpll_ro_ops; | ||
| 28 | extern const struct clk_ops meson_clk_mpll_ops; | ||
| 29 | |||
| 30 | #endif /* __MESON_CLK_MPLL_H */ | ||
diff --git a/drivers/clk/meson/clk-phase.c b/drivers/clk/meson/clk-phase.c index cba43748ce3d..80c3ada193a4 100644 --- a/drivers/clk/meson/clk-phase.c +++ b/drivers/clk/meson/clk-phase.c | |||
| @@ -5,7 +5,10 @@ | |||
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #include <linux/clk-provider.h> | 7 | #include <linux/clk-provider.h> |
| 8 | #include "clkc.h" | 8 | #include <linux/module.h> |
| 9 | |||
| 10 | #include "clk-regmap.h" | ||
| 11 | #include "clk-phase.h" | ||
| 9 | 12 | ||
| 10 | #define phase_step(_width) (360 / (1 << (_width))) | 13 | #define phase_step(_width) (360 / (1 << (_width))) |
| 11 | 14 | ||
| @@ -15,13 +18,12 @@ meson_clk_phase_data(struct clk_regmap *clk) | |||
| 15 | return (struct meson_clk_phase_data *)clk->data; | 18 | return (struct meson_clk_phase_data *)clk->data; |
| 16 | } | 19 | } |
| 17 | 20 | ||
| 18 | int meson_clk_degrees_from_val(unsigned int val, unsigned int width) | 21 | static int meson_clk_degrees_from_val(unsigned int val, unsigned int width) |
| 19 | { | 22 | { |
| 20 | return phase_step(width) * val; | 23 | return phase_step(width) * val; |
| 21 | } | 24 | } |
| 22 | EXPORT_SYMBOL_GPL(meson_clk_degrees_from_val); | ||
| 23 | 25 | ||
| 24 | unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width) | 26 | static unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width) |
| 25 | { | 27 | { |
| 26 | unsigned int val = DIV_ROUND_CLOSEST(degrees, phase_step(width)); | 28 | unsigned int val = DIV_ROUND_CLOSEST(degrees, phase_step(width)); |
| 27 | 29 | ||
| @@ -31,7 +33,6 @@ unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width) | |||
| 31 | */ | 33 | */ |
| 32 | return val % (1 << width); | 34 | return val % (1 << width); |
| 33 | } | 35 | } |
| 34 | EXPORT_SYMBOL_GPL(meson_clk_degrees_to_val); | ||
| 35 | 36 | ||
| 36 | static int meson_clk_phase_get_phase(struct clk_hw *hw) | 37 | static int meson_clk_phase_get_phase(struct clk_hw *hw) |
| 37 | { | 38 | { |
| @@ -61,3 +62,67 @@ const struct clk_ops meson_clk_phase_ops = { | |||
| 61 | .set_phase = meson_clk_phase_set_phase, | 62 | .set_phase = meson_clk_phase_set_phase, |
| 62 | }; | 63 | }; |
| 63 | EXPORT_SYMBOL_GPL(meson_clk_phase_ops); | 64 | EXPORT_SYMBOL_GPL(meson_clk_phase_ops); |
| 65 | |||
| 66 | /* | ||
| 67 | * This is a special clock for the audio controller. | ||
| 68 | * The phase of mst_sclk clock output can be controlled independently | ||
| 69 | * for the outside world (ph0), the tdmout (ph1) and tdmin (ph2). | ||
| 70 | * Controlling these 3 phases as just one makes things simpler and | ||
| 71 | * gives the same clock view to all the elements on the i2s bus. | ||
| 72 | * If necessary, we can still control the phase in the tdm block | ||
| 73 | * which makes these independent controls redundant. | ||
| 74 | */ | ||
| 75 | static inline struct meson_clk_triphase_data * | ||
| 76 | meson_clk_triphase_data(struct clk_regmap *clk) | ||
| 77 | { | ||
| 78 | return (struct meson_clk_triphase_data *)clk->data; | ||
| 79 | } | ||
| 80 | |||
| 81 | static void meson_clk_triphase_sync(struct clk_hw *hw) | ||
| 82 | { | ||
| 83 | struct clk_regmap *clk = to_clk_regmap(hw); | ||
| 84 | struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk); | ||
| 85 | unsigned int val; | ||
| 86 | |||
| 87 | /* Get phase 0 and sync it to phase 1 and 2 */ | ||
| 88 | val = meson_parm_read(clk->map, &tph->ph0); | ||
| 89 | meson_parm_write(clk->map, &tph->ph1, val); | ||
| 90 | meson_parm_write(clk->map, &tph->ph2, val); | ||
| 91 | } | ||
| 92 | |||
| 93 | static int meson_clk_triphase_get_phase(struct clk_hw *hw) | ||
| 94 | { | ||
| 95 | struct clk_regmap *clk = to_clk_regmap(hw); | ||
| 96 | struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk); | ||
| 97 | unsigned int val; | ||
| 98 | |||
| 99 | /* Phase are in sync, reading phase 0 is enough */ | ||
| 100 | val = meson_parm_read(clk->map, &tph->ph0); | ||
| 101 | |||
| 102 | return meson_clk_degrees_from_val(val, tph->ph0.width); | ||
| 103 | } | ||
| 104 | |||
| 105 | static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees) | ||
| 106 | { | ||
| 107 | struct clk_regmap *clk = to_clk_regmap(hw); | ||
| 108 | struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk); | ||
| 109 | unsigned int val; | ||
| 110 | |||
| 111 | val = meson_clk_degrees_to_val(degrees, tph->ph0.width); | ||
| 112 | meson_parm_write(clk->map, &tph->ph0, val); | ||
| 113 | meson_parm_write(clk->map, &tph->ph1, val); | ||
| 114 | meson_parm_write(clk->map, &tph->ph2, val); | ||
| 115 | |||
| 116 | return 0; | ||
| 117 | } | ||
| 118 | |||
| 119 | const struct clk_ops meson_clk_triphase_ops = { | ||
| 120 | .init = meson_clk_triphase_sync, | ||
| 121 | .get_phase = meson_clk_triphase_get_phase, | ||
| 122 | .set_phase = meson_clk_triphase_set_phase, | ||
| 123 | }; | ||
| 124 | EXPORT_SYMBOL_GPL(meson_clk_triphase_ops); | ||
| 125 | |||
| 126 | MODULE_DESCRIPTION("Amlogic phase driver"); | ||
| 127 | MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>"); | ||
| 128 | MODULE_LICENSE("GPL v2"); | ||
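Because the new triphase ops write the same value to ph0, ph1 and ph2, a consumer only ever deals with a single phase. A minimal consumer-side sketch using the generic clk API (the consumer clock name is hypothetical):

	#include <linux/clk.h>

	static int example_align_sclk(struct device *dev)
	{
		struct clk *sclk = devm_clk_get(dev, "sclk");	/* hypothetical name */

		if (IS_ERR(sclk))
			return PTR_ERR(sclk);

		/* ends up in meson_clk_triphase_set_phase(), updating ph0/ph1/ph2 */
		return clk_set_phase(sclk, 180);
	}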
diff --git a/drivers/clk/meson/clk-phase.h b/drivers/clk/meson/clk-phase.h new file mode 100644 index 000000000000..5579f9ced142 --- /dev/null +++ b/drivers/clk/meson/clk-phase.h | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2019 BayLibre, SAS. | ||
| 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __MESON_CLK_PHASE_H | ||
| 8 | #define __MESON_CLK_PHASE_H | ||
| 9 | |||
| 10 | #include <linux/clk-provider.h> | ||
| 11 | #include "parm.h" | ||
| 12 | |||
| 13 | struct meson_clk_phase_data { | ||
| 14 | struct parm ph; | ||
| 15 | }; | ||
| 16 | |||
| 17 | struct meson_clk_triphase_data { | ||
| 18 | struct parm ph0; | ||
| 19 | struct parm ph1; | ||
| 20 | struct parm ph2; | ||
| 21 | }; | ||
| 22 | |||
| 23 | extern const struct clk_ops meson_clk_phase_ops; | ||
| 24 | extern const struct clk_ops meson_clk_triphase_ops; | ||
| 25 | |||
| 26 | #endif /* __MESON_CLK_PHASE_H */ | ||
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c index afffc1547e20..41e16dd7272a 100644 --- a/drivers/clk/meson/clk-pll.c +++ b/drivers/clk/meson/clk-pll.c | |||
| @@ -32,11 +32,10 @@ | |||
| 32 | #include <linux/io.h> | 32 | #include <linux/io.h> |
| 33 | #include <linux/math64.h> | 33 | #include <linux/math64.h> |
| 34 | #include <linux/module.h> | 34 | #include <linux/module.h> |
| 35 | #include <linux/of_address.h> | 35 | #include <linux/rational.h> |
| 36 | #include <linux/slab.h> | ||
| 37 | #include <linux/string.h> | ||
| 38 | 36 | ||
| 39 | #include "clkc.h" | 37 | #include "clk-regmap.h" |
| 38 | #include "clk-pll.h" | ||
| 40 | 39 | ||
| 41 | static inline struct meson_clk_pll_data * | 40 | static inline struct meson_clk_pll_data * |
| 42 | meson_clk_pll_data(struct clk_regmap *clk) | 41 | meson_clk_pll_data(struct clk_regmap *clk) |
| @@ -44,12 +43,21 @@ meson_clk_pll_data(struct clk_regmap *clk) | |||
| 44 | return (struct meson_clk_pll_data *)clk->data; | 43 | return (struct meson_clk_pll_data *)clk->data; |
| 45 | } | 44 | } |
| 46 | 45 | ||
| 46 | static int __pll_round_closest_mult(struct meson_clk_pll_data *pll) | ||
| 47 | { | ||
| 48 | if ((pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) && | ||
| 49 | !MESON_PARM_APPLICABLE(&pll->frac)) | ||
| 50 | return 1; | ||
| 51 | |||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 47 | static unsigned long __pll_params_to_rate(unsigned long parent_rate, | 55 | static unsigned long __pll_params_to_rate(unsigned long parent_rate, |
| 48 | const struct pll_params_table *pllt, | 56 | unsigned int m, unsigned int n, |
| 49 | u16 frac, | 57 | unsigned int frac, |
| 50 | struct meson_clk_pll_data *pll) | 58 | struct meson_clk_pll_data *pll) |
| 51 | { | 59 | { |
| 52 | u64 rate = (u64)parent_rate * pllt->m; | 60 | u64 rate = (u64)parent_rate * m; |
| 53 | 61 | ||
| 54 | if (frac && MESON_PARM_APPLICABLE(&pll->frac)) { | 62 | if (frac && MESON_PARM_APPLICABLE(&pll->frac)) { |
| 55 | u64 frac_rate = (u64)parent_rate * frac; | 63 | u64 frac_rate = (u64)parent_rate * frac; |
| @@ -58,7 +66,7 @@ static unsigned long __pll_params_to_rate(unsigned long parent_rate, | |||
| 58 | (1 << pll->frac.width)); | 66 | (1 << pll->frac.width)); |
| 59 | } | 67 | } |
| 60 | 68 | ||
| 61 | return DIV_ROUND_UP_ULL(rate, pllt->n); | 69 | return DIV_ROUND_UP_ULL(rate, n); |
| 62 | } | 70 | } |
| 63 | 71 | ||
| 64 | static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw, | 72 | static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw, |
| @@ -66,35 +74,39 @@ static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw, | |||
| 66 | { | 74 | { |
| 67 | struct clk_regmap *clk = to_clk_regmap(hw); | 75 | struct clk_regmap *clk = to_clk_regmap(hw); |
| 68 | struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); | 76 | struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); |
| 69 | struct pll_params_table pllt; | 77 | unsigned int m, n, frac; |
| 70 | u16 frac; | ||
| 71 | 78 | ||
| 72 | pllt.n = meson_parm_read(clk->map, &pll->n); | 79 | n = meson_parm_read(clk->map, &pll->n); |
| 73 | pllt.m = meson_parm_read(clk->map, &pll->m); | 80 | m = meson_parm_read(clk->map, &pll->m); |
| 74 | 81 | ||
| 75 | frac = MESON_PARM_APPLICABLE(&pll->frac) ? | 82 | frac = MESON_PARM_APPLICABLE(&pll->frac) ? |
| 76 | meson_parm_read(clk->map, &pll->frac) : | 83 | meson_parm_read(clk->map, &pll->frac) : |
| 77 | 0; | 84 | 0; |
| 78 | 85 | ||
| 79 | return __pll_params_to_rate(parent_rate, &pllt, frac, pll); | 86 | return __pll_params_to_rate(parent_rate, m, n, frac, pll); |
| 80 | } | 87 | } |
| 81 | 88 | ||
| 82 | static u16 __pll_params_with_frac(unsigned long rate, | 89 | static unsigned int __pll_params_with_frac(unsigned long rate, |
| 83 | unsigned long parent_rate, | 90 | unsigned long parent_rate, |
| 84 | const struct pll_params_table *pllt, | 91 | unsigned int m, |
| 85 | struct meson_clk_pll_data *pll) | 92 | unsigned int n, |
| 93 | struct meson_clk_pll_data *pll) | ||
| 86 | { | 94 | { |
| 87 | u16 frac_max = (1 << pll->frac.width); | 95 | unsigned int frac_max = (1 << pll->frac.width); |
| 88 | u64 val = (u64)rate * pllt->n; | 96 | u64 val = (u64)rate * n; |
| 97 | |||
| 98 | /* Bail out if we are already over the requested rate */ | ||
| 99 | if (rate < parent_rate * m / n) | ||
| 100 | return 0; | ||
| 89 | 101 | ||
| 90 | if (pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) | 102 | if (pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) |
| 91 | val = DIV_ROUND_CLOSEST_ULL(val * frac_max, parent_rate); | 103 | val = DIV_ROUND_CLOSEST_ULL(val * frac_max, parent_rate); |
| 92 | else | 104 | else |
| 93 | val = div_u64(val * frac_max, parent_rate); | 105 | val = div_u64(val * frac_max, parent_rate); |
| 94 | 106 | ||
| 95 | val -= pllt->m * frac_max; | 107 | val -= m * frac_max; |
| 96 | 108 | ||
| 97 | return min((u16)val, (u16)(frac_max - 1)); | 109 | return min((unsigned int)val, (frac_max - 1)); |
| 98 | } | 110 | } |
| 99 | 111 | ||
| 100 | static bool meson_clk_pll_is_better(unsigned long rate, | 112 | static bool meson_clk_pll_is_better(unsigned long rate, |
| @@ -102,45 +114,123 @@ static bool meson_clk_pll_is_better(unsigned long rate, | |||
| 102 | unsigned long now, | 114 | unsigned long now, |
| 103 | struct meson_clk_pll_data *pll) | 115 | struct meson_clk_pll_data *pll) |
| 104 | { | 116 | { |
| 105 | if (!(pll->flags & CLK_MESON_PLL_ROUND_CLOSEST) || | 117 | if (__pll_round_closest_mult(pll)) { |
| 106 | MESON_PARM_APPLICABLE(&pll->frac)) { | ||
| 107 | /* Round down */ | ||
| 108 | if (now < rate && best < now) | ||
| 109 | return true; | ||
| 110 | } else { | ||
| 111 | /* Round Closest */ | 118 | /* Round Closest */ |
| 112 | if (abs(now - rate) < abs(best - rate)) | 119 | if (abs(now - rate) < abs(best - rate)) |
| 113 | return true; | 120 | return true; |
| 121 | } else { | ||
| 122 | /* Round down */ | ||
| 123 | if (now < rate && best < now) | ||
| 124 | return true; | ||
| 114 | } | 125 | } |
| 115 | 126 | ||
| 116 | return false; | 127 | return false; |
| 117 | } | 128 | } |
| 118 | 129 | ||
| 119 | static const struct pll_params_table * | 130 | static int meson_clk_get_pll_table_index(unsigned int index, |
| 120 | meson_clk_get_pll_settings(unsigned long rate, | 131 | unsigned int *m, |
| 121 | unsigned long parent_rate, | 132 | unsigned int *n, |
| 122 | struct meson_clk_pll_data *pll) | 133 | struct meson_clk_pll_data *pll) |
| 123 | { | 134 | { |
| 124 | const struct pll_params_table *table = pll->table; | 135 | if (!pll->table[index].n) |
| 125 | unsigned long best = 0, now = 0; | 136 | return -EINVAL; |
| 126 | unsigned int i, best_i = 0; | 137 | |
| 138 | *m = pll->table[index].m; | ||
| 139 | *n = pll->table[index].n; | ||
| 140 | |||
| 141 | return 0; | ||
| 142 | } | ||
| 143 | |||
| 144 | static unsigned int meson_clk_get_pll_range_m(unsigned long rate, | ||
| 145 | unsigned long parent_rate, | ||
| 146 | unsigned int n, | ||
| 147 | struct meson_clk_pll_data *pll) | ||
| 148 | { | ||
| 149 | u64 val = (u64)rate * n; | ||
| 127 | 150 | ||
| 128 | if (!table) | 151 | if (__pll_round_closest_mult(pll)) |
| 129 | return NULL; | 152 | return DIV_ROUND_CLOSEST_ULL(val, parent_rate); |
| 130 | 153 | ||
| 131 | for (i = 0; table[i].n; i++) { | 154 | return div_u64(val, parent_rate); |
| 132 | now = __pll_params_to_rate(parent_rate, &table[i], 0, pll); | 155 | } |
| 133 | 156 | ||
| 134 | /* If we get an exact match, don't bother any further */ | 157 | static int meson_clk_get_pll_range_index(unsigned long rate, |
| 135 | if (now == rate) { | 158 | unsigned long parent_rate, |
| 136 | return &table[i]; | 159 | unsigned int index, |
| 137 | } else if (meson_clk_pll_is_better(rate, best, now, pll)) { | 160 | unsigned int *m, |
| 161 | unsigned int *n, | ||
| 162 | struct meson_clk_pll_data *pll) | ||
| 163 | { | ||
| 164 | *n = index + 1; | ||
| 165 | |||
| 166 | /* Check the predivider range */ | ||
| 167 | if (*n >= (1 << pll->n.width)) | ||
| 168 | return -EINVAL; | ||
| 169 | |||
| 170 | if (*n == 1) { | ||
| 171 | /* Get the boundaries out of the way */ | ||
| 172 | if (rate <= pll->range->min * parent_rate) { | ||
| 173 | *m = pll->range->min; | ||
| 174 | return -ENODATA; | ||
| 175 | } else if (rate >= pll->range->max * parent_rate) { | ||
| 176 | *m = pll->range->max; | ||
| 177 | return -ENODATA; | ||
| 178 | } | ||
| 179 | } | ||
| 180 | |||
| 181 | *m = meson_clk_get_pll_range_m(rate, parent_rate, *n, pll); | ||
| 182 | |||
| 183 | /* The pre-divider gives a multiplier that is too big - stop */ | ||
| 184 | if (*m >= (1 << pll->m.width)) | ||
| 185 | return -EINVAL; | ||
| 186 | |||
| 187 | return 0; | ||
| 188 | } | ||
| 189 | |||
| 190 | static int meson_clk_get_pll_get_index(unsigned long rate, | ||
| 191 | unsigned long parent_rate, | ||
| 192 | unsigned int index, | ||
| 193 | unsigned int *m, | ||
| 194 | unsigned int *n, | ||
| 195 | struct meson_clk_pll_data *pll) | ||
| 196 | { | ||
| 197 | if (pll->range) | ||
| 198 | return meson_clk_get_pll_range_index(rate, parent_rate, | ||
| 199 | index, m, n, pll); | ||
| 200 | else if (pll->table) | ||
| 201 | return meson_clk_get_pll_table_index(index, m, n, pll); | ||
| 202 | |||
| 203 | return -EINVAL; | ||
| 204 | } | ||
| 205 | |||
| 206 | static int meson_clk_get_pll_settings(unsigned long rate, | ||
| 207 | unsigned long parent_rate, | ||
| 208 | unsigned int *best_m, | ||
| 209 | unsigned int *best_n, | ||
| 210 | struct meson_clk_pll_data *pll) | ||
| 211 | { | ||
| 212 | unsigned long best = 0, now = 0; | ||
| 213 | unsigned int i, m, n; | ||
| 214 | int ret; | ||
| 215 | |||
| 216 | for (i = 0, ret = 0; !ret; i++) { | ||
| 217 | ret = meson_clk_get_pll_get_index(rate, parent_rate, | ||
| 218 | i, &m, &n, pll); | ||
| 219 | if (ret == -EINVAL) | ||
| 220 | break; | ||
| 221 | |||
| 222 | now = __pll_params_to_rate(parent_rate, m, n, 0, pll); | ||
| 223 | if (meson_clk_pll_is_better(rate, best, now, pll)) { | ||
| 138 | best = now; | 224 | best = now; |
| 139 | best_i = i; | 225 | *best_m = m; |
| 226 | *best_n = n; | ||
| 227 | |||
| 228 | if (now == rate) | ||
| 229 | break; | ||
| 140 | } | 230 | } |
| 141 | } | 231 | } |
| 142 | 232 | ||
| 143 | return (struct pll_params_table *)&table[best_i]; | 233 | return best ? 0 : -EINVAL; |
| 144 | } | 234 | } |
| 145 | 235 | ||
| 146 | static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, | 236 | static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, |
| @@ -148,15 +238,15 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, | |||
| 148 | { | 238 | { |
| 149 | struct clk_regmap *clk = to_clk_regmap(hw); | 239 | struct clk_regmap *clk = to_clk_regmap(hw); |
| 150 | struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); | 240 | struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); |
| 151 | const struct pll_params_table *pllt = | 241 | unsigned int m, n, frac; |
| 152 | meson_clk_get_pll_settings(rate, *parent_rate, pll); | ||
| 153 | unsigned long round; | 242 | unsigned long round; |
| 154 | u16 frac; | 243 | int ret; |
| 155 | 244 | ||
| 156 | if (!pllt) | 245 | ret = meson_clk_get_pll_settings(rate, *parent_rate, &m, &n, pll); |
| 246 | if (ret) | ||
| 157 | return meson_clk_pll_recalc_rate(hw, *parent_rate); | 247 | return meson_clk_pll_recalc_rate(hw, *parent_rate); |
| 158 | 248 | ||
| 159 | round = __pll_params_to_rate(*parent_rate, pllt, 0, pll); | 249 | round = __pll_params_to_rate(*parent_rate, m, n, 0, pll); |
| 160 | 250 | ||
| 161 | if (!MESON_PARM_APPLICABLE(&pll->frac) || rate == round) | 251 | if (!MESON_PARM_APPLICABLE(&pll->frac) || rate == round) |
| 162 | return round; | 252 | return round; |
| @@ -165,9 +255,9 @@ static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, | |||
| 165 | * The rate provided by the setting is not an exact match, let's | 255 | * The rate provided by the setting is not an exact match, let's |
| 166 | * try to improve the result using the fractional parameter | 256 | * try to improve the result using the fractional parameter |
| 167 | */ | 257 | */ |
| 168 | frac = __pll_params_with_frac(rate, *parent_rate, pllt, pll); | 258 | frac = __pll_params_with_frac(rate, *parent_rate, m, n, pll); |
| 169 | 259 | ||
| 170 | return __pll_params_to_rate(*parent_rate, pllt, frac, pll); | 260 | return __pll_params_to_rate(*parent_rate, m, n, frac, pll); |
| 171 | } | 261 | } |
| 172 | 262 | ||
| 173 | static int meson_clk_pll_wait_lock(struct clk_hw *hw) | 263 | static int meson_clk_pll_wait_lock(struct clk_hw *hw) |
| @@ -254,30 +344,27 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 254 | { | 344 | { |
| 255 | struct clk_regmap *clk = to_clk_regmap(hw); | 345 | struct clk_regmap *clk = to_clk_regmap(hw); |
| 256 | struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); | 346 | struct meson_clk_pll_data *pll = meson_clk_pll_data(clk); |
| 257 | const struct pll_params_table *pllt; | 347 | unsigned int enabled, m, n, frac = 0, ret; |
| 258 | unsigned int enabled; | ||
| 259 | unsigned long old_rate; | 348 | unsigned long old_rate; |
| 260 | u16 frac = 0; | ||
| 261 | 349 | ||
| 262 | if (parent_rate == 0 || rate == 0) | 350 | if (parent_rate == 0 || rate == 0) |
| 263 | return -EINVAL; | 351 | return -EINVAL; |
| 264 | 352 | ||
| 265 | old_rate = rate; | 353 | old_rate = rate; |
| 266 | 354 | ||
| 267 | pllt = meson_clk_get_pll_settings(rate, parent_rate, pll); | 355 | ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll); |
| 268 | if (!pllt) | 356 | if (ret) |
| 269 | return -EINVAL; | 357 | return ret; |
| 270 | 358 | ||
| 271 | enabled = meson_parm_read(clk->map, &pll->en); | 359 | enabled = meson_parm_read(clk->map, &pll->en); |
| 272 | if (enabled) | 360 | if (enabled) |
| 273 | meson_clk_pll_disable(hw); | 361 | meson_clk_pll_disable(hw); |
| 274 | 362 | ||
| 275 | meson_parm_write(clk->map, &pll->n, pllt->n); | 363 | meson_parm_write(clk->map, &pll->n, n); |
| 276 | meson_parm_write(clk->map, &pll->m, pllt->m); | 364 | meson_parm_write(clk->map, &pll->m, m); |
| 277 | |||
| 278 | 365 | ||
| 279 | if (MESON_PARM_APPLICABLE(&pll->frac)) { | 366 | if (MESON_PARM_APPLICABLE(&pll->frac)) { |
| 280 | frac = __pll_params_with_frac(rate, parent_rate, pllt, pll); | 367 | frac = __pll_params_with_frac(rate, parent_rate, m, n, pll); |
| 281 | meson_parm_write(clk->map, &pll->frac, frac); | 368 | meson_parm_write(clk->map, &pll->frac, frac); |
| 282 | } | 369 | } |
| 283 | 370 | ||
| @@ -309,8 +396,15 @@ const struct clk_ops meson_clk_pll_ops = { | |||
| 309 | .enable = meson_clk_pll_enable, | 396 | .enable = meson_clk_pll_enable, |
| 310 | .disable = meson_clk_pll_disable | 397 | .disable = meson_clk_pll_disable |
| 311 | }; | 398 | }; |
| 399 | EXPORT_SYMBOL_GPL(meson_clk_pll_ops); | ||
| 312 | 400 | ||
| 313 | const struct clk_ops meson_clk_pll_ro_ops = { | 401 | const struct clk_ops meson_clk_pll_ro_ops = { |
| 314 | .recalc_rate = meson_clk_pll_recalc_rate, | 402 | .recalc_rate = meson_clk_pll_recalc_rate, |
| 315 | .is_enabled = meson_clk_pll_is_enabled, | 403 | .is_enabled = meson_clk_pll_is_enabled, |
| 316 | }; | 404 | }; |
| 405 | EXPORT_SYMBOL_GPL(meson_clk_pll_ro_ops); | ||
| 406 | |||
| 407 | MODULE_DESCRIPTION("Amlogic PLL driver"); | ||
| 408 | MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>"); | ||
| 409 | MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>"); | ||
| 410 | MODULE_LICENSE("GPL v2"); | ||
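
The reworked clk-pll.c above drops the table-pointer return and fills m/n output parameters instead, so the same search serves both table-based PLLs and the new range-based ones. The sketch below is a standalone userspace rendition of that arithmetic, not the driver itself: it assumes the usual Amlogic PLL relation rate = parent * (m + frac / 2^frac_width) / n (the body of __pll_params_to_rate() is not part of this hunk), and the 24 MHz parent, 491.52 MHz target and 17-bit frac width are only example values.

#include <stdio.h>
#include <stdint.h>

#define FRAC_WIDTH	17	/* example frac field width, as on the G12A PLLs below */

/* mirrors meson_clk_get_pll_range_m() in the round-closest case */
static unsigned int range_m(uint64_t rate, uint64_t parent, unsigned int n)
{
	return (unsigned int)((rate * n + parent / 2) / parent);
}

/* mirrors __pll_params_with_frac() in the round-down case */
static unsigned int frac_for(uint64_t rate, uint64_t parent,
			     unsigned int m, unsigned int n)
{
	uint64_t frac_max = 1ULL << FRAC_WIDTH;
	uint64_t val = rate * n * frac_max / parent;

	if (rate < parent * m / n)	/* already over the requested rate */
		return 0;

	val -= (uint64_t)m * frac_max;
	return (unsigned int)(val < frac_max - 1 ? val : frac_max - 1);
}

int main(void)
{
	/* 24 MHz crystal and a 491.52 MHz target, both invented for the example */
	uint64_t parent = 24000000, target = 491520000;
	unsigned int n = 1;
	unsigned int m = range_m(target, parent, n);
	unsigned int frac = frac_for(target, parent, m, n);
	uint64_t dco = parent * ((uint64_t)m * (1ULL << FRAC_WIDTH) + frac)
			/ (1ULL << FRAC_WIDTH) / n;

	printf("m=%u n=%u frac=%u -> %llu Hz\n",
	       m, n, frac, (unsigned long long)dco);
	return 0;
}

Compiled with gcc this prints m=20 n=1 frac=62914 and a rate slightly under the target, which is the kind of result the round-down path of __pll_params_with_frac() is expected to produce.
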
diff --git a/drivers/clk/meson/clk-pll.h b/drivers/clk/meson/clk-pll.h new file mode 100644 index 000000000000..55af2e285b1b --- /dev/null +++ b/drivers/clk/meson/clk-pll.h | |||
| @@ -0,0 +1,49 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2019 BayLibre, SAS. | ||
| 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __MESON_CLK_PLL_H | ||
| 8 | #define __MESON_CLK_PLL_H | ||
| 9 | |||
| 10 | #include <linux/clk-provider.h> | ||
| 11 | #include <linux/regmap.h> | ||
| 12 | #include "parm.h" | ||
| 13 | |||
| 14 | struct pll_params_table { | ||
| 15 | unsigned int m; | ||
| 16 | unsigned int n; | ||
| 17 | }; | ||
| 18 | |||
| 19 | struct pll_mult_range { | ||
| 20 | unsigned int min; | ||
| 21 | unsigned int max; | ||
| 22 | }; | ||
| 23 | |||
| 24 | #define PLL_PARAMS(_m, _n) \ | ||
| 25 | { \ | ||
| 26 | .m = (_m), \ | ||
| 27 | .n = (_n), \ | ||
| 28 | } | ||
| 29 | |||
| 30 | #define CLK_MESON_PLL_ROUND_CLOSEST BIT(0) | ||
| 31 | |||
| 32 | struct meson_clk_pll_data { | ||
| 33 | struct parm en; | ||
| 34 | struct parm m; | ||
| 35 | struct parm n; | ||
| 36 | struct parm frac; | ||
| 37 | struct parm l; | ||
| 38 | struct parm rst; | ||
| 39 | const struct reg_sequence *init_regs; | ||
| 40 | unsigned int init_count; | ||
| 41 | const struct pll_params_table *table; | ||
| 42 | const struct pll_mult_range *range; | ||
| 43 | u8 flags; | ||
| 44 | }; | ||
| 45 | |||
| 46 | extern const struct clk_ops meson_clk_pll_ro_ops; | ||
| 47 | extern const struct clk_ops meson_clk_pll_ops; | ||
| 48 | |||
| 49 | #endif /* __MESON_CLK_PLL_H */ | ||
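
With the new header, a PLL can constrain its settings either with a fixed table of m/n pairs or with a simple multiplier range, as gp0_pll_dco and hifi_pll_dco do later in this series. The declarations below are hypothetical and only illustrate the two shapes; the values do not describe real hardware.

static const struct pll_params_table example_pll_table[] = {
	PLL_PARAMS(40, 1),	/* 24 MHz xtal -> 960 MHz DCO, invented entry */
	PLL_PARAMS(50, 1),	/* 24 MHz xtal -> 1200 MHz DCO, invented entry */
	{ /* sentinel: .n == 0 stops meson_clk_get_pll_table_index() */ },
};

static const struct pll_mult_range example_pll_range = {
	.min = 55,
	.max = 255,
};

A given meson_clk_pll_data then points at one of the two through .table or .range; when both are set, meson_clk_get_pll_get_index() above tries the range first.
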
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c index c515f67322a3..dcd1757cc5df 100644 --- a/drivers/clk/meson/clk-regmap.c +++ b/drivers/clk/meson/clk-regmap.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> | 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> |
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #include <linux/module.h> | ||
| 7 | #include "clk-regmap.h" | 8 | #include "clk-regmap.h" |
| 8 | 9 | ||
| 9 | static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable) | 10 | static int clk_regmap_gate_endisable(struct clk_hw *hw, int enable) |
| @@ -180,3 +181,7 @@ const struct clk_ops clk_regmap_mux_ro_ops = { | |||
| 180 | .get_parent = clk_regmap_mux_get_parent, | 181 | .get_parent = clk_regmap_mux_get_parent, |
| 181 | }; | 182 | }; |
| 182 | EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops); | 183 | EXPORT_SYMBOL_GPL(clk_regmap_mux_ro_ops); |
| 184 | |||
| 185 | MODULE_DESCRIPTION("Amlogic regmap backed clock driver"); | ||
| 186 | MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>"); | ||
| 187 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h index e9c5728d40eb..1dd0abe3ba91 100644 --- a/drivers/clk/meson/clk-regmap.h +++ b/drivers/clk/meson/clk-regmap.h | |||
| @@ -111,4 +111,24 @@ clk_get_regmap_mux_data(struct clk_regmap *clk) | |||
| 111 | extern const struct clk_ops clk_regmap_mux_ops; | 111 | extern const struct clk_ops clk_regmap_mux_ops; |
| 112 | extern const struct clk_ops clk_regmap_mux_ro_ops; | 112 | extern const struct clk_ops clk_regmap_mux_ro_ops; |
| 113 | 113 | ||
| 114 | #define __MESON_GATE(_name, _reg, _bit, _ops) \ | ||
| 115 | struct clk_regmap _name = { \ | ||
| 116 | .data = &(struct clk_regmap_gate_data){ \ | ||
| 117 | .offset = (_reg), \ | ||
| 118 | .bit_idx = (_bit), \ | ||
| 119 | }, \ | ||
| 120 | .hw.init = &(struct clk_init_data) { \ | ||
| 121 | .name = #_name, \ | ||
| 122 | .ops = _ops, \ | ||
| 123 | .parent_names = (const char *[]){ "clk81" }, \ | ||
| 124 | .num_parents = 1, \ | ||
| 125 | .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \ | ||
| 126 | }, \ | ||
| 127 | } | ||
| 128 | |||
| 129 | #define MESON_GATE(_name, _reg, _bit) \ | ||
| 130 | __MESON_GATE(_name, _reg, _bit, &clk_regmap_gate_ops) | ||
| 131 | |||
| 132 | #define MESON_GATE_RO(_name, _reg, _bit) \ | ||
| 133 | __MESON_GATE(_name, _reg, _bit, &clk_regmap_gate_ro_ops) | ||
| 114 | #endif /* __CLK_REGMAP_H */ | 134 | #endif /* __CLK_REGMAP_H */ |
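
The MESON_GATE() helper moved here from clkc.h (removed below) and gains a read-only variant through __MESON_GATE(). For reference, a hypothetical static MESON_GATE(foo, 0x140, 3) - the name, register offset and bit index are invented - expands to roughly:

static struct clk_regmap foo = {
	.data = &(struct clk_regmap_gate_data){
		.offset = 0x140,
		.bit_idx = 3,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "foo",
		.ops = &clk_regmap_gate_ops,
		.parent_names = (const char *[]){ "clk81" },
		.num_parents = 1,
		.flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
	},
};

MESON_GATE_RO() produces the same structure but with clk_regmap_gate_ro_ops, so the gate state can be read without being writable.
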
diff --git a/drivers/clk/meson/clk-triphase.c b/drivers/clk/meson/clk-triphase.c deleted file mode 100644 index 4a59936251e5..000000000000 --- a/drivers/clk/meson/clk-triphase.c +++ /dev/null | |||
| @@ -1,68 +0,0 @@ | |||
| 1 | // SPDX-License-Identifier: (GPL-2.0 OR MIT) | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2018 BayLibre, SAS. | ||
| 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/clk-provider.h> | ||
| 8 | #include "clkc-audio.h" | ||
| 9 | |||
| 10 | /* | ||
| 11 | * This is a special clock for the audio controller. | ||
| 12 | * The phase of mst_sclk clock output can be controlled independently | ||
| 13 | * for the outside world (ph0), the tdmout (ph1) and tdmin (ph2). | ||
| 14 | * Controlling these 3 phases as just one makes things simpler and | ||
| 15 | * give the same clock view to all the element on the i2s bus. | ||
| 16 | * If necessary, we can still control the phase in the tdm block | ||
| 17 | * which makes these independent control redundant. | ||
| 18 | */ | ||
| 19 | static inline struct meson_clk_triphase_data * | ||
| 20 | meson_clk_triphase_data(struct clk_regmap *clk) | ||
| 21 | { | ||
| 22 | return (struct meson_clk_triphase_data *)clk->data; | ||
| 23 | } | ||
| 24 | |||
| 25 | static void meson_clk_triphase_sync(struct clk_hw *hw) | ||
| 26 | { | ||
| 27 | struct clk_regmap *clk = to_clk_regmap(hw); | ||
| 28 | struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk); | ||
| 29 | unsigned int val; | ||
| 30 | |||
| 31 | /* Get phase 0 and sync it to phase 1 and 2 */ | ||
| 32 | val = meson_parm_read(clk->map, &tph->ph0); | ||
| 33 | meson_parm_write(clk->map, &tph->ph1, val); | ||
| 34 | meson_parm_write(clk->map, &tph->ph2, val); | ||
| 35 | } | ||
| 36 | |||
| 37 | static int meson_clk_triphase_get_phase(struct clk_hw *hw) | ||
| 38 | { | ||
| 39 | struct clk_regmap *clk = to_clk_regmap(hw); | ||
| 40 | struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk); | ||
| 41 | unsigned int val; | ||
| 42 | |||
| 43 | /* Phase are in sync, reading phase 0 is enough */ | ||
| 44 | val = meson_parm_read(clk->map, &tph->ph0); | ||
| 45 | |||
| 46 | return meson_clk_degrees_from_val(val, tph->ph0.width); | ||
| 47 | } | ||
| 48 | |||
| 49 | static int meson_clk_triphase_set_phase(struct clk_hw *hw, int degrees) | ||
| 50 | { | ||
| 51 | struct clk_regmap *clk = to_clk_regmap(hw); | ||
| 52 | struct meson_clk_triphase_data *tph = meson_clk_triphase_data(clk); | ||
| 53 | unsigned int val; | ||
| 54 | |||
| 55 | val = meson_clk_degrees_to_val(degrees, tph->ph0.width); | ||
| 56 | meson_parm_write(clk->map, &tph->ph0, val); | ||
| 57 | meson_parm_write(clk->map, &tph->ph1, val); | ||
| 58 | meson_parm_write(clk->map, &tph->ph2, val); | ||
| 59 | |||
| 60 | return 0; | ||
| 61 | } | ||
| 62 | |||
| 63 | const struct clk_ops meson_clk_triphase_ops = { | ||
| 64 | .init = meson_clk_triphase_sync, | ||
| 65 | .get_phase = meson_clk_triphase_get_phase, | ||
| 66 | .set_phase = meson_clk_triphase_set_phase, | ||
| 67 | }; | ||
| 68 | EXPORT_SYMBOL_GPL(meson_clk_triphase_ops); | ||
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h deleted file mode 100644 index 6183b22c4bf2..000000000000 --- a/drivers/clk/meson/clkc.h +++ /dev/null | |||
| @@ -1,127 +0,0 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2015 Endless Mobile, Inc. | ||
| 4 | * Author: Carlo Caione <carlo@endlessm.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __CLKC_H | ||
| 8 | #define __CLKC_H | ||
| 9 | |||
| 10 | #include <linux/clk-provider.h> | ||
| 11 | #include "clk-regmap.h" | ||
| 12 | |||
| 13 | #define PMASK(width) GENMASK(width - 1, 0) | ||
| 14 | #define SETPMASK(width, shift) GENMASK(shift + width - 1, shift) | ||
| 15 | #define CLRPMASK(width, shift) (~SETPMASK(width, shift)) | ||
| 16 | |||
| 17 | #define PARM_GET(width, shift, reg) \ | ||
| 18 | (((reg) & SETPMASK(width, shift)) >> (shift)) | ||
| 19 | #define PARM_SET(width, shift, reg, val) \ | ||
| 20 | (((reg) & CLRPMASK(width, shift)) | ((val) << (shift))) | ||
| 21 | |||
| 22 | #define MESON_PARM_APPLICABLE(p) (!!((p)->width)) | ||
| 23 | |||
| 24 | struct parm { | ||
| 25 | u16 reg_off; | ||
| 26 | u8 shift; | ||
| 27 | u8 width; | ||
| 28 | }; | ||
| 29 | |||
| 30 | static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p) | ||
| 31 | { | ||
| 32 | unsigned int val; | ||
| 33 | |||
| 34 | regmap_read(map, p->reg_off, &val); | ||
| 35 | return PARM_GET(p->width, p->shift, val); | ||
| 36 | } | ||
| 37 | |||
| 38 | static inline void meson_parm_write(struct regmap *map, struct parm *p, | ||
| 39 | unsigned int val) | ||
| 40 | { | ||
| 41 | regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift), | ||
| 42 | val << p->shift); | ||
| 43 | } | ||
| 44 | |||
| 45 | |||
| 46 | struct pll_params_table { | ||
| 47 | u16 m; | ||
| 48 | u16 n; | ||
| 49 | }; | ||
| 50 | |||
| 51 | #define PLL_PARAMS(_m, _n) \ | ||
| 52 | { \ | ||
| 53 | .m = (_m), \ | ||
| 54 | .n = (_n), \ | ||
| 55 | } | ||
| 56 | |||
| 57 | #define CLK_MESON_PLL_ROUND_CLOSEST BIT(0) | ||
| 58 | |||
| 59 | struct meson_clk_pll_data { | ||
| 60 | struct parm en; | ||
| 61 | struct parm m; | ||
| 62 | struct parm n; | ||
| 63 | struct parm frac; | ||
| 64 | struct parm l; | ||
| 65 | struct parm rst; | ||
| 66 | const struct reg_sequence *init_regs; | ||
| 67 | unsigned int init_count; | ||
| 68 | const struct pll_params_table *table; | ||
| 69 | u8 flags; | ||
| 70 | }; | ||
| 71 | |||
| 72 | #define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw) | ||
| 73 | |||
| 74 | struct meson_clk_mpll_data { | ||
| 75 | struct parm sdm; | ||
| 76 | struct parm sdm_en; | ||
| 77 | struct parm n2; | ||
| 78 | struct parm ssen; | ||
| 79 | struct parm misc; | ||
| 80 | spinlock_t *lock; | ||
| 81 | u8 flags; | ||
| 82 | }; | ||
| 83 | |||
| 84 | #define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0) | ||
| 85 | |||
| 86 | struct meson_clk_phase_data { | ||
| 87 | struct parm ph; | ||
| 88 | }; | ||
| 89 | |||
| 90 | int meson_clk_degrees_from_val(unsigned int val, unsigned int width); | ||
| 91 | unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width); | ||
| 92 | |||
| 93 | struct meson_vid_pll_div_data { | ||
| 94 | struct parm val; | ||
| 95 | struct parm sel; | ||
| 96 | }; | ||
| 97 | |||
| 98 | #define MESON_GATE(_name, _reg, _bit) \ | ||
| 99 | struct clk_regmap _name = { \ | ||
| 100 | .data = &(struct clk_regmap_gate_data){ \ | ||
| 101 | .offset = (_reg), \ | ||
| 102 | .bit_idx = (_bit), \ | ||
| 103 | }, \ | ||
| 104 | .hw.init = &(struct clk_init_data) { \ | ||
| 105 | .name = #_name, \ | ||
| 106 | .ops = &clk_regmap_gate_ops, \ | ||
| 107 | .parent_names = (const char *[]){ "clk81" }, \ | ||
| 108 | .num_parents = 1, \ | ||
| 109 | .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \ | ||
| 110 | }, \ | ||
| 111 | }; | ||
| 112 | |||
| 113 | /* clk_ops */ | ||
| 114 | extern const struct clk_ops meson_clk_pll_ro_ops; | ||
| 115 | extern const struct clk_ops meson_clk_pll_ops; | ||
| 116 | extern const struct clk_ops meson_clk_cpu_ops; | ||
| 117 | extern const struct clk_ops meson_clk_mpll_ro_ops; | ||
| 118 | extern const struct clk_ops meson_clk_mpll_ops; | ||
| 119 | extern const struct clk_ops meson_clk_phase_ops; | ||
| 120 | extern const struct clk_ops meson_vid_pll_div_ro_ops; | ||
| 121 | |||
| 122 | struct clk_hw *meson_clk_hw_register_input(struct device *dev, | ||
| 123 | const char *of_name, | ||
| 124 | const char *clk_name, | ||
| 125 | unsigned long flags); | ||
| 126 | |||
| 127 | #endif /* __CLKC_H */ | ||
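
clkc.h and its PARM_GET()/PARM_SET() helpers go away here; clk-pll.h above now pulls the same parameter accessors from parm.h, which is not shown in this section but presumably carries identical macros. The small userspace program below only demonstrates the bit-field arithmetic those macros perform, using the m (bits [7:0]) and n (bits [14:10]) layout of the G12A PLL control registers; the register value 0x428 is invented for the example.

#include <stdio.h>

/* 32-bit stand-in for the kernel's GENMASK() */
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
#define SETPMASK(width, shift)	GENMASK(shift + width - 1, shift)
#define CLRPMASK(width, shift)	(~SETPMASK(width, shift))
#define PARM_GET(width, shift, reg) \
	(((reg) & SETPMASK(width, shift)) >> (shift))
#define PARM_SET(width, shift, reg, val) \
	(((reg) & CLRPMASK(width, shift)) | ((val) << (shift)))

int main(void)
{
	unsigned int reg = 0x428;	/* hypothetical *_PLL_CNTL0 content */

	printf("m = %u\n", PARM_GET(8, 0, reg));		/* 0x28 -> 40 */
	printf("n = %u\n", PARM_GET(5, 10, reg));		/* bit 10 set -> 1 */
	printf("m=50: 0x%08x\n", PARM_SET(8, 0, reg, 50));	/* -> 0x00000432 */
	return 0;
}
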
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c new file mode 100644 index 000000000000..1994e735396b --- /dev/null +++ b/drivers/clk/meson/g12a-aoclk.c | |||
| @@ -0,0 +1,454 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * Amlogic Meson-G12A Always-On Clock Controller Driver | ||
| 4 | * | ||
| 5 | * Copyright (c) 2016 Baylibre SAS. | ||
| 6 | * Author: Michael Turquette <mturquette@baylibre.com> | ||
| 7 | * | ||
| 8 | * Copyright (c) 2019 Baylibre SAS. | ||
| 9 | * Author: Neil Armstrong <narmstrong@baylibre.com> | ||
| 10 | */ | ||
| 11 | #include <linux/clk-provider.h> | ||
| 12 | #include <linux/platform_device.h> | ||
| 13 | #include <linux/reset-controller.h> | ||
| 14 | #include <linux/mfd/syscon.h> | ||
| 15 | #include "meson-aoclk.h" | ||
| 16 | #include "g12a-aoclk.h" | ||
| 17 | |||
| 18 | #include "clk-regmap.h" | ||
| 19 | #include "clk-dualdiv.h" | ||
| 20 | |||
| 21 | #define IN_PREFIX "ao-in-" | ||
| 22 | |||
| 23 | /* | ||
| 24 | * AO Configuration Clock register offsets | ||
| 25 | * Register offsets from the data sheet must be multiplied by 4. | ||
| 26 | */ | ||
| 27 | #define AO_RTI_STATUS_REG3 0x0C | ||
| 28 | #define AO_RTI_PWR_CNTL_REG0 0x10 | ||
| 29 | #define AO_RTI_GEN_CNTL_REG0 0x40 | ||
| 30 | #define AO_CLK_GATE0 0x4c | ||
| 31 | #define AO_CLK_GATE0_SP 0x50 | ||
| 32 | #define AO_OSCIN_CNTL 0x58 | ||
| 33 | #define AO_CEC_CLK_CNTL_REG0 0x74 | ||
| 34 | #define AO_CEC_CLK_CNTL_REG1 0x78 | ||
| 35 | #define AO_SAR_CLK 0x90 | ||
| 36 | #define AO_RTC_ALT_CLK_CNTL0 0x94 | ||
| 37 | #define AO_RTC_ALT_CLK_CNTL1 0x98 | ||
| 38 | |||
| 39 | /* | ||
| 40 | * Like every other peripheral clock gate in Amlogic Clock drivers, | ||
| 41 | * we are using CLK_IGNORE_UNUSED here, so we keep the state of the | ||
| 42 | * bootloader. The goal is to remove this flag at some point. | ||
| 43 | * Actually removing it will require extensive testing to be done safely. | ||
| 44 | */ | ||
| 45 | #define AXG_AO_GATE(_name, _reg, _bit) \ | ||
| 46 | static struct clk_regmap g12a_aoclk_##_name = { \ | ||
| 47 | .data = &(struct clk_regmap_gate_data) { \ | ||
| 48 | .offset = (_reg), \ | ||
| 49 | .bit_idx = (_bit), \ | ||
| 50 | }, \ | ||
| 51 | .hw.init = &(struct clk_init_data) { \ | ||
| 52 | .name = "g12a_ao_" #_name, \ | ||
| 53 | .ops = &clk_regmap_gate_ops, \ | ||
| 54 | .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \ | ||
| 55 | .num_parents = 1, \ | ||
| 56 | .flags = CLK_IGNORE_UNUSED, \ | ||
| 57 | }, \ | ||
| 58 | } | ||
| 59 | |||
| 60 | AXG_AO_GATE(ahb, AO_CLK_GATE0, 0); | ||
| 61 | AXG_AO_GATE(ir_in, AO_CLK_GATE0, 1); | ||
| 62 | AXG_AO_GATE(i2c_m0, AO_CLK_GATE0, 2); | ||
| 63 | AXG_AO_GATE(i2c_s0, AO_CLK_GATE0, 3); | ||
| 64 | AXG_AO_GATE(uart, AO_CLK_GATE0, 4); | ||
| 65 | AXG_AO_GATE(prod_i2c, AO_CLK_GATE0, 5); | ||
| 66 | AXG_AO_GATE(uart2, AO_CLK_GATE0, 6); | ||
| 67 | AXG_AO_GATE(ir_out, AO_CLK_GATE0, 7); | ||
| 68 | AXG_AO_GATE(saradc, AO_CLK_GATE0, 8); | ||
| 69 | AXG_AO_GATE(mailbox, AO_CLK_GATE0_SP, 0); | ||
| 70 | AXG_AO_GATE(m3, AO_CLK_GATE0_SP, 1); | ||
| 71 | AXG_AO_GATE(ahb_sram, AO_CLK_GATE0_SP, 2); | ||
| 72 | AXG_AO_GATE(rti, AO_CLK_GATE0_SP, 3); | ||
| 73 | AXG_AO_GATE(m4_fclk, AO_CLK_GATE0_SP, 4); | ||
| 74 | AXG_AO_GATE(m4_hclk, AO_CLK_GATE0_SP, 5); | ||
| 75 | |||
| 76 | static struct clk_regmap g12a_aoclk_cts_oscin = { | ||
| 77 | .data = &(struct clk_regmap_gate_data){ | ||
| 78 | .offset = AO_RTI_PWR_CNTL_REG0, | ||
| 79 | .bit_idx = 14, | ||
| 80 | }, | ||
| 81 | .hw.init = &(struct clk_init_data){ | ||
| 82 | .name = "cts_oscin", | ||
| 83 | .ops = &clk_regmap_gate_ro_ops, | ||
| 84 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, | ||
| 85 | .num_parents = 1, | ||
| 86 | }, | ||
| 87 | }; | ||
| 88 | |||
| 89 | static const struct meson_clk_dualdiv_param g12a_32k_div_table[] = { | ||
| 90 | { | ||
| 91 | .dual = 1, | ||
| 92 | .n1 = 733, | ||
| 93 | .m1 = 8, | ||
| 94 | .n2 = 732, | ||
| 95 | .m2 = 11, | ||
| 96 | }, {} | ||
| 97 | }; | ||
| 98 | |||
| 99 | /* 32k_by_oscin clock */ | ||
| 100 | |||
| 101 | static struct clk_regmap g12a_aoclk_32k_by_oscin_pre = { | ||
| 102 | .data = &(struct clk_regmap_gate_data){ | ||
| 103 | .offset = AO_RTC_ALT_CLK_CNTL0, | ||
| 104 | .bit_idx = 31, | ||
| 105 | }, | ||
| 106 | .hw.init = &(struct clk_init_data){ | ||
| 107 | .name = "g12a_ao_32k_by_oscin_pre", | ||
| 108 | .ops = &clk_regmap_gate_ops, | ||
| 109 | .parent_names = (const char *[]){ "cts_oscin" }, | ||
| 110 | .num_parents = 1, | ||
| 111 | }, | ||
| 112 | }; | ||
| 113 | |||
| 114 | static struct clk_regmap g12a_aoclk_32k_by_oscin_div = { | ||
| 115 | .data = &(struct meson_clk_dualdiv_data){ | ||
| 116 | .n1 = { | ||
| 117 | .reg_off = AO_RTC_ALT_CLK_CNTL0, | ||
| 118 | .shift = 0, | ||
| 119 | .width = 12, | ||
| 120 | }, | ||
| 121 | .n2 = { | ||
| 122 | .reg_off = AO_RTC_ALT_CLK_CNTL0, | ||
| 123 | .shift = 12, | ||
| 124 | .width = 12, | ||
| 125 | }, | ||
| 126 | .m1 = { | ||
| 127 | .reg_off = AO_RTC_ALT_CLK_CNTL1, | ||
| 128 | .shift = 0, | ||
| 129 | .width = 12, | ||
| 130 | }, | ||
| 131 | .m2 = { | ||
| 132 | .reg_off = AO_RTC_ALT_CLK_CNTL1, | ||
| 133 | .shift = 12, | ||
| 134 | .width = 12, | ||
| 135 | }, | ||
| 136 | .dual = { | ||
| 137 | .reg_off = AO_RTC_ALT_CLK_CNTL0, | ||
| 138 | .shift = 28, | ||
| 139 | .width = 1, | ||
| 140 | }, | ||
| 141 | .table = g12a_32k_div_table, | ||
| 142 | }, | ||
| 143 | .hw.init = &(struct clk_init_data){ | ||
| 144 | .name = "g12a_ao_32k_by_oscin_div", | ||
| 145 | .ops = &meson_clk_dualdiv_ops, | ||
| 146 | .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_pre" }, | ||
| 147 | .num_parents = 1, | ||
| 148 | }, | ||
| 149 | }; | ||
| 150 | |||
| 151 | static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = { | ||
| 152 | .data = &(struct clk_regmap_mux_data) { | ||
| 153 | .offset = AO_RTC_ALT_CLK_CNTL1, | ||
| 154 | .mask = 0x1, | ||
| 155 | .shift = 24, | ||
| 156 | .flags = CLK_MUX_ROUND_CLOSEST, | ||
| 157 | }, | ||
| 158 | .hw.init = &(struct clk_init_data){ | ||
| 159 | .name = "g12a_ao_32k_by_oscin_sel", | ||
| 160 | .ops = &clk_regmap_mux_ops, | ||
| 161 | .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_div", | ||
| 162 | "g12a_ao_32k_by_oscin_pre" }, | ||
| 163 | .num_parents = 2, | ||
| 164 | .flags = CLK_SET_RATE_PARENT, | ||
| 165 | }, | ||
| 166 | }; | ||
| 167 | |||
| 168 | static struct clk_regmap g12a_aoclk_32k_by_oscin = { | ||
| 169 | .data = &(struct clk_regmap_gate_data){ | ||
| 170 | .offset = AO_RTC_ALT_CLK_CNTL0, | ||
| 171 | .bit_idx = 30, | ||
| 172 | }, | ||
| 173 | .hw.init = &(struct clk_init_data){ | ||
| 174 | .name = "g12a_ao_32k_by_oscin", | ||
| 175 | .ops = &clk_regmap_gate_ops, | ||
| 176 | .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin_sel" }, | ||
| 177 | .num_parents = 1, | ||
| 178 | .flags = CLK_SET_RATE_PARENT, | ||
| 179 | }, | ||
| 180 | }; | ||
| 181 | |||
| 182 | /* cec clock */ | ||
| 183 | |||
| 184 | static struct clk_regmap g12a_aoclk_cec_pre = { | ||
| 185 | .data = &(struct clk_regmap_gate_data){ | ||
| 186 | .offset = AO_CEC_CLK_CNTL_REG0, | ||
| 187 | .bit_idx = 31, | ||
| 188 | }, | ||
| 189 | .hw.init = &(struct clk_init_data){ | ||
| 190 | .name = "g12a_ao_cec_pre", | ||
| 191 | .ops = &clk_regmap_gate_ops, | ||
| 192 | .parent_names = (const char *[]){ "cts_oscin" }, | ||
| 193 | .num_parents = 1, | ||
| 194 | }, | ||
| 195 | }; | ||
| 196 | |||
| 197 | static struct clk_regmap g12a_aoclk_cec_div = { | ||
| 198 | .data = &(struct meson_clk_dualdiv_data){ | ||
| 199 | .n1 = { | ||
| 200 | .reg_off = AO_CEC_CLK_CNTL_REG0, | ||
| 201 | .shift = 0, | ||
| 202 | .width = 12, | ||
| 203 | }, | ||
| 204 | .n2 = { | ||
| 205 | .reg_off = AO_CEC_CLK_CNTL_REG0, | ||
| 206 | .shift = 12, | ||
| 207 | .width = 12, | ||
| 208 | }, | ||
| 209 | .m1 = { | ||
| 210 | .reg_off = AO_CEC_CLK_CNTL_REG1, | ||
| 211 | .shift = 0, | ||
| 212 | .width = 12, | ||
| 213 | }, | ||
| 214 | .m2 = { | ||
| 215 | .reg_off = AO_CEC_CLK_CNTL_REG1, | ||
| 216 | .shift = 12, | ||
| 217 | .width = 12, | ||
| 218 | }, | ||
| 219 | .dual = { | ||
| 220 | .reg_off = AO_CEC_CLK_CNTL_REG0, | ||
| 221 | .shift = 28, | ||
| 222 | .width = 1, | ||
| 223 | }, | ||
| 224 | .table = g12a_32k_div_table, | ||
| 225 | }, | ||
| 226 | .hw.init = &(struct clk_init_data){ | ||
| 227 | .name = "g12a_ao_cec_div", | ||
| 228 | .ops = &meson_clk_dualdiv_ops, | ||
| 229 | .parent_names = (const char *[]){ "g12a_ao_cec_pre" }, | ||
| 230 | .num_parents = 1, | ||
| 231 | }, | ||
| 232 | }; | ||
| 233 | |||
| 234 | static struct clk_regmap g12a_aoclk_cec_sel = { | ||
| 235 | .data = &(struct clk_regmap_mux_data) { | ||
| 236 | .offset = AO_CEC_CLK_CNTL_REG1, | ||
| 237 | .mask = 0x1, | ||
| 238 | .shift = 24, | ||
| 239 | .flags = CLK_MUX_ROUND_CLOSEST, | ||
| 240 | }, | ||
| 241 | .hw.init = &(struct clk_init_data){ | ||
| 242 | .name = "g12a_ao_cec_sel", | ||
| 243 | .ops = &clk_regmap_mux_ops, | ||
| 244 | .parent_names = (const char *[]){ "g12a_ao_cec_div", | ||
| 245 | "g12a_ao_cec_pre" }, | ||
| 246 | .num_parents = 2, | ||
| 247 | .flags = CLK_SET_RATE_PARENT, | ||
| 248 | }, | ||
| 249 | }; | ||
| 250 | |||
| 251 | static struct clk_regmap g12a_aoclk_cec = { | ||
| 252 | .data = &(struct clk_regmap_gate_data){ | ||
| 253 | .offset = AO_CEC_CLK_CNTL_REG0, | ||
| 254 | .bit_idx = 30, | ||
| 255 | }, | ||
| 256 | .hw.init = &(struct clk_init_data){ | ||
| 257 | .name = "g12a_ao_cec", | ||
| 258 | .ops = &clk_regmap_gate_ops, | ||
| 259 | .parent_names = (const char *[]){ "g12a_ao_cec_sel" }, | ||
| 260 | .num_parents = 1, | ||
| 261 | .flags = CLK_SET_RATE_PARENT, | ||
| 262 | }, | ||
| 263 | }; | ||
| 264 | |||
| 265 | static struct clk_regmap g12a_aoclk_cts_rtc_oscin = { | ||
| 266 | .data = &(struct clk_regmap_mux_data) { | ||
| 267 | .offset = AO_RTI_PWR_CNTL_REG0, | ||
| 268 | .mask = 0x1, | ||
| 269 | .shift = 10, | ||
| 270 | .flags = CLK_MUX_ROUND_CLOSEST, | ||
| 271 | }, | ||
| 272 | .hw.init = &(struct clk_init_data){ | ||
| 273 | .name = "g12a_ao_cts_rtc_oscin", | ||
| 274 | .ops = &clk_regmap_mux_ops, | ||
| 275 | .parent_names = (const char *[]){ "g12a_ao_32k_by_oscin", | ||
| 276 | IN_PREFIX "ext_32k-0" }, | ||
| 277 | .num_parents = 2, | ||
| 278 | .flags = CLK_SET_RATE_PARENT, | ||
| 279 | }, | ||
| 280 | }; | ||
| 281 | |||
| 282 | static struct clk_regmap g12a_aoclk_clk81 = { | ||
| 283 | .data = &(struct clk_regmap_mux_data) { | ||
| 284 | .offset = AO_RTI_PWR_CNTL_REG0, | ||
| 285 | .mask = 0x1, | ||
| 286 | .shift = 8, | ||
| 287 | .flags = CLK_MUX_ROUND_CLOSEST, | ||
| 288 | }, | ||
| 289 | .hw.init = &(struct clk_init_data){ | ||
| 290 | .name = "g12a_ao_clk81", | ||
| 291 | .ops = &clk_regmap_mux_ro_ops, | ||
| 292 | .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk", | ||
| 293 | "g12a_ao_cts_rtc_oscin"}, | ||
| 294 | .num_parents = 2, | ||
| 295 | .flags = CLK_SET_RATE_PARENT, | ||
| 296 | }, | ||
| 297 | }; | ||
| 298 | |||
| 299 | static struct clk_regmap g12a_aoclk_saradc_mux = { | ||
| 300 | .data = &(struct clk_regmap_mux_data) { | ||
| 301 | .offset = AO_SAR_CLK, | ||
| 302 | .mask = 0x3, | ||
| 303 | .shift = 9, | ||
| 304 | }, | ||
| 305 | .hw.init = &(struct clk_init_data){ | ||
| 306 | .name = "g12a_ao_saradc_mux", | ||
| 307 | .ops = &clk_regmap_mux_ops, | ||
| 308 | .parent_names = (const char *[]){ IN_PREFIX "xtal", | ||
| 309 | "g12a_ao_clk81" }, | ||
| 310 | .num_parents = 2, | ||
| 311 | }, | ||
| 312 | }; | ||
| 313 | |||
| 314 | static struct clk_regmap g12a_aoclk_saradc_div = { | ||
| 315 | .data = &(struct clk_regmap_div_data) { | ||
| 316 | .offset = AO_SAR_CLK, | ||
| 317 | .shift = 0, | ||
| 318 | .width = 8, | ||
| 319 | }, | ||
| 320 | .hw.init = &(struct clk_init_data){ | ||
| 321 | .name = "g12a_ao_saradc_div", | ||
| 322 | .ops = &clk_regmap_divider_ops, | ||
| 323 | .parent_names = (const char *[]){ "g12a_ao_saradc_mux" }, | ||
| 324 | .num_parents = 1, | ||
| 325 | .flags = CLK_SET_RATE_PARENT, | ||
| 326 | }, | ||
| 327 | }; | ||
| 328 | |||
| 329 | static struct clk_regmap g12a_aoclk_saradc_gate = { | ||
| 330 | .data = &(struct clk_regmap_gate_data) { | ||
| 331 | .offset = AO_SAR_CLK, | ||
| 332 | .bit_idx = 8, | ||
| 333 | }, | ||
| 334 | .hw.init = &(struct clk_init_data){ | ||
| 335 | .name = "g12a_ao_saradc_gate", | ||
| 336 | .ops = &clk_regmap_gate_ops, | ||
| 337 | .parent_names = (const char *[]){ "g12a_ao_saradc_div" }, | ||
| 338 | .num_parents = 1, | ||
| 339 | .flags = CLK_SET_RATE_PARENT, | ||
| 340 | }, | ||
| 341 | }; | ||
| 342 | |||
| 343 | static const unsigned int g12a_aoclk_reset[] = { | ||
| 344 | [RESET_AO_IR_IN] = 16, | ||
| 345 | [RESET_AO_UART] = 17, | ||
| 346 | [RESET_AO_I2C_M] = 18, | ||
| 347 | [RESET_AO_I2C_S] = 19, | ||
| 348 | [RESET_AO_SAR_ADC] = 20, | ||
| 349 | [RESET_AO_UART2] = 22, | ||
| 350 | [RESET_AO_IR_OUT] = 23, | ||
| 351 | }; | ||
| 352 | |||
| 353 | static struct clk_regmap *g12a_aoclk_regmap[] = { | ||
| 354 | &g12a_aoclk_ahb, | ||
| 355 | &g12a_aoclk_ir_in, | ||
| 356 | &g12a_aoclk_i2c_m0, | ||
| 357 | &g12a_aoclk_i2c_s0, | ||
| 358 | &g12a_aoclk_uart, | ||
| 359 | &g12a_aoclk_prod_i2c, | ||
| 360 | &g12a_aoclk_uart2, | ||
| 361 | &g12a_aoclk_ir_out, | ||
| 362 | &g12a_aoclk_saradc, | ||
| 363 | &g12a_aoclk_mailbox, | ||
| 364 | &g12a_aoclk_m3, | ||
| 365 | &g12a_aoclk_ahb_sram, | ||
| 366 | &g12a_aoclk_rti, | ||
| 367 | &g12a_aoclk_m4_fclk, | ||
| 368 | &g12a_aoclk_m4_hclk, | ||
| 369 | &g12a_aoclk_cts_oscin, | ||
| 370 | &g12a_aoclk_32k_by_oscin_pre, | ||
| 371 | &g12a_aoclk_32k_by_oscin_div, | ||
| 372 | &g12a_aoclk_32k_by_oscin_sel, | ||
| 373 | &g12a_aoclk_32k_by_oscin, | ||
| 374 | &g12a_aoclk_cec_pre, | ||
| 375 | &g12a_aoclk_cec_div, | ||
| 376 | &g12a_aoclk_cec_sel, | ||
| 377 | &g12a_aoclk_cec, | ||
| 378 | &g12a_aoclk_cts_rtc_oscin, | ||
| 379 | &g12a_aoclk_clk81, | ||
| 380 | &g12a_aoclk_saradc_mux, | ||
| 381 | &g12a_aoclk_saradc_div, | ||
| 382 | &g12a_aoclk_saradc_gate, | ||
| 383 | }; | ||
| 384 | |||
| 385 | static const struct clk_hw_onecell_data g12a_aoclk_onecell_data = { | ||
| 386 | .hws = { | ||
| 387 | [CLKID_AO_AHB] = &g12a_aoclk_ahb.hw, | ||
| 388 | [CLKID_AO_IR_IN] = &g12a_aoclk_ir_in.hw, | ||
| 389 | [CLKID_AO_I2C_M0] = &g12a_aoclk_i2c_m0.hw, | ||
| 390 | [CLKID_AO_I2C_S0] = &g12a_aoclk_i2c_s0.hw, | ||
| 391 | [CLKID_AO_UART] = &g12a_aoclk_uart.hw, | ||
| 392 | [CLKID_AO_PROD_I2C] = &g12a_aoclk_prod_i2c.hw, | ||
| 393 | [CLKID_AO_UART2] = &g12a_aoclk_uart2.hw, | ||
| 394 | [CLKID_AO_IR_OUT] = &g12a_aoclk_ir_out.hw, | ||
| 395 | [CLKID_AO_SAR_ADC] = &g12a_aoclk_saradc.hw, | ||
| 396 | [CLKID_AO_MAILBOX] = &g12a_aoclk_mailbox.hw, | ||
| 397 | [CLKID_AO_M3] = &g12a_aoclk_m3.hw, | ||
| 398 | [CLKID_AO_AHB_SRAM] = &g12a_aoclk_ahb_sram.hw, | ||
| 399 | [CLKID_AO_RTI] = &g12a_aoclk_rti.hw, | ||
| 400 | [CLKID_AO_M4_FCLK] = &g12a_aoclk_m4_fclk.hw, | ||
| 401 | [CLKID_AO_M4_HCLK] = &g12a_aoclk_m4_hclk.hw, | ||
| 402 | [CLKID_AO_CLK81] = &g12a_aoclk_clk81.hw, | ||
| 403 | [CLKID_AO_SAR_ADC_SEL] = &g12a_aoclk_saradc_mux.hw, | ||
| 404 | [CLKID_AO_SAR_ADC_DIV] = &g12a_aoclk_saradc_div.hw, | ||
| 405 | [CLKID_AO_SAR_ADC_CLK] = &g12a_aoclk_saradc_gate.hw, | ||
| 406 | [CLKID_AO_CTS_OSCIN] = &g12a_aoclk_cts_oscin.hw, | ||
| 407 | [CLKID_AO_32K_PRE] = &g12a_aoclk_32k_by_oscin_pre.hw, | ||
| 408 | [CLKID_AO_32K_DIV] = &g12a_aoclk_32k_by_oscin_div.hw, | ||
| 409 | [CLKID_AO_32K_SEL] = &g12a_aoclk_32k_by_oscin_sel.hw, | ||
| 410 | [CLKID_AO_32K] = &g12a_aoclk_32k_by_oscin.hw, | ||
| 411 | [CLKID_AO_CEC_PRE] = &g12a_aoclk_cec_pre.hw, | ||
| 412 | [CLKID_AO_CEC_DIV] = &g12a_aoclk_cec_div.hw, | ||
| 413 | [CLKID_AO_CEC_SEL] = &g12a_aoclk_cec_sel.hw, | ||
| 414 | [CLKID_AO_CEC] = &g12a_aoclk_cec.hw, | ||
| 415 | [CLKID_AO_CTS_RTC_OSCIN] = &g12a_aoclk_cts_rtc_oscin.hw, | ||
| 416 | }, | ||
| 417 | .num = NR_CLKS, | ||
| 418 | }; | ||
| 419 | |||
| 420 | static const struct meson_aoclk_input g12a_aoclk_inputs[] = { | ||
| 421 | { .name = "xtal", .required = true }, | ||
| 422 | { .name = "mpeg-clk", .required = true }, | ||
| 423 | { .name = "ext-32k-0", .required = false }, | ||
| 424 | }; | ||
| 425 | |||
| 426 | static const struct meson_aoclk_data g12a_aoclkc_data = { | ||
| 427 | .reset_reg = AO_RTI_GEN_CNTL_REG0, | ||
| 428 | .num_reset = ARRAY_SIZE(g12a_aoclk_reset), | ||
| 429 | .reset = g12a_aoclk_reset, | ||
| 430 | .num_clks = ARRAY_SIZE(g12a_aoclk_regmap), | ||
| 431 | .clks = g12a_aoclk_regmap, | ||
| 432 | .hw_data = &g12a_aoclk_onecell_data, | ||
| 433 | .inputs = g12a_aoclk_inputs, | ||
| 434 | .num_inputs = ARRAY_SIZE(g12a_aoclk_inputs), | ||
| 435 | .input_prefix = IN_PREFIX, | ||
| 436 | }; | ||
| 437 | |||
| 438 | static const struct of_device_id g12a_aoclkc_match_table[] = { | ||
| 439 | { | ||
| 440 | .compatible = "amlogic,meson-g12a-aoclkc", | ||
| 441 | .data = &g12a_aoclkc_data, | ||
| 442 | }, | ||
| 443 | { } | ||
| 444 | }; | ||
| 445 | |||
| 446 | static struct platform_driver g12a_aoclkc_driver = { | ||
| 447 | .probe = meson_aoclkc_probe, | ||
| 448 | .driver = { | ||
| 449 | .name = "g12a-aoclkc", | ||
| 450 | .of_match_table = g12a_aoclkc_match_table, | ||
| 451 | }, | ||
| 452 | }; | ||
| 453 | |||
| 454 | builtin_platform_driver(g12a_aoclkc_driver); | ||
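
g12a_32k_div_table programs the dual divider with n1=733/m1=8 and n2=732/m2=11. Assuming the dual divider alternates m1 input cycles divided by n1 with m2 cycles divided by n2 - the rate formula lives in clk-dualdiv.c, which is not part of this hunk - the effective rate from the 24 MHz oscillator comes out within a fraction of a hertz of 32768 Hz, as the quick check below shows.

#include <stdio.h>

int main(void)
{
	unsigned long long parent = 24000000;	/* cts_oscin, i.e. the xtal */
	unsigned int n1 = 733, m1 = 8, n2 = 732, m2 = 11;

	/* rate = parent * (m1 + m2) / (n1 * m1 + n2 * m2) */
	unsigned long long rate = parent * (m1 + m2) / (n1 * m1 + n2 * m2);

	printf("%llu Hz\n", rate);	/* prints 32768 */
	return 0;
}
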
diff --git a/drivers/clk/meson/g12a-aoclk.h b/drivers/clk/meson/g12a-aoclk.h new file mode 100644 index 000000000000..04b0d5506641 --- /dev/null +++ b/drivers/clk/meson/g12a-aoclk.h | |||
| @@ -0,0 +1,34 @@ | |||
| 1 | /* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2019 BayLibre, SAS | ||
| 4 | * Author: Neil Armstrong <narmstrong@baylibre.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __G12A_AOCLKC_H | ||
| 8 | #define __G12A_AOCLKC_H | ||
| 9 | |||
| 10 | /* | ||
| 11 | * CLKID index values | ||
| 12 | * | ||
| 13 | * These indices are entirely contrived and do not map onto the hardware. | ||
| 14 | * It has now been decided to expose everything by default in the DT header: | ||
| 15 | * include/dt-bindings/clock/g12a-aoclkc.h. Only the clock ids we don't want | ||
| 16 | * to expose, such as the internal muxes and dividers of composite clocks, | ||
| 17 | * will remain defined here. | ||
| 18 | */ | ||
| 19 | #define CLKID_AO_SAR_ADC_SEL 16 | ||
| 20 | #define CLKID_AO_SAR_ADC_DIV 17 | ||
| 21 | #define CLKID_AO_CTS_OSCIN 19 | ||
| 22 | #define CLKID_AO_32K_PRE 20 | ||
| 23 | #define CLKID_AO_32K_DIV 21 | ||
| 24 | #define CLKID_AO_32K_SEL 22 | ||
| 25 | #define CLKID_AO_CEC_PRE 24 | ||
| 26 | #define CLKID_AO_CEC_DIV 25 | ||
| 27 | #define CLKID_AO_CEC_SEL 26 | ||
| 28 | |||
| 29 | #define NR_CLKS 29 | ||
| 30 | |||
| 31 | #include <dt-bindings/clock/g12a-aoclkc.h> | ||
| 32 | #include <dt-bindings/reset/g12a-aoclkc.h> | ||
| 33 | |||
| 34 | #endif /* __G12A_AOCLKC_H */ | ||
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c new file mode 100644 index 000000000000..0e1ce8c03259 --- /dev/null +++ b/drivers/clk/meson/g12a.c | |||
| @@ -0,0 +1,2359 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * Amlogic Meson-G12A Clock Controller Driver | ||
| 4 | * | ||
| 5 | * Copyright (c) 2016 Baylibre SAS. | ||
| 6 | * Author: Michael Turquette <mturquette@baylibre.com> | ||
| 7 | * | ||
| 8 | * Copyright (c) 2018 Amlogic, inc. | ||
| 9 | * Author: Qiufang Dai <qiufang.dai@amlogic.com> | ||
| 10 | * Author: Jian Hu <jian.hu@amlogic.com> | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/clk-provider.h> | ||
| 14 | #include <linux/init.h> | ||
| 15 | #include <linux/of_device.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | |||
| 18 | #include "clk-input.h" | ||
| 19 | #include "clk-mpll.h" | ||
| 20 | #include "clk-pll.h" | ||
| 21 | #include "clk-regmap.h" | ||
| 22 | #include "vid-pll-div.h" | ||
| 23 | #include "meson-eeclk.h" | ||
| 24 | #include "g12a.h" | ||
| 25 | |||
| 26 | static DEFINE_SPINLOCK(meson_clk_lock); | ||
| 27 | |||
| 28 | static struct clk_regmap g12a_fixed_pll_dco = { | ||
| 29 | .data = &(struct meson_clk_pll_data){ | ||
| 30 | .en = { | ||
| 31 | .reg_off = HHI_FIX_PLL_CNTL0, | ||
| 32 | .shift = 28, | ||
| 33 | .width = 1, | ||
| 34 | }, | ||
| 35 | .m = { | ||
| 36 | .reg_off = HHI_FIX_PLL_CNTL0, | ||
| 37 | .shift = 0, | ||
| 38 | .width = 8, | ||
| 39 | }, | ||
| 40 | .n = { | ||
| 41 | .reg_off = HHI_FIX_PLL_CNTL0, | ||
| 42 | .shift = 10, | ||
| 43 | .width = 5, | ||
| 44 | }, | ||
| 45 | .frac = { | ||
| 46 | .reg_off = HHI_FIX_PLL_CNTL1, | ||
| 47 | .shift = 0, | ||
| 48 | .width = 17, | ||
| 49 | }, | ||
| 50 | .l = { | ||
| 51 | .reg_off = HHI_FIX_PLL_CNTL0, | ||
| 52 | .shift = 31, | ||
| 53 | .width = 1, | ||
| 54 | }, | ||
| 55 | .rst = { | ||
| 56 | .reg_off = HHI_FIX_PLL_CNTL0, | ||
| 57 | .shift = 29, | ||
| 58 | .width = 1, | ||
| 59 | }, | ||
| 60 | }, | ||
| 61 | .hw.init = &(struct clk_init_data){ | ||
| 62 | .name = "fixed_pll_dco", | ||
| 63 | .ops = &meson_clk_pll_ro_ops, | ||
| 64 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, | ||
| 65 | .num_parents = 1, | ||
| 66 | }, | ||
| 67 | }; | ||
| 68 | |||
| 69 | static struct clk_regmap g12a_fixed_pll = { | ||
| 70 | .data = &(struct clk_regmap_div_data){ | ||
| 71 | .offset = HHI_FIX_PLL_CNTL0, | ||
| 72 | .shift = 16, | ||
| 73 | .width = 2, | ||
| 74 | .flags = CLK_DIVIDER_POWER_OF_TWO, | ||
| 75 | }, | ||
| 76 | .hw.init = &(struct clk_init_data){ | ||
| 77 | .name = "fixed_pll", | ||
| 78 | .ops = &clk_regmap_divider_ro_ops, | ||
| 79 | .parent_names = (const char *[]){ "fixed_pll_dco" }, | ||
| 80 | .num_parents = 1, | ||
| 81 | /* | ||
| 82 | * This clock won't ever change at runtime so | ||
| 83 | * CLK_SET_RATE_PARENT is not required | ||
| 84 | */ | ||
| 85 | }, | ||
| 86 | }; | ||
| 87 | |||
| 88 | /* | ||
| 89 | * Internal sys pll emulation configuration parameters | ||
| 90 | */ | ||
| 91 | static const struct reg_sequence g12a_sys_init_regs[] = { | ||
| 92 | { .reg = HHI_SYS_PLL_CNTL1, .def = 0x00000000 }, | ||
| 93 | { .reg = HHI_SYS_PLL_CNTL2, .def = 0x00000000 }, | ||
| 94 | { .reg = HHI_SYS_PLL_CNTL3, .def = 0x48681c00 }, | ||
| 95 | { .reg = HHI_SYS_PLL_CNTL4, .def = 0x88770290 }, | ||
| 96 | { .reg = HHI_SYS_PLL_CNTL5, .def = 0x39272000 }, | ||
| 97 | { .reg = HHI_SYS_PLL_CNTL6, .def = 0x56540000 }, | ||
| 98 | }; | ||
| 99 | |||
| 100 | static struct clk_regmap g12a_sys_pll_dco = { | ||
| 101 | .data = &(struct meson_clk_pll_data){ | ||
| 102 | .en = { | ||
| 103 | .reg_off = HHI_SYS_PLL_CNTL0, | ||
| 104 | .shift = 28, | ||
| 105 | .width = 1, | ||
| 106 | }, | ||
| 107 | .m = { | ||
| 108 | .reg_off = HHI_SYS_PLL_CNTL0, | ||
| 109 | .shift = 0, | ||
| 110 | .width = 8, | ||
| 111 | }, | ||
| 112 | .n = { | ||
| 113 | .reg_off = HHI_SYS_PLL_CNTL0, | ||
| 114 | .shift = 10, | ||
| 115 | .width = 5, | ||
| 116 | }, | ||
| 117 | .l = { | ||
| 118 | .reg_off = HHI_SYS_PLL_CNTL0, | ||
| 119 | .shift = 31, | ||
| 120 | .width = 1, | ||
| 121 | }, | ||
| 122 | .rst = { | ||
| 123 | .reg_off = HHI_SYS_PLL_CNTL0, | ||
| 124 | .shift = 29, | ||
| 125 | .width = 1, | ||
| 126 | }, | ||
| 127 | .init_regs = g12a_sys_init_regs, | ||
| 128 | .init_count = ARRAY_SIZE(g12a_sys_init_regs), | ||
| 129 | }, | ||
| 130 | .hw.init = &(struct clk_init_data){ | ||
| 131 | .name = "sys_pll_dco", | ||
| 132 | .ops = &meson_clk_pll_ro_ops, | ||
| 133 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, | ||
| 134 | .num_parents = 1, | ||
| 135 | }, | ||
| 136 | }; | ||
| 137 | |||
| 138 | static struct clk_regmap g12a_sys_pll = { | ||
| 139 | .data = &(struct clk_regmap_div_data){ | ||
| 140 | .offset = HHI_SYS_PLL_CNTL0, | ||
| 141 | .shift = 16, | ||
| 142 | .width = 3, | ||
| 143 | .flags = CLK_DIVIDER_POWER_OF_TWO, | ||
| 144 | }, | ||
| 145 | .hw.init = &(struct clk_init_data){ | ||
| 146 | .name = "sys_pll", | ||
| 147 | .ops = &clk_regmap_divider_ro_ops, | ||
| 148 | .parent_names = (const char *[]){ "sys_pll_dco" }, | ||
| 149 | .num_parents = 1, | ||
| 150 | }, | ||
| 151 | }; | ||
| 152 | |||
| 153 | static const struct pll_mult_range g12a_gp0_pll_mult_range = { | ||
| 154 | .min = 55, | ||
| 155 | .max = 255, | ||
| 156 | }; | ||
| 157 | |||
| 158 | /* | ||
| 159 | * Internal gp0 pll emulation configuration parameters | ||
| 160 | */ | ||
| 161 | static const struct reg_sequence g12a_gp0_init_regs[] = { | ||
| 162 | { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 }, | ||
| 163 | { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 }, | ||
| 164 | { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 }, | ||
| 165 | { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 }, | ||
| 166 | { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 }, | ||
| 167 | { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 }, | ||
| 168 | }; | ||
| 169 | |||
| 170 | static struct clk_regmap g12a_gp0_pll_dco = { | ||
| 171 | .data = &(struct meson_clk_pll_data){ | ||
| 172 | .en = { | ||
| 173 | .reg_off = HHI_GP0_PLL_CNTL0, | ||
| 174 | .shift = 28, | ||
| 175 | .width = 1, | ||
| 176 | }, | ||
| 177 | .m = { | ||
| 178 | .reg_off = HHI_GP0_PLL_CNTL0, | ||
| 179 | .shift = 0, | ||
| 180 | .width = 8, | ||
| 181 | }, | ||
| 182 | .n = { | ||
| 183 | .reg_off = HHI_GP0_PLL_CNTL0, | ||
| 184 | .shift = 10, | ||
| 185 | .width = 5, | ||
| 186 | }, | ||
| 187 | .frac = { | ||
| 188 | .reg_off = HHI_GP0_PLL_CNTL1, | ||
| 189 | .shift = 0, | ||
| 190 | .width = 17, | ||
| 191 | }, | ||
| 192 | .l = { | ||
| 193 | .reg_off = HHI_GP0_PLL_CNTL0, | ||
| 194 | .shift = 31, | ||
| 195 | .width = 1, | ||
| 196 | }, | ||
| 197 | .rst = { | ||
| 198 | .reg_off = HHI_GP0_PLL_CNTL0, | ||
| 199 | .shift = 29, | ||
| 200 | .width = 1, | ||
| 201 | }, | ||
| 202 | .range = &g12a_gp0_pll_mult_range, | ||
| 203 | .init_regs = g12a_gp0_init_regs, | ||
| 204 | .init_count = ARRAY_SIZE(g12a_gp0_init_regs), | ||
| 205 | }, | ||
| 206 | .hw.init = &(struct clk_init_data){ | ||
| 207 | .name = "gp0_pll_dco", | ||
| 208 | .ops = &meson_clk_pll_ops, | ||
| 209 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, | ||
| 210 | .num_parents = 1, | ||
| 211 | }, | ||
| 212 | }; | ||
| 213 | |||
| 214 | static struct clk_regmap g12a_gp0_pll = { | ||
| 215 | .data = &(struct clk_regmap_div_data){ | ||
| 216 | .offset = HHI_GP0_PLL_CNTL0, | ||
| 217 | .shift = 16, | ||
| 218 | .width = 3, | ||
| 219 | .flags = (CLK_DIVIDER_POWER_OF_TWO | | ||
| 220 | CLK_DIVIDER_ROUND_CLOSEST), | ||
| 221 | }, | ||
| 222 | .hw.init = &(struct clk_init_data){ | ||
| 223 | .name = "gp0_pll", | ||
| 224 | .ops = &clk_regmap_divider_ops, | ||
| 225 | .parent_names = (const char *[]){ "gp0_pll_dco" }, | ||
| 226 | .num_parents = 1, | ||
| 227 | .flags = CLK_SET_RATE_PARENT, | ||
| 228 | }, | ||
| 229 | }; | ||
| 230 | |||
| 231 | /* | ||
| 232 | * Internal hifi pll emulation configuration parameters | ||
| 233 | */ | ||
| 234 | static const struct reg_sequence g12a_hifi_init_regs[] = { | ||
| 235 | { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 }, | ||
| 236 | { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 }, | ||
| 237 | { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 }, | ||
| 238 | { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 }, | ||
| 239 | { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 }, | ||
| 240 | { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 }, | ||
| 241 | }; | ||
| 242 | |||
| 243 | static struct clk_regmap g12a_hifi_pll_dco = { | ||
| 244 | .data = &(struct meson_clk_pll_data){ | ||
| 245 | .en = { | ||
| 246 | .reg_off = HHI_HIFI_PLL_CNTL0, | ||
| 247 | .shift = 28, | ||
| 248 | .width = 1, | ||
| 249 | }, | ||
| 250 | .m = { | ||
| 251 | .reg_off = HHI_HIFI_PLL_CNTL0, | ||
| 252 | .shift = 0, | ||
| 253 | .width = 8, | ||
| 254 | }, | ||
| 255 | .n = { | ||
| 256 | .reg_off = HHI_HIFI_PLL_CNTL0, | ||
| 257 | .shift = 10, | ||
| 258 | .width = 5, | ||
| 259 | }, | ||
| 260 | .frac = { | ||
| 261 | .reg_off = HHI_HIFI_PLL_CNTL1, | ||
| 262 | .shift = 0, | ||
| 263 | .width = 17, | ||
| 264 | }, | ||
| 265 | .l = { | ||
| 266 | .reg_off = HHI_HIFI_PLL_CNTL0, | ||
| 267 | .shift = 31, | ||
| 268 | .width = 1, | ||
| 269 | }, | ||
| 270 | .rst = { | ||
| 271 | .reg_off = HHI_HIFI_PLL_CNTL0, | ||
| 272 | .shift = 29, | ||
| 273 | .width = 1, | ||
| 274 | }, | ||
| 275 | .range = &g12a_gp0_pll_mult_range, | ||
| 276 | .init_regs = g12a_hifi_init_regs, | ||
| 277 | .init_count = ARRAY_SIZE(g12a_hifi_init_regs), | ||
| 278 | .flags = CLK_MESON_PLL_ROUND_CLOSEST, | ||
| 279 | }, | ||
| 280 | .hw.init = &(struct clk_init_data){ | ||
| 281 | .name = "hifi_pll_dco", | ||
| 282 | .ops = &meson_clk_pll_ops, | ||
| 283 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, | ||
| 284 | .num_parents = 1, | ||
| 285 | }, | ||
| 286 | }; | ||
| 287 | |||
| 288 | static struct clk_regmap g12a_hifi_pll = { | ||
| 289 | .data = &(struct clk_regmap_div_data){ | ||
| 290 | .offset = HHI_HIFI_PLL_CNTL0, | ||
| 291 | .shift = 16, | ||
| 292 | .width = 2, | ||
| 293 | .flags = (CLK_DIVIDER_POWER_OF_TWO | | ||
| 294 | CLK_DIVIDER_ROUND_CLOSEST), | ||
| 295 | }, | ||
| 296 | .hw.init = &(struct clk_init_data){ | ||
| 297 | .name = "hifi_pll", | ||
| 298 | .ops = &clk_regmap_divider_ops, | ||
| 299 | .parent_names = (const char *[]){ "hifi_pll_dco" }, | ||
| 300 | .num_parents = 1, | ||
| 301 | .flags = CLK_SET_RATE_PARENT, | ||
| 302 | }, | ||
| 303 | }; | ||
| 304 | |||
| 305 | static struct clk_regmap g12a_hdmi_pll_dco = { | ||
| 306 | .data = &(struct meson_clk_pll_data){ | ||
| 307 | .en = { | ||
| 308 | .reg_off = HHI_HDMI_PLL_CNTL0, | ||
| 309 | .shift = 28, | ||
| 310 | .width = 1, | ||
| 311 | }, | ||
| 312 | .m = { | ||
| 313 | .reg_off = HHI_HDMI_PLL_CNTL0, | ||
| 314 | .shift = 0, | ||
| 315 | .width = 8, | ||
| 316 | }, | ||
| 317 | .n = { | ||
| 318 | .reg_off = HHI_HDMI_PLL_CNTL0, | ||
| 319 | .shift = 10, | ||
| 320 | .width = 5, | ||
| 321 | }, | ||
| 322 | .frac = { | ||
| 323 | .reg_off = HHI_HDMI_PLL_CNTL1, | ||
| 324 | .shift = 0, | ||
| 325 | .width = 16, | ||
| 326 | }, | ||
| 327 | .l = { | ||
| 328 | .reg_off = HHI_HDMI_PLL_CNTL0, | ||
| 329 | .shift = 30, | ||
| 330 | .width = 1, | ||
| 331 | }, | ||
| 332 | .rst = { | ||
| 333 | .reg_off = HHI_HDMI_PLL_CNTL0, | ||
| 334 | .shift = 29, | ||
| 335 | .width = 1, | ||
| 336 | }, | ||
| 337 | }, | ||
| 338 | .hw.init = &(struct clk_init_data){ | ||
| 339 | .name = "hdmi_pll_dco", | ||
| 340 | .ops = &meson_clk_pll_ro_ops, | ||
| 341 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, | ||
| 342 | .num_parents = 1, | ||
| 343 | /* | ||
| 344 | * The display driver directly handles the hdmi pll registers ATM, so we need | ||
| 345 | * NOCACHE to keep our view of the clock as accurate as possible | ||
| 346 | */ | ||
| 347 | .flags = CLK_GET_RATE_NOCACHE, | ||
| 348 | }, | ||
| 349 | }; | ||
| 350 | |||
| 351 | static struct clk_regmap g12a_hdmi_pll_od = { | ||
| 352 | .data = &(struct clk_regmap_div_data){ | ||
| 353 | .offset = HHI_HDMI_PLL_CNTL0, | ||
| 354 | .shift = 16, | ||
| 355 | .width = 2, | ||
| 356 | .flags = CLK_DIVIDER_POWER_OF_TWO, | ||
| 357 | }, | ||
| 358 | .hw.init = &(struct clk_init_data){ | ||
| 359 | .name = "hdmi_pll_od", | ||
| 360 | .ops = &clk_regmap_divider_ro_ops, | ||
| 361 | .parent_names = (const char *[]){ "hdmi_pll_dco" }, | ||
| 362 | .num_parents = 1, | ||
| 363 | .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT, | ||
| 364 | }, | ||
| 365 | }; | ||
| 366 | |||
| 367 | static struct clk_regmap g12a_hdmi_pll_od2 = { | ||
| 368 | .data = &(struct clk_regmap_div_data){ | ||
| 369 | .offset = HHI_HDMI_PLL_CNTL0, | ||
| 370 | .shift = 18, | ||
| 371 | .width = 2, | ||
| 372 | .flags = CLK_DIVIDER_POWER_OF_TWO, | ||
| 373 | }, | ||
| 374 | .hw.init = &(struct clk_init_data){ | ||
| 375 | .name = "hdmi_pll_od2", | ||
| 376 | .ops = &clk_regmap_divider_ro_ops, | ||
| 377 | .parent_names = (const char *[]){ "hdmi_pll_od" }, | ||
| 378 | .num_parents = 1, | ||
| 379 | .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT, | ||
| 380 | }, | ||
| 381 | }; | ||
| 382 | |||
| 383 | static struct clk_regmap g12a_hdmi_pll = { | ||
| 384 | .data = &(struct clk_regmap_div_data){ | ||
| 385 | .offset = HHI_HDMI_PLL_CNTL0, | ||
| 386 | .shift = 20, | ||
| 387 | .width = 2, | ||
| 388 | .flags = CLK_DIVIDER_POWER_OF_TWO, | ||
| 389 | }, | ||
| 390 | .hw.init = &(struct clk_init_data){ | ||
| 391 | .name = "hdmi_pll", | ||
| 392 | .ops = &clk_regmap_divider_ro_ops, | ||
| 393 | .parent_names = (const char *[]){ "hdmi_pll_od2" }, | ||
| 394 | .num_parents = 1, | ||
| 395 | .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT, | ||
| 396 | }, | ||
| 397 | }; | ||
| 398 | |||
| 399 | static struct clk_fixed_factor g12a_fclk_div2_div = { | ||
| 400 | .mult = 1, | ||
| 401 | .div = 2, | ||
| 402 | .hw.init = &(struct clk_init_data){ | ||
| 403 | .name = "fclk_div2_div", | ||
| 404 | .ops = &clk_fixed_factor_ops, | ||
| 405 | .parent_names = (const char *[]){ "fixed_pll" }, | ||
| 406 | .num_parents = 1, | ||
| 407 | }, | ||
| 408 | }; | ||
| 409 | |||
| 410 | static struct clk_regmap g12a_fclk_div2 = { | ||
| 411 | .data = &(struct clk_regmap_gate_data){ | ||
| 412 | .offset = HHI_FIX_PLL_CNTL1, | ||
| 413 | .bit_idx = 24, | ||
| 414 | }, | ||
| 415 | .hw.init = &(struct clk_init_data){ | ||
| 416 | .name = "fclk_div2", | ||
| 417 | .ops = &clk_regmap_gate_ops, | ||
| 418 | .parent_names = (const char *[]){ "fclk_div2_div" }, | ||
| 419 | .num_parents = 1, | ||
| 420 | }, | ||
| 421 | }; | ||
| 422 | |||
| 423 | static struct clk_fixed_factor g12a_fclk_div3_div = { | ||
| 424 | .mult = 1, | ||
| 425 | .div = 3, | ||
| 426 | .hw.init = &(struct clk_init_data){ | ||
| 427 | .name = "fclk_div3_div", | ||
| 428 | .ops = &clk_fixed_factor_ops, | ||
| 429 | .parent_names = (const char *[]){ "fixed_pll" }, | ||
| 430 | .num_parents = 1, | ||
| 431 | }, | ||
| 432 | }; | ||
| 433 | |||
| 434 | static struct clk_regmap g12a_fclk_div3 = { | ||
| 435 | .data = &(struct clk_regmap_gate_data){ | ||
| 436 | .offset = HHI_FIX_PLL_CNTL1, | ||
| 437 | .bit_idx = 20, | ||
| 438 | }, | ||
| 439 | .hw.init = &(struct clk_init_data){ | ||
| 440 | .name = "fclk_div3", | ||
| 441 | .ops = &clk_regmap_gate_ops, | ||
| 442 | .parent_names = (const char *[]){ "fclk_div3_div" }, | ||
| 443 | .num_parents = 1, | ||
| 444 | }, | ||
| 445 | }; | ||
| 446 | |||
| 447 | static struct clk_fixed_factor g12a_fclk_div4_div = { | ||
| 448 | .mult = 1, | ||
| 449 | .div = 4, | ||
| 450 | .hw.init = &(struct clk_init_data){ | ||
| 451 | .name = "fclk_div4_div", | ||
| 452 | .ops = &clk_fixed_factor_ops, | ||
| 453 | .parent_names = (const char *[]){ "fixed_pll" }, | ||
| 454 | .num_parents = 1, | ||
| 455 | }, | ||
| 456 | }; | ||
| 457 | |||
| 458 | static struct clk_regmap g12a_fclk_div4 = { | ||
| 459 | .data = &(struct clk_regmap_gate_data){ | ||
| 460 | .offset = HHI_FIX_PLL_CNTL1, | ||
| 461 | .bit_idx = 21, | ||
| 462 | }, | ||
| 463 | .hw.init = &(struct clk_init_data){ | ||
| 464 | .name = "fclk_div4", | ||
| 465 | .ops = &clk_regmap_gate_ops, | ||
| 466 | .parent_names = (const char *[]){ "fclk_div4_div" }, | ||
| 467 | .num_parents = 1, | ||
| 468 | }, | ||
| 469 | }; | ||
| 470 | |||
| 471 | static struct clk_fixed_factor g12a_fclk_div5_div = { | ||
| 472 | .mult = 1, | ||
| 473 | .div = 5, | ||
| 474 | .hw.init = &(struct clk_init_data){ | ||
| 475 | .name = "fclk_div5_div", | ||
| 476 | .ops = &clk_fixed_factor_ops, | ||
| 477 | .parent_names = (const char *[]){ "fixed_pll" }, | ||
| 478 | .num_parents = 1, | ||
| 479 | }, | ||
| 480 | }; | ||
| 481 | |||
| 482 | static struct clk_regmap g12a_fclk_div5 = { | ||
| 483 | .data = &(struct clk_regmap_gate_data){ | ||
| 484 | .offset = HHI_FIX_PLL_CNTL1, | ||
| 485 | .bit_idx = 22, | ||
| 486 | }, | ||
| 487 | .hw.init = &(struct clk_init_data){ | ||
| 488 | .name = "fclk_div5", | ||
| 489 | .ops = &clk_regmap_gate_ops, | ||
| 490 | .parent_names = (const char *[]){ "fclk_div5_div" }, | ||
| 491 | .num_parents = 1, | ||
| 492 | }, | ||
| 493 | }; | ||
| 494 | |||
| 495 | static struct clk_fixed_factor g12a_fclk_div7_div = { | ||
| 496 | .mult = 1, | ||
| 497 | .div = 7, | ||
| 498 | .hw.init = &(struct clk_init_data){ | ||
| 499 | .name = "fclk_div7_div", | ||
| 500 | .ops = &clk_fixed_factor_ops, | ||
| 501 | .parent_names = (const char *[]){ "fixed_pll" }, | ||
| 502 | .num_parents = 1, | ||
| 503 | }, | ||
| 504 | }; | ||
| 505 | |||
| 506 | static struct clk_regmap g12a_fclk_div7 = { | ||
| 507 | .data = &(struct clk_regmap_gate_data){ | ||
| 508 | .offset = HHI_FIX_PLL_CNTL1, | ||
| 509 | .bit_idx = 23, | ||
| 510 | }, | ||
| 511 | .hw.init = &(struct clk_init_data){ | ||
| 512 | .name = "fclk_div7", | ||
| 513 | .ops = &clk_regmap_gate_ops, | ||
| 514 | .parent_names = (const char *[]){ "fclk_div7_div" }, | ||
| 515 | .num_parents = 1, | ||
| 516 | }, | ||
| 517 | }; | ||
| 518 | |||
| 519 | static struct clk_fixed_factor g12a_fclk_div2p5_div = { | ||
| 520 | .mult = 1, | ||
| 521 | .div = 5, | ||
| 522 | .hw.init = &(struct clk_init_data){ | ||
| 523 | .name = "fclk_div2p5_div", | ||
| 524 | .ops = &clk_fixed_factor_ops, | ||
| 525 | .parent_names = (const char *[]){ "fixed_pll_dco" }, | ||
| 526 | .num_parents = 1, | ||
| 527 | }, | ||
| 528 | }; | ||
| 529 | |||
| 530 | static struct clk_regmap g12a_fclk_div2p5 = { | ||
| 531 | .data = &(struct clk_regmap_gate_data){ | ||
| 532 | .offset = HHI_FIX_PLL_CNTL1, | ||
| 533 | .bit_idx = 25, | ||
| 534 | }, | ||
| 535 | .hw.init = &(struct clk_init_data){ | ||
| 536 | .name = "fclk_div2p5", | ||
| 537 | .ops = &clk_regmap_gate_ops, | ||
| 538 | .parent_names = (const char *[]){ "fclk_div2p5_div" }, | ||
| 539 | .num_parents = 1, | ||
| 540 | }, | ||
| 541 | }; | ||
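Note: fclk_div2p5_div divides "fixed_pll_dco" rather than "fixed_pll". Assuming the usual G12A setup where the DCO runs at twice the post-divider rate (4 GHz vs 2 GHz), a divide-by-5 of the DCO equals fixed_pll / 2.5, which is where the clock's name comes from:

    fclk_div2p5 = fixed_pll_dco / 5 = (2 * fixed_pll) / 5 = fixed_pll / 2.5   (e.g. 4 GHz / 5 = 800 MHz)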
| 542 | |||
| 543 | static struct clk_fixed_factor g12a_mpll_50m_div = { | ||
| 544 | .mult = 1, | ||
| 545 | .div = 80, | ||
| 546 | .hw.init = &(struct clk_init_data){ | ||
| 547 | .name = "mpll_50m_div", | ||
| 548 | .ops = &clk_fixed_factor_ops, | ||
| 549 | .parent_names = (const char *[]){ "fixed_pll_dco" }, | ||
| 550 | .num_parents = 1, | ||
| 551 | }, | ||
| 552 | }; | ||
| 553 | |||
| 554 | static struct clk_regmap g12a_mpll_50m = { | ||
| 555 | .data = &(struct clk_regmap_mux_data){ | ||
| 556 | .offset = HHI_FIX_PLL_CNTL3, | ||
| 557 | .mask = 0x1, | ||
| 558 | .shift = 5, | ||
| 559 | }, | ||
| 560 | .hw.init = &(struct clk_init_data){ | ||
| 561 | .name = "mpll_50m", | ||
| 562 | .ops = &clk_regmap_mux_ro_ops, | ||
| 563 | .parent_names = (const char *[]){ IN_PREFIX "xtal", | ||
| 564 | "mpll_50m_div" }, | ||
| 565 | .num_parents = 2, | ||
| 566 | }, | ||
| 567 | }; | ||
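For reference, with the same assumed 4 GHz fixed_pll_dco, the divide-by-80 lands exactly on 50 MHz (4,000,000,000 / 80 = 50,000,000). The mux that follows uses the read-only ops, so it only reports whether firmware selected the 50 MHz branch or the xtal input at HHI_FIX_PLL_CNTL3 bit 5.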
| 568 | |||
| 569 | static struct clk_fixed_factor g12a_mpll_prediv = { | ||
| 570 | .mult = 1, | ||
| 571 | .div = 2, | ||
| 572 | .hw.init = &(struct clk_init_data){ | ||
| 573 | .name = "mpll_prediv", | ||
| 574 | .ops = &clk_fixed_factor_ops, | ||
| 575 | .parent_names = (const char *[]){ "fixed_pll_dco" }, | ||
| 576 | .num_parents = 1, | ||
| 577 | }, | ||
| 578 | }; | ||
| 579 | |||
| 580 | static struct clk_regmap g12a_mpll0_div = { | ||
| 581 | .data = &(struct meson_clk_mpll_data){ | ||
| 582 | .sdm = { | ||
| 583 | .reg_off = HHI_MPLL_CNTL1, | ||
| 584 | .shift = 0, | ||
| 585 | .width = 14, | ||
| 586 | }, | ||
| 587 | .sdm_en = { | ||
| 588 | .reg_off = HHI_MPLL_CNTL1, | ||
| 589 | .shift = 30, | ||
| 590 | .width = 1, | ||
| 591 | }, | ||
| 592 | .n2 = { | ||
| 593 | .reg_off = HHI_MPLL_CNTL1, | ||
| 594 | .shift = 20, | ||
| 595 | .width = 9, | ||
| 596 | }, | ||
| 597 | .ssen = { | ||
| 598 | .reg_off = HHI_MPLL_CNTL1, | ||
| 599 | .shift = 29, | ||
| 600 | .width = 1, | ||
| 601 | }, | ||
| 602 | .lock = &meson_clk_lock, | ||
| 603 | }, | ||
| 604 | .hw.init = &(struct clk_init_data){ | ||
| 605 | .name = "mpll0_div", | ||
| 606 | .ops = &meson_clk_mpll_ops, | ||
| 607 | .parent_names = (const char *[]){ "mpll_prediv" }, | ||
| 608 | .num_parents = 1, | ||
| 609 | }, | ||
| 610 | }; | ||
| 611 | |||
| 612 | static struct clk_regmap g12a_mpll0 = { | ||
| 613 | .data = &(struct clk_regmap_gate_data){ | ||
| 614 | .offset = HHI_MPLL_CNTL1, | ||
| 615 | .bit_idx = 31, | ||
| 616 | }, | ||
| 617 | .hw.init = &(struct clk_init_data){ | ||
| 618 | .name = "mpll0", | ||
| 619 | .ops = &clk_regmap_gate_ops, | ||
| 620 | .parent_names = (const char *[]){ "mpll0_div" }, | ||
| 621 | .num_parents = 1, | ||
| 622 | .flags = CLK_SET_RATE_PARENT, | ||
| 623 | }, | ||
| 624 | }; | ||
| 625 | |||
| 626 | static struct clk_regmap g12a_mpll1_div = { | ||
| 627 | .data = &(struct meson_clk_mpll_data){ | ||
| 628 | .sdm = { | ||
| 629 | .reg_off = HHI_MPLL_CNTL3, | ||
| 630 | .shift = 0, | ||
| 631 | .width = 14, | ||
| 632 | }, | ||
| 633 | .sdm_en = { | ||
| 634 | .reg_off = HHI_MPLL_CNTL3, | ||
| 635 | .shift = 30, | ||
| 636 | .width = 1, | ||
| 637 | }, | ||
| 638 | .n2 = { | ||
| 639 | .reg_off = HHI_MPLL_CNTL3, | ||
| 640 | .shift = 20, | ||
| 641 | .width = 9, | ||
| 642 | }, | ||
| 643 | .ssen = { | ||
| 644 | .reg_off = HHI_MPLL_CNTL3, | ||
| 645 | .shift = 29, | ||
| 646 | .width = 1, | ||
| 647 | }, | ||
| 648 | .lock = &meson_clk_lock, | ||
| 649 | }, | ||
| 650 | .hw.init = &(struct clk_init_data){ | ||
| 651 | .name = "mpll1_div", | ||
| 652 | .ops = &meson_clk_mpll_ops, | ||
| 653 | .parent_names = (const char *[]){ "mpll_prediv" }, | ||
| 654 | .num_parents = 1, | ||
| 655 | }, | ||
| 656 | }; | ||
| 657 | |||
| 658 | static struct clk_regmap g12a_mpll1 = { | ||
| 659 | .data = &(struct clk_regmap_gate_data){ | ||
| 660 | .offset = HHI_MPLL_CNTL3, | ||
| 661 | .bit_idx = 31, | ||
| 662 | }, | ||
| 663 | .hw.init = &(struct clk_init_data){ | ||
| 664 | .name = "mpll1", | ||
| 665 | .ops = &clk_regmap_gate_ops, | ||
| 666 | .parent_names = (const char *[]){ "mpll1_div" }, | ||
| 667 | .num_parents = 1, | ||
| 668 | .flags = CLK_SET_RATE_PARENT, | ||
| 669 | }, | ||
| 670 | }; | ||
| 671 | |||
| 672 | static struct clk_regmap g12a_mpll2_div = { | ||
| 673 | .data = &(struct meson_clk_mpll_data){ | ||
| 674 | .sdm = { | ||
| 675 | .reg_off = HHI_MPLL_CNTL5, | ||
| 676 | .shift = 0, | ||
| 677 | .width = 14, | ||
| 678 | }, | ||
| 679 | .sdm_en = { | ||
| 680 | .reg_off = HHI_MPLL_CNTL5, | ||
| 681 | .shift = 30, | ||
| 682 | .width = 1, | ||
| 683 | }, | ||
| 684 | .n2 = { | ||
| 685 | .reg_off = HHI_MPLL_CNTL5, | ||
| 686 | .shift = 20, | ||
| 687 | .width = 9, | ||
| 688 | }, | ||
| 689 | .ssen = { | ||
| 690 | .reg_off = HHI_MPLL_CNTL5, | ||
| 691 | .shift = 29, | ||
| 692 | .width = 1, | ||
| 693 | }, | ||
| 694 | .lock = &meson_clk_lock, | ||
| 695 | }, | ||
| 696 | .hw.init = &(struct clk_init_data){ | ||
| 697 | .name = "mpll2_div", | ||
| 698 | .ops = &meson_clk_mpll_ops, | ||
| 699 | .parent_names = (const char *[]){ "mpll_prediv" }, | ||
| 700 | .num_parents = 1, | ||
| 701 | }, | ||
| 702 | }; | ||
| 703 | |||
| 704 | static struct clk_regmap g12a_mpll2 = { | ||
| 705 | .data = &(struct clk_regmap_gate_data){ | ||
| 706 | .offset = HHI_MPLL_CNTL5, | ||
| 707 | .bit_idx = 31, | ||
| 708 | }, | ||
| 709 | .hw.init = &(struct clk_init_data){ | ||
| 710 | .name = "mpll2", | ||
| 711 | .ops = &clk_regmap_gate_ops, | ||
| 712 | .parent_names = (const char *[]){ "mpll2_div" }, | ||
| 713 | .num_parents = 1, | ||
| 714 | .flags = CLK_SET_RATE_PARENT, | ||
| 715 | }, | ||
| 716 | }; | ||
| 717 | |||
| 718 | static struct clk_regmap g12a_mpll3_div = { | ||
| 719 | .data = &(struct meson_clk_mpll_data){ | ||
| 720 | .sdm = { | ||
| 721 | .reg_off = HHI_MPLL_CNTL7, | ||
| 722 | .shift = 0, | ||
| 723 | .width = 14, | ||
| 724 | }, | ||
| 725 | .sdm_en = { | ||
| 726 | .reg_off = HHI_MPLL_CNTL7, | ||
| 727 | .shift = 30, | ||
| 728 | .width = 1, | ||
| 729 | }, | ||
| 730 | .n2 = { | ||
| 731 | .reg_off = HHI_MPLL_CNTL7, | ||
| 732 | .shift = 20, | ||
| 733 | .width = 9, | ||
| 734 | }, | ||
| 735 | .ssen = { | ||
| 736 | .reg_off = HHI_MPLL_CNTL7, | ||
| 737 | .shift = 29, | ||
| 738 | .width = 1, | ||
| 739 | }, | ||
| 740 | .lock = &meson_clk_lock, | ||
| 741 | }, | ||
| 742 | .hw.init = &(struct clk_init_data){ | ||
| 743 | .name = "mpll3_div", | ||
| 744 | .ops = &meson_clk_mpll_ops, | ||
| 745 | .parent_names = (const char *[]){ "mpll_prediv" }, | ||
| 746 | .num_parents = 1, | ||
| 747 | }, | ||
| 748 | }; | ||
| 749 | |||
| 750 | static struct clk_regmap g12a_mpll3 = { | ||
| 751 | .data = &(struct clk_regmap_gate_data){ | ||
| 752 | .offset = HHI_MPLL_CNTL7, | ||
| 753 | .bit_idx = 31, | ||
| 754 | }, | ||
| 755 | .hw.init = &(struct clk_init_data){ | ||
| 756 | .name = "mpll3", | ||
| 757 | .ops = &clk_regmap_gate_ops, | ||
| 758 | .parent_names = (const char *[]){ "mpll3_div" }, | ||
| 759 | .num_parents = 1, | ||
| 760 | .flags = CLK_SET_RATE_PARENT, | ||
| 761 | }, | ||
| 762 | }; | ||
| 763 | |||
| 764 | static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 }; | ||
| 765 | static const char * const clk81_parent_names[] = { | ||
| 766 | IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4", | ||
| 767 | "fclk_div3", "fclk_div5" | ||
| 768 | }; | ||
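A note on mux_table_clk81: the regmap mux uses this table to translate between a parent index and the value held in the 3-bit field at bits [14:12] of HHI_MPEG_CLK_CNTL. Index 0 ("xtal") corresponds to field value 0, index 1 ("fclk_div7") to 2, and so on up to index 6 ("fclk_div5") and value 7; field value 1 is skipped, presumably unused or reserved on this SoC.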
| 769 | |||
| 770 | static struct clk_regmap g12a_mpeg_clk_sel = { | ||
| 771 | .data = &(struct clk_regmap_mux_data){ | ||
| 772 | .offset = HHI_MPEG_CLK_CNTL, | ||
| 773 | .mask = 0x7, | ||
| 774 | .shift = 12, | ||
| 775 | .table = mux_table_clk81, | ||
| 776 | }, | ||
| 777 | .hw.init = &(struct clk_init_data){ | ||
| 778 | .name = "mpeg_clk_sel", | ||
| 779 | .ops = &clk_regmap_mux_ro_ops, | ||
| 780 | .parent_names = clk81_parent_names, | ||
| 781 | .num_parents = ARRAY_SIZE(clk81_parent_names), | ||
| 782 | }, | ||
| 783 | }; | ||
| 784 | |||
| 785 | static struct clk_regmap g12a_mpeg_clk_div = { | ||
| 786 | .data = &(struct clk_regmap_div_data){ | ||
| 787 | .offset = HHI_MPEG_CLK_CNTL, | ||
| 788 | .shift = 0, | ||
| 789 | .width = 7, | ||
| 790 | }, | ||
| 791 | .hw.init = &(struct clk_init_data){ | ||
| 792 | .name = "mpeg_clk_div", | ||
| 793 | .ops = &clk_regmap_divider_ops, | ||
| 794 | .parent_names = (const char *[]){ "mpeg_clk_sel" }, | ||
| 795 | .num_parents = 1, | ||
| 796 | .flags = CLK_SET_RATE_PARENT, | ||
| 797 | }, | ||
| 798 | }; | ||
| 799 | |||
| 800 | static struct clk_regmap g12a_clk81 = { | ||
| 801 | .data = &(struct clk_regmap_gate_data){ | ||
| 802 | .offset = HHI_MPEG_CLK_CNTL, | ||
| 803 | .bit_idx = 7, | ||
| 804 | }, | ||
| 805 | .hw.init = &(struct clk_init_data){ | ||
| 806 | .name = "clk81", | ||
| 807 | .ops = &clk_regmap_gate_ops, | ||
| 808 | .parent_names = (const char *[]){ "mpeg_clk_div" }, | ||
| 809 | .num_parents = 1, | ||
| 810 | .flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL), | ||
| 811 | }, | ||
| 812 | }; | ||
| 813 | |||
| 814 | static const char * const g12a_sd_emmc_clk0_parent_names[] = { | ||
| 815 | IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7", | ||
| 816 | |||
| 817 | /* | ||
| 818 | * Following these parent clocks, we should also have had mpll2, mpll3 | ||
| 819 | * and gp0_pll but these clocks are too precious to be used here. All | ||
| 820 | * the necessary rates for MMC and NAND operation can be achieved using | ||
| 821 | * g12a_ee_core or fclk_div clocks | ||
| 822 | */ | ||
| 823 | }; | ||
| 824 | |||
| 825 | /* SDIO clock */ | ||
| 826 | static struct clk_regmap g12a_sd_emmc_a_clk0_sel = { | ||
| 827 | .data = &(struct clk_regmap_mux_data){ | ||
| 828 | .offset = HHI_SD_EMMC_CLK_CNTL, | ||
| 829 | .mask = 0x7, | ||
| 830 | .shift = 9, | ||
| 831 | }, | ||
| 832 | .hw.init = &(struct clk_init_data) { | ||
| 833 | .name = "sd_emmc_a_clk0_sel", | ||
| 834 | .ops = &clk_regmap_mux_ops, | ||
| 835 | .parent_names = g12a_sd_emmc_clk0_parent_names, | ||
| 836 | .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names), | ||
| 837 | .flags = CLK_SET_RATE_PARENT, | ||
| 838 | }, | ||
| 839 | }; | ||
| 840 | |||
| 841 | static struct clk_regmap g12a_sd_emmc_a_clk0_div = { | ||
| 842 | .data = &(struct clk_regmap_div_data){ | ||
| 843 | .offset = HHI_SD_EMMC_CLK_CNTL, | ||
| 844 | .shift = 0, | ||
| 845 | .width = 7, | ||
| 846 | }, | ||
| 847 | .hw.init = &(struct clk_init_data) { | ||
| 848 | .name = "sd_emmc_a_clk0_div", | ||
| 849 | .ops = &clk_regmap_divider_ops, | ||
| 850 | .parent_names = (const char *[]){ "sd_emmc_a_clk0_sel" }, | ||
| 851 | .num_parents = 1, | ||
| 852 | .flags = CLK_SET_RATE_PARENT, | ||
| 853 | }, | ||
| 854 | }; | ||
| 855 | |||
| 856 | static struct clk_regmap g12a_sd_emmc_a_clk0 = { | ||
| 857 | .data = &(struct clk_regmap_gate_data){ | ||
| 858 | .offset = HHI_SD_EMMC_CLK_CNTL, | ||
| 859 | .bit_idx = 7, | ||
| 860 | }, | ||
| 861 | .hw.init = &(struct clk_init_data){ | ||
| 862 | .name = "sd_emmc_a_clk0", | ||
| 863 | .ops = &clk_regmap_gate_ops, | ||
| 864 | .parent_names = (const char *[]){ "sd_emmc_a_clk0_div" }, | ||
| 865 | .num_parents = 1, | ||
| 866 | .flags = CLK_SET_RATE_PARENT, | ||
| 867 | }, | ||
| 868 | }; | ||
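Each SD/eMMC clock is a three-stage composition: a mux (*_clk0_sel) choosing among the parents listed above, a 7-bit divider, and a gate. Since all three stages carry CLK_SET_RATE_PARENT, a rate request on the leaf gate walks up through the divider to the mux, which may pick whichever fclk_div parent best fits. A minimal consumer-side sketch of that flow, with the device and clock-name strings purely illustrative (they are not taken from this patch):

    /* hedged sketch: "dev" is the consumer's struct device, "core" its clock */
    struct clk *core;
    int ret;

    core = devm_clk_get(dev, "clkin0");       /* hypothetical consumer clock name */
    if (IS_ERR(core))
            return PTR_ERR(core);

    ret = clk_prepare_enable(core);           /* ungates sd_emmc_a_clk0 */
    if (ret)
            return ret;

    clk_set_rate(core, 400000);               /* 400 kHz init rate; the request propagates
                                                 up the div and mux via CLK_SET_RATE_PARENT */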
| 869 | |||
| 870 | /* SDcard clock */ | ||
| 871 | static struct clk_regmap g12a_sd_emmc_b_clk0_sel = { | ||
| 872 | .data = &(struct clk_regmap_mux_data){ | ||
| 873 | .offset = HHI_SD_EMMC_CLK_CNTL, | ||
| 874 | .mask = 0x7, | ||
| 875 | .shift = 25, | ||
| 876 | }, | ||
| 877 | .hw.init = &(struct clk_init_data) { | ||
| 878 | .name = "sd_emmc_b_clk0_sel", | ||
| 879 | .ops = &clk_regmap_mux_ops, | ||
| 880 | .parent_names = g12a_sd_emmc_clk0_parent_names, | ||
| 881 | .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names), | ||
| 882 | .flags = CLK_SET_RATE_PARENT, | ||
| 883 | }, | ||
| 884 | }; | ||
| 885 | |||
| 886 | static struct clk_regmap g12a_sd_emmc_b_clk0_div = { | ||
| 887 | .data = &(struct clk_regmap_div_data){ | ||
| 888 | .offset = HHI_SD_EMMC_CLK_CNTL, | ||
| 889 | .shift = 16, | ||
| 890 | .width = 7, | ||
| 891 | }, | ||
| 892 | .hw.init = &(struct clk_init_data) { | ||
| 893 | .name = "sd_emmc_b_clk0_div", | ||
| 894 | .ops = &clk_regmap_divider_ops, | ||
| 895 | .parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" }, | ||
| 896 | .num_parents = 1, | ||
| 897 | .flags = CLK_SET_RATE_PARENT, | ||
| 898 | }, | ||
| 899 | }; | ||
| 900 | |||
| 901 | static struct clk_regmap g12a_sd_emmc_b_clk0 = { | ||
| 902 | .data = &(struct clk_regmap_gate_data){ | ||
| 903 | .offset = HHI_SD_EMMC_CLK_CNTL, | ||
| 904 | .bit_idx = 23, | ||
| 905 | }, | ||
| 906 | .hw.init = &(struct clk_init_data){ | ||
| 907 | .name = "sd_emmc_b_clk0", | ||
| 908 | .ops = &clk_regmap_gate_ops, | ||
| 909 | .parent_names = (const char *[]){ "sd_emmc_b_clk0_div" }, | ||
| 910 | .num_parents = 1, | ||
| 911 | .flags = CLK_SET_RATE_PARENT, | ||
| 912 | }, | ||
| 913 | }; | ||
| 914 | |||
| 915 | /* EMMC/NAND clock */ | ||
| 916 | static struct clk_regmap g12a_sd_emmc_c_clk0_sel = { | ||
| 917 | .data = &(struct clk_regmap_mux_data){ | ||
| 918 | .offset = HHI_NAND_CLK_CNTL, | ||
| 919 | .mask = 0x7, | ||
| 920 | .shift = 9, | ||
| 921 | }, | ||
| 922 | .hw.init = &(struct clk_init_data) { | ||
| 923 | .name = "sd_emmc_c_clk0_sel", | ||
| 924 | .ops = &clk_regmap_mux_ops, | ||
| 925 | .parent_names = g12a_sd_emmc_clk0_parent_names, | ||
| 926 | .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_names), | ||
| 927 | .flags = CLK_SET_RATE_PARENT, | ||
| 928 | }, | ||
| 929 | }; | ||
| 930 | |||
| 931 | static struct clk_regmap g12a_sd_emmc_c_clk0_div = { | ||
| 932 | .data = &(struct clk_regmap_div_data){ | ||
| 933 | .offset = HHI_NAND_CLK_CNTL, | ||
| 934 | .shift = 0, | ||
| 935 | .width = 7, | ||
| 936 | }, | ||
| 937 | .hw.init = &(struct clk_init_data) { | ||
| 938 | .name = "sd_emmc_c_clk0_div", | ||
| 939 | .ops = &clk_regmap_divider_ops, | ||
| 940 | .parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" }, | ||
| 941 | .num_parents = 1, | ||
| 942 | .flags = CLK_SET_RATE_PARENT, | ||
| 943 | }, | ||
| 944 | }; | ||
| 945 | |||
| 946 | static struct clk_regmap g12a_sd_emmc_c_clk0 = { | ||
| 947 | .data = &(struct clk_regmap_gate_data){ | ||
| 948 | .offset = HHI_NAND_CLK_CNTL, | ||
| 949 | .bit_idx = 7, | ||
| 950 | }, | ||
| 951 | .hw.init = &(struct clk_init_data){ | ||
| 952 | .name = "sd_emmc_c_clk0", | ||
| 953 | .ops = &clk_regmap_gate_ops, | ||
| 954 | .parent_names = (const char *[]){ "sd_emmc_c_clk0_div" }, | ||
| 955 | .num_parents = 1, | ||
| 956 | .flags = CLK_SET_RATE_PARENT, | ||
| 957 | }, | ||
| 958 | }; | ||
| 959 | |||
| 960 | /* VPU Clock */ | ||
| 961 | |||
| 962 | static const char * const g12a_vpu_parent_names[] = { | ||
| 963 | "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", | ||
| 964 | "mpll1", "vid_pll", "hifi_pll", "gp0_pll", | ||
| 965 | }; | ||
| 966 | |||
| 967 | static struct clk_regmap g12a_vpu_0_sel = { | ||
| 968 | .data = &(struct clk_regmap_mux_data){ | ||
| 969 | .offset = HHI_VPU_CLK_CNTL, | ||
| 970 | .mask = 0x3, | ||
| 971 | .shift = 9, | ||
| 972 | }, | ||
| 973 | .hw.init = &(struct clk_init_data){ | ||
| 974 | .name = "vpu_0_sel", | ||
| 975 | .ops = &clk_regmap_mux_ops, | ||
| 976 | .parent_names = g12a_vpu_parent_names, | ||
| 977 | .num_parents = ARRAY_SIZE(g12a_vpu_parent_names), | ||
| 978 | .flags = CLK_SET_RATE_NO_REPARENT, | ||
| 979 | }, | ||
| 980 | }; | ||
| 981 | |||
| 982 | static struct clk_regmap g12a_vpu_0_div = { | ||
| 983 | .data = &(struct clk_regmap_div_data){ | ||
| 984 | .offset = HHI_VPU_CLK_CNTL, | ||
| 985 | .shift = 0, | ||
| 986 | .width = 7, | ||
| 987 | }, | ||
| 988 | .hw.init = &(struct clk_init_data){ | ||
| 989 | .name = "vpu_0_div", | ||
| 990 | .ops = &clk_regmap_divider_ops, | ||
| 991 | .parent_names = (const char *[]){ "vpu_0_sel" }, | ||
| 992 | .num_parents = 1, | ||
| 993 | .flags = CLK_SET_RATE_PARENT, | ||
| 994 | }, | ||
| 995 | }; | ||
| 996 | |||
| 997 | static struct clk_regmap g12a_vpu_0 = { | ||
| 998 | .data = &(struct clk_regmap_gate_data){ | ||
| 999 | .offset = HHI_VPU_CLK_CNTL, | ||
| 1000 | .bit_idx = 8, | ||
| 1001 | }, | ||
| 1002 | .hw.init = &(struct clk_init_data) { | ||
| 1003 | .name = "vpu_0", | ||
| 1004 | .ops = &clk_regmap_gate_ops, | ||
| 1005 | .parent_names = (const char *[]){ "vpu_0_div" }, | ||
| 1006 | .num_parents = 1, | ||
| 1007 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1008 | }, | ||
| 1009 | }; | ||
| 1010 | |||
| 1011 | static struct clk_regmap g12a_vpu_1_sel = { | ||
| 1012 | .data = &(struct clk_regmap_mux_data){ | ||
| 1013 | .offset = HHI_VPU_CLK_CNTL, | ||
| 1014 | .mask = 0x3, | ||
| 1015 | .shift = 25, | ||
| 1016 | }, | ||
| 1017 | .hw.init = &(struct clk_init_data){ | ||
| 1018 | .name = "vpu_1_sel", | ||
| 1019 | .ops = &clk_regmap_mux_ops, | ||
| 1020 | .parent_names = g12a_vpu_parent_names, | ||
| 1021 | .num_parents = ARRAY_SIZE(g12a_vpu_parent_names), | ||
| 1022 | .flags = CLK_SET_RATE_NO_REPARENT, | ||
| 1023 | }, | ||
| 1024 | }; | ||
| 1025 | |||
| 1026 | static struct clk_regmap g12a_vpu_1_div = { | ||
| 1027 | .data = &(struct clk_regmap_div_data){ | ||
| 1028 | .offset = HHI_VPU_CLK_CNTL, | ||
| 1029 | .shift = 16, | ||
| 1030 | .width = 7, | ||
| 1031 | }, | ||
| 1032 | .hw.init = &(struct clk_init_data){ | ||
| 1033 | .name = "vpu_1_div", | ||
| 1034 | .ops = &clk_regmap_divider_ops, | ||
| 1035 | .parent_names = (const char *[]){ "vpu_1_sel" }, | ||
| 1036 | .num_parents = 1, | ||
| 1037 | .flags = CLK_SET_RATE_PARENT, | ||
| 1038 | }, | ||
| 1039 | }; | ||
| 1040 | |||
| 1041 | static struct clk_regmap g12a_vpu_1 = { | ||
| 1042 | .data = &(struct clk_regmap_gate_data){ | ||
| 1043 | .offset = HHI_VPU_CLK_CNTL, | ||
| 1044 | .bit_idx = 24, | ||
| 1045 | }, | ||
| 1046 | .hw.init = &(struct clk_init_data) { | ||
| 1047 | .name = "vpu_1", | ||
| 1048 | .ops = &clk_regmap_gate_ops, | ||
| 1049 | .parent_names = (const char *[]){ "vpu_1_div" }, | ||
| 1050 | .num_parents = 1, | ||
| 1051 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1052 | }, | ||
| 1053 | }; | ||
| 1054 | |||
| 1055 | static struct clk_regmap g12a_vpu = { | ||
| 1056 | .data = &(struct clk_regmap_mux_data){ | ||
| 1057 | .offset = HHI_VPU_CLK_CNTL, | ||
| 1058 | .mask = 1, | ||
| 1059 | .shift = 31, | ||
| 1060 | }, | ||
| 1061 | .hw.init = &(struct clk_init_data){ | ||
| 1062 | .name = "vpu", | ||
| 1063 | .ops = &clk_regmap_mux_ops, | ||
| 1064 | /* | ||
| 1065 | * bit 31 selects from 2 possible parents: | ||
| 1066 | * vpu_0 or vpu_1 | ||
| 1067 | */ | ||
| 1068 | .parent_names = (const char *[]){ "vpu_0", "vpu_1" }, | ||
| 1069 | .num_parents = 2, | ||
| 1070 | .flags = CLK_SET_RATE_NO_REPARENT, | ||
| 1071 | }, | ||
| 1072 | }; | ||
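Note that vpu_0 and vpu_1 above are two complete sel/div/gate branches fed from the same parent list; the final vpu mux at bit 31 only picks one of them, so the idle branch can be reprogrammed without disturbing the active one. The same dual-branch arrangement is repeated below for the VAPB and Mali clocks.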
| 1073 | |||
| 1074 | /* VAPB Clock */ | ||
| 1075 | |||
| 1076 | static const char * const g12a_vapb_parent_names[] = { | ||
| 1077 | "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", | ||
| 1078 | "mpll1", "vid_pll", "mpll2", "fclk_div2p5", | ||
| 1079 | }; | ||
| 1080 | |||
| 1081 | static struct clk_regmap g12a_vapb_0_sel = { | ||
| 1082 | .data = &(struct clk_regmap_mux_data){ | ||
| 1083 | .offset = HHI_VAPBCLK_CNTL, | ||
| 1084 | .mask = 0x3, | ||
| 1085 | .shift = 9, | ||
| 1086 | }, | ||
| 1087 | .hw.init = &(struct clk_init_data){ | ||
| 1088 | .name = "vapb_0_sel", | ||
| 1089 | .ops = &clk_regmap_mux_ops, | ||
| 1090 | .parent_names = g12a_vapb_parent_names, | ||
| 1091 | .num_parents = ARRAY_SIZE(g12a_vapb_parent_names), | ||
| 1092 | .flags = CLK_SET_RATE_NO_REPARENT, | ||
| 1093 | }, | ||
| 1094 | }; | ||
| 1095 | |||
| 1096 | static struct clk_regmap g12a_vapb_0_div = { | ||
| 1097 | .data = &(struct clk_regmap_div_data){ | ||
| 1098 | .offset = HHI_VAPBCLK_CNTL, | ||
| 1099 | .shift = 0, | ||
| 1100 | .width = 7, | ||
| 1101 | }, | ||
| 1102 | .hw.init = &(struct clk_init_data){ | ||
| 1103 | .name = "vapb_0_div", | ||
| 1104 | .ops = &clk_regmap_divider_ops, | ||
| 1105 | .parent_names = (const char *[]){ "vapb_0_sel" }, | ||
| 1106 | .num_parents = 1, | ||
| 1107 | .flags = CLK_SET_RATE_PARENT, | ||
| 1108 | }, | ||
| 1109 | }; | ||
| 1110 | |||
| 1111 | static struct clk_regmap g12a_vapb_0 = { | ||
| 1112 | .data = &(struct clk_regmap_gate_data){ | ||
| 1113 | .offset = HHI_VAPBCLK_CNTL, | ||
| 1114 | .bit_idx = 8, | ||
| 1115 | }, | ||
| 1116 | .hw.init = &(struct clk_init_data) { | ||
| 1117 | .name = "vapb_0", | ||
| 1118 | .ops = &clk_regmap_gate_ops, | ||
| 1119 | .parent_names = (const char *[]){ "vapb_0_div" }, | ||
| 1120 | .num_parents = 1, | ||
| 1121 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1122 | }, | ||
| 1123 | }; | ||
| 1124 | |||
| 1125 | static struct clk_regmap g12a_vapb_1_sel = { | ||
| 1126 | .data = &(struct clk_regmap_mux_data){ | ||
| 1127 | .offset = HHI_VAPBCLK_CNTL, | ||
| 1128 | .mask = 0x3, | ||
| 1129 | .shift = 25, | ||
| 1130 | }, | ||
| 1131 | .hw.init = &(struct clk_init_data){ | ||
| 1132 | .name = "vapb_1_sel", | ||
| 1133 | .ops = &clk_regmap_mux_ops, | ||
| 1134 | .parent_names = g12a_vapb_parent_names, | ||
| 1135 | .num_parents = ARRAY_SIZE(g12a_vapb_parent_names), | ||
| 1136 | .flags = CLK_SET_RATE_NO_REPARENT, | ||
| 1137 | }, | ||
| 1138 | }; | ||
| 1139 | |||
| 1140 | static struct clk_regmap g12a_vapb_1_div = { | ||
| 1141 | .data = &(struct clk_regmap_div_data){ | ||
| 1142 | .offset = HHI_VAPBCLK_CNTL, | ||
| 1143 | .shift = 16, | ||
| 1144 | .width = 7, | ||
| 1145 | }, | ||
| 1146 | .hw.init = &(struct clk_init_data){ | ||
| 1147 | .name = "vapb_1_div", | ||
| 1148 | .ops = &clk_regmap_divider_ops, | ||
| 1149 | .parent_names = (const char *[]){ "vapb_1_sel" }, | ||
| 1150 | .num_parents = 1, | ||
| 1151 | .flags = CLK_SET_RATE_PARENT, | ||
| 1152 | }, | ||
| 1153 | }; | ||
| 1154 | |||
| 1155 | static struct clk_regmap g12a_vapb_1 = { | ||
| 1156 | .data = &(struct clk_regmap_gate_data){ | ||
| 1157 | .offset = HHI_VAPBCLK_CNTL, | ||
| 1158 | .bit_idx = 24, | ||
| 1159 | }, | ||
| 1160 | .hw.init = &(struct clk_init_data) { | ||
| 1161 | .name = "vapb_1", | ||
| 1162 | .ops = &clk_regmap_gate_ops, | ||
| 1163 | .parent_names = (const char *[]){ "vapb_1_div" }, | ||
| 1164 | .num_parents = 1, | ||
| 1165 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1166 | }, | ||
| 1167 | }; | ||
| 1168 | |||
| 1169 | static struct clk_regmap g12a_vapb_sel = { | ||
| 1170 | .data = &(struct clk_regmap_mux_data){ | ||
| 1171 | .offset = HHI_VAPBCLK_CNTL, | ||
| 1172 | .mask = 1, | ||
| 1173 | .shift = 31, | ||
| 1174 | }, | ||
| 1175 | .hw.init = &(struct clk_init_data){ | ||
| 1176 | .name = "vapb_sel", | ||
| 1177 | .ops = &clk_regmap_mux_ops, | ||
| 1178 | /* | ||
| 1179 | * bit 31 selects from 2 possible parents: | ||
| 1180 | * vapb_0 or vapb_1 | ||
| 1181 | */ | ||
| 1182 | .parent_names = (const char *[]){ "vapb_0", "vapb_1" }, | ||
| 1183 | .num_parents = 2, | ||
| 1184 | .flags = CLK_SET_RATE_NO_REPARENT, | ||
| 1185 | }, | ||
| 1186 | }; | ||
| 1187 | |||
| 1188 | static struct clk_regmap g12a_vapb = { | ||
| 1189 | .data = &(struct clk_regmap_gate_data){ | ||
| 1190 | .offset = HHI_VAPBCLK_CNTL, | ||
| 1191 | .bit_idx = 30, | ||
| 1192 | }, | ||
| 1193 | .hw.init = &(struct clk_init_data) { | ||
| 1194 | .name = "vapb", | ||
| 1195 | .ops = &clk_regmap_gate_ops, | ||
| 1196 | .parent_names = (const char *[]){ "vapb_sel" }, | ||
| 1197 | .num_parents = 1, | ||
| 1198 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1199 | }, | ||
| 1200 | }; | ||
| 1201 | |||
| 1202 | /* Video Clocks */ | ||
| 1203 | |||
| 1204 | static struct clk_regmap g12a_vid_pll_div = { | ||
| 1205 | .data = &(struct meson_vid_pll_div_data){ | ||
| 1206 | .val = { | ||
| 1207 | .reg_off = HHI_VID_PLL_CLK_DIV, | ||
| 1208 | .shift = 0, | ||
| 1209 | .width = 15, | ||
| 1210 | }, | ||
| 1211 | .sel = { | ||
| 1212 | .reg_off = HHI_VID_PLL_CLK_DIV, | ||
| 1213 | .shift = 16, | ||
| 1214 | .width = 2, | ||
| 1215 | }, | ||
| 1216 | }, | ||
| 1217 | .hw.init = &(struct clk_init_data) { | ||
| 1218 | .name = "vid_pll_div", | ||
| 1219 | .ops = &meson_vid_pll_div_ro_ops, | ||
| 1220 | .parent_names = (const char *[]){ "hdmi_pll" }, | ||
| 1221 | .num_parents = 1, | ||
| 1222 | .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, | ||
| 1223 | }, | ||
| 1224 | }; | ||
| 1225 | |||
| 1226 | static const char * const g12a_vid_pll_parent_names[] = { "vid_pll_div", | ||
| 1227 | "hdmi_pll" }; | ||
| 1228 | |||
| 1229 | static struct clk_regmap g12a_vid_pll_sel = { | ||
| 1230 | .data = &(struct clk_regmap_mux_data){ | ||
| 1231 | .offset = HHI_VID_PLL_CLK_DIV, | ||
| 1232 | .mask = 0x1, | ||
| 1233 | .shift = 18, | ||
| 1234 | }, | ||
| 1235 | .hw.init = &(struct clk_init_data){ | ||
| 1236 | .name = "vid_pll_sel", | ||
| 1237 | .ops = &clk_regmap_mux_ops, | ||
| 1238 | /* | ||
| 1239 | * bit 18 selects from 2 possible parents: | ||
| 1240 | * vid_pll_div or hdmi_pll | ||
| 1241 | */ | ||
| 1242 | .parent_names = g12a_vid_pll_parent_names, | ||
| 1243 | .num_parents = ARRAY_SIZE(g12a_vid_pll_parent_names), | ||
| 1244 | .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE, | ||
| 1245 | }, | ||
| 1246 | }; | ||
| 1247 | |||
| 1248 | static struct clk_regmap g12a_vid_pll = { | ||
| 1249 | .data = &(struct clk_regmap_gate_data){ | ||
| 1250 | .offset = HHI_VID_PLL_CLK_DIV, | ||
| 1251 | .bit_idx = 19, | ||
| 1252 | }, | ||
| 1253 | .hw.init = &(struct clk_init_data) { | ||
| 1254 | .name = "vid_pll", | ||
| 1255 | .ops = &clk_regmap_gate_ops, | ||
| 1256 | .parent_names = (const char *[]){ "vid_pll_sel" }, | ||
| 1257 | .num_parents = 1, | ||
| 1258 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1259 | }, | ||
| 1260 | }; | ||
| 1261 | |||
| 1262 | static const char * const g12a_vclk_parent_names[] = { | ||
| 1263 | "vid_pll", "gp0_pll", "hifi_pll", "mpll1", "fclk_div3", "fclk_div4", | ||
| 1264 | "fclk_div5", "fclk_div7" | ||
| 1265 | }; | ||
| 1266 | |||
| 1267 | static struct clk_regmap g12a_vclk_sel = { | ||
| 1268 | .data = &(struct clk_regmap_mux_data){ | ||
| 1269 | .offset = HHI_VID_CLK_CNTL, | ||
| 1270 | .mask = 0x7, | ||
| 1271 | .shift = 16, | ||
| 1272 | }, | ||
| 1273 | .hw.init = &(struct clk_init_data){ | ||
| 1274 | .name = "vclk_sel", | ||
| 1275 | .ops = &clk_regmap_mux_ops, | ||
| 1276 | .parent_names = g12a_vclk_parent_names, | ||
| 1277 | .num_parents = ARRAY_SIZE(g12a_vclk_parent_names), | ||
| 1278 | .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE, | ||
| 1279 | }, | ||
| 1280 | }; | ||
| 1281 | |||
| 1282 | static struct clk_regmap g12a_vclk2_sel = { | ||
| 1283 | .data = &(struct clk_regmap_mux_data){ | ||
| 1284 | .offset = HHI_VIID_CLK_CNTL, | ||
| 1285 | .mask = 0x7, | ||
| 1286 | .shift = 16, | ||
| 1287 | }, | ||
| 1288 | .hw.init = &(struct clk_init_data){ | ||
| 1289 | .name = "vclk2_sel", | ||
| 1290 | .ops = &clk_regmap_mux_ops, | ||
| 1291 | .parent_names = g12a_vclk_parent_names, | ||
| 1292 | .num_parents = ARRAY_SIZE(g12a_vclk_parent_names), | ||
| 1293 | .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE, | ||
| 1294 | }, | ||
| 1295 | }; | ||
| 1296 | |||
| 1297 | static struct clk_regmap g12a_vclk_input = { | ||
| 1298 | .data = &(struct clk_regmap_gate_data){ | ||
| 1299 | .offset = HHI_VID_CLK_DIV, | ||
| 1300 | .bit_idx = 16, | ||
| 1301 | }, | ||
| 1302 | .hw.init = &(struct clk_init_data) { | ||
| 1303 | .name = "vclk_input", | ||
| 1304 | .ops = &clk_regmap_gate_ops, | ||
| 1305 | .parent_names = (const char *[]){ "vclk_sel" }, | ||
| 1306 | .num_parents = 1, | ||
| 1307 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1308 | }, | ||
| 1309 | }; | ||
| 1310 | |||
| 1311 | static struct clk_regmap g12a_vclk2_input = { | ||
| 1312 | .data = &(struct clk_regmap_gate_data){ | ||
| 1313 | .offset = HHI_VIID_CLK_DIV, | ||
| 1314 | .bit_idx = 16, | ||
| 1315 | }, | ||
| 1316 | .hw.init = &(struct clk_init_data) { | ||
| 1317 | .name = "vclk2_input", | ||
| 1318 | .ops = &clk_regmap_gate_ops, | ||
| 1319 | .parent_names = (const char *[]){ "vclk2_sel" }, | ||
| 1320 | .num_parents = 1, | ||
| 1321 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1322 | }, | ||
| 1323 | }; | ||
| 1324 | |||
| 1325 | static struct clk_regmap g12a_vclk_div = { | ||
| 1326 | .data = &(struct clk_regmap_div_data){ | ||
| 1327 | .offset = HHI_VID_CLK_DIV, | ||
| 1328 | .shift = 0, | ||
| 1329 | .width = 8, | ||
| 1330 | }, | ||
| 1331 | .hw.init = &(struct clk_init_data){ | ||
| 1332 | .name = "vclk_div", | ||
| 1333 | .ops = &clk_regmap_divider_ops, | ||
| 1334 | .parent_names = (const char *[]){ "vclk_input" }, | ||
| 1335 | .num_parents = 1, | ||
| 1336 | .flags = CLK_GET_RATE_NOCACHE, | ||
| 1337 | }, | ||
| 1338 | }; | ||
| 1339 | |||
| 1340 | static struct clk_regmap g12a_vclk2_div = { | ||
| 1341 | .data = &(struct clk_regmap_div_data){ | ||
| 1342 | .offset = HHI_VIID_CLK_DIV, | ||
| 1343 | .shift = 0, | ||
| 1344 | .width = 8, | ||
| 1345 | }, | ||
| 1346 | .hw.init = &(struct clk_init_data){ | ||
| 1347 | .name = "vclk2_div", | ||
| 1348 | .ops = &clk_regmap_divider_ops, | ||
| 1349 | .parent_names = (const char *[]){ "vclk2_input" }, | ||
| 1350 | .num_parents = 1, | ||
| 1351 | .flags = CLK_GET_RATE_NOCACHE, | ||
| 1352 | }, | ||
| 1353 | }; | ||
| 1354 | |||
| 1355 | static struct clk_regmap g12a_vclk = { | ||
| 1356 | .data = &(struct clk_regmap_gate_data){ | ||
| 1357 | .offset = HHI_VID_CLK_CNTL, | ||
| 1358 | .bit_idx = 19, | ||
| 1359 | }, | ||
| 1360 | .hw.init = &(struct clk_init_data) { | ||
| 1361 | .name = "vclk", | ||
| 1362 | .ops = &clk_regmap_gate_ops, | ||
| 1363 | .parent_names = (const char *[]){ "vclk_div" }, | ||
| 1364 | .num_parents = 1, | ||
| 1365 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1366 | }, | ||
| 1367 | }; | ||
| 1368 | |||
| 1369 | static struct clk_regmap g12a_vclk2 = { | ||
| 1370 | .data = &(struct clk_regmap_gate_data){ | ||
| 1371 | .offset = HHI_VIID_CLK_CNTL, | ||
| 1372 | .bit_idx = 19, | ||
| 1373 | }, | ||
| 1374 | .hw.init = &(struct clk_init_data) { | ||
| 1375 | .name = "vclk2", | ||
| 1376 | .ops = &clk_regmap_gate_ops, | ||
| 1377 | .parent_names = (const char *[]){ "vclk2_div" }, | ||
| 1378 | .num_parents = 1, | ||
| 1379 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1380 | }, | ||
| 1381 | }; | ||
| 1382 | |||
| 1383 | static struct clk_regmap g12a_vclk_div1 = { | ||
| 1384 | .data = &(struct clk_regmap_gate_data){ | ||
| 1385 | .offset = HHI_VID_CLK_CNTL, | ||
| 1386 | .bit_idx = 0, | ||
| 1387 | }, | ||
| 1388 | .hw.init = &(struct clk_init_data) { | ||
| 1389 | .name = "vclk_div1", | ||
| 1390 | .ops = &clk_regmap_gate_ops, | ||
| 1391 | .parent_names = (const char *[]){ "vclk" }, | ||
| 1392 | .num_parents = 1, | ||
| 1393 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1394 | }, | ||
| 1395 | }; | ||
| 1396 | |||
| 1397 | static struct clk_regmap g12a_vclk_div2_en = { | ||
| 1398 | .data = &(struct clk_regmap_gate_data){ | ||
| 1399 | .offset = HHI_VID_CLK_CNTL, | ||
| 1400 | .bit_idx = 1, | ||
| 1401 | }, | ||
| 1402 | .hw.init = &(struct clk_init_data) { | ||
| 1403 | .name = "vclk_div2_en", | ||
| 1404 | .ops = &clk_regmap_gate_ops, | ||
| 1405 | .parent_names = (const char *[]){ "vclk" }, | ||
| 1406 | .num_parents = 1, | ||
| 1407 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1408 | }, | ||
| 1409 | }; | ||
| 1410 | |||
| 1411 | static struct clk_regmap g12a_vclk_div4_en = { | ||
| 1412 | .data = &(struct clk_regmap_gate_data){ | ||
| 1413 | .offset = HHI_VID_CLK_CNTL, | ||
| 1414 | .bit_idx = 2, | ||
| 1415 | }, | ||
| 1416 | .hw.init = &(struct clk_init_data) { | ||
| 1417 | .name = "vclk_div4_en", | ||
| 1418 | .ops = &clk_regmap_gate_ops, | ||
| 1419 | .parent_names = (const char *[]){ "vclk" }, | ||
| 1420 | .num_parents = 1, | ||
| 1421 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1422 | }, | ||
| 1423 | }; | ||
| 1424 | |||
| 1425 | static struct clk_regmap g12a_vclk_div6_en = { | ||
| 1426 | .data = &(struct clk_regmap_gate_data){ | ||
| 1427 | .offset = HHI_VID_CLK_CNTL, | ||
| 1428 | .bit_idx = 3, | ||
| 1429 | }, | ||
| 1430 | .hw.init = &(struct clk_init_data) { | ||
| 1431 | .name = "vclk_div6_en", | ||
| 1432 | .ops = &clk_regmap_gate_ops, | ||
| 1433 | .parent_names = (const char *[]){ "vclk" }, | ||
| 1434 | .num_parents = 1, | ||
| 1435 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1436 | }, | ||
| 1437 | }; | ||
| 1438 | |||
| 1439 | static struct clk_regmap g12a_vclk_div12_en = { | ||
| 1440 | .data = &(struct clk_regmap_gate_data){ | ||
| 1441 | .offset = HHI_VID_CLK_CNTL, | ||
| 1442 | .bit_idx = 4, | ||
| 1443 | }, | ||
| 1444 | .hw.init = &(struct clk_init_data) { | ||
| 1445 | .name = "vclk_div12_en", | ||
| 1446 | .ops = &clk_regmap_gate_ops, | ||
| 1447 | .parent_names = (const char *[]){ "vclk" }, | ||
| 1448 | .num_parents = 1, | ||
| 1449 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1450 | }, | ||
| 1451 | }; | ||
| 1452 | |||
| 1453 | static struct clk_regmap g12a_vclk2_div1 = { | ||
| 1454 | .data = &(struct clk_regmap_gate_data){ | ||
| 1455 | .offset = HHI_VIID_CLK_CNTL, | ||
| 1456 | .bit_idx = 0, | ||
| 1457 | }, | ||
| 1458 | .hw.init = &(struct clk_init_data) { | ||
| 1459 | .name = "vclk2_div1", | ||
| 1460 | .ops = &clk_regmap_gate_ops, | ||
| 1461 | .parent_names = (const char *[]){ "vclk2" }, | ||
| 1462 | .num_parents = 1, | ||
| 1463 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1464 | }, | ||
| 1465 | }; | ||
| 1466 | |||
| 1467 | static struct clk_regmap g12a_vclk2_div2_en = { | ||
| 1468 | .data = &(struct clk_regmap_gate_data){ | ||
| 1469 | .offset = HHI_VIID_CLK_CNTL, | ||
| 1470 | .bit_idx = 1, | ||
| 1471 | }, | ||
| 1472 | .hw.init = &(struct clk_init_data) { | ||
| 1473 | .name = "vclk2_div2_en", | ||
| 1474 | .ops = &clk_regmap_gate_ops, | ||
| 1475 | .parent_names = (const char *[]){ "vclk2" }, | ||
| 1476 | .num_parents = 1, | ||
| 1477 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1478 | }, | ||
| 1479 | }; | ||
| 1480 | |||
| 1481 | static struct clk_regmap g12a_vclk2_div4_en = { | ||
| 1482 | .data = &(struct clk_regmap_gate_data){ | ||
| 1483 | .offset = HHI_VIID_CLK_CNTL, | ||
| 1484 | .bit_idx = 2, | ||
| 1485 | }, | ||
| 1486 | .hw.init = &(struct clk_init_data) { | ||
| 1487 | .name = "vclk2_div4_en", | ||
| 1488 | .ops = &clk_regmap_gate_ops, | ||
| 1489 | .parent_names = (const char *[]){ "vclk2" }, | ||
| 1490 | .num_parents = 1, | ||
| 1491 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1492 | }, | ||
| 1493 | }; | ||
| 1494 | |||
| 1495 | static struct clk_regmap g12a_vclk2_div6_en = { | ||
| 1496 | .data = &(struct clk_regmap_gate_data){ | ||
| 1497 | .offset = HHI_VIID_CLK_CNTL, | ||
| 1498 | .bit_idx = 3, | ||
| 1499 | }, | ||
| 1500 | .hw.init = &(struct clk_init_data) { | ||
| 1501 | .name = "vclk2_div6_en", | ||
| 1502 | .ops = &clk_regmap_gate_ops, | ||
| 1503 | .parent_names = (const char *[]){ "vclk2" }, | ||
| 1504 | .num_parents = 1, | ||
| 1505 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1506 | }, | ||
| 1507 | }; | ||
| 1508 | |||
| 1509 | static struct clk_regmap g12a_vclk2_div12_en = { | ||
| 1510 | .data = &(struct clk_regmap_gate_data){ | ||
| 1511 | .offset = HHI_VIID_CLK_CNTL, | ||
| 1512 | .bit_idx = 4, | ||
| 1513 | }, | ||
| 1514 | .hw.init = &(struct clk_init_data) { | ||
| 1515 | .name = "vclk2_div12_en", | ||
| 1516 | .ops = &clk_regmap_gate_ops, | ||
| 1517 | .parent_names = (const char *[]){ "vclk2" }, | ||
| 1518 | .num_parents = 1, | ||
| 1519 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1520 | }, | ||
| 1521 | }; | ||
| 1522 | |||
| 1523 | static struct clk_fixed_factor g12a_vclk_div2 = { | ||
| 1524 | .mult = 1, | ||
| 1525 | .div = 2, | ||
| 1526 | .hw.init = &(struct clk_init_data){ | ||
| 1527 | .name = "vclk_div2", | ||
| 1528 | .ops = &clk_fixed_factor_ops, | ||
| 1529 | .parent_names = (const char *[]){ "vclk_div2_en" }, | ||
| 1530 | .num_parents = 1, | ||
| 1531 | }, | ||
| 1532 | }; | ||
| 1533 | |||
| 1534 | static struct clk_fixed_factor g12a_vclk_div4 = { | ||
| 1535 | .mult = 1, | ||
| 1536 | .div = 4, | ||
| 1537 | .hw.init = &(struct clk_init_data){ | ||
| 1538 | .name = "vclk_div4", | ||
| 1539 | .ops = &clk_fixed_factor_ops, | ||
| 1540 | .parent_names = (const char *[]){ "vclk_div4_en" }, | ||
| 1541 | .num_parents = 1, | ||
| 1542 | }, | ||
| 1543 | }; | ||
| 1544 | |||
| 1545 | static struct clk_fixed_factor g12a_vclk_div6 = { | ||
| 1546 | .mult = 1, | ||
| 1547 | .div = 6, | ||
| 1548 | .hw.init = &(struct clk_init_data){ | ||
| 1549 | .name = "vclk_div6", | ||
| 1550 | .ops = &clk_fixed_factor_ops, | ||
| 1551 | .parent_names = (const char *[]){ "vclk_div6_en" }, | ||
| 1552 | .num_parents = 1, | ||
| 1553 | }, | ||
| 1554 | }; | ||
| 1555 | |||
| 1556 | static struct clk_fixed_factor g12a_vclk_div12 = { | ||
| 1557 | .mult = 1, | ||
| 1558 | .div = 12, | ||
| 1559 | .hw.init = &(struct clk_init_data){ | ||
| 1560 | .name = "vclk_div12", | ||
| 1561 | .ops = &clk_fixed_factor_ops, | ||
| 1562 | .parent_names = (const char *[]){ "vclk_div12_en" }, | ||
| 1563 | .num_parents = 1, | ||
| 1564 | }, | ||
| 1565 | }; | ||
| 1566 | |||
| 1567 | static struct clk_fixed_factor g12a_vclk2_div2 = { | ||
| 1568 | .mult = 1, | ||
| 1569 | .div = 2, | ||
| 1570 | .hw.init = &(struct clk_init_data){ | ||
| 1571 | .name = "vclk2_div2", | ||
| 1572 | .ops = &clk_fixed_factor_ops, | ||
| 1573 | .parent_names = (const char *[]){ "vclk2_div2_en" }, | ||
| 1574 | .num_parents = 1, | ||
| 1575 | }, | ||
| 1576 | }; | ||
| 1577 | |||
| 1578 | static struct clk_fixed_factor g12a_vclk2_div4 = { | ||
| 1579 | .mult = 1, | ||
| 1580 | .div = 4, | ||
| 1581 | .hw.init = &(struct clk_init_data){ | ||
| 1582 | .name = "vclk2_div4", | ||
| 1583 | .ops = &clk_fixed_factor_ops, | ||
| 1584 | .parent_names = (const char *[]){ "vclk2_div4_en" }, | ||
| 1585 | .num_parents = 1, | ||
| 1586 | }, | ||
| 1587 | }; | ||
| 1588 | |||
| 1589 | static struct clk_fixed_factor g12a_vclk2_div6 = { | ||
| 1590 | .mult = 1, | ||
| 1591 | .div = 6, | ||
| 1592 | .hw.init = &(struct clk_init_data){ | ||
| 1593 | .name = "vclk2_div6", | ||
| 1594 | .ops = &clk_fixed_factor_ops, | ||
| 1595 | .parent_names = (const char *[]){ "vclk2_div6_en" }, | ||
| 1596 | .num_parents = 1, | ||
| 1597 | }, | ||
| 1598 | }; | ||
| 1599 | |||
| 1600 | static struct clk_fixed_factor g12a_vclk2_div12 = { | ||
| 1601 | .mult = 1, | ||
| 1602 | .div = 12, | ||
| 1603 | .hw.init = &(struct clk_init_data){ | ||
| 1604 | .name = "vclk2_div12", | ||
| 1605 | .ops = &clk_fixed_factor_ops, | ||
| 1606 | .parent_names = (const char *[]){ "vclk2_div12_en" }, | ||
| 1607 | .num_parents = 1, | ||
| 1608 | }, | ||
| 1609 | }; | ||
| 1610 | |||
| 1611 | static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 }; | ||
| 1612 | static const char * const g12a_cts_parent_names[] = { | ||
| 1613 | "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6", | ||
| 1614 | "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4", | ||
| 1615 | "vclk2_div6", "vclk2_div12" | ||
| 1616 | }; | ||
| 1617 | |||
| 1618 | static struct clk_regmap g12a_cts_enci_sel = { | ||
| 1619 | .data = &(struct clk_regmap_mux_data){ | ||
| 1620 | .offset = HHI_VID_CLK_DIV, | ||
| 1621 | .mask = 0xf, | ||
| 1622 | .shift = 28, | ||
| 1623 | .table = mux_table_cts_sel, | ||
| 1624 | }, | ||
| 1625 | .hw.init = &(struct clk_init_data){ | ||
| 1626 | .name = "cts_enci_sel", | ||
| 1627 | .ops = &clk_regmap_mux_ops, | ||
| 1628 | .parent_names = g12a_cts_parent_names, | ||
| 1629 | .num_parents = ARRAY_SIZE(g12a_cts_parent_names), | ||
| 1630 | .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE, | ||
| 1631 | }, | ||
| 1632 | }; | ||
| 1633 | |||
| 1634 | static struct clk_regmap g12a_cts_encp_sel = { | ||
| 1635 | .data = &(struct clk_regmap_mux_data){ | ||
| 1636 | .offset = HHI_VID_CLK_DIV, | ||
| 1637 | .mask = 0xf, | ||
| 1638 | .shift = 20, | ||
| 1639 | .table = mux_table_cts_sel, | ||
| 1640 | }, | ||
| 1641 | .hw.init = &(struct clk_init_data){ | ||
| 1642 | .name = "cts_encp_sel", | ||
| 1643 | .ops = &clk_regmap_mux_ops, | ||
| 1644 | .parent_names = g12a_cts_parent_names, | ||
| 1645 | .num_parents = ARRAY_SIZE(g12a_cts_parent_names), | ||
| 1646 | .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE, | ||
| 1647 | }, | ||
| 1648 | }; | ||
| 1649 | |||
| 1650 | static struct clk_regmap g12a_cts_vdac_sel = { | ||
| 1651 | .data = &(struct clk_regmap_mux_data){ | ||
| 1652 | .offset = HHI_VIID_CLK_DIV, | ||
| 1653 | .mask = 0xf, | ||
| 1654 | .shift = 28, | ||
| 1655 | .table = mux_table_cts_sel, | ||
| 1656 | }, | ||
| 1657 | .hw.init = &(struct clk_init_data){ | ||
| 1658 | .name = "cts_vdac_sel", | ||
| 1659 | .ops = &clk_regmap_mux_ops, | ||
| 1660 | .parent_names = g12a_cts_parent_names, | ||
| 1661 | .num_parents = ARRAY_SIZE(g12a_cts_parent_names), | ||
| 1662 | .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE, | ||
| 1663 | }, | ||
| 1664 | }; | ||
| 1665 | |||
| 1666 | /* TOFIX: add support for cts_tcon */ | ||
| 1667 | static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 }; | ||
| 1668 | static const char * const g12a_cts_hdmi_tx_parent_names[] = { | ||
| 1669 | "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6", | ||
| 1670 | "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4", | ||
| 1671 | "vclk2_div6", "vclk2_div12" | ||
| 1672 | }; | ||
| 1673 | |||
| 1674 | static struct clk_regmap g12a_hdmi_tx_sel = { | ||
| 1675 | .data = &(struct clk_regmap_mux_data){ | ||
| 1676 | .offset = HHI_HDMI_CLK_CNTL, | ||
| 1677 | .mask = 0xf, | ||
| 1678 | .shift = 16, | ||
| 1679 | .table = mux_table_hdmi_tx_sel, | ||
| 1680 | }, | ||
| 1681 | .hw.init = &(struct clk_init_data){ | ||
| 1682 | .name = "hdmi_tx_sel", | ||
| 1683 | .ops = &clk_regmap_mux_ops, | ||
| 1684 | .parent_names = g12a_cts_hdmi_tx_parent_names, | ||
| 1685 | .num_parents = ARRAY_SIZE(g12a_cts_hdmi_tx_parent_names), | ||
| 1686 | .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE, | ||
| 1687 | }, | ||
| 1688 | }; | ||
| 1689 | |||
| 1690 | static struct clk_regmap g12a_cts_enci = { | ||
| 1691 | .data = &(struct clk_regmap_gate_data){ | ||
| 1692 | .offset = HHI_VID_CLK_CNTL2, | ||
| 1693 | .bit_idx = 0, | ||
| 1694 | }, | ||
| 1695 | .hw.init = &(struct clk_init_data) { | ||
| 1696 | .name = "cts_enci", | ||
| 1697 | .ops = &clk_regmap_gate_ops, | ||
| 1698 | .parent_names = (const char *[]){ "cts_enci_sel" }, | ||
| 1699 | .num_parents = 1, | ||
| 1700 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1701 | }, | ||
| 1702 | }; | ||
| 1703 | |||
| 1704 | static struct clk_regmap g12a_cts_encp = { | ||
| 1705 | .data = &(struct clk_regmap_gate_data){ | ||
| 1706 | .offset = HHI_VID_CLK_CNTL2, | ||
| 1707 | .bit_idx = 2, | ||
| 1708 | }, | ||
| 1709 | .hw.init = &(struct clk_init_data) { | ||
| 1710 | .name = "cts_encp", | ||
| 1711 | .ops = &clk_regmap_gate_ops, | ||
| 1712 | .parent_names = (const char *[]){ "cts_encp_sel" }, | ||
| 1713 | .num_parents = 1, | ||
| 1714 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1715 | }, | ||
| 1716 | }; | ||
| 1717 | |||
| 1718 | static struct clk_regmap g12a_cts_vdac = { | ||
| 1719 | .data = &(struct clk_regmap_gate_data){ | ||
| 1720 | .offset = HHI_VID_CLK_CNTL2, | ||
| 1721 | .bit_idx = 4, | ||
| 1722 | }, | ||
| 1723 | .hw.init = &(struct clk_init_data) { | ||
| 1724 | .name = "cts_vdac", | ||
| 1725 | .ops = &clk_regmap_gate_ops, | ||
| 1726 | .parent_names = (const char *[]){ "cts_vdac_sel" }, | ||
| 1727 | .num_parents = 1, | ||
| 1728 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1729 | }, | ||
| 1730 | }; | ||
| 1731 | |||
| 1732 | static struct clk_regmap g12a_hdmi_tx = { | ||
| 1733 | .data = &(struct clk_regmap_gate_data){ | ||
| 1734 | .offset = HHI_VID_CLK_CNTL2, | ||
| 1735 | .bit_idx = 5, | ||
| 1736 | }, | ||
| 1737 | .hw.init = &(struct clk_init_data) { | ||
| 1738 | .name = "hdmi_tx", | ||
| 1739 | .ops = &clk_regmap_gate_ops, | ||
| 1740 | .parent_names = (const char *[]){ "hdmi_tx_sel" }, | ||
| 1741 | .num_parents = 1, | ||
| 1742 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1743 | }, | ||
| 1744 | }; | ||
| 1745 | |||
| 1746 | /* HDMI Clocks */ | ||
| 1747 | |||
| 1748 | static const char * const g12a_hdmi_parent_names[] = { | ||
| 1749 | IN_PREFIX "xtal", "fclk_div4", "fclk_div3", "fclk_div5" | ||
| 1750 | }; | ||
| 1751 | |||
| 1752 | static struct clk_regmap g12a_hdmi_sel = { | ||
| 1753 | .data = &(struct clk_regmap_mux_data){ | ||
| 1754 | .offset = HHI_HDMI_CLK_CNTL, | ||
| 1755 | .mask = 0x3, | ||
| 1756 | .shift = 9, | ||
| 1757 | .flags = CLK_MUX_ROUND_CLOSEST, | ||
| 1758 | }, | ||
| 1759 | .hw.init = &(struct clk_init_data){ | ||
| 1760 | .name = "hdmi_sel", | ||
| 1761 | .ops = &clk_regmap_mux_ops, | ||
| 1762 | .parent_names = g12a_hdmi_parent_names, | ||
| 1763 | .num_parents = ARRAY_SIZE(g12a_hdmi_parent_names), | ||
| 1764 | .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE, | ||
| 1765 | }, | ||
| 1766 | }; | ||
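The CLK_MUX_ROUND_CLOSEST flag on this mux tells the generic mux rate selection to pick the parent whose rate is closest to the request, even if it overshoots, instead of the highest rate not exceeding it. For example, assuming the usual 2 GHz fixed_pll, a request for 600 MHz would resolve to fclk_div3 (about 667 MHz, off by 67 MHz) rather than fclk_div4 (500 MHz, off by 100 MHz).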
| 1767 | |||
| 1768 | static struct clk_regmap g12a_hdmi_div = { | ||
| 1769 | .data = &(struct clk_regmap_div_data){ | ||
| 1770 | .offset = HHI_HDMI_CLK_CNTL, | ||
| 1771 | .shift = 0, | ||
| 1772 | .width = 7, | ||
| 1773 | }, | ||
| 1774 | .hw.init = &(struct clk_init_data){ | ||
| 1775 | .name = "hdmi_div", | ||
| 1776 | .ops = &clk_regmap_divider_ops, | ||
| 1777 | .parent_names = (const char *[]){ "hdmi_sel" }, | ||
| 1778 | .num_parents = 1, | ||
| 1779 | .flags = CLK_GET_RATE_NOCACHE, | ||
| 1780 | }, | ||
| 1781 | }; | ||
| 1782 | |||
| 1783 | static struct clk_regmap g12a_hdmi = { | ||
| 1784 | .data = &(struct clk_regmap_gate_data){ | ||
| 1785 | .offset = HHI_HDMI_CLK_CNTL, | ||
| 1786 | .bit_idx = 8, | ||
| 1787 | }, | ||
| 1788 | .hw.init = &(struct clk_init_data) { | ||
| 1789 | .name = "hdmi", | ||
| 1790 | .ops = &clk_regmap_gate_ops, | ||
| 1791 | .parent_names = (const char *[]){ "hdmi_div" }, | ||
| 1792 | .num_parents = 1, | ||
| 1793 | .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, | ||
| 1794 | }, | ||
| 1795 | }; | ||
| 1796 | |||
| 1797 | /* | ||
| 1798 | * The MALI IP is clocked by two identical clocks (mali_0 and mali_1) | ||
| 1799 | * muxed by a glitch-free switch. | ||
| 1800 | */ | ||
| 1801 | |||
| 1802 | static const char * const g12a_mali_0_1_parent_names[] = { | ||
| 1803 | IN_PREFIX "xtal", "gp0_pll", "hifi_pll", "fclk_div2p5", | ||
| 1804 | "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7" | ||
| 1805 | }; | ||
| 1806 | |||
| 1807 | static struct clk_regmap g12a_mali_0_sel = { | ||
| 1808 | .data = &(struct clk_regmap_mux_data){ | ||
| 1809 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1810 | .mask = 0x7, | ||
| 1811 | .shift = 9, | ||
| 1812 | }, | ||
| 1813 | .hw.init = &(struct clk_init_data){ | ||
| 1814 | .name = "mali_0_sel", | ||
| 1815 | .ops = &clk_regmap_mux_ops, | ||
| 1816 | .parent_names = g12a_mali_0_1_parent_names, | ||
| 1817 | .num_parents = ARRAY_SIZE(g12a_mali_0_1_parent_names), | ||
| 1818 | .flags = CLK_SET_RATE_NO_REPARENT, | ||
| 1819 | }, | ||
| 1820 | }; | ||
| 1821 | |||
| 1822 | static struct clk_regmap g12a_mali_0_div = { | ||
| 1823 | .data = &(struct clk_regmap_div_data){ | ||
| 1824 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1825 | .shift = 0, | ||
| 1826 | .width = 7, | ||
| 1827 | }, | ||
| 1828 | .hw.init = &(struct clk_init_data){ | ||
| 1829 | .name = "mali_0_div", | ||
| 1830 | .ops = &clk_regmap_divider_ops, | ||
| 1831 | .parent_names = (const char *[]){ "mali_0_sel" }, | ||
| 1832 | .num_parents = 1, | ||
| 1833 | .flags = CLK_SET_RATE_NO_REPARENT, | ||
| 1834 | }, | ||
| 1835 | }; | ||
| 1836 | |||
| 1837 | static struct clk_regmap g12a_mali_0 = { | ||
| 1838 | .data = &(struct clk_regmap_gate_data){ | ||
| 1839 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1840 | .bit_idx = 8, | ||
| 1841 | }, | ||
| 1842 | .hw.init = &(struct clk_init_data){ | ||
| 1843 | .name = "mali_0", | ||
| 1844 | .ops = &clk_regmap_gate_ops, | ||
| 1845 | .parent_names = (const char *[]){ "mali_0_div" }, | ||
| 1846 | .num_parents = 1, | ||
| 1847 | .flags = CLK_SET_RATE_PARENT, | ||
| 1848 | }, | ||
| 1849 | }; | ||
| 1850 | |||
| 1851 | static struct clk_regmap g12a_mali_1_sel = { | ||
| 1852 | .data = &(struct clk_regmap_mux_data){ | ||
| 1853 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1854 | .mask = 0x7, | ||
| 1855 | .shift = 25, | ||
| 1856 | }, | ||
| 1857 | .hw.init = &(struct clk_init_data){ | ||
| 1858 | .name = "mali_1_sel", | ||
| 1859 | .ops = &clk_regmap_mux_ops, | ||
| 1860 | .parent_names = g12a_mali_0_1_parent_names, | ||
| 1861 | .num_parents = ARRAY_SIZE(g12a_mali_0_1_parent_names), | ||
| 1862 | .flags = CLK_SET_RATE_NO_REPARENT, | ||
| 1863 | }, | ||
| 1864 | }; | ||
| 1865 | |||
| 1866 | static struct clk_regmap g12a_mali_1_div = { | ||
| 1867 | .data = &(struct clk_regmap_div_data){ | ||
| 1868 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1869 | .shift = 16, | ||
| 1870 | .width = 7, | ||
| 1871 | }, | ||
| 1872 | .hw.init = &(struct clk_init_data){ | ||
| 1873 | .name = "mali_1_div", | ||
| 1874 | .ops = &clk_regmap_divider_ops, | ||
| 1875 | .parent_names = (const char *[]){ "mali_1_sel" }, | ||
| 1876 | .num_parents = 1, | ||
| 1877 | .flags = CLK_SET_RATE_NO_REPARENT, | ||
| 1878 | }, | ||
| 1879 | }; | ||
| 1880 | |||
| 1881 | static struct clk_regmap g12a_mali_1 = { | ||
| 1882 | .data = &(struct clk_regmap_gate_data){ | ||
| 1883 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1884 | .bit_idx = 24, | ||
| 1885 | }, | ||
| 1886 | .hw.init = &(struct clk_init_data){ | ||
| 1887 | .name = "mali_1", | ||
| 1888 | .ops = &clk_regmap_gate_ops, | ||
| 1889 | .parent_names = (const char *[]){ "mali_1_div" }, | ||
| 1890 | .num_parents = 1, | ||
| 1891 | .flags = CLK_SET_RATE_PARENT, | ||
| 1892 | }, | ||
| 1893 | }; | ||
| 1894 | |||
| 1895 | static const char * const g12a_mali_parent_names[] = { | ||
| 1896 | "mali_0", "mali_1" | ||
| 1897 | }; | ||
| 1898 | |||
| 1899 | static struct clk_regmap g12a_mali = { | ||
| 1900 | .data = &(struct clk_regmap_mux_data){ | ||
| 1901 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1902 | .mask = 1, | ||
| 1903 | .shift = 31, | ||
| 1904 | }, | ||
| 1905 | .hw.init = &(struct clk_init_data){ | ||
| 1906 | .name = "mali", | ||
| 1907 | .ops = &clk_regmap_mux_ops, | ||
| 1908 | .parent_names = g12a_mali_parent_names, | ||
| 1909 | .num_parents = 2, | ||
| 1910 | .flags = CLK_SET_RATE_NO_REPARENT, | ||
| 1911 | }, | ||
| 1912 | }; | ||
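The glitch-free switch mentioned in the comment above is what the mali_0/mali_1 duplication enables: software programs the branch that is not currently driving the GPU, then flips the top-level mux. A hedged sketch of that sequence, not code from this patch (the regmap handle and the helper are assumed; the bit positions are those from the mali_1 definitions above):

    /*
     * Illustrative only: reprogram the idle mali_1 branch, ungate it,
     * then move the final mux (bit 31) over to it so the GPU clock
     * never glitches. "map" is an assumed regmap for the HHI registers.
     */
    static void mali_switch_to_branch1(struct regmap *map, u32 sel, u32 div)
    {
            /* mali_1 parent select lives at [27:25], its divider at [22:16] */
            regmap_update_bits(map, HHI_MALI_CLK_CNTL, 0x7 << 25, sel << 25);
            regmap_update_bits(map, HHI_MALI_CLK_CNTL, 0x7f << 16, div << 16);
            regmap_update_bits(map, HHI_MALI_CLK_CNTL, BIT(24), BIT(24));  /* ungate mali_1 */
            regmap_update_bits(map, HHI_MALI_CLK_CNTL, BIT(31), BIT(31));  /* select mali_1 */
    }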
| 1913 | |||
| 1914 | /* Everything Else (EE) domain gates */ | ||
| 1915 | static MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0); | ||
| 1916 | static MESON_GATE(g12a_dos, HHI_GCLK_MPEG0, 1); | ||
| 1917 | static MESON_GATE(g12a_audio_locker, HHI_GCLK_MPEG0, 2); | ||
| 1918 | static MESON_GATE(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3); | ||
| 1919 | static MESON_GATE(g12a_eth_phy, HHI_GCLK_MPEG0, 4); | ||
| 1920 | static MESON_GATE(g12a_isa, HHI_GCLK_MPEG0, 5); | ||
| 1921 | static MESON_GATE(g12a_pl301, HHI_GCLK_MPEG0, 6); | ||
| 1922 | static MESON_GATE(g12a_periphs, HHI_GCLK_MPEG0, 7); | ||
| 1923 | static MESON_GATE(g12a_spicc_0, HHI_GCLK_MPEG0, 8); | ||
| 1924 | static MESON_GATE(g12a_i2c, HHI_GCLK_MPEG0, 9); | ||
| 1925 | static MESON_GATE(g12a_sana, HHI_GCLK_MPEG0, 10); | ||
| 1926 | static MESON_GATE(g12a_sd, HHI_GCLK_MPEG0, 11); | ||
| 1927 | static MESON_GATE(g12a_rng0, HHI_GCLK_MPEG0, 12); | ||
| 1928 | static MESON_GATE(g12a_uart0, HHI_GCLK_MPEG0, 13); | ||
| 1929 | static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14); | ||
| 1930 | static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19); | ||
| 1931 | static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20); | ||
| 1932 | static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23); | ||
| 1933 | static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 4); | ||
| 1934 | static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25); | ||
| 1935 | static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26); | ||
| 1936 | static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28); | ||
| 1937 | |||
| 1938 | static MESON_GATE(g12a_audio, HHI_GCLK_MPEG1, 0); | ||
| 1939 | static MESON_GATE(g12a_eth_core, HHI_GCLK_MPEG1, 3); | ||
| 1940 | static MESON_GATE(g12a_demux, HHI_GCLK_MPEG1, 4); | ||
| 1941 | static MESON_GATE(g12a_audio_ififo, HHI_GCLK_MPEG1, 11); | ||
| 1942 | static MESON_GATE(g12a_adc, HHI_GCLK_MPEG1, 13); | ||
| 1943 | static MESON_GATE(g12a_uart1, HHI_GCLK_MPEG1, 16); | ||
| 1944 | static MESON_GATE(g12a_g2d, HHI_GCLK_MPEG1, 20); | ||
| 1945 | static MESON_GATE(g12a_reset, HHI_GCLK_MPEG1, 23); | ||
| 1946 | static MESON_GATE(g12a_pcie_comb, HHI_GCLK_MPEG1, 24); | ||
| 1947 | static MESON_GATE(g12a_parser, HHI_GCLK_MPEG1, 25); | ||
| 1948 | static MESON_GATE(g12a_usb_general, HHI_GCLK_MPEG1, 26); | ||
| 1949 | static MESON_GATE(g12a_pcie_phy, HHI_GCLK_MPEG1, 27); | ||
| 1950 | static MESON_GATE(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29); | ||
| 1951 | |||
| 1952 | static MESON_GATE(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1); | ||
| 1953 | static MESON_GATE(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2); | ||
| 1954 | static MESON_GATE(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3); | ||
| 1955 | static MESON_GATE(g12a_htx_pclk, HHI_GCLK_MPEG2, 4); | ||
| 1956 | static MESON_GATE(g12a_bt656, HHI_GCLK_MPEG2, 6); | ||
| 1957 | static MESON_GATE(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8); | ||
| 1958 | static MESON_GATE(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11); | ||
| 1959 | static MESON_GATE(g12a_uart2, HHI_GCLK_MPEG2, 15); | ||
| 1960 | static MESON_GATE(g12a_vpu_intr, HHI_GCLK_MPEG2, 25); | ||
| 1961 | static MESON_GATE(g12a_gic, HHI_GCLK_MPEG2, 30); | ||
| 1962 | |||
| 1963 | static MESON_GATE(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1); | ||
| 1964 | static MESON_GATE(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2); | ||
| 1965 | static MESON_GATE(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3); | ||
| 1966 | static MESON_GATE(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4); | ||
| 1967 | static MESON_GATE(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5); | ||
| 1968 | static MESON_GATE(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6); | ||
| 1969 | static MESON_GATE(g12a_vclk2_other, HHI_GCLK_OTHER, 7); | ||
| 1970 | static MESON_GATE(g12a_vclk2_enci, HHI_GCLK_OTHER, 8); | ||
| 1971 | static MESON_GATE(g12a_vclk2_encp, HHI_GCLK_OTHER, 9); | ||
| 1972 | static MESON_GATE(g12a_dac_clk, HHI_GCLK_OTHER, 10); | ||
| 1973 | static MESON_GATE(g12a_aoclk_gate, HHI_GCLK_OTHER, 14); | ||
| 1974 | static MESON_GATE(g12a_iec958_gate, HHI_GCLK_OTHER, 16); | ||
| 1975 | static MESON_GATE(g12a_enc480p, HHI_GCLK_OTHER, 20); | ||
| 1976 | static MESON_GATE(g12a_rng1, HHI_GCLK_OTHER, 21); | ||
| 1977 | static MESON_GATE(g12a_vclk2_enct, HHI_GCLK_OTHER, 22); | ||
| 1978 | static MESON_GATE(g12a_vclk2_encl, HHI_GCLK_OTHER, 23); | ||
| 1979 | static MESON_GATE(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24); | ||
| 1980 | static MESON_GATE(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25); | ||
| 1981 | static MESON_GATE(g12a_vclk2_other1, HHI_GCLK_OTHER, 26); | ||
| 1982 | |||
| 1983 | static MESON_GATE_RO(g12a_dma, HHI_GCLK_OTHER2, 0); | ||
| 1984 | static MESON_GATE_RO(g12a_efuse, HHI_GCLK_OTHER2, 1); | ||
| 1985 | static MESON_GATE_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2); | ||
| 1986 | static MESON_GATE_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3); | ||
| 1987 | static MESON_GATE_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4); | ||
| 1988 | |||
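Each MESON_GATE()/MESON_GATE_RO() line above declares a simple clk81-parented gate. The macro itself is defined in the shared Meson clock headers, not in this hunk; a plausible expansion of the first line, assuming it follows the pattern used by the other Meson clock controllers:

    /* hedged sketch of what "static MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0);" expands to */
    static struct clk_regmap g12a_ddr = {
            .data = &(struct clk_regmap_gate_data){
                    .offset = HHI_GCLK_MPEG0,
                    .bit_idx = 0,
            },
            .hw.init = &(struct clk_init_data){
                    .name = "g12a_ddr",
                    .ops = &clk_regmap_gate_ops,          /* _ro_ops for MESON_GATE_RO */
                    .parent_names = (const char *[]){ "clk81" },
                    .num_parents = 1,
                    .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
            },
    };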
| 1989 | /* Array of all clocks provided by this provider */ | ||
| 1990 | static struct clk_hw_onecell_data g12a_hw_onecell_data = { | ||
| 1991 | .hws = { | ||
| 1992 | [CLKID_SYS_PLL] = &g12a_sys_pll.hw, | ||
| 1993 | [CLKID_FIXED_PLL] = &g12a_fixed_pll.hw, | ||
| 1994 | [CLKID_FCLK_DIV2] = &g12a_fclk_div2.hw, | ||
| 1995 | [CLKID_FCLK_DIV3] = &g12a_fclk_div3.hw, | ||
| 1996 | [CLKID_FCLK_DIV4] = &g12a_fclk_div4.hw, | ||
| 1997 | [CLKID_FCLK_DIV5] = &g12a_fclk_div5.hw, | ||
| 1998 | [CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw, | ||
| 1999 | [CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw, | ||
| 2000 | [CLKID_GP0_PLL] = &g12a_gp0_pll.hw, | ||
| 2001 | [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw, | ||
| 2002 | [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw, | ||
| 2003 | [CLKID_CLK81] = &g12a_clk81.hw, | ||
| 2004 | [CLKID_MPLL0] = &g12a_mpll0.hw, | ||
| 2005 | [CLKID_MPLL1] = &g12a_mpll1.hw, | ||
| 2006 | [CLKID_MPLL2] = &g12a_mpll2.hw, | ||
| 2007 | [CLKID_MPLL3] = &g12a_mpll3.hw, | ||
| 2008 | [CLKID_DDR] = &g12a_ddr.hw, | ||
| 2009 | [CLKID_DOS] = &g12a_dos.hw, | ||
| 2010 | [CLKID_AUDIO_LOCKER] = &g12a_audio_locker.hw, | ||
| 2011 | [CLKID_MIPI_DSI_HOST] = &g12a_mipi_dsi_host.hw, | ||
| 2012 | [CLKID_ETH_PHY] = &g12a_eth_phy.hw, | ||
| 2013 | [CLKID_ISA] = &g12a_isa.hw, | ||
| 2014 | [CLKID_PL301] = &g12a_pl301.hw, | ||
| 2015 | [CLKID_PERIPHS] = &g12a_periphs.hw, | ||
| 2016 | [CLKID_SPICC0] = &g12a_spicc_0.hw, | ||
| 2017 | [CLKID_I2C] = &g12a_i2c.hw, | ||
| 2018 | [CLKID_SANA] = &g12a_sana.hw, | ||
| 2019 | [CLKID_SD] = &g12a_sd.hw, | ||
| 2020 | [CLKID_RNG0] = &g12a_rng0.hw, | ||
| 2021 | [CLKID_UART0] = &g12a_uart0.hw, | ||
| 2022 | [CLKID_SPICC1] = &g12a_spicc_1.hw, | ||
| 2023 | [CLKID_HIU_IFACE] = &g12a_hiu_reg.hw, | ||
| 2024 | [CLKID_MIPI_DSI_PHY] = &g12a_mipi_dsi_phy.hw, | ||
| 2025 | [CLKID_ASSIST_MISC] = &g12a_assist_misc.hw, | ||
| 2026 | [CLKID_SD_EMMC_A] = &g12a_emmc_a.hw, | ||
| 2027 | [CLKID_SD_EMMC_B] = &g12a_emmc_b.hw, | ||
| 2028 | [CLKID_SD_EMMC_C] = &g12a_emmc_c.hw, | ||
| 2029 | [CLKID_AUDIO_CODEC] = &g12a_audio_codec.hw, | ||
| 2030 | [CLKID_AUDIO] = &g12a_audio.hw, | ||
| 2031 | [CLKID_ETH] = &g12a_eth_core.hw, | ||
| 2032 | [CLKID_DEMUX] = &g12a_demux.hw, | ||
| 2033 | [CLKID_AUDIO_IFIFO] = &g12a_audio_ififo.hw, | ||
| 2034 | [CLKID_ADC] = &g12a_adc.hw, | ||
| 2035 | [CLKID_UART1] = &g12a_uart1.hw, | ||
| 2036 | [CLKID_G2D] = &g12a_g2d.hw, | ||
| 2037 | [CLKID_RESET] = &g12a_reset.hw, | ||
| 2038 | [CLKID_PCIE_COMB] = &g12a_pcie_comb.hw, | ||
| 2039 | [CLKID_PARSER] = &g12a_parser.hw, | ||
| 2040 | [CLKID_USB] = &g12a_usb_general.hw, | ||
| 2041 | [CLKID_PCIE_PHY] = &g12a_pcie_phy.hw, | ||
| 2042 | [CLKID_AHB_ARB0] = &g12a_ahb_arb0.hw, | ||
| 2043 | [CLKID_AHB_DATA_BUS] = &g12a_ahb_data_bus.hw, | ||
| 2044 | [CLKID_AHB_CTRL_BUS] = &g12a_ahb_ctrl_bus.hw, | ||
| 2045 | [CLKID_HTX_HDCP22] = &g12a_htx_hdcp22.hw, | ||
| 2046 | [CLKID_HTX_PCLK] = &g12a_htx_pclk.hw, | ||
| 2047 | [CLKID_BT656] = &g12a_bt656.hw, | ||
| 2048 | [CLKID_USB1_DDR_BRIDGE] = &g12a_usb1_to_ddr.hw, | ||
| 2049 | [CLKID_MMC_PCLK] = &g12a_mmc_pclk.hw, | ||
| 2050 | [CLKID_UART2] = &g12a_uart2.hw, | ||
| 2051 | [CLKID_VPU_INTR] = &g12a_vpu_intr.hw, | ||
| 2052 | [CLKID_GIC] = &g12a_gic.hw, | ||
| 2053 | [CLKID_SD_EMMC_A_CLK0_SEL] = &g12a_sd_emmc_a_clk0_sel.hw, | ||
| 2054 | [CLKID_SD_EMMC_A_CLK0_DIV] = &g12a_sd_emmc_a_clk0_div.hw, | ||
| 2055 | [CLKID_SD_EMMC_A_CLK0] = &g12a_sd_emmc_a_clk0.hw, | ||
| 2056 | [CLKID_SD_EMMC_B_CLK0_SEL] = &g12a_sd_emmc_b_clk0_sel.hw, | ||
| 2057 | [CLKID_SD_EMMC_B_CLK0_DIV] = &g12a_sd_emmc_b_clk0_div.hw, | ||
| 2058 | [CLKID_SD_EMMC_B_CLK0] = &g12a_sd_emmc_b_clk0.hw, | ||
| 2059 | [CLKID_SD_EMMC_C_CLK0_SEL] = &g12a_sd_emmc_c_clk0_sel.hw, | ||
| 2060 | [CLKID_SD_EMMC_C_CLK0_DIV] = &g12a_sd_emmc_c_clk0_div.hw, | ||
| 2061 | [CLKID_SD_EMMC_C_CLK0] = &g12a_sd_emmc_c_clk0.hw, | ||
| 2062 | [CLKID_MPLL0_DIV] = &g12a_mpll0_div.hw, | ||
| 2063 | [CLKID_MPLL1_DIV] = &g12a_mpll1_div.hw, | ||
| 2064 | [CLKID_MPLL2_DIV] = &g12a_mpll2_div.hw, | ||
| 2065 | [CLKID_MPLL3_DIV] = &g12a_mpll3_div.hw, | ||
| 2066 | [CLKID_FCLK_DIV2_DIV] = &g12a_fclk_div2_div.hw, | ||
| 2067 | [CLKID_FCLK_DIV3_DIV] = &g12a_fclk_div3_div.hw, | ||
| 2068 | [CLKID_FCLK_DIV4_DIV] = &g12a_fclk_div4_div.hw, | ||
| 2069 | [CLKID_FCLK_DIV5_DIV] = &g12a_fclk_div5_div.hw, | ||
| 2070 | [CLKID_FCLK_DIV7_DIV] = &g12a_fclk_div7_div.hw, | ||
| 2071 | [CLKID_FCLK_DIV2P5_DIV] = &g12a_fclk_div2p5_div.hw, | ||
| 2072 | [CLKID_HIFI_PLL] = &g12a_hifi_pll.hw, | ||
| 2073 | [CLKID_VCLK2_VENCI0] = &g12a_vclk2_venci0.hw, | ||
| 2074 | [CLKID_VCLK2_VENCI1] = &g12a_vclk2_venci1.hw, | ||
| 2075 | [CLKID_VCLK2_VENCP0] = &g12a_vclk2_vencp0.hw, | ||
| 2076 | [CLKID_VCLK2_VENCP1] = &g12a_vclk2_vencp1.hw, | ||
| 2077 | [CLKID_VCLK2_VENCT0] = &g12a_vclk2_venct0.hw, | ||
| 2078 | [CLKID_VCLK2_VENCT1] = &g12a_vclk2_venct1.hw, | ||
| 2079 | [CLKID_VCLK2_OTHER] = &g12a_vclk2_other.hw, | ||
| 2080 | [CLKID_VCLK2_ENCI] = &g12a_vclk2_enci.hw, | ||
| 2081 | [CLKID_VCLK2_ENCP] = &g12a_vclk2_encp.hw, | ||
| 2082 | [CLKID_DAC_CLK] = &g12a_dac_clk.hw, | ||
| 2083 | [CLKID_AOCLK] = &g12a_aoclk_gate.hw, | ||
| 2084 | [CLKID_IEC958] = &g12a_iec958_gate.hw, | ||
| 2085 | [CLKID_ENC480P] = &g12a_enc480p.hw, | ||
| 2086 | [CLKID_RNG1] = &g12a_rng1.hw, | ||
| 2087 | [CLKID_VCLK2_ENCT] = &g12a_vclk2_enct.hw, | ||
| 2088 | [CLKID_VCLK2_ENCL] = &g12a_vclk2_encl.hw, | ||
| 2089 | [CLKID_VCLK2_VENCLMMC] = &g12a_vclk2_venclmmc.hw, | ||
| 2090 | [CLKID_VCLK2_VENCL] = &g12a_vclk2_vencl.hw, | ||
| 2091 | [CLKID_VCLK2_OTHER1] = &g12a_vclk2_other1.hw, | ||
| 2092 | [CLKID_FIXED_PLL_DCO] = &g12a_fixed_pll_dco.hw, | ||
| 2093 | [CLKID_SYS_PLL_DCO] = &g12a_sys_pll_dco.hw, | ||
| 2094 | [CLKID_GP0_PLL_DCO] = &g12a_gp0_pll_dco.hw, | ||
| 2095 | [CLKID_HIFI_PLL_DCO] = &g12a_hifi_pll_dco.hw, | ||
| 2096 | [CLKID_DMA] = &g12a_dma.hw, | ||
| 2097 | [CLKID_EFUSE] = &g12a_efuse.hw, | ||
| 2098 | [CLKID_ROM_BOOT] = &g12a_rom_boot.hw, | ||
| 2099 | [CLKID_RESET_SEC] = &g12a_reset_sec.hw, | ||
| 2100 | [CLKID_SEC_AHB_APB3] = &g12a_sec_ahb_apb3.hw, | ||
| 2101 | [CLKID_MPLL_PREDIV] = &g12a_mpll_prediv.hw, | ||
| 2102 | [CLKID_VPU_0_SEL] = &g12a_vpu_0_sel.hw, | ||
| 2103 | [CLKID_VPU_0_DIV] = &g12a_vpu_0_div.hw, | ||
| 2104 | [CLKID_VPU_0] = &g12a_vpu_0.hw, | ||
| 2105 | [CLKID_VPU_1_SEL] = &g12a_vpu_1_sel.hw, | ||
| 2106 | [CLKID_VPU_1_DIV] = &g12a_vpu_1_div.hw, | ||
| 2107 | [CLKID_VPU_1] = &g12a_vpu_1.hw, | ||
| 2108 | [CLKID_VPU] = &g12a_vpu.hw, | ||
| 2109 | [CLKID_VAPB_0_SEL] = &g12a_vapb_0_sel.hw, | ||
| 2110 | [CLKID_VAPB_0_DIV] = &g12a_vapb_0_div.hw, | ||
| 2111 | [CLKID_VAPB_0] = &g12a_vapb_0.hw, | ||
| 2112 | [CLKID_VAPB_1_SEL] = &g12a_vapb_1_sel.hw, | ||
| 2113 | [CLKID_VAPB_1_DIV] = &g12a_vapb_1_div.hw, | ||
| 2114 | [CLKID_VAPB_1] = &g12a_vapb_1.hw, | ||
| 2115 | [CLKID_VAPB_SEL] = &g12a_vapb_sel.hw, | ||
| 2116 | [CLKID_VAPB] = &g12a_vapb.hw, | ||
| 2117 | [CLKID_HDMI_PLL_DCO] = &g12a_hdmi_pll_dco.hw, | ||
| 2118 | [CLKID_HDMI_PLL_OD] = &g12a_hdmi_pll_od.hw, | ||
| 2119 | [CLKID_HDMI_PLL_OD2] = &g12a_hdmi_pll_od2.hw, | ||
| 2120 | [CLKID_HDMI_PLL] = &g12a_hdmi_pll.hw, | ||
| 2121 | [CLKID_VID_PLL] = &g12a_vid_pll_div.hw, | ||
| 2122 | [CLKID_VID_PLL_SEL] = &g12a_vid_pll_sel.hw, | ||
| 2123 | [CLKID_VID_PLL_DIV] = &g12a_vid_pll.hw, | ||
| 2124 | [CLKID_VCLK_SEL] = &g12a_vclk_sel.hw, | ||
| 2125 | [CLKID_VCLK2_SEL] = &g12a_vclk2_sel.hw, | ||
| 2126 | [CLKID_VCLK_INPUT] = &g12a_vclk_input.hw, | ||
| 2127 | [CLKID_VCLK2_INPUT] = &g12a_vclk2_input.hw, | ||
| 2128 | [CLKID_VCLK_DIV] = &g12a_vclk_div.hw, | ||
| 2129 | [CLKID_VCLK2_DIV] = &g12a_vclk2_div.hw, | ||
| 2130 | [CLKID_VCLK] = &g12a_vclk.hw, | ||
| 2131 | [CLKID_VCLK2] = &g12a_vclk2.hw, | ||
| 2132 | [CLKID_VCLK_DIV1] = &g12a_vclk_div1.hw, | ||
| 2133 | [CLKID_VCLK_DIV2_EN] = &g12a_vclk_div2_en.hw, | ||
| 2134 | [CLKID_VCLK_DIV4_EN] = &g12a_vclk_div4_en.hw, | ||
| 2135 | [CLKID_VCLK_DIV6_EN] = &g12a_vclk_div6_en.hw, | ||
| 2136 | [CLKID_VCLK_DIV12_EN] = &g12a_vclk_div12_en.hw, | ||
| 2137 | [CLKID_VCLK2_DIV1] = &g12a_vclk2_div1.hw, | ||
| 2138 | [CLKID_VCLK2_DIV2_EN] = &g12a_vclk2_div2_en.hw, | ||
| 2139 | [CLKID_VCLK2_DIV4_EN] = &g12a_vclk2_div4_en.hw, | ||
| 2140 | [CLKID_VCLK2_DIV6_EN] = &g12a_vclk2_div6_en.hw, | ||
| 2141 | [CLKID_VCLK2_DIV12_EN] = &g12a_vclk2_div12_en.hw, | ||
| 2142 | [CLKID_VCLK_DIV2] = &g12a_vclk_div2.hw, | ||
| 2143 | [CLKID_VCLK_DIV4] = &g12a_vclk_div4.hw, | ||
| 2144 | [CLKID_VCLK_DIV6] = &g12a_vclk_div6.hw, | ||
| 2145 | [CLKID_VCLK_DIV12] = &g12a_vclk_div12.hw, | ||
| 2146 | [CLKID_VCLK2_DIV2] = &g12a_vclk2_div2.hw, | ||
| 2147 | [CLKID_VCLK2_DIV4] = &g12a_vclk2_div4.hw, | ||
| 2148 | [CLKID_VCLK2_DIV6] = &g12a_vclk2_div6.hw, | ||
| 2149 | [CLKID_VCLK2_DIV12] = &g12a_vclk2_div12.hw, | ||
| 2150 | [CLKID_CTS_ENCI_SEL] = &g12a_cts_enci_sel.hw, | ||
| 2151 | [CLKID_CTS_ENCP_SEL] = &g12a_cts_encp_sel.hw, | ||
| 2152 | [CLKID_CTS_VDAC_SEL] = &g12a_cts_vdac_sel.hw, | ||
| 2153 | [CLKID_HDMI_TX_SEL] = &g12a_hdmi_tx_sel.hw, | ||
| 2154 | [CLKID_CTS_ENCI] = &g12a_cts_enci.hw, | ||
| 2155 | [CLKID_CTS_ENCP] = &g12a_cts_encp.hw, | ||
| 2156 | [CLKID_CTS_VDAC] = &g12a_cts_vdac.hw, | ||
| 2157 | [CLKID_HDMI_TX] = &g12a_hdmi_tx.hw, | ||
| 2158 | [CLKID_HDMI_SEL] = &g12a_hdmi_sel.hw, | ||
| 2159 | [CLKID_HDMI_DIV] = &g12a_hdmi_div.hw, | ||
| 2160 | [CLKID_HDMI] = &g12a_hdmi.hw, | ||
| 2161 | [CLKID_MALI_0_SEL] = &g12a_mali_0_sel.hw, | ||
| 2162 | [CLKID_MALI_0_DIV] = &g12a_mali_0_div.hw, | ||
| 2163 | [CLKID_MALI_0] = &g12a_mali_0.hw, | ||
| 2164 | [CLKID_MALI_1_SEL] = &g12a_mali_1_sel.hw, | ||
| 2165 | [CLKID_MALI_1_DIV] = &g12a_mali_1_div.hw, | ||
| 2166 | [CLKID_MALI_1] = &g12a_mali_1.hw, | ||
| 2167 | [CLKID_MALI] = &g12a_mali.hw, | ||
| 2168 | [CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw, | ||
| 2169 | [CLKID_MPLL_5OM] = &g12a_mpll_50m.hw, | ||
| 2170 | [NR_CLKS] = NULL, | ||
| 2171 | }, | ||
| 2172 | .num = NR_CLKS, | ||
| 2173 | }; | ||
| 2174 | |||
| 2175 | /* Convenience table to populate regmap in .probe */ | ||
| 2176 | static struct clk_regmap *const g12a_clk_regmaps[] = { | ||
| 2177 | &g12a_clk81, | ||
| 2178 | &g12a_dos, | ||
| 2179 | &g12a_ddr, | ||
| 2180 | &g12a_audio_locker, | ||
| 2181 | &g12a_mipi_dsi_host, | ||
| 2182 | &g12a_eth_phy, | ||
| 2183 | &g12a_isa, | ||
| 2184 | &g12a_pl301, | ||
| 2185 | &g12a_periphs, | ||
| 2186 | &g12a_spicc_0, | ||
| 2187 | &g12a_i2c, | ||
| 2188 | &g12a_sana, | ||
| 2189 | &g12a_sd, | ||
| 2190 | &g12a_rng0, | ||
| 2191 | &g12a_uart0, | ||
| 2192 | &g12a_spicc_1, | ||
| 2193 | &g12a_hiu_reg, | ||
| 2194 | &g12a_mipi_dsi_phy, | ||
| 2195 | &g12a_assist_misc, | ||
| 2196 | &g12a_emmc_a, | ||
| 2197 | &g12a_emmc_b, | ||
| 2198 | &g12a_emmc_c, | ||
| 2199 | &g12a_audio_codec, | ||
| 2200 | &g12a_audio, | ||
| 2201 | &g12a_eth_core, | ||
| 2202 | &g12a_demux, | ||
| 2203 | &g12a_audio_ififo, | ||
| 2204 | &g12a_adc, | ||
| 2205 | &g12a_uart1, | ||
| 2206 | &g12a_g2d, | ||
| 2207 | &g12a_reset, | ||
| 2208 | &g12a_pcie_comb, | ||
| 2209 | &g12a_parser, | ||
| 2210 | &g12a_usb_general, | ||
| 2211 | &g12a_pcie_phy, | ||
| 2212 | &g12a_ahb_arb0, | ||
| 2213 | &g12a_ahb_data_bus, | ||
| 2214 | &g12a_ahb_ctrl_bus, | ||
| 2215 | &g12a_htx_hdcp22, | ||
| 2216 | &g12a_htx_pclk, | ||
| 2217 | &g12a_bt656, | ||
| 2218 | &g12a_usb1_to_ddr, | ||
| 2219 | &g12a_mmc_pclk, | ||
| 2220 | &g12a_vpu_intr, | ||
| 2221 | &g12a_gic, | ||
| 2222 | &g12a_sd_emmc_a_clk0, | ||
| 2223 | &g12a_sd_emmc_b_clk0, | ||
| 2224 | &g12a_sd_emmc_c_clk0, | ||
| 2225 | &g12a_mpeg_clk_div, | ||
| 2226 | &g12a_sd_emmc_a_clk0_div, | ||
| 2227 | &g12a_sd_emmc_b_clk0_div, | ||
| 2228 | &g12a_sd_emmc_c_clk0_div, | ||
| 2229 | &g12a_mpeg_clk_sel, | ||
| 2230 | &g12a_sd_emmc_a_clk0_sel, | ||
| 2231 | &g12a_sd_emmc_b_clk0_sel, | ||
| 2232 | &g12a_sd_emmc_c_clk0_sel, | ||
| 2233 | &g12a_mpll0, | ||
| 2234 | &g12a_mpll1, | ||
| 2235 | &g12a_mpll2, | ||
| 2236 | &g12a_mpll3, | ||
| 2237 | &g12a_mpll0_div, | ||
| 2238 | &g12a_mpll1_div, | ||
| 2239 | &g12a_mpll2_div, | ||
| 2240 | &g12a_mpll3_div, | ||
| 2241 | &g12a_fixed_pll, | ||
| 2242 | &g12a_sys_pll, | ||
| 2243 | &g12a_gp0_pll, | ||
| 2244 | &g12a_hifi_pll, | ||
| 2245 | &g12a_vclk2_venci0, | ||
| 2246 | &g12a_vclk2_venci1, | ||
| 2247 | &g12a_vclk2_vencp0, | ||
| 2248 | &g12a_vclk2_vencp1, | ||
| 2249 | &g12a_vclk2_venct0, | ||
| 2250 | &g12a_vclk2_venct1, | ||
| 2251 | &g12a_vclk2_other, | ||
| 2252 | &g12a_vclk2_enci, | ||
| 2253 | &g12a_vclk2_encp, | ||
| 2254 | &g12a_dac_clk, | ||
| 2255 | &g12a_aoclk_gate, | ||
| 2256 | &g12a_iec958_gate, | ||
| 2257 | &g12a_enc480p, | ||
| 2258 | &g12a_rng1, | ||
| 2259 | &g12a_vclk2_enct, | ||
| 2260 | &g12a_vclk2_encl, | ||
| 2261 | &g12a_vclk2_venclmmc, | ||
| 2262 | &g12a_vclk2_vencl, | ||
| 2263 | &g12a_vclk2_other1, | ||
| 2264 | &g12a_fixed_pll_dco, | ||
| 2265 | &g12a_sys_pll_dco, | ||
| 2266 | &g12a_gp0_pll_dco, | ||
| 2267 | &g12a_hifi_pll_dco, | ||
| 2268 | &g12a_fclk_div2, | ||
| 2269 | &g12a_fclk_div3, | ||
| 2270 | &g12a_fclk_div4, | ||
| 2271 | &g12a_fclk_div5, | ||
| 2272 | &g12a_fclk_div7, | ||
| 2273 | &g12a_fclk_div2p5, | ||
| 2274 | &g12a_dma, | ||
| 2275 | &g12a_efuse, | ||
| 2276 | &g12a_rom_boot, | ||
| 2277 | &g12a_reset_sec, | ||
| 2278 | &g12a_sec_ahb_apb3, | ||
| 2279 | &g12a_vpu_0_sel, | ||
| 2280 | &g12a_vpu_0_div, | ||
| 2281 | &g12a_vpu_0, | ||
| 2282 | &g12a_vpu_1_sel, | ||
| 2283 | &g12a_vpu_1_div, | ||
| 2284 | &g12a_vpu_1, | ||
| 2285 | &g12a_vpu, | ||
| 2286 | &g12a_vapb_0_sel, | ||
| 2287 | &g12a_vapb_0_div, | ||
| 2288 | &g12a_vapb_0, | ||
| 2289 | &g12a_vapb_1_sel, | ||
| 2290 | &g12a_vapb_1_div, | ||
| 2291 | &g12a_vapb_1, | ||
| 2292 | &g12a_vapb_sel, | ||
| 2293 | &g12a_vapb, | ||
| 2294 | &g12a_hdmi_pll_dco, | ||
| 2295 | &g12a_hdmi_pll_od, | ||
| 2296 | &g12a_hdmi_pll_od2, | ||
| 2297 | &g12a_hdmi_pll, | ||
| 2298 | &g12a_vid_pll_div, | ||
| 2299 | &g12a_vid_pll_sel, | ||
| 2300 | &g12a_vid_pll, | ||
| 2301 | &g12a_vclk_sel, | ||
| 2302 | &g12a_vclk2_sel, | ||
| 2303 | &g12a_vclk_input, | ||
| 2304 | &g12a_vclk2_input, | ||
| 2305 | &g12a_vclk_div, | ||
| 2306 | &g12a_vclk2_div, | ||
| 2307 | &g12a_vclk, | ||
| 2308 | &g12a_vclk2, | ||
| 2309 | &g12a_vclk_div1, | ||
| 2310 | &g12a_vclk_div2_en, | ||
| 2311 | &g12a_vclk_div4_en, | ||
| 2312 | &g12a_vclk_div6_en, | ||
| 2313 | &g12a_vclk_div12_en, | ||
| 2314 | &g12a_vclk2_div1, | ||
| 2315 | &g12a_vclk2_div2_en, | ||
| 2316 | &g12a_vclk2_div4_en, | ||
| 2317 | &g12a_vclk2_div6_en, | ||
| 2318 | &g12a_vclk2_div12_en, | ||
| 2319 | &g12a_cts_enci_sel, | ||
| 2320 | &g12a_cts_encp_sel, | ||
| 2321 | &g12a_cts_vdac_sel, | ||
| 2322 | &g12a_hdmi_tx_sel, | ||
| 2323 | &g12a_cts_enci, | ||
| 2324 | &g12a_cts_encp, | ||
| 2325 | &g12a_cts_vdac, | ||
| 2326 | &g12a_hdmi_tx, | ||
| 2327 | &g12a_hdmi_sel, | ||
| 2328 | &g12a_hdmi_div, | ||
| 2329 | &g12a_hdmi, | ||
| 2330 | &g12a_mali_0_sel, | ||
| 2331 | &g12a_mali_0_div, | ||
| 2332 | &g12a_mali_0, | ||
| 2333 | &g12a_mali_1_sel, | ||
| 2334 | &g12a_mali_1_div, | ||
| 2335 | &g12a_mali_1, | ||
| 2336 | &g12a_mali, | ||
| 2337 | &g12a_mpll_50m, | ||
| 2338 | }; | ||
| 2339 | |||
| 2340 | static const struct meson_eeclkc_data g12a_clkc_data = { | ||
| 2341 | .regmap_clks = g12a_clk_regmaps, | ||
| 2342 | .regmap_clk_num = ARRAY_SIZE(g12a_clk_regmaps), | ||
| 2343 | .hw_onecell_data = &g12a_hw_onecell_data | ||
| 2344 | }; | ||
| 2345 | |||
| 2346 | static const struct of_device_id clkc_match_table[] = { | ||
| 2347 | { .compatible = "amlogic,g12a-clkc", .data = &g12a_clkc_data }, | ||
| 2348 | {} | ||
| 2349 | }; | ||
| 2350 | |||
| 2351 | static struct platform_driver g12a_driver = { | ||
| 2352 | .probe = meson_eeclkc_probe, | ||
| 2353 | .driver = { | ||
| 2354 | .name = "g12a-clkc", | ||
| 2355 | .of_match_table = clkc_match_table, | ||
| 2356 | }, | ||
| 2357 | }; | ||
| 2358 | |||
| 2359 | builtin_platform_driver(g12a_driver); | ||
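For context, the g12a_clkc_data table above is handed to the shared meson_eeclkc_probe() routine. A simplified sketch of what that probe path has to do — attach the HHI regmap to every clk_regmap, register each hw from the onecell table, then expose the table to DT consumers — follows; this is an illustration of the pattern, not the exact shared implementation, and error paths are trimmed.

static int example_eeclkc_probe(struct platform_device *pdev)
{
	const struct meson_eeclkc_data *data;
	struct device *dev = &pdev->dev;
	struct regmap *map;
	int i, ret;

	data = of_device_get_match_data(dev);
	if (!data)
		return -EINVAL;

	/* The EE clock controller registers sit in the HHI system controller */
	map = syscon_node_to_regmap(of_get_parent(dev->of_node));
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Populate each clk_regmap with the controller regmap */
	for (i = 0; i < data->regmap_clk_num; i++)
		data->regmap_clks[i]->map = map;

	/* Register every clock listed in the onecell table (it may be sparse) */
	for (i = 0; i < data->hw_onecell_data->num; i++) {
		if (!data->hw_onecell_data->hws[i])
			continue;

		ret = devm_clk_hw_register(dev, data->hw_onecell_data->hws[i]);
		if (ret)
			return ret;
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   data->hw_onecell_data);
}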
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h new file mode 100644 index 000000000000..f399dfe1401c --- /dev/null +++ b/drivers/clk/meson/g12a.h | |||
| @@ -0,0 +1,175 @@ | |||
| 1 | /* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2016 Amlogic, Inc. | ||
| 4 | * Author: Michael Turquette <mturquette@baylibre.com> | ||
| 5 | * | ||
| 6 | * Copyright (c) 2018 Amlogic, inc. | ||
| 7 | * Author: Qiufang Dai <qiufang.dai@amlogic.com> | ||
| 8 | * Author: Jian Hu <jian.hu@amlogic.com> | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | #ifndef __G12A_H | ||
| 12 | #define __G12A_H | ||
| 13 | |||
| 14 | /* | ||
| 15 | * Clock controller register offsets | ||
| 16 | * | ||
| 17 | * Register offsets from the data sheet must be multiplied by 4 before | ||
| 18 | * adding them to the base address to get the right value. | ||
| 19 | */ | ||
| 20 | #define HHI_MIPI_CNTL0 0x000 | ||
| 21 | #define HHI_MIPI_CNTL1 0x004 | ||
| 22 | #define HHI_MIPI_CNTL2 0x008 | ||
| 23 | #define HHI_MIPI_STS 0x00C | ||
| 24 | #define HHI_GP0_PLL_CNTL0 0x040 | ||
| 25 | #define HHI_GP0_PLL_CNTL1 0x044 | ||
| 26 | #define HHI_GP0_PLL_CNTL2 0x048 | ||
| 27 | #define HHI_GP0_PLL_CNTL3 0x04C | ||
| 28 | #define HHI_GP0_PLL_CNTL4 0x050 | ||
| 29 | #define HHI_GP0_PLL_CNTL5 0x054 | ||
| 30 | #define HHI_GP0_PLL_CNTL6 0x058 | ||
| 31 | #define HHI_GP0_PLL_STS 0x05C | ||
| 32 | #define HHI_PCIE_PLL_CNTL0 0x098 | ||
| 33 | #define HHI_PCIE_PLL_CNTL1 0x09C | ||
| 34 | #define HHI_PCIE_PLL_CNTL2 0x0A0 | ||
| 35 | #define HHI_PCIE_PLL_CNTL3 0x0A4 | ||
| 36 | #define HHI_PCIE_PLL_CNTL4 0x0A8 | ||
| 37 | #define HHI_PCIE_PLL_CNTL5 0x0AC | ||
| 38 | #define HHI_PCIE_PLL_STS 0x0B8 | ||
| 39 | #define HHI_HIFI_PLL_CNTL0 0x0D8 | ||
| 40 | #define HHI_HIFI_PLL_CNTL1 0x0DC | ||
| 41 | #define HHI_HIFI_PLL_CNTL2 0x0E0 | ||
| 42 | #define HHI_HIFI_PLL_CNTL3 0x0E4 | ||
| 43 | #define HHI_HIFI_PLL_CNTL4 0x0E8 | ||
| 44 | #define HHI_HIFI_PLL_CNTL5 0x0EC | ||
| 45 | #define HHI_HIFI_PLL_CNTL6 0x0F0 | ||
| 46 | #define HHI_VIID_CLK_DIV 0x128 | ||
| 47 | #define HHI_VIID_CLK_CNTL 0x12C | ||
| 48 | #define HHI_GCLK_MPEG0 0x140 | ||
| 49 | #define HHI_GCLK_MPEG1 0x144 | ||
| 50 | #define HHI_GCLK_MPEG2 0x148 | ||
| 51 | #define HHI_GCLK_OTHER 0x150 | ||
| 52 | #define HHI_GCLK_OTHER2 0x154 | ||
| 53 | #define HHI_VID_CLK_DIV 0x164 | ||
| 54 | #define HHI_MPEG_CLK_CNTL 0x174 | ||
| 55 | #define HHI_AUD_CLK_CNTL 0x178 | ||
| 56 | #define HHI_VID_CLK_CNTL 0x17c | ||
| 57 | #define HHI_TS_CLK_CNTL 0x190 | ||
| 58 | #define HHI_VID_CLK_CNTL2 0x194 | ||
| 59 | #define HHI_SYS_CPU_CLK_CNTL0 0x19c | ||
| 60 | #define HHI_VID_PLL_CLK_DIV 0x1A0 | ||
| 61 | #define HHI_MALI_CLK_CNTL 0x1b0 | ||
| 62 | #define HHI_VPU_CLKC_CNTL 0x1b4 | ||
| 63 | #define HHI_VPU_CLK_CNTL 0x1bC | ||
| 64 | #define HHI_HDMI_CLK_CNTL 0x1CC | ||
| 65 | #define HHI_VDEC_CLK_CNTL 0x1E0 | ||
| 66 | #define HHI_VDEC2_CLK_CNTL 0x1E4 | ||
| 67 | #define HHI_VDEC3_CLK_CNTL 0x1E8 | ||
| 68 | #define HHI_VDEC4_CLK_CNTL 0x1EC | ||
| 69 | #define HHI_HDCP22_CLK_CNTL 0x1F0 | ||
| 70 | #define HHI_VAPBCLK_CNTL 0x1F4 | ||
| 71 | #define HHI_VPU_CLKB_CNTL 0x20C | ||
| 72 | #define HHI_GEN_CLK_CNTL 0x228 | ||
| 73 | #define HHI_VDIN_MEAS_CLK_CNTL 0x250 | ||
| 74 | #define HHI_MIPIDSI_PHY_CLK_CNTL 0x254 | ||
| 75 | #define HHI_NAND_CLK_CNTL 0x25C | ||
| 76 | #define HHI_SD_EMMC_CLK_CNTL 0x264 | ||
| 77 | #define HHI_MPLL_CNTL0 0x278 | ||
| 78 | #define HHI_MPLL_CNTL1 0x27C | ||
| 79 | #define HHI_MPLL_CNTL2 0x280 | ||
| 80 | #define HHI_MPLL_CNTL3 0x284 | ||
| 81 | #define HHI_MPLL_CNTL4 0x288 | ||
| 82 | #define HHI_MPLL_CNTL5 0x28c | ||
| 83 | #define HHI_MPLL_CNTL6 0x290 | ||
| 84 | #define HHI_MPLL_CNTL7 0x294 | ||
| 85 | #define HHI_MPLL_CNTL8 0x298 | ||
| 86 | #define HHI_FIX_PLL_CNTL0 0x2A0 | ||
| 87 | #define HHI_FIX_PLL_CNTL1 0x2A4 | ||
| 88 | #define HHI_FIX_PLL_CNTL3 0x2AC | ||
| 89 | #define HHI_SYS_PLL_CNTL0 0x2f4 | ||
| 90 | #define HHI_SYS_PLL_CNTL1 0x2f8 | ||
| 91 | #define HHI_SYS_PLL_CNTL2 0x2fc | ||
| 92 | #define HHI_SYS_PLL_CNTL3 0x300 | ||
| 93 | #define HHI_SYS_PLL_CNTL4 0x304 | ||
| 94 | #define HHI_SYS_PLL_CNTL5 0x308 | ||
| 95 | #define HHI_SYS_PLL_CNTL6 0x30c | ||
| 96 | #define HHI_HDMI_PLL_CNTL0 0x320 | ||
| 97 | #define HHI_HDMI_PLL_CNTL1 0x324 | ||
| 98 | #define HHI_HDMI_PLL_CNTL2 0x328 | ||
| 99 | #define HHI_HDMI_PLL_CNTL3 0x32c | ||
| 100 | #define HHI_HDMI_PLL_CNTL4 0x330 | ||
| 101 | #define HHI_HDMI_PLL_CNTL5 0x334 | ||
| 102 | #define HHI_HDMI_PLL_CNTL6 0x338 | ||
| 103 | #define HHI_SPICC_CLK_CNTL 0x3dc | ||
| 104 | |||
| 105 | /* | ||
| 106 | * CLKID index values | ||
| 107 | * | ||
| 108 | * These indices are entirely contrived and do not map onto the hardware. | ||
| 109 | * It has now been decided to expose everything by default in the DT header: | ||
| 110 | * include/dt-bindings/clock/g12a-clkc.h. Only the clocks ids we don't want | ||
| 111 | * to expose, such as the internal muxes and dividers of composite clocks, | ||
| 112 | * will remain defined here. | ||
| 113 | */ | ||
| 114 | #define CLKID_MPEG_SEL 8 | ||
| 115 | #define CLKID_MPEG_DIV 9 | ||
| 116 | #define CLKID_SD_EMMC_A_CLK0_SEL 63 | ||
| 117 | #define CLKID_SD_EMMC_A_CLK0_DIV 64 | ||
| 118 | #define CLKID_SD_EMMC_B_CLK0_SEL 65 | ||
| 119 | #define CLKID_SD_EMMC_B_CLK0_DIV 66 | ||
| 120 | #define CLKID_SD_EMMC_C_CLK0_SEL 67 | ||
| 121 | #define CLKID_SD_EMMC_C_CLK0_DIV 68 | ||
| 122 | #define CLKID_MPLL0_DIV 69 | ||
| 123 | #define CLKID_MPLL1_DIV 70 | ||
| 124 | #define CLKID_MPLL2_DIV 71 | ||
| 125 | #define CLKID_MPLL3_DIV 72 | ||
| 126 | #define CLKID_MPLL_PREDIV 73 | ||
| 127 | #define CLKID_FCLK_DIV2_DIV 75 | ||
| 128 | #define CLKID_FCLK_DIV3_DIV 76 | ||
| 129 | #define CLKID_FCLK_DIV4_DIV 77 | ||
| 130 | #define CLKID_FCLK_DIV5_DIV 78 | ||
| 131 | #define CLKID_FCLK_DIV7_DIV 79 | ||
| 132 | #define CLKID_FCLK_DIV2P5_DIV 100 | ||
| 133 | #define CLKID_FIXED_PLL_DCO 101 | ||
| 134 | #define CLKID_SYS_PLL_DCO 102 | ||
| 135 | #define CLKID_GP0_PLL_DCO 103 | ||
| 136 | #define CLKID_HIFI_PLL_DCO 104 | ||
| 137 | #define CLKID_VPU_0_DIV 111 | ||
| 138 | #define CLKID_VPU_1_DIV 114 | ||
| 139 | #define CLKID_VAPB_0_DIV 118 | ||
| 140 | #define CLKID_VAPB_1_DIV 121 | ||
| 141 | #define CLKID_HDMI_PLL_DCO 125 | ||
| 142 | #define CLKID_HDMI_PLL_OD 126 | ||
| 143 | #define CLKID_HDMI_PLL_OD2 127 | ||
| 144 | #define CLKID_VID_PLL_SEL 130 | ||
| 145 | #define CLKID_VID_PLL_DIV 131 | ||
| 146 | #define CLKID_VCLK_SEL 132 | ||
| 147 | #define CLKID_VCLK2_SEL 133 | ||
| 148 | #define CLKID_VCLK_INPUT 134 | ||
| 149 | #define CLKID_VCLK2_INPUT 135 | ||
| 150 | #define CLKID_VCLK_DIV 136 | ||
| 151 | #define CLKID_VCLK2_DIV 137 | ||
| 152 | #define CLKID_VCLK_DIV2_EN 140 | ||
| 153 | #define CLKID_VCLK_DIV4_EN 141 | ||
| 154 | #define CLKID_VCLK_DIV6_EN 142 | ||
| 155 | #define CLKID_VCLK_DIV12_EN 143 | ||
| 156 | #define CLKID_VCLK2_DIV2_EN 144 | ||
| 157 | #define CLKID_VCLK2_DIV4_EN 145 | ||
| 158 | #define CLKID_VCLK2_DIV6_EN 146 | ||
| 159 | #define CLKID_VCLK2_DIV12_EN 147 | ||
| 160 | #define CLKID_CTS_ENCI_SEL 158 | ||
| 161 | #define CLKID_CTS_ENCP_SEL 159 | ||
| 162 | #define CLKID_CTS_VDAC_SEL 160 | ||
| 163 | #define CLKID_HDMI_TX_SEL 161 | ||
| 164 | #define CLKID_HDMI_SEL 166 | ||
| 165 | #define CLKID_HDMI_DIV 167 | ||
| 166 | #define CLKID_MALI_0_DIV 170 | ||
| 167 | #define CLKID_MALI_1_DIV 173 | ||
| 168 | #define CLKID_MPLL_5OM_DIV 176 | ||
| 169 | |||
| 170 | #define NR_CLKS 178 | ||
| 171 | |||
| 172 | /* include the CLKIDs that have been made part of the DT binding */ | ||
| 173 | #include <dt-bindings/clock/g12a-clkc.h> | ||
| 174 | |||
| 175 | #endif /* __G12A_H */ | ||
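The register-offset comment near the top of this header deserves a concrete example: the datasheet numbers the HHI registers as 32-bit word indices, while the offsets above are byte offsets into the regmap, so each datasheet index is multiplied by 4 (shifted left by two). The index value used below is taken as an assumption from the public datasheet, purely for illustration.

/* Datasheet word index -> byte offset used by the driver regmap */
#define HHI_BYTE_OFFSET(index)	((index) << 2)

/* Example (assumed datasheet value): HHI_GCLK_MPEG0 is listed at word   */
/* index 0x50, and HHI_BYTE_OFFSET(0x50) == 0x140, matching the define   */
/* above.                                                                 */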
diff --git a/drivers/clk/meson/gxbb-aoclk-32k.c b/drivers/clk/meson/gxbb-aoclk-32k.c deleted file mode 100644 index 680467141a1d..000000000000 --- a/drivers/clk/meson/gxbb-aoclk-32k.c +++ /dev/null | |||
| @@ -1,193 +0,0 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2017 BayLibre, SAS. | ||
| 4 | * Author: Neil Armstrong <narmstrong@baylibre.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/clk-provider.h> | ||
| 8 | #include <linux/bitfield.h> | ||
| 9 | #include <linux/regmap.h> | ||
| 10 | #include "gxbb-aoclk.h" | ||
| 11 | |||
| 12 | /* | ||
| 13 | * The AO Domain embeds a dual/divider to generate a more precise | ||
| 14 | * 32,768KHz clock for low-power suspend mode and CEC. | ||
| 15 | * ______ ______ | ||
| 16 | * | | | | | ||
| 17 | * ______ | Div1 |-| Cnt1 | ______ | ||
| 18 | * | | /|______| |______|\ | | | ||
| 19 | * Xtal-->| Gate |---| ______ ______ X-X--| Gate |--> | ||
| 20 | * |______| | \| | | |/ | |______| | ||
| 21 | * | | Div2 |-| Cnt2 | | | ||
| 22 | * | |______| |______| | | ||
| 23 | * |_______________________| | ||
| 24 | * | ||
| 25 | * The dividing can be switched to single or dual, with a counter | ||
| 26 | * for each divider to set when the switching is done. | ||
| 27 | * The entire dividing mechanism can be also bypassed. | ||
| 28 | */ | ||
| 29 | |||
| 30 | #define CLK_CNTL0_N1_MASK GENMASK(11, 0) | ||
| 31 | #define CLK_CNTL0_N2_MASK GENMASK(23, 12) | ||
| 32 | #define CLK_CNTL0_DUALDIV_EN BIT(28) | ||
| 33 | #define CLK_CNTL0_OUT_GATE_EN BIT(30) | ||
| 34 | #define CLK_CNTL0_IN_GATE_EN BIT(31) | ||
| 35 | |||
| 36 | #define CLK_CNTL1_M1_MASK GENMASK(11, 0) | ||
| 37 | #define CLK_CNTL1_M2_MASK GENMASK(23, 12) | ||
| 38 | #define CLK_CNTL1_BYPASS_EN BIT(24) | ||
| 39 | #define CLK_CNTL1_SELECT_OSC BIT(27) | ||
| 40 | |||
| 41 | #define PWR_CNTL_ALT_32K_SEL GENMASK(13, 10) | ||
| 42 | |||
| 43 | struct cec_32k_freq_table { | ||
| 44 | unsigned long parent_rate; | ||
| 45 | unsigned long target_rate; | ||
| 46 | bool dualdiv; | ||
| 47 | unsigned int n1; | ||
| 48 | unsigned int n2; | ||
| 49 | unsigned int m1; | ||
| 50 | unsigned int m2; | ||
| 51 | }; | ||
| 52 | |||
| 53 | static const struct cec_32k_freq_table aoclk_cec_32k_table[] = { | ||
| 54 | [0] = { | ||
| 55 | .parent_rate = 24000000, | ||
| 56 | .target_rate = 32768, | ||
| 57 | .dualdiv = true, | ||
| 58 | .n1 = 733, | ||
| 59 | .n2 = 732, | ||
| 60 | .m1 = 8, | ||
| 61 | .m2 = 11, | ||
| 62 | }, | ||
| 63 | }; | ||
| 64 | |||
| 65 | /* | ||
| 66 | * If CLK_CNTL0_DUALDIV_EN == 0 | ||
| 67 | * - will use N1 divider only | ||
| 68 | * If CLK_CNTL0_DUALDIV_EN == 1 | ||
| 69 | * - hold M1 cycles of N1 divider then changes to N2 | ||
| 70 | * - hold M2 cycles of N2 divider then changes to N1 | ||
| 71 | * Then we can get more accurate division. | ||
| 72 | */ | ||
| 73 | static unsigned long aoclk_cec_32k_recalc_rate(struct clk_hw *hw, | ||
| 74 | unsigned long parent_rate) | ||
| 75 | { | ||
| 76 | struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw); | ||
| 77 | unsigned long n1; | ||
| 78 | u32 reg0, reg1; | ||
| 79 | |||
| 80 | regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, ®0); | ||
| 81 | regmap_read(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, ®1); | ||
| 82 | |||
| 83 | if (reg1 & CLK_CNTL1_BYPASS_EN) | ||
| 84 | return parent_rate; | ||
| 85 | |||
| 86 | if (reg0 & CLK_CNTL0_DUALDIV_EN) { | ||
| 87 | unsigned long n2, m1, m2, f1, f2, p1, p2; | ||
| 88 | |||
| 89 | n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1; | ||
| 90 | n2 = FIELD_GET(CLK_CNTL0_N2_MASK, reg0) + 1; | ||
| 91 | |||
| 92 | m1 = FIELD_GET(CLK_CNTL1_M1_MASK, reg1) + 1; | ||
| 93 | m2 = FIELD_GET(CLK_CNTL1_M2_MASK, reg1) + 1; | ||
| 94 | |||
| 95 | f1 = DIV_ROUND_CLOSEST(parent_rate, n1); | ||
| 96 | f2 = DIV_ROUND_CLOSEST(parent_rate, n2); | ||
| 97 | |||
| 98 | p1 = DIV_ROUND_CLOSEST(100000000 * m1, f1 * (m1 + m2)); | ||
| 99 | p2 = DIV_ROUND_CLOSEST(100000000 * m2, f2 * (m1 + m2)); | ||
| 100 | |||
| 101 | return DIV_ROUND_UP(100000000, p1 + p2); | ||
| 102 | } | ||
| 103 | |||
| 104 | n1 = FIELD_GET(CLK_CNTL0_N1_MASK, reg0) + 1; | ||
| 105 | |||
| 106 | return DIV_ROUND_CLOSEST(parent_rate, n1); | ||
| 107 | } | ||
| 108 | |||
| 109 | static const struct cec_32k_freq_table *find_cec_32k_freq(unsigned long rate, | ||
| 110 | unsigned long prate) | ||
| 111 | { | ||
| 112 | int i; | ||
| 113 | |||
| 114 | for (i = 0 ; i < ARRAY_SIZE(aoclk_cec_32k_table) ; ++i) | ||
| 115 | if (aoclk_cec_32k_table[i].parent_rate == prate && | ||
| 116 | aoclk_cec_32k_table[i].target_rate == rate) | ||
| 117 | return &aoclk_cec_32k_table[i]; | ||
| 118 | |||
| 119 | return NULL; | ||
| 120 | } | ||
| 121 | |||
| 122 | static long aoclk_cec_32k_round_rate(struct clk_hw *hw, unsigned long rate, | ||
| 123 | unsigned long *prate) | ||
| 124 | { | ||
| 125 | const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate, | ||
| 126 | *prate); | ||
| 127 | |||
| 128 | /* If invalid return first one */ | ||
| 129 | if (!freq) | ||
| 130 | return aoclk_cec_32k_table[0].target_rate; | ||
| 131 | |||
| 132 | return freq->target_rate; | ||
| 133 | } | ||
| 134 | |||
| 135 | /* | ||
| 136 | * From the Amlogic init procedure, the IN and OUT gates needs to be handled | ||
| 137 | * in the init procedure to avoid any glitches. | ||
| 138 | */ | ||
| 139 | |||
| 140 | static int aoclk_cec_32k_set_rate(struct clk_hw *hw, unsigned long rate, | ||
| 141 | unsigned long parent_rate) | ||
| 142 | { | ||
| 143 | const struct cec_32k_freq_table *freq = find_cec_32k_freq(rate, | ||
| 144 | parent_rate); | ||
| 145 | struct aoclk_cec_32k *cec_32k = to_aoclk_cec_32k(hw); | ||
| 146 | u32 reg = 0; | ||
| 147 | |||
| 148 | if (!freq) | ||
| 149 | return -EINVAL; | ||
| 150 | |||
| 151 | /* Disable clock */ | ||
| 152 | regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, | ||
| 153 | CLK_CNTL0_IN_GATE_EN | CLK_CNTL0_OUT_GATE_EN, 0); | ||
| 154 | |||
| 155 | reg = FIELD_PREP(CLK_CNTL0_N1_MASK, freq->n1 - 1); | ||
| 156 | if (freq->dualdiv) | ||
| 157 | reg |= CLK_CNTL0_DUALDIV_EN | | ||
| 158 | FIELD_PREP(CLK_CNTL0_N2_MASK, freq->n2 - 1); | ||
| 159 | |||
| 160 | regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, reg); | ||
| 161 | |||
| 162 | reg = FIELD_PREP(CLK_CNTL1_M1_MASK, freq->m1 - 1); | ||
| 163 | if (freq->dualdiv) | ||
| 164 | reg |= FIELD_PREP(CLK_CNTL1_M2_MASK, freq->m2 - 1); | ||
| 165 | |||
| 166 | regmap_write(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL1, reg); | ||
| 167 | |||
| 168 | /* Enable clock */ | ||
| 169 | regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, | ||
| 170 | CLK_CNTL0_IN_GATE_EN, CLK_CNTL0_IN_GATE_EN); | ||
| 171 | |||
| 172 | udelay(200); | ||
| 173 | |||
| 174 | regmap_update_bits(cec_32k->regmap, AO_RTC_ALT_CLK_CNTL0, | ||
| 175 | CLK_CNTL0_OUT_GATE_EN, CLK_CNTL0_OUT_GATE_EN); | ||
| 176 | |||
| 177 | regmap_update_bits(cec_32k->regmap, AO_CRT_CLK_CNTL1, | ||
| 178 | CLK_CNTL1_SELECT_OSC, CLK_CNTL1_SELECT_OSC); | ||
| 179 | |||
| 180 | /* Select 32k from XTAL */ | ||
| 181 | regmap_update_bits(cec_32k->regmap, | ||
| 182 | AO_RTI_PWR_CNTL_REG0, | ||
| 183 | PWR_CNTL_ALT_32K_SEL, | ||
| 184 | FIELD_PREP(PWR_CNTL_ALT_32K_SEL, 4)); | ||
| 185 | |||
| 186 | return 0; | ||
| 187 | } | ||
| 188 | |||
| 189 | const struct clk_ops meson_aoclk_cec_32k_ops = { | ||
| 190 | .recalc_rate = aoclk_cec_32k_recalc_rate, | ||
| 191 | .round_rate = aoclk_cec_32k_round_rate, | ||
| 192 | .set_rate = aoclk_cec_32k_set_rate, | ||
| 193 | }; | ||
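The frequency table removed above (24 MHz crystal, n1 = 733, m1 = 8, n2 = 732, m2 = 11) is easier to follow once the recalc_rate() arithmetic is collapsed: the output alternates between m1 cycles of the /n1 divider and m2 cycles of the /n2 divider, so the average rate is parent * (m1 + m2) / (n1 * m1 + n2 * m2). A small stand-alone check of that reduction (plain user-space C, not kernel code):

#include <stdio.h>

/* Average rate of the dual divider described in the comment above. */
static unsigned long dualdiv_rate(unsigned long parent, unsigned int n1,
				  unsigned int m1, unsigned int n2,
				  unsigned int m2)
{
	unsigned long long num = (unsigned long long)parent * (m1 + m2);
	unsigned long long den = (unsigned long long)n1 * m1 +
				 (unsigned long long)n2 * m2;

	return (num + den / 2) / den;	/* round to closest */
}

int main(void)
{
	/* 24000000 * 19 / (733 * 8 + 732 * 11) = 456000000 / 13916 ~= 32768 Hz */
	printf("%lu\n", dualdiv_rate(24000000, 733, 8, 732, 11));
	return 0;
}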
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c index 42ed61d3c3fb..449f6ac189d8 100644 --- a/drivers/clk/meson/gxbb-aoclk.c +++ b/drivers/clk/meson/gxbb-aoclk.c | |||
| @@ -5,10 +5,23 @@ | |||
| 5 | */ | 5 | */ |
| 6 | #include <linux/platform_device.h> | 6 | #include <linux/platform_device.h> |
| 7 | #include <linux/mfd/syscon.h> | 7 | #include <linux/mfd/syscon.h> |
| 8 | #include "clk-regmap.h" | ||
| 9 | #include "meson-aoclk.h" | 8 | #include "meson-aoclk.h" |
| 10 | #include "gxbb-aoclk.h" | 9 | #include "gxbb-aoclk.h" |
| 11 | 10 | ||
| 11 | #include "clk-regmap.h" | ||
| 12 | #include "clk-dualdiv.h" | ||
| 13 | |||
| 14 | #define IN_PREFIX "ao-in-" | ||
| 15 | |||
| 16 | /* AO Configuration Clock registers offsets */ | ||
| 17 | #define AO_RTI_PWR_CNTL_REG1 0x0c | ||
| 18 | #define AO_RTI_PWR_CNTL_REG0 0x10 | ||
| 19 | #define AO_RTI_GEN_CNTL_REG0 0x40 | ||
| 20 | #define AO_OSCIN_CNTL 0x58 | ||
| 21 | #define AO_CRT_CLK_CNTL1 0x68 | ||
| 22 | #define AO_RTC_ALT_CLK_CNTL0 0x94 | ||
| 23 | #define AO_RTC_ALT_CLK_CNTL1 0x98 | ||
| 24 | |||
| 12 | #define GXBB_AO_GATE(_name, _bit) \ | 25 | #define GXBB_AO_GATE(_name, _bit) \ |
| 13 | static struct clk_regmap _name##_ao = { \ | 26 | static struct clk_regmap _name##_ao = { \ |
| 14 | .data = &(struct clk_regmap_gate_data) { \ | 27 | .data = &(struct clk_regmap_gate_data) { \ |
| @@ -18,7 +31,7 @@ static struct clk_regmap _name##_ao = { \ | |||
| 18 | .hw.init = &(struct clk_init_data) { \ | 31 | .hw.init = &(struct clk_init_data) { \ |
| 19 | .name = #_name "_ao", \ | 32 | .name = #_name "_ao", \ |
| 20 | .ops = &clk_regmap_gate_ops, \ | 33 | .ops = &clk_regmap_gate_ops, \ |
| 21 | .parent_names = (const char *[]){ "clk81" }, \ | 34 | .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk" }, \ |
| 22 | .num_parents = 1, \ | 35 | .num_parents = 1, \ |
| 23 | .flags = CLK_IGNORE_UNUSED, \ | 36 | .flags = CLK_IGNORE_UNUSED, \ |
| 24 | }, \ | 37 | }, \ |
| @@ -31,13 +44,174 @@ GXBB_AO_GATE(uart1, 3); | |||
| 31 | GXBB_AO_GATE(uart2, 5); | 44 | GXBB_AO_GATE(uart2, 5); |
| 32 | GXBB_AO_GATE(ir_blaster, 6); | 45 | GXBB_AO_GATE(ir_blaster, 6); |
| 33 | 46 | ||
| 34 | static struct aoclk_cec_32k cec_32k_ao = { | 47 | static struct clk_regmap ao_cts_oscin = { |
| 35 | .hw.init = &(struct clk_init_data) { | 48 | .data = &(struct clk_regmap_gate_data){ |
| 36 | .name = "cec_32k_ao", | 49 | .offset = AO_RTI_PWR_CNTL_REG0, |
| 37 | .ops = &meson_aoclk_cec_32k_ops, | 50 | .bit_idx = 6, |
| 38 | .parent_names = (const char *[]){ "xtal" }, | 51 | }, |
| 52 | .hw.init = &(struct clk_init_data){ | ||
| 53 | .name = "ao_cts_oscin", | ||
| 54 | .ops = &clk_regmap_gate_ro_ops, | ||
| 55 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, | ||
| 56 | .num_parents = 1, | ||
| 57 | }, | ||
| 58 | }; | ||
| 59 | |||
| 60 | static struct clk_regmap ao_32k_pre = { | ||
| 61 | .data = &(struct clk_regmap_gate_data){ | ||
| 62 | .offset = AO_RTC_ALT_CLK_CNTL0, | ||
| 63 | .bit_idx = 31, | ||
| 64 | }, | ||
| 65 | .hw.init = &(struct clk_init_data){ | ||
| 66 | .name = "ao_32k_pre", | ||
| 67 | .ops = &clk_regmap_gate_ops, | ||
| 68 | .parent_names = (const char *[]){ "ao_cts_oscin" }, | ||
| 69 | .num_parents = 1, | ||
| 70 | }, | ||
| 71 | }; | ||
| 72 | |||
| 73 | static const struct meson_clk_dualdiv_param gxbb_32k_div_table[] = { | ||
| 74 | { | ||
| 75 | .dual = 1, | ||
| 76 | .n1 = 733, | ||
| 77 | .m1 = 8, | ||
| 78 | .n2 = 732, | ||
| 79 | .m2 = 11, | ||
| 80 | }, {} | ||
| 81 | }; | ||
| 82 | |||
| 83 | static struct clk_regmap ao_32k_div = { | ||
| 84 | .data = &(struct meson_clk_dualdiv_data){ | ||
| 85 | .n1 = { | ||
| 86 | .reg_off = AO_RTC_ALT_CLK_CNTL0, | ||
| 87 | .shift = 0, | ||
| 88 | .width = 12, | ||
| 89 | }, | ||
| 90 | .n2 = { | ||
| 91 | .reg_off = AO_RTC_ALT_CLK_CNTL0, | ||
| 92 | .shift = 12, | ||
| 93 | .width = 12, | ||
| 94 | }, | ||
| 95 | .m1 = { | ||
| 96 | .reg_off = AO_RTC_ALT_CLK_CNTL1, | ||
| 97 | .shift = 0, | ||
| 98 | .width = 12, | ||
| 99 | }, | ||
| 100 | .m2 = { | ||
| 101 | .reg_off = AO_RTC_ALT_CLK_CNTL1, | ||
| 102 | .shift = 12, | ||
| 103 | .width = 12, | ||
| 104 | }, | ||
| 105 | .dual = { | ||
| 106 | .reg_off = AO_RTC_ALT_CLK_CNTL0, | ||
| 107 | .shift = 28, | ||
| 108 | .width = 1, | ||
| 109 | }, | ||
| 110 | .table = gxbb_32k_div_table, | ||
| 111 | }, | ||
| 112 | .hw.init = &(struct clk_init_data){ | ||
| 113 | .name = "ao_32k_div", | ||
| 114 | .ops = &meson_clk_dualdiv_ops, | ||
| 115 | .parent_names = (const char *[]){ "ao_32k_pre" }, | ||
| 116 | .num_parents = 1, | ||
| 117 | }, | ||
| 118 | }; | ||
| 119 | |||
| 120 | static struct clk_regmap ao_32k_sel = { | ||
| 121 | .data = &(struct clk_regmap_mux_data) { | ||
| 122 | .offset = AO_RTC_ALT_CLK_CNTL1, | ||
| 123 | .mask = 0x1, | ||
| 124 | .shift = 24, | ||
| 125 | .flags = CLK_MUX_ROUND_CLOSEST, | ||
| 126 | }, | ||
| 127 | .hw.init = &(struct clk_init_data){ | ||
| 128 | .name = "ao_32k_sel", | ||
| 129 | .ops = &clk_regmap_mux_ops, | ||
| 130 | .parent_names = (const char *[]){ "ao_32k_div", | ||
| 131 | "ao_32k_pre" }, | ||
| 132 | .num_parents = 2, | ||
| 133 | .flags = CLK_SET_RATE_PARENT, | ||
| 134 | }, | ||
| 135 | }; | ||
| 136 | |||
| 137 | static struct clk_regmap ao_32k = { | ||
| 138 | .data = &(struct clk_regmap_gate_data){ | ||
| 139 | .offset = AO_RTC_ALT_CLK_CNTL0, | ||
| 140 | .bit_idx = 30, | ||
| 141 | }, | ||
| 142 | .hw.init = &(struct clk_init_data){ | ||
| 143 | .name = "ao_32k", | ||
| 144 | .ops = &clk_regmap_gate_ops, | ||
| 145 | .parent_names = (const char *[]){ "ao_32k_sel" }, | ||
| 39 | .num_parents = 1, | 146 | .num_parents = 1, |
| 40 | .flags = CLK_IGNORE_UNUSED, | 147 | .flags = CLK_SET_RATE_PARENT, |
| 148 | }, | ||
| 149 | }; | ||
| 150 | |||
| 151 | static struct clk_regmap ao_cts_rtc_oscin = { | ||
| 152 | .data = &(struct clk_regmap_mux_data) { | ||
| 153 | .offset = AO_RTI_PWR_CNTL_REG0, | ||
| 154 | .mask = 0x7, | ||
| 155 | .shift = 10, | ||
| 156 | .table = (u32[]){ 1, 2, 3, 4 }, | ||
| 157 | .flags = CLK_MUX_ROUND_CLOSEST, | ||
| 158 | }, | ||
| 159 | .hw.init = &(struct clk_init_data){ | ||
| 160 | .name = "ao_cts_rtc_oscin", | ||
| 161 | .ops = &clk_regmap_mux_ops, | ||
| 162 | .parent_names = (const char *[]){ IN_PREFIX "ext-32k-0", | ||
| 163 | IN_PREFIX "ext-32k-1", | ||
| 164 | IN_PREFIX "ext-32k-2", | ||
| 165 | "ao_32k" }, | ||
| 166 | .num_parents = 4, | ||
| 167 | .flags = CLK_SET_RATE_PARENT, | ||
| 168 | }, | ||
| 169 | }; | ||
| 170 | |||
| 171 | static struct clk_regmap ao_clk81 = { | ||
| 172 | .data = &(struct clk_regmap_mux_data) { | ||
| 173 | .offset = AO_RTI_PWR_CNTL_REG0, | ||
| 174 | .mask = 0x1, | ||
| 175 | .shift = 0, | ||
| 176 | .flags = CLK_MUX_ROUND_CLOSEST, | ||
| 177 | }, | ||
| 178 | .hw.init = &(struct clk_init_data){ | ||
| 179 | .name = "ao_clk81", | ||
| 180 | .ops = &clk_regmap_mux_ro_ops, | ||
| 181 | .parent_names = (const char *[]){ IN_PREFIX "mpeg-clk", | ||
| 182 | "ao_cts_rtc_oscin" }, | ||
| 183 | .num_parents = 2, | ||
| 184 | .flags = CLK_SET_RATE_PARENT, | ||
| 185 | }, | ||
| 186 | }; | ||
| 187 | |||
| 188 | static struct clk_regmap ao_cts_cec = { | ||
| 189 | .data = &(struct clk_regmap_mux_data) { | ||
| 190 | .offset = AO_CRT_CLK_CNTL1, | ||
| 191 | .mask = 0x1, | ||
| 192 | .shift = 27, | ||
| 193 | .flags = CLK_MUX_ROUND_CLOSEST, | ||
| 194 | }, | ||
| 195 | .hw.init = &(struct clk_init_data){ | ||
| 196 | .name = "ao_cts_cec", | ||
| 197 | .ops = &clk_regmap_mux_ops, | ||
| 198 | /* | ||
| 199 | * FIXME: The 'fixme' parent obviously does not exist. | ||
| 200 | * | ||
| 201 | * ATM, CCF won't call get_parent() if num_parents is 1. It | ||
| 202 | * does not allow NULL as a parent name either. | ||
| 203 | * | ||
| 204 | * On this particular mux, we only know the input #1 parent | ||
| 205 | * but, on boot, unknown input #0 is set, so it is critical | ||
| 206 | * to call .get_parent() on it | ||
| 207 | * | ||
| 208 | * Until CCF gets fixed, adding this fake parent that won't | ||
| 209 | * ever be registered should work around the problem | ||
| 210 | */ | ||
| 211 | .parent_names = (const char *[]){ "fixme", | ||
| 212 | "ao_cts_rtc_oscin" }, | ||
| 213 | .num_parents = 2, | ||
| 214 | .flags = CLK_SET_RATE_PARENT, | ||
| 41 | }, | 215 | }, |
| 42 | }; | 216 | }; |
| 43 | 217 | ||
| @@ -50,13 +224,21 @@ static const unsigned int gxbb_aoclk_reset[] = { | |||
| 50 | [RESET_AO_IR_BLASTER] = 23, | 224 | [RESET_AO_IR_BLASTER] = 23, |
| 51 | }; | 225 | }; |
| 52 | 226 | ||
| 53 | static struct clk_regmap *gxbb_aoclk_gate[] = { | 227 | static struct clk_regmap *gxbb_aoclk[] = { |
| 54 | [CLKID_AO_REMOTE] = &remote_ao, | 228 | &remote_ao, |
| 55 | [CLKID_AO_I2C_MASTER] = &i2c_master_ao, | 229 | &i2c_master_ao, |
| 56 | [CLKID_AO_I2C_SLAVE] = &i2c_slave_ao, | 230 | &i2c_slave_ao, |
| 57 | [CLKID_AO_UART1] = &uart1_ao, | 231 | &uart1_ao, |
| 58 | [CLKID_AO_UART2] = &uart2_ao, | 232 | &uart2_ao, |
| 59 | [CLKID_AO_IR_BLASTER] = &ir_blaster_ao, | 233 | &ir_blaster_ao, |
| 234 | &ao_cts_oscin, | ||
| 235 | &ao_32k_pre, | ||
| 236 | &ao_32k_div, | ||
| 237 | &ao_32k_sel, | ||
| 238 | &ao_32k, | ||
| 239 | &ao_cts_rtc_oscin, | ||
| 240 | &ao_clk81, | ||
| 241 | &ao_cts_cec, | ||
| 60 | }; | 242 | }; |
| 61 | 243 | ||
| 62 | static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = { | 244 | static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = { |
| @@ -67,52 +249,38 @@ static const struct clk_hw_onecell_data gxbb_aoclk_onecell_data = { | |||
| 67 | [CLKID_AO_UART1] = &uart1_ao.hw, | 249 | [CLKID_AO_UART1] = &uart1_ao.hw, |
| 68 | [CLKID_AO_UART2] = &uart2_ao.hw, | 250 | [CLKID_AO_UART2] = &uart2_ao.hw, |
| 69 | [CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw, | 251 | [CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw, |
| 70 | [CLKID_AO_CEC_32K] = &cec_32k_ao.hw, | 252 | [CLKID_AO_CEC_32K] = &ao_cts_cec.hw, |
| 253 | [CLKID_AO_CTS_OSCIN] = &ao_cts_oscin.hw, | ||
| 254 | [CLKID_AO_32K_PRE] = &ao_32k_pre.hw, | ||
| 255 | [CLKID_AO_32K_DIV] = &ao_32k_div.hw, | ||
| 256 | [CLKID_AO_32K_SEL] = &ao_32k_sel.hw, | ||
| 257 | [CLKID_AO_32K] = &ao_32k.hw, | ||
| 258 | [CLKID_AO_CTS_RTC_OSCIN] = &ao_cts_rtc_oscin.hw, | ||
| 259 | [CLKID_AO_CLK81] = &ao_clk81.hw, | ||
| 71 | }, | 260 | }, |
| 72 | .num = NR_CLKS, | 261 | .num = NR_CLKS, |
| 73 | }; | 262 | }; |
| 74 | 263 | ||
| 75 | static int gxbb_register_cec_ao_32k(struct platform_device *pdev) | 264 | static const struct meson_aoclk_input gxbb_aoclk_inputs[] = { |
| 76 | { | 265 | { .name = "xtal", .required = true, }, |
| 77 | struct device *dev = &pdev->dev; | 266 | { .name = "mpeg-clk", .required = true, }, |
| 78 | struct regmap *regmap; | 267 | {. name = "ext-32k-0", .required = false, }, |
| 79 | int ret; | 268 | {. name = "ext-32k-1", .required = false, }, |
| 80 | 269 | {. name = "ext-32k-2", .required = false, }, | |
| 81 | regmap = syscon_node_to_regmap(of_get_parent(dev->of_node)); | 270 | }; |
| 82 | if (IS_ERR(regmap)) { | ||
| 83 | dev_err(dev, "failed to get regmap\n"); | ||
| 84 | return PTR_ERR(regmap); | ||
| 85 | } | ||
| 86 | |||
| 87 | /* Specific clocks */ | ||
| 88 | cec_32k_ao.regmap = regmap; | ||
| 89 | ret = devm_clk_hw_register(dev, &cec_32k_ao.hw); | ||
| 90 | if (ret) { | ||
| 91 | dev_err(&pdev->dev, "clk cec_32k_ao register failed.\n"); | ||
| 92 | return ret; | ||
| 93 | } | ||
| 94 | |||
| 95 | return 0; | ||
| 96 | } | ||
| 97 | 271 | ||
| 98 | static const struct meson_aoclk_data gxbb_aoclkc_data = { | 272 | static const struct meson_aoclk_data gxbb_aoclkc_data = { |
| 99 | .reset_reg = AO_RTI_GEN_CNTL_REG0, | 273 | .reset_reg = AO_RTI_GEN_CNTL_REG0, |
| 100 | .num_reset = ARRAY_SIZE(gxbb_aoclk_reset), | 274 | .num_reset = ARRAY_SIZE(gxbb_aoclk_reset), |
| 101 | .reset = gxbb_aoclk_reset, | 275 | .reset = gxbb_aoclk_reset, |
| 102 | .num_clks = ARRAY_SIZE(gxbb_aoclk_gate), | 276 | .num_clks = ARRAY_SIZE(gxbb_aoclk), |
| 103 | .clks = gxbb_aoclk_gate, | 277 | .clks = gxbb_aoclk, |
| 104 | .hw_data = &gxbb_aoclk_onecell_data, | 278 | .hw_data = &gxbb_aoclk_onecell_data, |
| 279 | .inputs = gxbb_aoclk_inputs, | ||
| 280 | .num_inputs = ARRAY_SIZE(gxbb_aoclk_inputs), | ||
| 281 | .input_prefix = IN_PREFIX, | ||
| 105 | }; | 282 | }; |
| 106 | 283 | ||
| 107 | static int gxbb_aoclkc_probe(struct platform_device *pdev) | ||
| 108 | { | ||
| 109 | int ret = gxbb_register_cec_ao_32k(pdev); | ||
| 110 | if (ret) | ||
| 111 | return ret; | ||
| 112 | |||
| 113 | return meson_aoclkc_probe(pdev); | ||
| 114 | } | ||
| 115 | |||
| 116 | static const struct of_device_id gxbb_aoclkc_match_table[] = { | 284 | static const struct of_device_id gxbb_aoclkc_match_table[] = { |
| 117 | { | 285 | { |
| 118 | .compatible = "amlogic,meson-gx-aoclkc", | 286 | .compatible = "amlogic,meson-gx-aoclkc", |
| @@ -122,7 +290,7 @@ static const struct of_device_id gxbb_aoclkc_match_table[] = { | |||
| 122 | }; | 290 | }; |
| 123 | 291 | ||
| 124 | static struct platform_driver gxbb_aoclkc_driver = { | 292 | static struct platform_driver gxbb_aoclkc_driver = { |
| 125 | .probe = gxbb_aoclkc_probe, | 293 | .probe = meson_aoclkc_probe, |
| 126 | .driver = { | 294 | .driver = { |
| 127 | .name = "gxbb-aoclkc", | 295 | .name = "gxbb-aoclkc", |
| 128 | .of_match_table = gxbb_aoclkc_match_table, | 296 | .of_match_table = gxbb_aoclkc_match_table, |
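The ao_cts_cec mux added above carries a FIXME because the parent wired to input #0 at boot is unknown; only input #1 (ao_cts_rtc_oscin) is described, so a consumer is expected to reparent the clock before relying on its rate. A consumer-side sketch follows; the clock-names used here ("core", "oscin") are assumptions chosen for the example, not taken from an existing binding.

/* Illustrative only: force the CEC clock onto its known parent. */
static int example_cec_clk_setup(struct device *dev)
{
	struct clk *cec, *rtc;
	int ret;

	cec = devm_clk_get(dev, "core");	/* ao_cts_cec */
	if (IS_ERR(cec))
		return PTR_ERR(cec);

	rtc = devm_clk_get(dev, "oscin");	/* ao_cts_rtc_oscin */
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	/* Move away from the unknown boot-time input #0 */
	ret = clk_set_parent(cec, rtc);
	if (ret)
		return ret;

	return clk_prepare_enable(cec);
}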
diff --git a/drivers/clk/meson/gxbb-aoclk.h b/drivers/clk/meson/gxbb-aoclk.h index c514493d989a..1db16f9b37d4 100644 --- a/drivers/clk/meson/gxbb-aoclk.h +++ b/drivers/clk/meson/gxbb-aoclk.h | |||
| @@ -7,25 +7,7 @@ | |||
| 7 | #ifndef __GXBB_AOCLKC_H | 7 | #ifndef __GXBB_AOCLKC_H |
| 8 | #define __GXBB_AOCLKC_H | 8 | #define __GXBB_AOCLKC_H |
| 9 | 9 | ||
| 10 | #define NR_CLKS 7 | 10 | #define NR_CLKS 14 |
| 11 | |||
| 12 | /* AO Configuration Clock registers offsets */ | ||
| 13 | #define AO_RTI_PWR_CNTL_REG1 0x0c | ||
| 14 | #define AO_RTI_PWR_CNTL_REG0 0x10 | ||
| 15 | #define AO_RTI_GEN_CNTL_REG0 0x40 | ||
| 16 | #define AO_OSCIN_CNTL 0x58 | ||
| 17 | #define AO_CRT_CLK_CNTL1 0x68 | ||
| 18 | #define AO_RTC_ALT_CLK_CNTL0 0x94 | ||
| 19 | #define AO_RTC_ALT_CLK_CNTL1 0x98 | ||
| 20 | |||
| 21 | struct aoclk_cec_32k { | ||
| 22 | struct clk_hw hw; | ||
| 23 | struct regmap *regmap; | ||
| 24 | }; | ||
| 25 | |||
| 26 | #define to_aoclk_cec_32k(_hw) container_of(_hw, struct aoclk_cec_32k, hw) | ||
| 27 | |||
| 28 | extern const struct clk_ops meson_aoclk_cec_32k_ops; | ||
| 29 | 11 | ||
| 30 | #include <dt-bindings/clock/gxbb-aoclkc.h> | 12 | #include <dt-bindings/clock/gxbb-aoclkc.h> |
| 31 | #include <dt-bindings/reset/gxbb-aoclkc.h> | 13 | #include <dt-bindings/reset/gxbb-aoclkc.h> |
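A note on the naming scheme introduced in gxbb-aoclk.c above and in gxbb.c below: the AO controller now takes its root clocks ("xtal", "mpeg-clk" and the optional ext-32k inputs) as explicit inputs and refers to them internally with the "ao-in-" prefix, while the EE controller does the same with "ee-in-", so the global "xtal"/"clk81" names no longer leak into the controllers. Such an input can be modelled as a 1:1 pass-through on the clock obtained from DT; the helper sketched below is hypothetical (its name and signature are assumptions, the shared clk-input.c helper may differ) and only illustrates the idea.

/* Hypothetical input "bridge": look up the DT clock by its binding name
 * and re-expose it under the prefixed internal name so the controller's
 * parent_names can reference it.
 */
static struct clk_hw *example_register_input(struct device *dev,
					     const char *dt_name,
					     const char *internal_name,
					     bool required)
{
	struct clk *clk;

	clk = devm_clk_get(dev, dt_name);
	if (IS_ERR(clk)) {
		if (!required && PTR_ERR(clk) == -ENOENT)
			return NULL;
		return ERR_CAST(clk);
	}

	/* 1:1 fixed factor: same rate, just a new name inside the controller */
	return clk_hw_register_fixed_factor(dev, internal_name,
					    __clk_get_name(clk), 0, 1, 1);
}

/* e.g. example_register_input(dev, "xtal", "ao-in-xtal", true); */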
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 65f2599e5243..04df2e208ed6 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c | |||
| @@ -4,17 +4,20 @@ | |||
| 4 | * Michael Turquette <mturquette@baylibre.com> | 4 | * Michael Turquette <mturquette@baylibre.com> |
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #include <linux/clk.h> | ||
| 8 | #include <linux/clk-provider.h> | 7 | #include <linux/clk-provider.h> |
| 9 | #include <linux/init.h> | 8 | #include <linux/init.h> |
| 10 | #include <linux/of_device.h> | 9 | #include <linux/of_device.h> |
| 11 | #include <linux/mfd/syscon.h> | ||
| 12 | #include <linux/platform_device.h> | 10 | #include <linux/platform_device.h> |
| 13 | #include <linux/regmap.h> | ||
| 14 | 11 | ||
| 15 | #include "clkc.h" | ||
| 16 | #include "gxbb.h" | 12 | #include "gxbb.h" |
| 13 | #include "clk-input.h" | ||
| 17 | #include "clk-regmap.h" | 14 | #include "clk-regmap.h" |
| 15 | #include "clk-pll.h" | ||
| 16 | #include "clk-mpll.h" | ||
| 17 | #include "meson-eeclk.h" | ||
| 18 | #include "vid-pll-div.h" | ||
| 19 | |||
| 20 | #define IN_PREFIX "ee-in-" | ||
| 18 | 21 | ||
| 19 | static DEFINE_SPINLOCK(meson_clk_lock); | 22 | static DEFINE_SPINLOCK(meson_clk_lock); |
| 20 | 23 | ||
| @@ -118,7 +121,7 @@ static struct clk_regmap gxbb_fixed_pll_dco = { | |||
| 118 | .hw.init = &(struct clk_init_data){ | 121 | .hw.init = &(struct clk_init_data){ |
| 119 | .name = "fixed_pll_dco", | 122 | .name = "fixed_pll_dco", |
| 120 | .ops = &meson_clk_pll_ro_ops, | 123 | .ops = &meson_clk_pll_ro_ops, |
| 121 | .parent_names = (const char *[]){ "xtal" }, | 124 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, |
| 122 | .num_parents = 1, | 125 | .num_parents = 1, |
| 123 | }, | 126 | }, |
| 124 | }; | 127 | }; |
| @@ -148,7 +151,7 @@ static struct clk_fixed_factor gxbb_hdmi_pll_pre_mult = { | |||
| 148 | .hw.init = &(struct clk_init_data){ | 151 | .hw.init = &(struct clk_init_data){ |
| 149 | .name = "hdmi_pll_pre_mult", | 152 | .name = "hdmi_pll_pre_mult", |
| 150 | .ops = &clk_fixed_factor_ops, | 153 | .ops = &clk_fixed_factor_ops, |
| 151 | .parent_names = (const char *[]){ "xtal" }, | 154 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, |
| 152 | .num_parents = 1, | 155 | .num_parents = 1, |
| 153 | }, | 156 | }, |
| 154 | }; | 157 | }; |
| @@ -241,7 +244,7 @@ static struct clk_regmap gxl_hdmi_pll_dco = { | |||
| 241 | .hw.init = &(struct clk_init_data){ | 244 | .hw.init = &(struct clk_init_data){ |
| 242 | .name = "hdmi_pll_dco", | 245 | .name = "hdmi_pll_dco", |
| 243 | .ops = &meson_clk_pll_ro_ops, | 246 | .ops = &meson_clk_pll_ro_ops, |
| 244 | .parent_names = (const char *[]){ "xtal" }, | 247 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, |
| 245 | .num_parents = 1, | 248 | .num_parents = 1, |
| 246 | /* | 249 | /* |
| 247 | * Display directly handle hdmi pll registers ATM, we need | 250 | * Display directly handle hdmi pll registers ATM, we need |
| @@ -378,7 +381,7 @@ static struct clk_regmap gxbb_sys_pll_dco = { | |||
| 378 | .hw.init = &(struct clk_init_data){ | 381 | .hw.init = &(struct clk_init_data){ |
| 379 | .name = "sys_pll_dco", | 382 | .name = "sys_pll_dco", |
| 380 | .ops = &meson_clk_pll_ro_ops, | 383 | .ops = &meson_clk_pll_ro_ops, |
| 381 | .parent_names = (const char *[]){ "xtal" }, | 384 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, |
| 382 | .num_parents = 1, | 385 | .num_parents = 1, |
| 383 | }, | 386 | }, |
| 384 | }; | 387 | }; |
| @@ -439,7 +442,7 @@ static struct clk_regmap gxbb_gp0_pll_dco = { | |||
| 439 | .hw.init = &(struct clk_init_data){ | 442 | .hw.init = &(struct clk_init_data){ |
| 440 | .name = "gp0_pll_dco", | 443 | .name = "gp0_pll_dco", |
| 441 | .ops = &meson_clk_pll_ops, | 444 | .ops = &meson_clk_pll_ops, |
| 442 | .parent_names = (const char *[]){ "xtal" }, | 445 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, |
| 443 | .num_parents = 1, | 446 | .num_parents = 1, |
| 444 | }, | 447 | }, |
| 445 | }; | 448 | }; |
| @@ -491,7 +494,7 @@ static struct clk_regmap gxl_gp0_pll_dco = { | |||
| 491 | .hw.init = &(struct clk_init_data){ | 494 | .hw.init = &(struct clk_init_data){ |
| 492 | .name = "gp0_pll_dco", | 495 | .name = "gp0_pll_dco", |
| 493 | .ops = &meson_clk_pll_ops, | 496 | .ops = &meson_clk_pll_ops, |
| 494 | .parent_names = (const char *[]){ "xtal" }, | 497 | .parent_names = (const char *[]){ IN_PREFIX "xtal" }, |
| 495 | .num_parents = 1, | 498 | .num_parents = 1, |
| 496 | }, | 499 | }, |
| 497 | }; | 500 | }; |
| @@ -789,7 +792,7 @@ static struct clk_regmap gxbb_mpll2 = { | |||
| 789 | 792 | ||
| 790 | static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 }; | 793 | static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 }; |
| 791 | static const char * const clk81_parent_names[] = { | 794 | static const char * const clk81_parent_names[] = { |
| 792 | "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4", | 795 | IN_PREFIX "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4", |
| 793 | "fclk_div3", "fclk_div5" | 796 | "fclk_div3", "fclk_div5" |
| 794 | }; | 797 | }; |
| 795 | 798 | ||
| @@ -852,7 +855,7 @@ static struct clk_regmap gxbb_sar_adc_clk_sel = { | |||
| 852 | .name = "sar_adc_clk_sel", | 855 | .name = "sar_adc_clk_sel", |
| 853 | .ops = &clk_regmap_mux_ops, | 856 | .ops = &clk_regmap_mux_ops, |
| 854 | /* NOTE: The datasheet doesn't list the parents for bit 10 */ | 857 | /* NOTE: The datasheet doesn't list the parents for bit 10 */ |
| 855 | .parent_names = (const char *[]){ "xtal", "clk81", }, | 858 | .parent_names = (const char *[]){ IN_PREFIX "xtal", "clk81", }, |
| 856 | .num_parents = 2, | 859 | .num_parents = 2, |
| 857 | }, | 860 | }, |
| 858 | }; | 861 | }; |
| @@ -891,7 +894,7 @@ static struct clk_regmap gxbb_sar_adc_clk = { | |||
| 891 | */ | 894 | */ |
| 892 | 895 | ||
| 893 | static const char * const gxbb_mali_0_1_parent_names[] = { | 896 | static const char * const gxbb_mali_0_1_parent_names[] = { |
| 894 | "xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7", | 897 | IN_PREFIX "xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7", |
| 895 | "fclk_div4", "fclk_div3", "fclk_div5" | 898 | "fclk_div4", "fclk_div3", "fclk_div5" |
| 896 | }; | 899 | }; |
| 897 | 900 | ||
| @@ -1153,7 +1156,7 @@ static struct clk_regmap gxbb_32k_clk = { | |||
| 1153 | }; | 1156 | }; |
| 1154 | 1157 | ||
| 1155 | static const char * const gxbb_32k_clk_parent_names[] = { | 1158 | static const char * const gxbb_32k_clk_parent_names[] = { |
| 1156 | "xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5" | 1159 | IN_PREFIX "xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5" |
| 1157 | }; | 1160 | }; |
| 1158 | 1161 | ||
| 1159 | static struct clk_regmap gxbb_32k_clk_sel = { | 1162 | static struct clk_regmap gxbb_32k_clk_sel = { |
| @@ -1172,7 +1175,7 @@ static struct clk_regmap gxbb_32k_clk_sel = { | |||
| 1172 | }; | 1175 | }; |
| 1173 | 1176 | ||
| 1174 | static const char * const gxbb_sd_emmc_clk0_parent_names[] = { | 1177 | static const char * const gxbb_sd_emmc_clk0_parent_names[] = { |
| 1175 | "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7", | 1178 | IN_PREFIX "xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7", |
| 1176 | 1179 | ||
| 1177 | /* | 1180 | /* |
| 1178 | * Following these parent clocks, we should also have had mpll2, mpll3 | 1181 | * Following these parent clocks, we should also have had mpll2, mpll3 |
| @@ -2138,7 +2141,7 @@ static struct clk_regmap gxbb_hdmi_tx = { | |||
| 2138 | /* HDMI Clocks */ | 2141 | /* HDMI Clocks */ |
| 2139 | 2142 | ||
| 2140 | static const char * const gxbb_hdmi_parent_names[] = { | 2143 | static const char * const gxbb_hdmi_parent_names[] = { |
| 2141 | "xtal", "fclk_div4", "fclk_div3", "fclk_div5" | 2144 | IN_PREFIX "xtal", "fclk_div4", "fclk_div3", "fclk_div5" |
| 2142 | }; | 2145 | }; |
| 2143 | 2146 | ||
| 2144 | static struct clk_regmap gxbb_hdmi_sel = { | 2147 | static struct clk_regmap gxbb_hdmi_sel = { |
| @@ -2285,7 +2288,7 @@ static struct clk_regmap gxbb_vdec_hevc = { | |||
| 2285 | static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8, | 2288 | static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8, |
| 2286 | 9, 10, 11, 13, 14, }; | 2289 | 9, 10, 11, 13, 14, }; |
| 2287 | static const char * const gen_clk_parent_names[] = { | 2290 | static const char * const gen_clk_parent_names[] = { |
| 2288 | "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2", | 2291 | IN_PREFIX "xtal", "vdec_1", "vdec_hevc", "mpll0", "mpll1", "mpll2", |
| 2289 | "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll", | 2292 | "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "gp0_pll", |
| 2290 | }; | 2293 | }; |
| 2291 | 2294 | ||
| @@ -2854,6 +2857,192 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = { | |||
| 2854 | }; | 2857 | }; |
| 2855 | 2858 | ||
| 2856 | static struct clk_regmap *const gxbb_clk_regmaps[] = { | 2859 | static struct clk_regmap *const gxbb_clk_regmaps[] = { |
| 2860 | &gxbb_clk81, | ||
| 2861 | &gxbb_ddr, | ||
| 2862 | &gxbb_dos, | ||
| 2863 | &gxbb_isa, | ||
| 2864 | &gxbb_pl301, | ||
| 2865 | &gxbb_periphs, | ||
| 2866 | &gxbb_spicc, | ||
| 2867 | &gxbb_i2c, | ||
| 2868 | &gxbb_sar_adc, | ||
| 2869 | &gxbb_smart_card, | ||
| 2870 | &gxbb_rng0, | ||
| 2871 | &gxbb_uart0, | ||
| 2872 | &gxbb_sdhc, | ||
| 2873 | &gxbb_stream, | ||
| 2874 | &gxbb_async_fifo, | ||
| 2875 | &gxbb_sdio, | ||
| 2876 | &gxbb_abuf, | ||
| 2877 | &gxbb_hiu_iface, | ||
| 2878 | &gxbb_assist_misc, | ||
| 2879 | &gxbb_spi, | ||
| 2880 | &gxbb_i2s_spdif, | ||
| 2881 | &gxbb_eth, | ||
| 2882 | &gxbb_demux, | ||
| 2883 | &gxbb_aiu_glue, | ||
| 2884 | &gxbb_iec958, | ||
| 2885 | &gxbb_i2s_out, | ||
| 2886 | &gxbb_amclk, | ||
| 2887 | &gxbb_aififo2, | ||
| 2888 | &gxbb_mixer, | ||
| 2889 | &gxbb_mixer_iface, | ||
| 2890 | &gxbb_adc, | ||
| 2891 | &gxbb_blkmv, | ||
| 2892 | &gxbb_aiu, | ||
| 2893 | &gxbb_uart1, | ||
| 2894 | &gxbb_g2d, | ||
| 2895 | &gxbb_usb0, | ||
| 2896 | &gxbb_usb1, | ||
| 2897 | &gxbb_reset, | ||
| 2898 | &gxbb_nand, | ||
| 2899 | &gxbb_dos_parser, | ||
| 2900 | &gxbb_usb, | ||
| 2901 | &gxbb_vdin1, | ||
| 2902 | &gxbb_ahb_arb0, | ||
| 2903 | &gxbb_efuse, | ||
| 2904 | &gxbb_boot_rom, | ||
| 2905 | &gxbb_ahb_data_bus, | ||
| 2906 | &gxbb_ahb_ctrl_bus, | ||
| 2907 | &gxbb_hdmi_intr_sync, | ||
| 2908 | &gxbb_hdmi_pclk, | ||
| 2909 | &gxbb_usb1_ddr_bridge, | ||
| 2910 | &gxbb_usb0_ddr_bridge, | ||
| 2911 | &gxbb_mmc_pclk, | ||
| 2912 | &gxbb_dvin, | ||
| 2913 | &gxbb_uart2, | ||
| 2914 | &gxbb_sana, | ||
| 2915 | &gxbb_vpu_intr, | ||
| 2916 | &gxbb_sec_ahb_ahb3_bridge, | ||
| 2917 | &gxbb_clk81_a53, | ||
| 2918 | &gxbb_vclk2_venci0, | ||
| 2919 | &gxbb_vclk2_venci1, | ||
| 2920 | &gxbb_vclk2_vencp0, | ||
| 2921 | &gxbb_vclk2_vencp1, | ||
| 2922 | &gxbb_gclk_venci_int0, | ||
| 2923 | &gxbb_gclk_vencp_int, | ||
| 2924 | &gxbb_dac_clk, | ||
| 2925 | &gxbb_aoclk_gate, | ||
| 2926 | &gxbb_iec958_gate, | ||
| 2927 | &gxbb_enc480p, | ||
| 2928 | &gxbb_rng1, | ||
| 2929 | &gxbb_gclk_venci_int1, | ||
| 2930 | &gxbb_vclk2_venclmcc, | ||
| 2931 | &gxbb_vclk2_vencl, | ||
| 2932 | &gxbb_vclk_other, | ||
| 2933 | &gxbb_edp, | ||
| 2934 | &gxbb_ao_media_cpu, | ||
| 2935 | &gxbb_ao_ahb_sram, | ||
| 2936 | &gxbb_ao_ahb_bus, | ||
| 2937 | &gxbb_ao_iface, | ||
| 2938 | &gxbb_ao_i2c, | ||
| 2939 | &gxbb_emmc_a, | ||
| 2940 | &gxbb_emmc_b, | ||
| 2941 | &gxbb_emmc_c, | ||
| 2942 | &gxbb_sar_adc_clk, | ||
| 2943 | &gxbb_mali_0, | ||
| 2944 | &gxbb_mali_1, | ||
| 2945 | &gxbb_cts_amclk, | ||
| 2946 | &gxbb_cts_mclk_i958, | ||
| 2947 | &gxbb_32k_clk, | ||
| 2948 | &gxbb_sd_emmc_a_clk0, | ||
| 2949 | &gxbb_sd_emmc_b_clk0, | ||
| 2950 | &gxbb_sd_emmc_c_clk0, | ||
| 2951 | &gxbb_vpu_0, | ||
| 2952 | &gxbb_vpu_1, | ||
| 2953 | &gxbb_vapb_0, | ||
| 2954 | &gxbb_vapb_1, | ||
| 2955 | &gxbb_vapb, | ||
| 2956 | &gxbb_mpeg_clk_div, | ||
| 2957 | &gxbb_sar_adc_clk_div, | ||
| 2958 | &gxbb_mali_0_div, | ||
| 2959 | &gxbb_mali_1_div, | ||
| 2960 | &gxbb_cts_mclk_i958_div, | ||
| 2961 | &gxbb_32k_clk_div, | ||
| 2962 | &gxbb_sd_emmc_a_clk0_div, | ||
| 2963 | &gxbb_sd_emmc_b_clk0_div, | ||
| 2964 | &gxbb_sd_emmc_c_clk0_div, | ||
| 2965 | &gxbb_vpu_0_div, | ||
| 2966 | &gxbb_vpu_1_div, | ||
| 2967 | &gxbb_vapb_0_div, | ||
| 2968 | &gxbb_vapb_1_div, | ||
| 2969 | &gxbb_mpeg_clk_sel, | ||
| 2970 | &gxbb_sar_adc_clk_sel, | ||
| 2971 | &gxbb_mali_0_sel, | ||
| 2972 | &gxbb_mali_1_sel, | ||
| 2973 | &gxbb_mali, | ||
| 2974 | &gxbb_cts_amclk_sel, | ||
| 2975 | &gxbb_cts_mclk_i958_sel, | ||
| 2976 | &gxbb_cts_i958, | ||
| 2977 | &gxbb_32k_clk_sel, | ||
| 2978 | &gxbb_sd_emmc_a_clk0_sel, | ||
| 2979 | &gxbb_sd_emmc_b_clk0_sel, | ||
| 2980 | &gxbb_sd_emmc_c_clk0_sel, | ||
| 2981 | &gxbb_vpu_0_sel, | ||
| 2982 | &gxbb_vpu_1_sel, | ||
| 2983 | &gxbb_vpu, | ||
| 2984 | &gxbb_vapb_0_sel, | ||
| 2985 | &gxbb_vapb_1_sel, | ||
| 2986 | &gxbb_vapb_sel, | ||
| 2987 | &gxbb_mpll0, | ||
| 2988 | &gxbb_mpll1, | ||
| 2989 | &gxbb_mpll2, | ||
| 2990 | &gxbb_mpll0_div, | ||
| 2991 | &gxbb_mpll1_div, | ||
| 2992 | &gxbb_mpll2_div, | ||
| 2993 | &gxbb_cts_amclk_div, | ||
| 2994 | &gxbb_fixed_pll, | ||
| 2995 | &gxbb_sys_pll, | ||
| 2996 | &gxbb_mpll_prediv, | ||
| 2997 | &gxbb_fclk_div2, | ||
| 2998 | &gxbb_fclk_div3, | ||
| 2999 | &gxbb_fclk_div4, | ||
| 3000 | &gxbb_fclk_div5, | ||
| 3001 | &gxbb_fclk_div7, | ||
| 3002 | &gxbb_vdec_1_sel, | ||
| 3003 | &gxbb_vdec_1_div, | ||
| 3004 | &gxbb_vdec_1, | ||
| 3005 | &gxbb_vdec_hevc_sel, | ||
| 3006 | &gxbb_vdec_hevc_div, | ||
| 3007 | &gxbb_vdec_hevc, | ||
| 3008 | &gxbb_gen_clk_sel, | ||
| 3009 | &gxbb_gen_clk_div, | ||
| 3010 | &gxbb_gen_clk, | ||
| 3011 | &gxbb_fixed_pll_dco, | ||
| 3012 | &gxbb_sys_pll_dco, | ||
| 3013 | &gxbb_gp0_pll, | ||
| 3014 | &gxbb_vid_pll, | ||
| 3015 | &gxbb_vid_pll_sel, | ||
| 3016 | &gxbb_vid_pll_div, | ||
| 3017 | &gxbb_vclk, | ||
| 3018 | &gxbb_vclk_sel, | ||
| 3019 | &gxbb_vclk_div, | ||
| 3020 | &gxbb_vclk_input, | ||
| 3021 | &gxbb_vclk_div1, | ||
| 3022 | &gxbb_vclk_div2_en, | ||
| 3023 | &gxbb_vclk_div4_en, | ||
| 3024 | &gxbb_vclk_div6_en, | ||
| 3025 | &gxbb_vclk_div12_en, | ||
| 3026 | &gxbb_vclk2, | ||
| 3027 | &gxbb_vclk2_sel, | ||
| 3028 | &gxbb_vclk2_div, | ||
| 3029 | &gxbb_vclk2_input, | ||
| 3030 | &gxbb_vclk2_div1, | ||
| 3031 | &gxbb_vclk2_div2_en, | ||
| 3032 | &gxbb_vclk2_div4_en, | ||
| 3033 | &gxbb_vclk2_div6_en, | ||
| 3034 | &gxbb_vclk2_div12_en, | ||
| 3035 | &gxbb_cts_enci, | ||
| 3036 | &gxbb_cts_enci_sel, | ||
| 3037 | &gxbb_cts_encp, | ||
| 3038 | &gxbb_cts_encp_sel, | ||
| 3039 | &gxbb_cts_vdac, | ||
| 3040 | &gxbb_cts_vdac_sel, | ||
| 3041 | &gxbb_hdmi_tx, | ||
| 3042 | &gxbb_hdmi_tx_sel, | ||
| 3043 | &gxbb_hdmi_sel, | ||
| 3044 | &gxbb_hdmi_div, | ||
| 3045 | &gxbb_hdmi, | ||
| 2857 | &gxbb_gp0_pll_dco, | 3046 | &gxbb_gp0_pll_dco, |
| 2858 | &gxbb_hdmi_pll, | 3047 | &gxbb_hdmi_pll, |
| 2859 | &gxbb_hdmi_pll_od, | 3048 | &gxbb_hdmi_pll_od, |
| @@ -2862,14 +3051,6 @@ static struct clk_regmap *const gxbb_clk_regmaps[] = { | |||
| 2862 | }; | 3051 | }; |
| 2863 | 3052 | ||
| 2864 | static struct clk_regmap *const gxl_clk_regmaps[] = { | 3053 | static struct clk_regmap *const gxl_clk_regmaps[] = { |
| 2865 | &gxl_gp0_pll_dco, | ||
| 2866 | &gxl_hdmi_pll, | ||
| 2867 | &gxl_hdmi_pll_od, | ||
| 2868 | &gxl_hdmi_pll_od2, | ||
| 2869 | &gxl_hdmi_pll_dco, | ||
| 2870 | }; | ||
| 2871 | |||
| 2872 | static struct clk_regmap *const gx_clk_regmaps[] = { | ||
| 2873 | &gxbb_clk81, | 3054 | &gxbb_clk81, |
| 2874 | &gxbb_ddr, | 3055 | &gxbb_ddr, |
| 2875 | &gxbb_dos, | 3056 | &gxbb_dos, |
| @@ -3056,23 +3237,22 @@ static struct clk_regmap *const gx_clk_regmaps[] = { | |||
| 3056 | &gxbb_hdmi_sel, | 3237 | &gxbb_hdmi_sel, |
| 3057 | &gxbb_hdmi_div, | 3238 | &gxbb_hdmi_div, |
| 3058 | &gxbb_hdmi, | 3239 | &gxbb_hdmi, |
| 3240 | &gxl_gp0_pll_dco, | ||
| 3241 | &gxl_hdmi_pll, | ||
| 3242 | &gxl_hdmi_pll_od, | ||
| 3243 | &gxl_hdmi_pll_od2, | ||
| 3244 | &gxl_hdmi_pll_dco, | ||
| 3059 | }; | 3245 | }; |
| 3060 | 3246 | ||
| 3061 | struct clkc_data { | 3247 | static const struct meson_eeclkc_data gxbb_clkc_data = { |
| 3062 | struct clk_regmap *const *regmap_clks; | ||
| 3063 | unsigned int regmap_clks_count; | ||
| 3064 | struct clk_hw_onecell_data *hw_onecell_data; | ||
| 3065 | }; | ||
| 3066 | |||
| 3067 | static const struct clkc_data gxbb_clkc_data = { | ||
| 3068 | .regmap_clks = gxbb_clk_regmaps, | 3248 | .regmap_clks = gxbb_clk_regmaps, |
| 3069 | .regmap_clks_count = ARRAY_SIZE(gxbb_clk_regmaps), | 3249 | .regmap_clk_num = ARRAY_SIZE(gxbb_clk_regmaps), |
| 3070 | .hw_onecell_data = &gxbb_hw_onecell_data, | 3250 | .hw_onecell_data = &gxbb_hw_onecell_data, |
| 3071 | }; | 3251 | }; |
| 3072 | 3252 | ||
| 3073 | static const struct clkc_data gxl_clkc_data = { | 3253 | static const struct meson_eeclkc_data gxl_clkc_data = { |
| 3074 | .regmap_clks = gxl_clk_regmaps, | 3254 | .regmap_clks = gxl_clk_regmaps, |
| 3075 | .regmap_clks_count = ARRAY_SIZE(gxl_clk_regmaps), | 3255 | .regmap_clk_num = ARRAY_SIZE(gxl_clk_regmaps), |
| 3076 | .hw_onecell_data = &gxl_hw_onecell_data, | 3256 | .hw_onecell_data = &gxl_hw_onecell_data, |
| 3077 | }; | 3257 | }; |
| 3078 | 3258 | ||
| @@ -3082,52 +3262,8 @@ static const struct of_device_id clkc_match_table[] = { | |||
| 3082 | {}, | 3262 | {}, |
| 3083 | }; | 3263 | }; |
| 3084 | 3264 | ||
| 3085 | static int gxbb_clkc_probe(struct platform_device *pdev) | ||
| 3086 | { | ||
| 3087 | const struct clkc_data *clkc_data; | ||
| 3088 | struct regmap *map; | ||
| 3089 | int ret, i; | ||
| 3090 | struct device *dev = &pdev->dev; | ||
| 3091 | |||
| 3092 | clkc_data = of_device_get_match_data(dev); | ||
| 3093 | if (!clkc_data) | ||
| 3094 | return -EINVAL; | ||
| 3095 | |||
| 3096 | /* Get the hhi system controller node if available */ | ||
| 3097 | map = syscon_node_to_regmap(of_get_parent(dev->of_node)); | ||
| 3098 | if (IS_ERR(map)) { | ||
| 3099 | dev_err(dev, "failed to get HHI regmap\n"); | ||
| 3100 | return PTR_ERR(map); | ||
| 3101 | } | ||
| 3102 | |||
| 3103 | /* Populate regmap for the common regmap backed clocks */ | ||
| 3104 | for (i = 0; i < ARRAY_SIZE(gx_clk_regmaps); i++) | ||
| 3105 | gx_clk_regmaps[i]->map = map; | ||
| 3106 | |||
| 3107 | /* Populate regmap for soc specific clocks */ | ||
| 3108 | for (i = 0; i < clkc_data->regmap_clks_count; i++) | ||
| 3109 | clkc_data->regmap_clks[i]->map = map; | ||
| 3110 | |||
| 3111 | /* Register all clks */ | ||
| 3112 | for (i = 0; i < clkc_data->hw_onecell_data->num; i++) { | ||
| 3113 | /* array might be sparse */ | ||
| 3114 | if (!clkc_data->hw_onecell_data->hws[i]) | ||
| 3115 | continue; | ||
| 3116 | |||
| 3117 | ret = devm_clk_hw_register(dev, | ||
| 3118 | clkc_data->hw_onecell_data->hws[i]); | ||
| 3119 | if (ret) { | ||
| 3120 | dev_err(dev, "Clock registration failed\n"); | ||
| 3121 | return ret; | ||
| 3122 | } | ||
| 3123 | } | ||
| 3124 | |||
| 3125 | return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, | ||
| 3126 | clkc_data->hw_onecell_data); | ||
| 3127 | } | ||
| 3128 | |||
| 3129 | static struct platform_driver gxbb_driver = { | 3265 | static struct platform_driver gxbb_driver = { |
| 3130 | .probe = gxbb_clkc_probe, | 3266 | .probe = meson_eeclkc_probe, |
| 3131 | .driver = { | 3267 | .driver = { |
| 3132 | .name = "gxbb-clkc", | 3268 | .name = "gxbb-clkc", |
| 3133 | .of_match_table = clkc_match_table, | 3269 | .of_match_table = clkc_match_table, |
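
The per-SoC probe removed above is replaced by the shared meson_eeclkc_probe(), which recovers the SoC-specific meson_eeclkc_data through the standard OF match-data lookup. A minimal sketch of how that association is expressed; the compatible strings are assumptions, since the match-table entries themselves are not visible in this hunk:

    /* Sketch only: the .data pointers are resolved by of_device_get_match_data(). */
    static const struct of_device_id clkc_match_table[] = {
            { .compatible = "amlogic,gxbb-clkc", .data = &gxbb_clkc_data },
            { .compatible = "amlogic,gxl-clkc",  .data = &gxl_clkc_data },
            {},
    };
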
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c index f965845917e3..b67951909e04 100644 --- a/drivers/clk/meson/meson-aoclk.c +++ b/drivers/clk/meson/meson-aoclk.c | |||
| @@ -14,9 +14,11 @@ | |||
| 14 | #include <linux/reset-controller.h> | 14 | #include <linux/reset-controller.h> |
| 15 | #include <linux/mfd/syscon.h> | 15 | #include <linux/mfd/syscon.h> |
| 16 | #include <linux/of_device.h> | 16 | #include <linux/of_device.h> |
| 17 | #include "clk-regmap.h" | 17 | #include <linux/slab.h> |
| 18 | #include "meson-aoclk.h" | 18 | #include "meson-aoclk.h" |
| 19 | 19 | ||
| 20 | #include "clk-input.h" | ||
| 21 | |||
| 20 | static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev, | 22 | static int meson_aoclk_do_reset(struct reset_controller_dev *rcdev, |
| 21 | unsigned long id) | 23 | unsigned long id) |
| 22 | { | 24 | { |
| @@ -31,6 +33,37 @@ static const struct reset_control_ops meson_aoclk_reset_ops = { | |||
| 31 | .reset = meson_aoclk_do_reset, | 33 | .reset = meson_aoclk_do_reset, |
| 32 | }; | 34 | }; |
| 33 | 35 | ||
| 36 | static int meson_aoclkc_register_inputs(struct device *dev, | ||
| 37 | struct meson_aoclk_data *data) | ||
| 38 | { | ||
| 39 | struct clk_hw *hw; | ||
| 40 | char *str; | ||
| 41 | int i; | ||
| 42 | |||
| 43 | for (i = 0; i < data->num_inputs; i++) { | ||
| 44 | const struct meson_aoclk_input *in = &data->inputs[i]; | ||
| 45 | |||
| 46 | str = kasprintf(GFP_KERNEL, "%s%s", data->input_prefix, | ||
| 47 | in->name); | ||
| 48 | if (!str) | ||
| 49 | return -ENOMEM; | ||
| 50 | |||
| 51 | hw = meson_clk_hw_register_input(dev, in->name, str, 0); | ||
| 52 | kfree(str); | ||
| 53 | |||
| 54 | if (IS_ERR(hw)) { | ||
| 55 | if (!in->required && PTR_ERR(hw) == -ENOENT) | ||
| 56 | continue; | ||
| 57 | else if (PTR_ERR(hw) != -EPROBE_DEFER) | ||
| 58 | dev_err(dev, "failed to register input %s\n", | ||
| 59 | in->name); | ||
| 60 | return PTR_ERR(hw); | ||
| 61 | } | ||
| 62 | } | ||
| 63 | |||
| 64 | return 0; | ||
| 65 | } | ||
| 66 | |||
| 34 | int meson_aoclkc_probe(struct platform_device *pdev) | 67 | int meson_aoclkc_probe(struct platform_device *pdev) |
| 35 | { | 68 | { |
| 36 | struct meson_aoclk_reset_controller *rstc; | 69 | struct meson_aoclk_reset_controller *rstc; |
| @@ -53,6 +86,10 @@ int meson_aoclkc_probe(struct platform_device *pdev) | |||
| 53 | return PTR_ERR(regmap); | 86 | return PTR_ERR(regmap); |
| 54 | } | 87 | } |
| 55 | 88 | ||
| 89 | ret = meson_aoclkc_register_inputs(dev, data); | ||
| 90 | if (ret) | ||
| 91 | return ret; | ||
| 92 | |||
| 56 | /* Reset Controller */ | 93 | /* Reset Controller */ |
| 57 | rstc->data = data; | 94 | rstc->data = data; |
| 58 | rstc->regmap = regmap; | 95 | rstc->regmap = regmap; |
| @@ -65,15 +102,20 @@ int meson_aoclkc_probe(struct platform_device *pdev) | |||
| 65 | return ret; | 102 | return ret; |
| 66 | } | 103 | } |
| 67 | 104 | ||
| 68 | /* | 105 | /* Populate regmap */ |
| 69 | * Populate regmap and register all clks | 106 | for (clkid = 0; clkid < data->num_clks; clkid++) |
| 70 | */ | ||
| 71 | for (clkid = 0; clkid < data->num_clks; clkid++) { | ||
| 72 | data->clks[clkid]->map = regmap; | 107 | data->clks[clkid]->map = regmap; |
| 73 | 108 | ||
| 109 | /* Register all clks */ | ||
| 110 | for (clkid = 0; clkid < data->hw_data->num; clkid++) { | ||
| 111 | if (!data->hw_data->hws[clkid]) | ||
| 112 | continue; | ||
| 113 | |||
| 74 | ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]); | 114 | ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]); |
| 75 | if (ret) | 115 | if (ret) { |
| 116 | dev_err(dev, "Clock registration failed\n"); | ||
| 76 | return ret; | 117 | return ret; |
| 118 | } | ||
| 77 | } | 119 | } |
| 78 | 120 | ||
| 79 | return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, | 121 | return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, |
diff --git a/drivers/clk/meson/meson-aoclk.h b/drivers/clk/meson/meson-aoclk.h index ab2819e88922..999cde3868f7 100644 --- a/drivers/clk/meson/meson-aoclk.h +++ b/drivers/clk/meson/meson-aoclk.h | |||
| @@ -11,16 +11,27 @@ | |||
| 11 | #ifndef __MESON_AOCLK_H__ | 11 | #ifndef __MESON_AOCLK_H__ |
| 12 | #define __MESON_AOCLK_H__ | 12 | #define __MESON_AOCLK_H__ |
| 13 | 13 | ||
| 14 | #include <linux/clk-provider.h> | ||
| 14 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
| 16 | #include <linux/regmap.h> | ||
| 15 | #include <linux/reset-controller.h> | 17 | #include <linux/reset-controller.h> |
| 18 | |||
| 16 | #include "clk-regmap.h" | 19 | #include "clk-regmap.h" |
| 17 | 20 | ||
| 21 | struct meson_aoclk_input { | ||
| 22 | const char *name; | ||
| 23 | bool required; | ||
| 24 | }; | ||
| 25 | |||
| 18 | struct meson_aoclk_data { | 26 | struct meson_aoclk_data { |
| 19 | const unsigned int reset_reg; | 27 | const unsigned int reset_reg; |
| 20 | const int num_reset; | 28 | const int num_reset; |
| 21 | const unsigned int *reset; | 29 | const unsigned int *reset; |
| 22 | int num_clks; | 30 | const int num_clks; |
| 23 | struct clk_regmap **clks; | 31 | struct clk_regmap **clks; |
| 32 | const int num_inputs; | ||
| 33 | const struct meson_aoclk_input *inputs; | ||
| 34 | const char *input_prefix; | ||
| 24 | const struct clk_hw_onecell_data *hw_data; | 35 | const struct clk_hw_onecell_data *hw_data; |
| 25 | }; | 36 | }; |
| 26 | 37 | ||
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c new file mode 100644 index 000000000000..37a34c9c3885 --- /dev/null +++ b/drivers/clk/meson/meson-eeclk.c | |||
| @@ -0,0 +1,63 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2019 BayLibre, SAS. | ||
| 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/clk-provider.h> | ||
| 8 | #include <linux/of_device.h> | ||
| 9 | #include <linux/platform_device.h> | ||
| 10 | #include <linux/mfd/syscon.h> | ||
| 11 | #include <linux/regmap.h> | ||
| 12 | |||
| 13 | #include "clk-input.h" | ||
| 14 | #include "clk-regmap.h" | ||
| 15 | #include "meson-eeclk.h" | ||
| 16 | |||
| 17 | int meson_eeclkc_probe(struct platform_device *pdev) | ||
| 18 | { | ||
| 19 | const struct meson_eeclkc_data *data; | ||
| 20 | struct device *dev = &pdev->dev; | ||
| 21 | struct clk_hw *input; | ||
| 22 | struct regmap *map; | ||
| 23 | int ret, i; | ||
| 24 | |||
| 25 | data = of_device_get_match_data(dev); | ||
| 26 | if (!data) | ||
| 27 | return -EINVAL; | ||
| 28 | |||
| 29 | /* Get the hhi system controller node */ | ||
| 30 | map = syscon_node_to_regmap(of_get_parent(dev->of_node)); | ||
| 31 | if (IS_ERR(map)) { | ||
| 32 | dev_err(dev, | ||
| 33 | "failed to get HHI regmap\n"); | ||
| 34 | return PTR_ERR(map); | ||
| 35 | } | ||
| 36 | |||
| 37 | input = meson_clk_hw_register_input(dev, "xtal", IN_PREFIX "xtal", 0); | ||
| 38 | if (IS_ERR(input)) { | ||
| 39 | ret = PTR_ERR(input); | ||
| 40 | if (ret != -EPROBE_DEFER) | ||
| 41 | dev_err(dev, "failed to get input clock"); | ||
| 42 | return ret; | ||
| 43 | } | ||
| 44 | |||
| 45 | /* Populate regmap for the regmap backed clocks */ | ||
| 46 | for (i = 0; i < data->regmap_clk_num; i++) | ||
| 47 | data->regmap_clks[i]->map = map; | ||
| 48 | |||
| 49 | for (i = 0; i < data->hw_onecell_data->num; i++) { | ||
| 50 | /* array might be sparse */ | ||
| 51 | if (!data->hw_onecell_data->hws[i]) | ||
| 52 | continue; | ||
| 53 | |||
| 54 | ret = devm_clk_hw_register(dev, data->hw_onecell_data->hws[i]); | ||
| 55 | if (ret) { | ||
| 56 | dev_err(dev, "Clock registration failed\n"); | ||
| 57 | return ret; | ||
| 58 | } | ||
| 59 | } | ||
| 60 | |||
| 61 | return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, | ||
| 62 | data->hw_onecell_data); | ||
| 63 | } | ||
diff --git a/drivers/clk/meson/meson-eeclk.h b/drivers/clk/meson/meson-eeclk.h new file mode 100644 index 000000000000..1b809b1419fe --- /dev/null +++ b/drivers/clk/meson/meson-eeclk.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2019 BayLibre, SAS. | ||
| 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __MESON_CLKC_H | ||
| 8 | #define __MESON_CLKC_H | ||
| 9 | |||
| 10 | #include <linux/clk-provider.h> | ||
| 11 | #include "clk-regmap.h" | ||
| 12 | |||
| 13 | #define IN_PREFIX "ee-in-" | ||
| 14 | |||
| 15 | struct platform_device; | ||
| 16 | |||
| 17 | struct meson_eeclkc_data { | ||
| 18 | struct clk_regmap *const *regmap_clks; | ||
| 19 | unsigned int regmap_clk_num; | ||
| 20 | struct clk_hw_onecell_data *hw_onecell_data; | ||
| 21 | }; | ||
| 22 | |||
| 23 | int meson_eeclkc_probe(struct platform_device *pdev); | ||
| 24 | |||
| 25 | #endif /* __MESON_CLKC_H */ | ||
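
IN_PREFIX decouples the controller's internal clock tree from the clock names supplied in the devicetree: meson_eeclkc_probe() registers an internal clock named "ee-in-xtal" on top of the DT-provided "xtal", and parent lists such as gen_clk_parent_names in the first hunk of this series reference the prefixed name. The helper's behaviour below is inferred from its call site rather than from clk-input.c, so treat it as an assumption:

    /* Assumed behaviour, based on the call in meson_eeclkc_probe() above:       */
    input = meson_clk_hw_register_input(dev, "xtal", IN_PREFIX "xtal", 0);
    /* - looks up the clock named "xtal" in the device's DT "clocks" property    */
    /* - registers a pass-through clk_hw named "ee-in-xtal" with that clock as   */
    /*   its only parent, so regmap-backed clocks can use "ee-in-xtal" directly  */
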
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index 950d0e548c75..576ad42252d0 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c | |||
| @@ -16,9 +16,10 @@ | |||
| 16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
| 17 | #include <linux/regmap.h> | 17 | #include <linux/regmap.h> |
| 18 | 18 | ||
| 19 | #include "clkc.h" | ||
| 20 | #include "meson8b.h" | 19 | #include "meson8b.h" |
| 21 | #include "clk-regmap.h" | 20 | #include "clk-regmap.h" |
| 21 | #include "clk-pll.h" | ||
| 22 | #include "clk-mpll.h" | ||
| 22 | 23 | ||
| 23 | static DEFINE_SPINLOCK(meson_clk_lock); | 24 | static DEFINE_SPINLOCK(meson_clk_lock); |
| 24 | 25 | ||
| @@ -803,16 +804,16 @@ static struct clk_fixed_factor meson8b_cpu_clk_div8 = { | |||
| 803 | }, | 804 | }, |
| 804 | }; | 805 | }; |
| 805 | 806 | ||
| 806 | static u32 mux_table_abp[] = { 1, 2, 3, 4, 5, 6, 7 }; | 807 | static u32 mux_table_apb[] = { 1, 2, 3, 4, 5, 6, 7 }; |
| 807 | static struct clk_regmap meson8b_abp_clk_sel = { | 808 | static struct clk_regmap meson8b_apb_clk_sel = { |
| 808 | .data = &(struct clk_regmap_mux_data){ | 809 | .data = &(struct clk_regmap_mux_data){ |
| 809 | .offset = HHI_SYS_CPU_CLK_CNTL1, | 810 | .offset = HHI_SYS_CPU_CLK_CNTL1, |
| 810 | .mask = 0x7, | 811 | .mask = 0x7, |
| 811 | .shift = 3, | 812 | .shift = 3, |
| 812 | .table = mux_table_abp, | 813 | .table = mux_table_apb, |
| 813 | }, | 814 | }, |
| 814 | .hw.init = &(struct clk_init_data){ | 815 | .hw.init = &(struct clk_init_data){ |
| 815 | .name = "abp_clk_sel", | 816 | .name = "apb_clk_sel", |
| 816 | .ops = &clk_regmap_mux_ops, | 817 | .ops = &clk_regmap_mux_ops, |
| 817 | .parent_names = (const char *[]){ "cpu_clk_div2", | 818 | .parent_names = (const char *[]){ "cpu_clk_div2", |
| 818 | "cpu_clk_div3", | 819 | "cpu_clk_div3", |
| @@ -825,16 +826,16 @@ static struct clk_regmap meson8b_abp_clk_sel = { | |||
| 825 | }, | 826 | }, |
| 826 | }; | 827 | }; |
| 827 | 828 | ||
| 828 | static struct clk_regmap meson8b_abp_clk_gate = { | 829 | static struct clk_regmap meson8b_apb_clk_gate = { |
| 829 | .data = &(struct clk_regmap_gate_data){ | 830 | .data = &(struct clk_regmap_gate_data){ |
| 830 | .offset = HHI_SYS_CPU_CLK_CNTL1, | 831 | .offset = HHI_SYS_CPU_CLK_CNTL1, |
| 831 | .bit_idx = 16, | 832 | .bit_idx = 16, |
| 832 | .flags = CLK_GATE_SET_TO_DISABLE, | 833 | .flags = CLK_GATE_SET_TO_DISABLE, |
| 833 | }, | 834 | }, |
| 834 | .hw.init = &(struct clk_init_data){ | 835 | .hw.init = &(struct clk_init_data){ |
| 835 | .name = "abp_clk_dis", | 836 | .name = "apb_clk_dis", |
| 836 | .ops = &clk_regmap_gate_ro_ops, | 837 | .ops = &clk_regmap_gate_ro_ops, |
| 837 | .parent_names = (const char *[]){ "abp_clk_sel" }, | 838 | .parent_names = (const char *[]){ "apb_clk_sel" }, |
| 838 | .num_parents = 1, | 839 | .num_parents = 1, |
| 839 | .flags = CLK_SET_RATE_PARENT, | 840 | .flags = CLK_SET_RATE_PARENT, |
| 840 | }, | 841 | }, |
| @@ -1573,6 +1574,135 @@ static struct clk_regmap meson8b_hdmi_sys = { | |||
| 1573 | }, | 1574 | }, |
| 1574 | }; | 1575 | }; |
| 1575 | 1576 | ||
| 1577 | /* | ||
| 1578 | * The MALI IP is clocked by two identical clocks (mali_0 and mali_1) | ||
| 1579 | * muxed by a glitch-free switch on Meson8b and Meson8m2. Meson8 only | ||
| 1580 | * has mali_0 and no glitch-free mux. | ||
| 1581 | */ | ||
| 1582 | static const char * const meson8b_mali_0_1_parent_names[] = { | ||
| 1583 | "xtal", "mpll2", "mpll1", "fclk_div7", "fclk_div4", "fclk_div3", | ||
| 1584 | "fclk_div5" | ||
| 1585 | }; | ||
| 1586 | |||
| 1587 | static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 }; | ||
| 1588 | |||
| 1589 | static struct clk_regmap meson8b_mali_0_sel = { | ||
| 1590 | .data = &(struct clk_regmap_mux_data){ | ||
| 1591 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1592 | .mask = 0x7, | ||
| 1593 | .shift = 9, | ||
| 1594 | .table = meson8b_mali_0_1_mux_table, | ||
| 1595 | }, | ||
| 1596 | .hw.init = &(struct clk_init_data){ | ||
| 1597 | .name = "mali_0_sel", | ||
| 1598 | .ops = &clk_regmap_mux_ops, | ||
| 1599 | .parent_names = meson8b_mali_0_1_parent_names, | ||
| 1600 | .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_names), | ||
| 1601 | /* | ||
| 1602 | * Don't propagate rate changes up because the only changeable | ||
| 1603 | * parents are mpll1 and mpll2 but we need those for audio and | ||
| 1604 | * RGMII (Ethernet). We don't want to change the audio or | ||
| 1605 | * Ethernet clocks when setting the GPU frequency. | ||
| 1606 | */ | ||
| 1607 | .flags = 0, | ||
| 1608 | }, | ||
| 1609 | }; | ||
| 1610 | |||
| 1611 | static struct clk_regmap meson8b_mali_0_div = { | ||
| 1612 | .data = &(struct clk_regmap_div_data){ | ||
| 1613 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1614 | .shift = 0, | ||
| 1615 | .width = 7, | ||
| 1616 | }, | ||
| 1617 | .hw.init = &(struct clk_init_data){ | ||
| 1618 | .name = "mali_0_div", | ||
| 1619 | .ops = &clk_regmap_divider_ops, | ||
| 1620 | .parent_names = (const char *[]){ "mali_0_sel" }, | ||
| 1621 | .num_parents = 1, | ||
| 1622 | .flags = CLK_SET_RATE_PARENT, | ||
| 1623 | }, | ||
| 1624 | }; | ||
| 1625 | |||
| 1626 | static struct clk_regmap meson8b_mali_0 = { | ||
| 1627 | .data = &(struct clk_regmap_gate_data){ | ||
| 1628 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1629 | .bit_idx = 8, | ||
| 1630 | }, | ||
| 1631 | .hw.init = &(struct clk_init_data){ | ||
| 1632 | .name = "mali_0", | ||
| 1633 | .ops = &clk_regmap_gate_ops, | ||
| 1634 | .parent_names = (const char *[]){ "mali_0_div" }, | ||
| 1635 | .num_parents = 1, | ||
| 1636 | .flags = CLK_SET_RATE_PARENT, | ||
| 1637 | }, | ||
| 1638 | }; | ||
| 1639 | |||
| 1640 | static struct clk_regmap meson8b_mali_1_sel = { | ||
| 1641 | .data = &(struct clk_regmap_mux_data){ | ||
| 1642 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1643 | .mask = 0x7, | ||
| 1644 | .shift = 25, | ||
| 1645 | .table = meson8b_mali_0_1_mux_table, | ||
| 1646 | }, | ||
| 1647 | .hw.init = &(struct clk_init_data){ | ||
| 1648 | .name = "mali_1_sel", | ||
| 1649 | .ops = &clk_regmap_mux_ops, | ||
| 1650 | .parent_names = meson8b_mali_0_1_parent_names, | ||
| 1651 | .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_names), | ||
| 1652 | /* | ||
| 1653 | * Don't propagate rate changes up because the only changeable | ||
| 1654 | * parents are mpll1 and mpll2 but we need those for audio and | ||
| 1655 | * RGMII (Ethernet). We don't want to change the audio or | ||
| 1656 | * Ethernet clocks when setting the GPU frequency. | ||
| 1657 | */ | ||
| 1658 | .flags = 0, | ||
| 1659 | }, | ||
| 1660 | }; | ||
| 1661 | |||
| 1662 | static struct clk_regmap meson8b_mali_1_div = { | ||
| 1663 | .data = &(struct clk_regmap_div_data){ | ||
| 1664 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1665 | .shift = 16, | ||
| 1666 | .width = 7, | ||
| 1667 | }, | ||
| 1668 | .hw.init = &(struct clk_init_data){ | ||
| 1669 | .name = "mali_1_div", | ||
| 1670 | .ops = &clk_regmap_divider_ops, | ||
| 1671 | .parent_names = (const char *[]){ "mali_1_sel" }, | ||
| 1672 | .num_parents = 1, | ||
| 1673 | .flags = CLK_SET_RATE_PARENT, | ||
| 1674 | }, | ||
| 1675 | }; | ||
| 1676 | |||
| 1677 | static struct clk_regmap meson8b_mali_1 = { | ||
| 1678 | .data = &(struct clk_regmap_gate_data){ | ||
| 1679 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1680 | .bit_idx = 24, | ||
| 1681 | }, | ||
| 1682 | .hw.init = &(struct clk_init_data){ | ||
| 1683 | .name = "mali_1", | ||
| 1684 | .ops = &clk_regmap_gate_ops, | ||
| 1685 | .parent_names = (const char *[]){ "mali_1_div" }, | ||
| 1686 | .num_parents = 1, | ||
| 1687 | .flags = CLK_SET_RATE_PARENT, | ||
| 1688 | }, | ||
| 1689 | }; | ||
| 1690 | |||
| 1691 | static struct clk_regmap meson8b_mali = { | ||
| 1692 | .data = &(struct clk_regmap_mux_data){ | ||
| 1693 | .offset = HHI_MALI_CLK_CNTL, | ||
| 1694 | .mask = 1, | ||
| 1695 | .shift = 31, | ||
| 1696 | }, | ||
| 1697 | .hw.init = &(struct clk_init_data){ | ||
| 1698 | .name = "mali", | ||
| 1699 | .ops = &clk_regmap_mux_ops, | ||
| 1700 | .parent_names = (const char *[]){ "mali_0", "mali_1" }, | ||
| 1701 | .num_parents = 2, | ||
| 1702 | .flags = CLK_SET_RATE_PARENT, | ||
| 1703 | }, | ||
| 1704 | }; | ||
| 1705 | |||
| 1576 | /* Everything Else (EE) domain gates */ | 1706 | /* Everything Else (EE) domain gates */ |
| 1577 | 1707 | ||
| 1578 | static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0); | 1708 | static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0); |
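
The Mali clock tree added in this hunk is a classic composite: per branch a mux (mali_0_sel/mali_1_sel), a 7-bit divider and a gate, with a final glitch-free mux ("mali") on Meson8b/Meson8m2 so one branch can be reprogrammed while the GPU keeps running from the other. A consumer operates only on the top-level clock; a rough sketch, where the DT clock-name "core" is an assumption and error handling is omitted:

    /* Sketch of a GPU driver consumer; CLK_SET_RATE_PARENT on the gate and
     * divider lets the request propagate down the currently selected branch. */
    struct clk *mali = devm_clk_get(dev, "core");
    clk_prepare_enable(mali);
    clk_set_rate(mali, 500000000);   /* example target, 500 MHz */
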
| @@ -1659,6 +1789,188 @@ static MESON_GATE(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1); | |||
| 1659 | static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2); | 1789 | static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2); |
| 1660 | static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3); | 1790 | static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3); |
| 1661 | 1791 | ||
| 1792 | static struct clk_hw_onecell_data meson8_hw_onecell_data = { | ||
| 1793 | .hws = { | ||
| 1794 | [CLKID_XTAL] = &meson8b_xtal.hw, | ||
| 1795 | [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw, | ||
| 1796 | [CLKID_PLL_VID] = &meson8b_vid_pll.hw, | ||
| 1797 | [CLKID_PLL_SYS] = &meson8b_sys_pll.hw, | ||
| 1798 | [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw, | ||
| 1799 | [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw, | ||
| 1800 | [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw, | ||
| 1801 | [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw, | ||
| 1802 | [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw, | ||
| 1803 | [CLKID_CPUCLK] = &meson8b_cpu_clk.hw, | ||
| 1804 | [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw, | ||
| 1805 | [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw, | ||
| 1806 | [CLKID_CLK81] = &meson8b_clk81.hw, | ||
| 1807 | [CLKID_DDR] = &meson8b_ddr.hw, | ||
| 1808 | [CLKID_DOS] = &meson8b_dos.hw, | ||
| 1809 | [CLKID_ISA] = &meson8b_isa.hw, | ||
| 1810 | [CLKID_PL301] = &meson8b_pl301.hw, | ||
| 1811 | [CLKID_PERIPHS] = &meson8b_periphs.hw, | ||
| 1812 | [CLKID_SPICC] = &meson8b_spicc.hw, | ||
| 1813 | [CLKID_I2C] = &meson8b_i2c.hw, | ||
| 1814 | [CLKID_SAR_ADC] = &meson8b_sar_adc.hw, | ||
| 1815 | [CLKID_SMART_CARD] = &meson8b_smart_card.hw, | ||
| 1816 | [CLKID_RNG0] = &meson8b_rng0.hw, | ||
| 1817 | [CLKID_UART0] = &meson8b_uart0.hw, | ||
| 1818 | [CLKID_SDHC] = &meson8b_sdhc.hw, | ||
| 1819 | [CLKID_STREAM] = &meson8b_stream.hw, | ||
| 1820 | [CLKID_ASYNC_FIFO] = &meson8b_async_fifo.hw, | ||
| 1821 | [CLKID_SDIO] = &meson8b_sdio.hw, | ||
| 1822 | [CLKID_ABUF] = &meson8b_abuf.hw, | ||
| 1823 | [CLKID_HIU_IFACE] = &meson8b_hiu_iface.hw, | ||
| 1824 | [CLKID_ASSIST_MISC] = &meson8b_assist_misc.hw, | ||
| 1825 | [CLKID_SPI] = &meson8b_spi.hw, | ||
| 1826 | [CLKID_I2S_SPDIF] = &meson8b_i2s_spdif.hw, | ||
| 1827 | [CLKID_ETH] = &meson8b_eth.hw, | ||
| 1828 | [CLKID_DEMUX] = &meson8b_demux.hw, | ||
| 1829 | [CLKID_AIU_GLUE] = &meson8b_aiu_glue.hw, | ||
| 1830 | [CLKID_IEC958] = &meson8b_iec958.hw, | ||
| 1831 | [CLKID_I2S_OUT] = &meson8b_i2s_out.hw, | ||
| 1832 | [CLKID_AMCLK] = &meson8b_amclk.hw, | ||
| 1833 | [CLKID_AIFIFO2] = &meson8b_aififo2.hw, | ||
| 1834 | [CLKID_MIXER] = &meson8b_mixer.hw, | ||
| 1835 | [CLKID_MIXER_IFACE] = &meson8b_mixer_iface.hw, | ||
| 1836 | [CLKID_ADC] = &meson8b_adc.hw, | ||
| 1837 | [CLKID_BLKMV] = &meson8b_blkmv.hw, | ||
| 1838 | [CLKID_AIU] = &meson8b_aiu.hw, | ||
| 1839 | [CLKID_UART1] = &meson8b_uart1.hw, | ||
| 1840 | [CLKID_G2D] = &meson8b_g2d.hw, | ||
| 1841 | [CLKID_USB0] = &meson8b_usb0.hw, | ||
| 1842 | [CLKID_USB1] = &meson8b_usb1.hw, | ||
| 1843 | [CLKID_RESET] = &meson8b_reset.hw, | ||
| 1844 | [CLKID_NAND] = &meson8b_nand.hw, | ||
| 1845 | [CLKID_DOS_PARSER] = &meson8b_dos_parser.hw, | ||
| 1846 | [CLKID_USB] = &meson8b_usb.hw, | ||
| 1847 | [CLKID_VDIN1] = &meson8b_vdin1.hw, | ||
| 1848 | [CLKID_AHB_ARB0] = &meson8b_ahb_arb0.hw, | ||
| 1849 | [CLKID_EFUSE] = &meson8b_efuse.hw, | ||
| 1850 | [CLKID_BOOT_ROM] = &meson8b_boot_rom.hw, | ||
| 1851 | [CLKID_AHB_DATA_BUS] = &meson8b_ahb_data_bus.hw, | ||
| 1852 | [CLKID_AHB_CTRL_BUS] = &meson8b_ahb_ctrl_bus.hw, | ||
| 1853 | [CLKID_HDMI_INTR_SYNC] = &meson8b_hdmi_intr_sync.hw, | ||
| 1854 | [CLKID_HDMI_PCLK] = &meson8b_hdmi_pclk.hw, | ||
| 1855 | [CLKID_USB1_DDR_BRIDGE] = &meson8b_usb1_ddr_bridge.hw, | ||
| 1856 | [CLKID_USB0_DDR_BRIDGE] = &meson8b_usb0_ddr_bridge.hw, | ||
| 1857 | [CLKID_MMC_PCLK] = &meson8b_mmc_pclk.hw, | ||
| 1858 | [CLKID_DVIN] = &meson8b_dvin.hw, | ||
| 1859 | [CLKID_UART2] = &meson8b_uart2.hw, | ||
| 1860 | [CLKID_SANA] = &meson8b_sana.hw, | ||
| 1861 | [CLKID_VPU_INTR] = &meson8b_vpu_intr.hw, | ||
| 1862 | [CLKID_SEC_AHB_AHB3_BRIDGE] = &meson8b_sec_ahb_ahb3_bridge.hw, | ||
| 1863 | [CLKID_CLK81_A9] = &meson8b_clk81_a9.hw, | ||
| 1864 | [CLKID_VCLK2_VENCI0] = &meson8b_vclk2_venci0.hw, | ||
| 1865 | [CLKID_VCLK2_VENCI1] = &meson8b_vclk2_venci1.hw, | ||
| 1866 | [CLKID_VCLK2_VENCP0] = &meson8b_vclk2_vencp0.hw, | ||
| 1867 | [CLKID_VCLK2_VENCP1] = &meson8b_vclk2_vencp1.hw, | ||
| 1868 | [CLKID_GCLK_VENCI_INT] = &meson8b_gclk_venci_int.hw, | ||
| 1869 | [CLKID_GCLK_VENCP_INT] = &meson8b_gclk_vencp_int.hw, | ||
| 1870 | [CLKID_DAC_CLK] = &meson8b_dac_clk.hw, | ||
| 1871 | [CLKID_AOCLK_GATE] = &meson8b_aoclk_gate.hw, | ||
| 1872 | [CLKID_IEC958_GATE] = &meson8b_iec958_gate.hw, | ||
| 1873 | [CLKID_ENC480P] = &meson8b_enc480p.hw, | ||
| 1874 | [CLKID_RNG1] = &meson8b_rng1.hw, | ||
| 1875 | [CLKID_GCLK_VENCL_INT] = &meson8b_gclk_vencl_int.hw, | ||
| 1876 | [CLKID_VCLK2_VENCLMCC] = &meson8b_vclk2_venclmcc.hw, | ||
| 1877 | [CLKID_VCLK2_VENCL] = &meson8b_vclk2_vencl.hw, | ||
| 1878 | [CLKID_VCLK2_OTHER] = &meson8b_vclk2_other.hw, | ||
| 1879 | [CLKID_EDP] = &meson8b_edp.hw, | ||
| 1880 | [CLKID_AO_MEDIA_CPU] = &meson8b_ao_media_cpu.hw, | ||
| 1881 | [CLKID_AO_AHB_SRAM] = &meson8b_ao_ahb_sram.hw, | ||
| 1882 | [CLKID_AO_AHB_BUS] = &meson8b_ao_ahb_bus.hw, | ||
| 1883 | [CLKID_AO_IFACE] = &meson8b_ao_iface.hw, | ||
| 1884 | [CLKID_MPLL0] = &meson8b_mpll0.hw, | ||
| 1885 | [CLKID_MPLL1] = &meson8b_mpll1.hw, | ||
| 1886 | [CLKID_MPLL2] = &meson8b_mpll2.hw, | ||
| 1887 | [CLKID_MPLL0_DIV] = &meson8b_mpll0_div.hw, | ||
| 1888 | [CLKID_MPLL1_DIV] = &meson8b_mpll1_div.hw, | ||
| 1889 | [CLKID_MPLL2_DIV] = &meson8b_mpll2_div.hw, | ||
| 1890 | [CLKID_CPU_IN_SEL] = &meson8b_cpu_in_sel.hw, | ||
| 1891 | [CLKID_CPU_IN_DIV2] = &meson8b_cpu_in_div2.hw, | ||
| 1892 | [CLKID_CPU_IN_DIV3] = &meson8b_cpu_in_div3.hw, | ||
| 1893 | [CLKID_CPU_SCALE_DIV] = &meson8b_cpu_scale_div.hw, | ||
| 1894 | [CLKID_CPU_SCALE_OUT_SEL] = &meson8b_cpu_scale_out_sel.hw, | ||
| 1895 | [CLKID_MPLL_PREDIV] = &meson8b_mpll_prediv.hw, | ||
| 1896 | [CLKID_FCLK_DIV2_DIV] = &meson8b_fclk_div2_div.hw, | ||
| 1897 | [CLKID_FCLK_DIV3_DIV] = &meson8b_fclk_div3_div.hw, | ||
| 1898 | [CLKID_FCLK_DIV4_DIV] = &meson8b_fclk_div4_div.hw, | ||
| 1899 | [CLKID_FCLK_DIV5_DIV] = &meson8b_fclk_div5_div.hw, | ||
| 1900 | [CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw, | ||
| 1901 | [CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw, | ||
| 1902 | [CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw, | ||
| 1903 | [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw, | ||
| 1904 | [CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw, | ||
| 1905 | [CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw, | ||
| 1906 | [CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw, | ||
| 1907 | [CLKID_CPU_CLK_DIV2] = &meson8b_cpu_clk_div2.hw, | ||
| 1908 | [CLKID_CPU_CLK_DIV3] = &meson8b_cpu_clk_div3.hw, | ||
| 1909 | [CLKID_CPU_CLK_DIV4] = &meson8b_cpu_clk_div4.hw, | ||
| 1910 | [CLKID_CPU_CLK_DIV5] = &meson8b_cpu_clk_div5.hw, | ||
| 1911 | [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw, | ||
| 1912 | [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw, | ||
| 1913 | [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw, | ||
| 1914 | [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw, | ||
| 1915 | [CLKID_APB] = &meson8b_apb_clk_gate.hw, | ||
| 1916 | [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw, | ||
| 1917 | [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw, | ||
| 1918 | [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw, | ||
| 1919 | [CLKID_AXI] = &meson8b_axi_clk_gate.hw, | ||
| 1920 | [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw, | ||
| 1921 | [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw, | ||
| 1922 | [CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw, | ||
| 1923 | [CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw, | ||
| 1924 | [CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw, | ||
| 1925 | [CLKID_VID_PLL_IN_EN] = &meson8b_vid_pll_in_en.hw, | ||
| 1926 | [CLKID_VID_PLL_PRE_DIV] = &meson8b_vid_pll_pre_div.hw, | ||
| 1927 | [CLKID_VID_PLL_POST_DIV] = &meson8b_vid_pll_post_div.hw, | ||
| 1928 | [CLKID_VID_PLL_FINAL_DIV] = &meson8b_vid_pll_final_div.hw, | ||
| 1929 | [CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw, | ||
| 1930 | [CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw, | ||
| 1931 | [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw, | ||
| 1932 | [CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw, | ||
| 1933 | [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw, | ||
| 1934 | [CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw, | ||
| 1935 | [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw, | ||
| 1936 | [CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw, | ||
| 1937 | [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw, | ||
| 1938 | [CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw, | ||
| 1939 | [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw, | ||
| 1940 | [CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw, | ||
| 1941 | [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw, | ||
| 1942 | [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw, | ||
| 1943 | [CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw, | ||
| 1944 | [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw, | ||
| 1945 | [CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw, | ||
| 1946 | [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw, | ||
| 1947 | [CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw, | ||
| 1948 | [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw, | ||
| 1949 | [CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw, | ||
| 1950 | [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw, | ||
| 1951 | [CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw, | ||
| 1952 | [CLKID_CTS_ENCT] = &meson8b_cts_enct.hw, | ||
| 1953 | [CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw, | ||
| 1954 | [CLKID_CTS_ENCP] = &meson8b_cts_encp.hw, | ||
| 1955 | [CLKID_CTS_ENCI_SEL] = &meson8b_cts_enci_sel.hw, | ||
| 1956 | [CLKID_CTS_ENCI] = &meson8b_cts_enci.hw, | ||
| 1957 | [CLKID_HDMI_TX_PIXEL_SEL] = &meson8b_hdmi_tx_pixel_sel.hw, | ||
| 1958 | [CLKID_HDMI_TX_PIXEL] = &meson8b_hdmi_tx_pixel.hw, | ||
| 1959 | [CLKID_CTS_ENCL_SEL] = &meson8b_cts_encl_sel.hw, | ||
| 1960 | [CLKID_CTS_ENCL] = &meson8b_cts_encl.hw, | ||
| 1961 | [CLKID_CTS_VDAC0_SEL] = &meson8b_cts_vdac0_sel.hw, | ||
| 1962 | [CLKID_CTS_VDAC0] = &meson8b_cts_vdac0.hw, | ||
| 1963 | [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw, | ||
| 1964 | [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw, | ||
| 1965 | [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw, | ||
| 1966 | [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw, | ||
| 1967 | [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw, | ||
| 1968 | [CLKID_MALI] = &meson8b_mali_0.hw, | ||
| 1969 | [CLK_NR_CLKS] = NULL, | ||
| 1970 | }, | ||
| 1971 | .num = CLK_NR_CLKS, | ||
| 1972 | }; | ||
| 1973 | |||
| 1662 | static struct clk_hw_onecell_data meson8b_hw_onecell_data = { | 1974 | static struct clk_hw_onecell_data meson8b_hw_onecell_data = { |
| 1663 | .hws = { | 1975 | .hws = { |
| 1664 | [CLKID_XTAL] = &meson8b_xtal.hw, | 1976 | [CLKID_XTAL] = &meson8b_xtal.hw, |
| @@ -1781,8 +2093,8 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = { | |||
| 1781 | [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw, | 2093 | [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw, |
| 1782 | [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw, | 2094 | [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw, |
| 1783 | [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw, | 2095 | [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw, |
| 1784 | [CLKID_ABP_SEL] = &meson8b_abp_clk_sel.hw, | 2096 | [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw, |
| 1785 | [CLKID_ABP] = &meson8b_abp_clk_gate.hw, | 2097 | [CLKID_APB] = &meson8b_apb_clk_gate.hw, |
| 1786 | [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw, | 2098 | [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw, |
| 1787 | [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw, | 2099 | [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw, |
| 1788 | [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw, | 2100 | [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw, |
| @@ -1833,6 +2145,13 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = { | |||
| 1833 | [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw, | 2145 | [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw, |
| 1834 | [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw, | 2146 | [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw, |
| 1835 | [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw, | 2147 | [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw, |
| 2148 | [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw, | ||
| 2149 | [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw, | ||
| 2150 | [CLKID_MALI_0] = &meson8b_mali_0.hw, | ||
| 2151 | [CLKID_MALI_1_SEL] = &meson8b_mali_1_sel.hw, | ||
| 2152 | [CLKID_MALI_1_DIV] = &meson8b_mali_1_div.hw, | ||
| 2153 | [CLKID_MALI_1] = &meson8b_mali_1.hw, | ||
| 2154 | [CLKID_MALI] = &meson8b_mali.hw, | ||
| 1836 | [CLK_NR_CLKS] = NULL, | 2155 | [CLK_NR_CLKS] = NULL, |
| 1837 | }, | 2156 | }, |
| 1838 | .num = CLK_NR_CLKS, | 2157 | .num = CLK_NR_CLKS, |
| @@ -1943,8 +2262,8 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = { | |||
| 1943 | &meson8b_fixed_pll_dco, | 2262 | &meson8b_fixed_pll_dco, |
| 1944 | &meson8b_hdmi_pll_dco, | 2263 | &meson8b_hdmi_pll_dco, |
| 1945 | &meson8b_sys_pll_dco, | 2264 | &meson8b_sys_pll_dco, |
| 1946 | &meson8b_abp_clk_sel, | 2265 | &meson8b_apb_clk_sel, |
| 1947 | &meson8b_abp_clk_gate, | 2266 | &meson8b_apb_clk_gate, |
| 1948 | &meson8b_periph_clk_sel, | 2267 | &meson8b_periph_clk_sel, |
| 1949 | &meson8b_periph_clk_gate, | 2268 | &meson8b_periph_clk_gate, |
| 1950 | &meson8b_axi_clk_sel, | 2269 | &meson8b_axi_clk_sel, |
| @@ -1988,6 +2307,13 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = { | |||
| 1988 | &meson8b_hdmi_sys_sel, | 2307 | &meson8b_hdmi_sys_sel, |
| 1989 | &meson8b_hdmi_sys_div, | 2308 | &meson8b_hdmi_sys_div, |
| 1990 | &meson8b_hdmi_sys, | 2309 | &meson8b_hdmi_sys, |
| 2310 | &meson8b_mali_0_sel, | ||
| 2311 | &meson8b_mali_0_div, | ||
| 2312 | &meson8b_mali_0, | ||
| 2313 | &meson8b_mali_1_sel, | ||
| 2314 | &meson8b_mali_1_div, | ||
| 2315 | &meson8b_mali_1, | ||
| 2316 | &meson8b_mali, | ||
| 1991 | }; | 2317 | }; |
| 1992 | 2318 | ||
| 1993 | static const struct meson8b_clk_reset_line { | 2319 | static const struct meson8b_clk_reset_line { |
| @@ -2132,7 +2458,6 @@ static int meson8b_cpu_clk_notifier_cb(struct notifier_block *nb, | |||
| 2132 | 2458 | ||
| 2133 | static struct meson8b_nb_data meson8b_cpu_nb_data = { | 2459 | static struct meson8b_nb_data meson8b_cpu_nb_data = { |
| 2134 | .nb.notifier_call = meson8b_cpu_clk_notifier_cb, | 2460 | .nb.notifier_call = meson8b_cpu_clk_notifier_cb, |
| 2135 | .onecell_data = &meson8b_hw_onecell_data, | ||
| 2136 | }; | 2461 | }; |
| 2137 | 2462 | ||
| 2138 | static const struct regmap_config clkc_regmap_config = { | 2463 | static const struct regmap_config clkc_regmap_config = { |
| @@ -2141,7 +2466,8 @@ static const struct regmap_config clkc_regmap_config = { | |||
| 2141 | .reg_stride = 4, | 2466 | .reg_stride = 4, |
| 2142 | }; | 2467 | }; |
| 2143 | 2468 | ||
| 2144 | static void __init meson8b_clkc_init(struct device_node *np) | 2469 | static void __init meson8b_clkc_init_common(struct device_node *np, |
| 2470 | struct clk_hw_onecell_data *clk_hw_onecell_data) | ||
| 2145 | { | 2471 | { |
| 2146 | struct meson8b_clk_reset *rstc; | 2472 | struct meson8b_clk_reset *rstc; |
| 2147 | const char *notifier_clk_name; | 2473 | const char *notifier_clk_name; |
| @@ -2192,14 +2518,16 @@ static void __init meson8b_clkc_init(struct device_node *np) | |||
| 2192 | */ | 2518 | */ |
| 2193 | for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) { | 2519 | for (i = CLKID_XTAL; i < CLK_NR_CLKS; i++) { |
| 2194 | /* array might be sparse */ | 2520 | /* array might be sparse */ |
| 2195 | if (!meson8b_hw_onecell_data.hws[i]) | 2521 | if (!clk_hw_onecell_data->hws[i]) |
| 2196 | continue; | 2522 | continue; |
| 2197 | 2523 | ||
| 2198 | ret = clk_hw_register(NULL, meson8b_hw_onecell_data.hws[i]); | 2524 | ret = clk_hw_register(NULL, clk_hw_onecell_data->hws[i]); |
| 2199 | if (ret) | 2525 | if (ret) |
| 2200 | return; | 2526 | return; |
| 2201 | } | 2527 | } |
| 2202 | 2528 | ||
| 2529 | meson8b_cpu_nb_data.onecell_data = clk_hw_onecell_data; | ||
| 2530 | |||
| 2203 | /* | 2531 | /* |
| 2204 | * FIXME we shouldn't program the muxes in notifier handlers. The | 2532 | * FIXME we shouldn't program the muxes in notifier handlers. The |
| 2205 | * tricky programming sequence will be handled by the forthcoming | 2533 | * tricky programming sequence will be handled by the forthcoming |
| @@ -2215,13 +2543,23 @@ static void __init meson8b_clkc_init(struct device_node *np) | |||
| 2215 | } | 2543 | } |
| 2216 | 2544 | ||
| 2217 | ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, | 2545 | ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, |
| 2218 | &meson8b_hw_onecell_data); | 2546 | clk_hw_onecell_data); |
| 2219 | if (ret) | 2547 | if (ret) |
| 2220 | pr_err("%s: failed to register clock provider\n", __func__); | 2548 | pr_err("%s: failed to register clock provider\n", __func__); |
| 2221 | } | 2549 | } |
| 2222 | 2550 | ||
| 2551 | static void __init meson8_clkc_init(struct device_node *np) | ||
| 2552 | { | ||
| 2553 | return meson8b_clkc_init_common(np, &meson8_hw_onecell_data); | ||
| 2554 | } | ||
| 2555 | |||
| 2556 | static void __init meson8b_clkc_init(struct device_node *np) | ||
| 2557 | { | ||
| 2558 | return meson8b_clkc_init_common(np, &meson8b_hw_onecell_data); | ||
| 2559 | } | ||
| 2560 | |||
| 2223 | CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc", | 2561 | CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc", |
| 2224 | meson8b_clkc_init); | 2562 | meson8_clkc_init); |
| 2225 | CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc", | 2563 | CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc", |
| 2226 | meson8b_clkc_init); | 2564 | meson8b_clkc_init); |
| 2227 | CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc", | 2565 | CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc", |
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h index 87fba739af81..b8c58faeae52 100644 --- a/drivers/clk/meson/meson8b.h +++ b/drivers/clk/meson/meson8b.h | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */ | 33 | #define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */ |
| 34 | #define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */ | 34 | #define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */ |
| 35 | #define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */ | 35 | #define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */ |
| 36 | #define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */ | ||
| 36 | #define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */ | 37 | #define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */ |
| 37 | #define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */ | 38 | #define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */ |
| 38 | #define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */ | 39 | #define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */ |
| @@ -91,7 +92,7 @@ | |||
| 91 | #define CLKID_CPU_CLK_DIV6 120 | 92 | #define CLKID_CPU_CLK_DIV6 120 |
| 92 | #define CLKID_CPU_CLK_DIV7 121 | 93 | #define CLKID_CPU_CLK_DIV7 121 |
| 93 | #define CLKID_CPU_CLK_DIV8 122 | 94 | #define CLKID_CPU_CLK_DIV8 122 |
| 94 | #define CLKID_ABP_SEL 123 | 95 | #define CLKID_APB_SEL 123 |
| 95 | #define CLKID_PERIPH_SEL 125 | 96 | #define CLKID_PERIPH_SEL 125 |
| 96 | #define CLKID_AXI_SEL 127 | 97 | #define CLKID_AXI_SEL 127 |
| 97 | #define CLKID_L2_DRAM_SEL 129 | 98 | #define CLKID_L2_DRAM_SEL 129 |
| @@ -139,8 +140,14 @@ | |||
| 139 | #define CLKID_HDMI_SYS_SEL 172 | 140 | #define CLKID_HDMI_SYS_SEL 172 |
| 140 | #define CLKID_HDMI_SYS_DIV 173 | 141 | #define CLKID_HDMI_SYS_DIV 173 |
| 141 | #define CLKID_HDMI_SYS 174 | 142 | #define CLKID_HDMI_SYS 174 |
| 143 | #define CLKID_MALI_0_SEL 175 | ||
| 144 | #define CLKID_MALI_0_DIV 176 | ||
| 145 | #define CLKID_MALI_0 177 | ||
| 146 | #define CLKID_MALI_1_SEL 178 | ||
| 147 | #define CLKID_MALI_1_DIV 179 | ||
| 148 | #define CLKID_MALI_1 180 | ||
| 142 | 149 | ||
| 143 | #define CLK_NR_CLKS 175 | 150 | #define CLK_NR_CLKS 181 |
| 144 | 151 | ||
| 145 | /* | 152 | /* |
| 146 | * include the CLKID and RESETID that have | 153 | * include the CLKID and RESETID that have |
diff --git a/drivers/clk/meson/parm.h b/drivers/clk/meson/parm.h new file mode 100644 index 000000000000..3c9ef1b505ce --- /dev/null +++ b/drivers/clk/meson/parm.h | |||
| @@ -0,0 +1,46 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2015 Endless Mobile, Inc. | ||
| 4 | * Author: Carlo Caione <carlo@endlessm.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __MESON_PARM_H | ||
| 8 | #define __MESON_PARM_H | ||
| 9 | |||
| 10 | #include <linux/bits.h> | ||
| 11 | #include <linux/regmap.h> | ||
| 12 | |||
| 13 | #define PMASK(width) GENMASK(width - 1, 0) | ||
| 14 | #define SETPMASK(width, shift) GENMASK(shift + width - 1, shift) | ||
| 15 | #define CLRPMASK(width, shift) (~SETPMASK(width, shift)) | ||
| 16 | |||
| 17 | #define PARM_GET(width, shift, reg) \ | ||
| 18 | (((reg) & SETPMASK(width, shift)) >> (shift)) | ||
| 19 | #define PARM_SET(width, shift, reg, val) \ | ||
| 20 | (((reg) & CLRPMASK(width, shift)) | ((val) << (shift))) | ||
| 21 | |||
| 22 | #define MESON_PARM_APPLICABLE(p) (!!((p)->width)) | ||
| 23 | |||
| 24 | struct parm { | ||
| 25 | u16 reg_off; | ||
| 26 | u8 shift; | ||
| 27 | u8 width; | ||
| 28 | }; | ||
| 29 | |||
| 30 | static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p) | ||
| 31 | { | ||
| 32 | unsigned int val; | ||
| 33 | |||
| 34 | regmap_read(map, p->reg_off, &val); | ||
| 35 | return PARM_GET(p->width, p->shift, val); | ||
| 36 | } | ||
| 37 | |||
| 38 | static inline void meson_parm_write(struct regmap *map, struct parm *p, | ||
| 39 | unsigned int val) | ||
| 40 | { | ||
| 41 | regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift), | ||
| 42 | val << p->shift); | ||
| 43 | } | ||
| 44 | |||
| 45 | #endif /* __MESON_PARM_H */ | ||
| 46 | |||
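
parm.h distills the register/shift/width triplet that most Meson clock parameters reduce to, plus the masks used to read and update them through regmap. A small worked example with an assumed 9-bit field starting at bit 16:

    /* Assumed example field: 9 bits wide at bit 16 of some HHI register. */
    static struct parm example = { .reg_off = 0x280, .shift = 16, .width = 9 };

    /* SETPMASK(9, 16) expands to GENMASK(24, 16), i.e. bits 16..24.        */
    /* PARM_GET(9, 16, reg) masks those bits and shifts them down to bit 0. */
    /* meson_parm_write() goes through regmap_update_bits(), so the other   */
    /* bits of the register are preserved.                                  */
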
diff --git a/drivers/clk/meson/sclk-div.c b/drivers/clk/meson/sclk-div.c index bc64019b8eeb..3acf03780221 100644 --- a/drivers/clk/meson/sclk-div.c +++ b/drivers/clk/meson/sclk-div.c | |||
| @@ -16,7 +16,11 @@ | |||
| 16 | * duty_cycle = (1 + hi) / (1 + val) | 16 | * duty_cycle = (1 + hi) / (1 + val) |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #include "clkc-audio.h" | 19 | #include <linux/clk-provider.h> |
| 20 | #include <linux/module.h> | ||
| 21 | |||
| 22 | #include "clk-regmap.h" | ||
| 23 | #include "sclk-div.h" | ||
| 20 | 24 | ||
| 21 | static inline struct meson_sclk_div_data * | 25 | static inline struct meson_sclk_div_data * |
| 22 | meson_sclk_div_data(struct clk_regmap *clk) | 26 | meson_sclk_div_data(struct clk_regmap *clk) |
| @@ -241,3 +245,7 @@ const struct clk_ops meson_sclk_div_ops = { | |||
| 241 | .init = sclk_div_init, | 245 | .init = sclk_div_init, |
| 242 | }; | 246 | }; |
| 243 | EXPORT_SYMBOL_GPL(meson_sclk_div_ops); | 247 | EXPORT_SYMBOL_GPL(meson_sclk_div_ops); |
| 248 | |||
| 249 | MODULE_DESCRIPTION("Amlogic Sample divider driver"); | ||
| 250 | MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>"); | ||
| 251 | MODULE_LICENSE("GPL v2"); | ||
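
The sclk-div header comment retained in the hunk above gives duty_cycle = (1 + hi) / (1 + val), which implies the output period is (1 + val) input cycles, i.e. the block divides by val + 1. A quick numeric check, purely illustrative:

    /* With val = 7 and hi = 3:
     *   rate = parent_rate / (1 + 7)      -> divide-by-8
     *   duty = (1 + 3) / (1 + 7) = 4 / 8  -> 50% high time
     */
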
diff --git a/drivers/clk/meson/clkc-audio.h b/drivers/clk/meson/sclk-div.h index 0a7c157ebf81..b64b2a32005f 100644 --- a/drivers/clk/meson/clkc-audio.h +++ b/drivers/clk/meson/sclk-div.h | |||
| @@ -4,16 +4,11 @@ | |||
| 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> | 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> |
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #ifndef __MESON_CLKC_AUDIO_H | 7 | #ifndef __MESON_SCLK_DIV_H |
| 8 | #define __MESON_CLKC_AUDIO_H | 8 | #define __MESON_SCLK_DIV_H |
| 9 | 9 | ||
| 10 | #include "clkc.h" | 10 | #include <linux/clk-provider.h> |
| 11 | 11 | #include "parm.h" | |
| 12 | struct meson_clk_triphase_data { | ||
| 13 | struct parm ph0; | ||
| 14 | struct parm ph1; | ||
| 15 | struct parm ph2; | ||
| 16 | }; | ||
| 17 | 12 | ||
| 18 | struct meson_sclk_div_data { | 13 | struct meson_sclk_div_data { |
| 19 | struct parm div; | 14 | struct parm div; |
| @@ -22,7 +17,6 @@ struct meson_sclk_div_data { | |||
| 22 | struct clk_duty cached_duty; | 17 | struct clk_duty cached_duty; |
| 23 | }; | 18 | }; |
| 24 | 19 | ||
| 25 | extern const struct clk_ops meson_clk_triphase_ops; | ||
| 26 | extern const struct clk_ops meson_sclk_div_ops; | 20 | extern const struct clk_ops meson_sclk_div_ops; |
| 27 | 21 | ||
| 28 | #endif /* __MESON_CLKC_AUDIO_H */ | 22 | #endif /* __MESON_SCLK_DIV_H */ |
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c index 88af0e282ea0..08bcc01c0923 100644 --- a/drivers/clk/meson/vid-pll-div.c +++ b/drivers/clk/meson/vid-pll-div.c | |||
| @@ -5,7 +5,10 @@ | |||
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #include <linux/clk-provider.h> | 7 | #include <linux/clk-provider.h> |
| 8 | #include "clkc.h" | 8 | #include <linux/module.h> |
| 9 | |||
| 10 | #include "clk-regmap.h" | ||
| 11 | #include "vid-pll-div.h" | ||
| 9 | 12 | ||
| 10 | static inline struct meson_vid_pll_div_data * | 13 | static inline struct meson_vid_pll_div_data * |
| 11 | meson_vid_pll_div_data(struct clk_regmap *clk) | 14 | meson_vid_pll_div_data(struct clk_regmap *clk) |
| @@ -89,3 +92,8 @@ static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw, | |||
| 89 | const struct clk_ops meson_vid_pll_div_ro_ops = { | 92 | const struct clk_ops meson_vid_pll_div_ro_ops = { |
| 90 | .recalc_rate = meson_vid_pll_div_recalc_rate, | 93 | .recalc_rate = meson_vid_pll_div_recalc_rate, |
| 91 | }; | 94 | }; |
| 95 | EXPORT_SYMBOL_GPL(meson_vid_pll_div_ro_ops); | ||
| 96 | |||
| 97 | MODULE_DESCRIPTION("Amlogic video pll divider driver"); | ||
| 98 | MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); | ||
| 99 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/clk/meson/vid-pll-div.h b/drivers/clk/meson/vid-pll-div.h new file mode 100644 index 000000000000..c0128e33ccf9 --- /dev/null +++ b/drivers/clk/meson/vid-pll-div.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2019 BayLibre, SAS. | ||
| 4 | * Author: Jerome Brunet <jbrunet@baylibre.com> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __MESON_VID_PLL_DIV_H | ||
| 8 | #define __MESON_VID_PLL_DIV_H | ||
| 9 | |||
| 10 | #include <linux/clk-provider.h> | ||
| 11 | #include "parm.h" | ||
| 12 | |||
| 13 | struct meson_vid_pll_div_data { | ||
| 14 | struct parm val; | ||
| 15 | struct parm sel; | ||
| 16 | }; | ||
| 17 | |||
| 18 | extern const struct clk_ops meson_vid_pll_div_ro_ops; | ||
| 19 | |||
| 20 | #endif /* __MESON_VID_PLL_DIV_H */ | ||
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index d083b860f083..a60a1be937ad 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c | |||
| @@ -229,9 +229,10 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = { | |||
| 229 | {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, | 229 | {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, |
| 230 | {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, | 230 | {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, |
| 231 | {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, | 231 | {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, |
| 232 | {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock}, | 232 | {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x09, 0x09, 0x0, 0, &disp0_lock}, |
| 233 | {MMP2_CLK_DISP0_LCDC, "disp0_lcdc_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x12, 0x12, 0x0, 0, &disp0_lock}, | ||
| 233 | {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock}, | 234 | {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock}, |
| 234 | {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock}, | 235 | {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x09, 0x09, 0x0, 0, &disp1_lock}, |
| 235 | {MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock}, | 236 | {MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock}, |
| 236 | {MMP2_CLK_CCIC0, "ccic0_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock}, | 237 | {MMP2_CLK_CCIC0, "ccic0_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock}, |
| 237 | {MMP2_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock}, | 238 | {MMP2_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock}, |
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c index 7dedfaa6e152..5c6bbee396b3 100644 --- a/drivers/clk/mvebu/armada-370.c +++ b/drivers/clk/mvebu/armada-370.c | |||
| @@ -175,8 +175,10 @@ static void __init a370_clk_init(struct device_node *np) | |||
| 175 | 175 | ||
| 176 | mvebu_coreclk_setup(np, &a370_coreclks); | 176 | mvebu_coreclk_setup(np, &a370_coreclks); |
| 177 | 177 | ||
| 178 | if (cgnp) | 178 | if (cgnp) { |
| 179 | mvebu_clk_gating_setup(cgnp, a370_gating_desc); | 179 | mvebu_clk_gating_setup(cgnp, a370_gating_desc); |
| 180 | of_node_put(cgnp); | ||
| 181 | } | ||
| 180 | } | 182 | } |
| 181 | CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init); | 183 | CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init); |
| 182 | 184 | ||
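
This and the following mvebu hunks all plug the same reference leak: the clock-gating node is obtained earlier in each init function with a get-style OF lookup, so it must be released with of_node_put() once it has been used. The surrounding lookup is outside these hunks, so the call below is an assumed illustration of the pattern rather than a quote of the file:

    /* Assumed shape of the surrounding code; only the of_node_put() is new. */
    cgnp = of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock");
    /* ... */
    if (cgnp) {
            mvebu_clk_gating_setup(cgnp, a370_gating_desc);
            of_node_put(cgnp);  /* drop the reference taken by the lookup */
    }
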
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c index e8f03293ec83..fa1568279c23 100644 --- a/drivers/clk/mvebu/armada-xp.c +++ b/drivers/clk/mvebu/armada-xp.c | |||
| @@ -226,7 +226,9 @@ static void __init axp_clk_init(struct device_node *np) | |||
| 226 | 226 | ||
| 227 | mvebu_coreclk_setup(np, &axp_coreclks); | 227 | mvebu_coreclk_setup(np, &axp_coreclks); |
| 228 | 228 | ||
| 229 | if (cgnp) | 229 | if (cgnp) { |
| 230 | mvebu_clk_gating_setup(cgnp, axp_gating_desc); | 230 | mvebu_clk_gating_setup(cgnp, axp_gating_desc); |
| 231 | of_node_put(cgnp); | ||
| 232 | } | ||
| 231 | } | 233 | } |
| 232 | CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init); | 234 | CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init); |
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c index e0dd99f36bf4..0bd09d33f9cf 100644 --- a/drivers/clk/mvebu/dove.c +++ b/drivers/clk/mvebu/dove.c | |||
| @@ -188,10 +188,14 @@ static void __init dove_clk_init(struct device_node *np) | |||
| 188 | 188 | ||
| 189 | mvebu_coreclk_setup(np, &dove_coreclks); | 189 | mvebu_coreclk_setup(np, &dove_coreclks); |
| 190 | 190 | ||
| 191 | if (ddnp) | 191 | if (ddnp) { |
| 192 | dove_divider_clk_init(ddnp); | 192 | dove_divider_clk_init(ddnp); |
| 193 | of_node_put(ddnp); | ||
| 194 | } | ||
| 193 | 195 | ||
| 194 | if (cgnp) | 196 | if (cgnp) { |
| 195 | mvebu_clk_gating_setup(cgnp, dove_gating_desc); | 197 | mvebu_clk_gating_setup(cgnp, dove_gating_desc); |
| 198 | of_node_put(cgnp); | ||
| 199 | } | ||
| 196 | } | 200 | } |
| 197 | CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init); | 201 | CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init); |
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c index 6f784167bda4..35af3aa18f1c 100644 --- a/drivers/clk/mvebu/kirkwood.c +++ b/drivers/clk/mvebu/kirkwood.c | |||
| @@ -331,6 +331,8 @@ static void __init kirkwood_clk_init(struct device_node *np) | |||
| 331 | if (cgnp) { | 331 | if (cgnp) { |
| 332 | mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc); | 332 | mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc); |
| 333 | kirkwood_clk_muxing_setup(cgnp, kirkwood_mux_desc); | 333 | kirkwood_clk_muxing_setup(cgnp, kirkwood_mux_desc); |
| 334 | |||
| 335 | of_node_put(cgnp); | ||
| 334 | } | 336 | } |
| 335 | } | 337 | } |
| 336 | CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock", | 338 | CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock", |
diff --git a/drivers/clk/mvebu/mv98dx3236.c b/drivers/clk/mvebu/mv98dx3236.c index 0a74cf7a7725..1c8ab4f834ba 100644 --- a/drivers/clk/mvebu/mv98dx3236.c +++ b/drivers/clk/mvebu/mv98dx3236.c | |||
| @@ -172,7 +172,9 @@ static void __init mv98dx3236_clk_init(struct device_node *np) | |||
| 172 | 172 | ||
| 173 | mvebu_coreclk_setup(np, &mv98dx3236_core_clocks); | 173 | mvebu_coreclk_setup(np, &mv98dx3236_core_clocks); |
| 174 | 174 | ||
| 175 | if (cgnp) | 175 | if (cgnp) { |
| 176 | mvebu_clk_gating_setup(cgnp, mv98dx3236_gating_desc); | 176 | mvebu_clk_gating_setup(cgnp, mv98dx3236_gating_desc); |
| 177 | of_node_put(cgnp); | ||
| 178 | } | ||
| 177 | } | 179 | } |
| 178 | CLK_OF_DECLARE(mv98dx3236_clk, "marvell,mv98dx3236-core-clock", mv98dx3236_clk_init); | 180 | CLK_OF_DECLARE(mv98dx3236_clk, "marvell,mv98dx3236-core-clock", mv98dx3236_clk_init); |
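The five mvebu hunks above (armada-370, armada-xp, dove, kirkwood, mv98dx3236) all plug the same OF-node reference leak: the gating/divider node was looked up earlier with of_find_compatible_node(), which takes a reference, but that reference was never dropped. A minimal sketch of the corrected pattern, with the function name and compatible string as illustrative placeholders rather than names from the patch:

#include <linux/of.h>

static void __init example_clk_init(struct device_node *np)
{
	/* of_find_compatible_node() returns the node with its refcount raised */
	struct device_node *cgnp =
		of_find_compatible_node(NULL, NULL, "marvell,example-gating-clock");

	if (cgnp) {
		/* ... set up the gating clocks described by cgnp ... */
		of_node_put(cgnp);	/* drop the reference taken by the lookup */
	}
}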
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h index e5eca8a1abe4..c25b57c3cbc8 100644 --- a/drivers/clk/qcom/clk-rcg.h +++ b/drivers/clk/qcom/clk-rcg.h | |||
| @@ -71,7 +71,6 @@ struct src_sel { | |||
| 71 | * @freq_tbl: frequency table | 71 | * @freq_tbl: frequency table |
| 72 | * @clkr: regmap clock handle | 72 | * @clkr: regmap clock handle |
| 73 | * @lock: register lock | 73 | * @lock: register lock |
| 74 | * | ||
| 75 | */ | 74 | */ |
| 76 | struct clk_rcg { | 75 | struct clk_rcg { |
| 77 | u32 ns_reg; | 76 | u32 ns_reg; |
| @@ -107,7 +106,6 @@ extern const struct clk_ops clk_rcg_lcc_ops; | |||
| 107 | * @freq_tbl: frequency table | 106 | * @freq_tbl: frequency table |
| 108 | * @clkr: regmap clock handle | 107 | * @clkr: regmap clock handle |
| 109 | * @lock: register lock | 108 | * @lock: register lock |
| 110 | * | ||
| 111 | */ | 109 | */ |
| 112 | struct clk_dyn_rcg { | 110 | struct clk_dyn_rcg { |
| 113 | u32 ns_reg[2]; | 111 | u32 ns_reg[2]; |
| @@ -140,7 +138,7 @@ extern const struct clk_ops clk_dyn_rcg_ops; | |||
| 140 | * @parent_map: map from software's parent index to hardware's src_sel field | 138 | * @parent_map: map from software's parent index to hardware's src_sel field |
| 141 | * @freq_tbl: frequency table | 139 | * @freq_tbl: frequency table |
| 142 | * @clkr: regmap clock handle | 140 | * @clkr: regmap clock handle |
| 143 | * | 141 | * @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG |
| 144 | */ | 142 | */ |
| 145 | struct clk_rcg2 { | 143 | struct clk_rcg2 { |
| 146 | u32 cmd_rcgr; | 144 | u32 cmd_rcgr; |
| @@ -150,6 +148,7 @@ struct clk_rcg2 { | |||
| 150 | const struct parent_map *parent_map; | 148 | const struct parent_map *parent_map; |
| 151 | const struct freq_tbl *freq_tbl; | 149 | const struct freq_tbl *freq_tbl; |
| 152 | struct clk_regmap clkr; | 150 | struct clk_regmap clkr; |
| 151 | u8 cfg_off; | ||
| 153 | }; | 152 | }; |
| 154 | 153 | ||
| 155 | #define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr) | 154 | #define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr) |
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 6e3bd195d012..8c02bffe50df 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c | |||
| @@ -41,6 +41,11 @@ | |||
| 41 | #define N_REG 0xc | 41 | #define N_REG 0xc |
| 42 | #define D_REG 0x10 | 42 | #define D_REG 0x10 |
| 43 | 43 | ||
| 44 | #define RCG_CFG_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG) | ||
| 45 | #define RCG_M_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG) | ||
| 46 | #define RCG_N_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG) | ||
| 47 | #define RCG_D_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG) | ||
| 48 | |||
| 44 | /* Dynamic Frequency Scaling */ | 49 | /* Dynamic Frequency Scaling */ |
| 45 | #define MAX_PERF_LEVEL 8 | 50 | #define MAX_PERF_LEVEL 8 |
| 46 | #define SE_CMD_DFSR_OFFSET 0x14 | 51 | #define SE_CMD_DFSR_OFFSET 0x14 |
| @@ -74,7 +79,7 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw) | |||
| 74 | u32 cfg; | 79 | u32 cfg; |
| 75 | int i, ret; | 80 | int i, ret; |
| 76 | 81 | ||
| 77 | ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg); | 82 | ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg); |
| 78 | if (ret) | 83 | if (ret) |
| 79 | goto err; | 84 | goto err; |
| 80 | 85 | ||
| @@ -123,7 +128,7 @@ static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index) | |||
| 123 | int ret; | 128 | int ret; |
| 124 | u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT; | 129 | u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT; |
| 125 | 130 | ||
| 126 | ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, | 131 | ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), |
| 127 | CFG_SRC_SEL_MASK, cfg); | 132 | CFG_SRC_SEL_MASK, cfg); |
| 128 | if (ret) | 133 | if (ret) |
| 129 | return ret; | 134 | return ret; |
| @@ -162,13 +167,13 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) | |||
| 162 | struct clk_rcg2 *rcg = to_clk_rcg2(hw); | 167 | struct clk_rcg2 *rcg = to_clk_rcg2(hw); |
| 163 | u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask; | 168 | u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask; |
| 164 | 169 | ||
| 165 | regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg); | 170 | regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg); |
| 166 | 171 | ||
| 167 | if (rcg->mnd_width) { | 172 | if (rcg->mnd_width) { |
| 168 | mask = BIT(rcg->mnd_width) - 1; | 173 | mask = BIT(rcg->mnd_width) - 1; |
| 169 | regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m); | 174 | regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m); |
| 170 | m &= mask; | 175 | m &= mask; |
| 171 | regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n); | 176 | regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n); |
| 172 | n = ~n; | 177 | n = ~n; |
| 173 | n &= mask; | 178 | n &= mask; |
| 174 | n += m; | 179 | n += m; |
| @@ -263,17 +268,17 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) | |||
| 263 | if (rcg->mnd_width && f->n) { | 268 | if (rcg->mnd_width && f->n) { |
| 264 | mask = BIT(rcg->mnd_width) - 1; | 269 | mask = BIT(rcg->mnd_width) - 1; |
| 265 | ret = regmap_update_bits(rcg->clkr.regmap, | 270 | ret = regmap_update_bits(rcg->clkr.regmap, |
| 266 | rcg->cmd_rcgr + M_REG, mask, f->m); | 271 | RCG_M_OFFSET(rcg), mask, f->m); |
| 267 | if (ret) | 272 | if (ret) |
| 268 | return ret; | 273 | return ret; |
| 269 | 274 | ||
| 270 | ret = regmap_update_bits(rcg->clkr.regmap, | 275 | ret = regmap_update_bits(rcg->clkr.regmap, |
| 271 | rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m)); | 276 | RCG_N_OFFSET(rcg), mask, ~(f->n - f->m)); |
| 272 | if (ret) | 277 | if (ret) |
| 273 | return ret; | 278 | return ret; |
| 274 | 279 | ||
| 275 | ret = regmap_update_bits(rcg->clkr.regmap, | 280 | ret = regmap_update_bits(rcg->clkr.regmap, |
| 276 | rcg->cmd_rcgr + D_REG, mask, ~f->n); | 281 | RCG_D_OFFSET(rcg), mask, ~f->n); |
| 277 | if (ret) | 282 | if (ret) |
| 278 | return ret; | 283 | return ret; |
| 279 | } | 284 | } |
| @@ -284,8 +289,7 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) | |||
| 284 | cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT; | 289 | cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT; |
| 285 | if (rcg->mnd_width && f->n && (f->m != f->n)) | 290 | if (rcg->mnd_width && f->n && (f->m != f->n)) |
| 286 | cfg |= CFG_MODE_DUAL_EDGE; | 291 | cfg |= CFG_MODE_DUAL_EDGE; |
| 287 | 292 | return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), | |
| 288 | return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, | ||
| 289 | mask, cfg); | 293 | mask, cfg); |
| 290 | } | 294 | } |
| 291 | 295 | ||
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index 9f4fc7773fb2..c3fd632af119 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c | |||
| @@ -18,6 +18,31 @@ | |||
| 18 | #define CLK_RPMH_ARC_EN_OFFSET 0 | 18 | #define CLK_RPMH_ARC_EN_OFFSET 0 |
| 19 | #define CLK_RPMH_VRM_EN_OFFSET 4 | 19 | #define CLK_RPMH_VRM_EN_OFFSET 4 |
| 20 | 20 | ||
| 21 | #define BCM_TCS_CMD_COMMIT_MASK 0x40000000 | ||
| 22 | #define BCM_TCS_CMD_VALID_SHIFT 29 | ||
| 23 | #define BCM_TCS_CMD_VOTE_MASK 0x3fff | ||
| 24 | #define BCM_TCS_CMD_VOTE_SHIFT 0 | ||
| 25 | |||
| 26 | #define BCM_TCS_CMD(valid, vote) \ | ||
| 27 | (BCM_TCS_CMD_COMMIT_MASK | \ | ||
| 28 | ((valid) << BCM_TCS_CMD_VALID_SHIFT) | \ | ||
| 29 | ((vote & BCM_TCS_CMD_VOTE_MASK) \ | ||
| 30 | << BCM_TCS_CMD_VOTE_SHIFT)) | ||
| 31 | |||
| 32 | /** | ||
| 33 | * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM) | ||
| 34 | * @unit: divisor used to convert Hz value to an RPMh msg | ||
| 35 | * @width: multiplier used to convert Hz value to an RPMh msg | ||
| 36 | * @vcd: virtual clock domain that this bcm belongs to | ||
| 37 | * @reserved: reserved to pad the struct | ||
| 38 | */ | ||
| 39 | struct bcm_db { | ||
| 40 | __le32 unit; | ||
| 41 | __le16 width; | ||
| 42 | u8 vcd; | ||
| 43 | u8 reserved; | ||
| 44 | }; | ||
| 45 | |||
| 21 | /** | 46 | /** |
| 22 | * struct clk_rpmh - individual rpmh clock data structure | 47 | * struct clk_rpmh - individual rpmh clock data structure |
| 23 | * @hw: handle between common and hardware-specific interfaces | 48 | * @hw: handle between common and hardware-specific interfaces |
| @@ -29,6 +54,7 @@ | |||
| 29 | * @aggr_state: rpmh clock aggregated state | 54 | * @aggr_state: rpmh clock aggregated state |
| 30 | * @last_sent_aggr_state: rpmh clock last aggr state sent to RPMh | 55 | * @last_sent_aggr_state: rpmh clock last aggr state sent to RPMh |
| 31 | * @valid_state_mask: mask to determine the state of the rpmh clock | 56 | * @valid_state_mask: mask to determine the state of the rpmh clock |
| 57 | * @unit: divisor to convert rate to rpmh msg in magnitudes of kHz | ||
| 32 | * @dev: device to which it is attached | 58 | * @dev: device to which it is attached |
| 33 | * @peer: pointer to the clock rpmh sibling | 59 | * @peer: pointer to the clock rpmh sibling |
| 34 | */ | 60 | */ |
| @@ -42,6 +68,7 @@ struct clk_rpmh { | |||
| 42 | u32 aggr_state; | 68 | u32 aggr_state; |
| 43 | u32 last_sent_aggr_state; | 69 | u32 last_sent_aggr_state; |
| 44 | u32 valid_state_mask; | 70 | u32 valid_state_mask; |
| 71 | u32 unit; | ||
| 45 | struct device *dev; | 72 | struct device *dev; |
| 46 | struct clk_rpmh *peer; | 73 | struct clk_rpmh *peer; |
| 47 | }; | 74 | }; |
| @@ -98,6 +125,17 @@ static DEFINE_MUTEX(rpmh_clk_lock); | |||
| 98 | __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \ | 125 | __DEFINE_CLK_RPMH(_platform, _name, _name_active, _res_name, \ |
| 99 | CLK_RPMH_VRM_EN_OFFSET, 1, _div) | 126 | CLK_RPMH_VRM_EN_OFFSET, 1, _div) |
| 100 | 127 | ||
| 128 | #define DEFINE_CLK_RPMH_BCM(_platform, _name, _res_name) \ | ||
| 129 | static struct clk_rpmh _platform##_##_name = { \ | ||
| 130 | .res_name = _res_name, \ | ||
| 131 | .valid_state_mask = BIT(RPMH_ACTIVE_ONLY_STATE), \ | ||
| 132 | .div = 1, \ | ||
| 133 | .hw.init = &(struct clk_init_data){ \ | ||
| 134 | .ops = &clk_rpmh_bcm_ops, \ | ||
| 135 | .name = #_name, \ | ||
| 136 | }, \ | ||
| 137 | } | ||
| 138 | |||
| 101 | static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw) | 139 | static inline struct clk_rpmh *to_clk_rpmh(struct clk_hw *_hw) |
| 102 | { | 140 | { |
| 103 | return container_of(_hw, struct clk_rpmh, hw); | 141 | return container_of(_hw, struct clk_rpmh, hw); |
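The BCM_TCS_CMD() macro added in the hunk above packs a commit flag, a valid bit and a 14-bit vote into the single 32-bit word written into a TCS command. A standalone sketch of the encoding with an arbitrary vote value, just to show where the bits land:

#include <stdio.h>
#include <stdint.h>

#define BCM_TCS_CMD_COMMIT_MASK	0x40000000
#define BCM_TCS_CMD_VALID_SHIFT	29
#define BCM_TCS_CMD_VOTE_MASK	0x3fff
#define BCM_TCS_CMD_VOTE_SHIFT	0

#define BCM_TCS_CMD(valid, vote)					\
	(BCM_TCS_CMD_COMMIT_MASK |					\
	 ((valid) << BCM_TCS_CMD_VALID_SHIFT) |				\
	 (((vote) & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_SHIFT))

int main(void)
{
	uint32_t data = BCM_TCS_CMD(1, 5);	/* enable, aggregated vote of 5 */

	printf("0x%08x\n", data);		/* 0x60000005: commit | valid | vote */
	return 0;
}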
| @@ -210,6 +248,96 @@ static const struct clk_ops clk_rpmh_ops = { | |||
| 210 | .recalc_rate = clk_rpmh_recalc_rate, | 248 | .recalc_rate = clk_rpmh_recalc_rate, |
| 211 | }; | 249 | }; |
| 212 | 250 | ||
| 251 | static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable) | ||
| 252 | { | ||
| 253 | struct tcs_cmd cmd = { 0 }; | ||
| 254 | u32 cmd_state; | ||
| 255 | int ret; | ||
| 256 | |||
| 257 | mutex_lock(&rpmh_clk_lock); | ||
| 258 | |||
| 259 | cmd_state = 0; | ||
| 260 | if (enable) { | ||
| 261 | cmd_state = 1; | ||
| 262 | if (c->aggr_state) | ||
| 263 | cmd_state = c->aggr_state; | ||
| 264 | } | ||
| 265 | |||
| 266 | if (c->last_sent_aggr_state == cmd_state) { | ||
| 267 | mutex_unlock(&rpmh_clk_lock); | ||
| 268 | return 0; | ||
| 269 | } | ||
| 270 | |||
| 271 | cmd.addr = c->res_addr; | ||
| 272 | cmd.data = BCM_TCS_CMD(enable, cmd_state); | ||
| 273 | |||
| 274 | ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1); | ||
| 275 | if (ret) { | ||
| 276 | dev_err(c->dev, "set active state of %s failed: (%d)\n", | ||
| 277 | c->res_name, ret); | ||
| 278 | mutex_unlock(&rpmh_clk_lock); | ||
| 279 | return ret; | ||
| 280 | } | ||
| 281 | |||
| 282 | c->last_sent_aggr_state = cmd_state; | ||
| 283 | |||
| 284 | mutex_unlock(&rpmh_clk_lock); | ||
| 285 | |||
| 286 | return 0; | ||
| 287 | } | ||
| 288 | |||
| 289 | static int clk_rpmh_bcm_prepare(struct clk_hw *hw) | ||
| 290 | { | ||
| 291 | struct clk_rpmh *c = to_clk_rpmh(hw); | ||
| 292 | |||
| 293 | return clk_rpmh_bcm_send_cmd(c, true); | ||
| 294 | }; | ||
| 295 | |||
| 296 | static void clk_rpmh_bcm_unprepare(struct clk_hw *hw) | ||
| 297 | { | ||
| 298 | struct clk_rpmh *c = to_clk_rpmh(hw); | ||
| 299 | |||
| 300 | clk_rpmh_bcm_send_cmd(c, false); | ||
| 301 | }; | ||
| 302 | |||
| 303 | static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate, | ||
| 304 | unsigned long parent_rate) | ||
| 305 | { | ||
| 306 | struct clk_rpmh *c = to_clk_rpmh(hw); | ||
| 307 | |||
| 308 | c->aggr_state = rate / c->unit; | ||
| 309 | /* | ||
| 310 | * Since any non-zero value sent to hw would result in enabling the | ||
| 311 | * clock, only send the value if the clock has already been prepared. | ||
| 312 | */ | ||
| 313 | if (clk_hw_is_prepared(hw)) | ||
| 314 | clk_rpmh_bcm_send_cmd(c, true); | ||
| 315 | |||
| 316 | return 0; | ||
| 317 | }; | ||
| 318 | |||
| 319 | static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate, | ||
| 320 | unsigned long *parent_rate) | ||
| 321 | { | ||
| 322 | return rate; | ||
| 323 | } | ||
| 324 | |||
| 325 | static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw, | ||
| 326 | unsigned long prate) | ||
| 327 | { | ||
| 328 | struct clk_rpmh *c = to_clk_rpmh(hw); | ||
| 329 | |||
| 330 | return c->aggr_state * c->unit; | ||
| 331 | } | ||
| 332 | |||
| 333 | static const struct clk_ops clk_rpmh_bcm_ops = { | ||
| 334 | .prepare = clk_rpmh_bcm_prepare, | ||
| 335 | .unprepare = clk_rpmh_bcm_unprepare, | ||
| 336 | .set_rate = clk_rpmh_bcm_set_rate, | ||
| 337 | .round_rate = clk_rpmh_round_rate, | ||
| 338 | .recalc_rate = clk_rpmh_bcm_recalc_rate, | ||
| 339 | }; | ||
| 340 | |||
| 213 | /* Resource name must match resource id present in cmd-db. */ | 341 | /* Resource name must match resource id present in cmd-db. */ |
| 214 | DEFINE_CLK_RPMH_ARC(sdm845, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2); | 342 | DEFINE_CLK_RPMH_ARC(sdm845, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2); |
| 215 | DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2); | 343 | DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 2); |
| @@ -217,6 +345,7 @@ DEFINE_CLK_RPMH_VRM(sdm845, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2); | |||
| 217 | DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1); | 345 | DEFINE_CLK_RPMH_VRM(sdm845, rf_clk1, rf_clk1_ao, "rfclka1", 1); |
| 218 | DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1); | 346 | DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1); |
| 219 | DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1); | 347 | DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1); |
| 348 | DEFINE_CLK_RPMH_BCM(sdm845, ipa, "IP0"); | ||
| 220 | 349 | ||
| 221 | static struct clk_hw *sdm845_rpmh_clocks[] = { | 350 | static struct clk_hw *sdm845_rpmh_clocks[] = { |
| 222 | [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw, | 351 | [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw, |
| @@ -231,6 +360,7 @@ static struct clk_hw *sdm845_rpmh_clocks[] = { | |||
| 231 | [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw, | 360 | [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw, |
| 232 | [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw, | 361 | [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw, |
| 233 | [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw, | 362 | [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw, |
| 363 | [RPMH_IPA_CLK] = &sdm845_ipa.hw, | ||
| 234 | }; | 364 | }; |
| 235 | 365 | ||
| 236 | static const struct clk_rpmh_desc clk_rpmh_sdm845 = { | 366 | static const struct clk_rpmh_desc clk_rpmh_sdm845 = { |
| @@ -267,6 +397,8 @@ static int clk_rpmh_probe(struct platform_device *pdev) | |||
| 267 | 397 | ||
| 268 | for (i = 0; i < desc->num_clks; i++) { | 398 | for (i = 0; i < desc->num_clks; i++) { |
| 269 | u32 res_addr; | 399 | u32 res_addr; |
| 400 | size_t aux_data_len; | ||
| 401 | const struct bcm_db *data; | ||
| 270 | 402 | ||
| 271 | rpmh_clk = to_clk_rpmh(hw_clks[i]); | 403 | rpmh_clk = to_clk_rpmh(hw_clks[i]); |
| 272 | res_addr = cmd_db_read_addr(rpmh_clk->res_name); | 404 | res_addr = cmd_db_read_addr(rpmh_clk->res_name); |
| @@ -275,6 +407,20 @@ static int clk_rpmh_probe(struct platform_device *pdev) | |||
| 275 | rpmh_clk->res_name); | 407 | rpmh_clk->res_name); |
| 276 | return -ENODEV; | 408 | return -ENODEV; |
| 277 | } | 409 | } |
| 410 | |||
| 411 | data = cmd_db_read_aux_data(rpmh_clk->res_name, &aux_data_len); | ||
| 412 | if (IS_ERR(data)) { | ||
| 413 | ret = PTR_ERR(data); | ||
| 414 | dev_err(&pdev->dev, | ||
| 415 | "error reading RPMh aux data for %s (%d)\n", | ||
| 416 | rpmh_clk->res_name, ret); | ||
| 417 | return ret; | ||
| 418 | } | ||
| 419 | |||
| 420 | /* Convert unit from kHz to Hz */ | ||
| 421 | if (aux_data_len == sizeof(*data)) | ||
| 422 | rpmh_clk->unit = le32_to_cpu(data->unit) * 1000ULL; | ||
| 423 | |||
| 278 | rpmh_clk->res_addr += res_addr; | 424 | rpmh_clk->res_addr += res_addr; |
| 279 | rpmh_clk->dev = &pdev->dev; | 425 | rpmh_clk->dev = &pdev->dev; |
| 280 | 426 | ||
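Taken together, the BCM additions to clk-rpmh.c turn an ordinary clk API call into an RPMh bus-bandwidth vote: probe() reads the bcm_db aux data and scales unit from kHz to Hz, set_rate() divides the requested rate by unit to form the aggregated vote, and prepare()/unprepare() send that vote (or zero) via rpmh_write_async(). A rough consumer-side sketch, assuming a driver that looks the clock up under a hypothetical "ipa" name; the device pointer and target rate are illustrative only:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_ipa_clk(struct device *dev)
{
	struct clk *ipa;
	int ret;

	ipa = devm_clk_get(dev, "ipa");		/* hypothetical lookup name */
	if (IS_ERR(ipa))
		return PTR_ERR(ipa);

	/* stored as aggr_state = rate / unit; sent as the BCM vote on prepare */
	ret = clk_set_rate(ipa, 60000000);
	if (ret)
		return ret;

	return clk_prepare_enable(ipa);
}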
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index d3aadaeb2903..22dd42ad9223 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c | |||
| @@ -655,10 +655,73 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = { | |||
| 655 | .num_clks = ARRAY_SIZE(qcs404_clks), | 655 | .num_clks = ARRAY_SIZE(qcs404_clks), |
| 656 | }; | 656 | }; |
| 657 | 657 | ||
| 658 | /* msm8998 */ | ||
| 659 | DEFINE_CLK_SMD_RPM(msm8998, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); | ||
| 660 | DEFINE_CLK_SMD_RPM(msm8998, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); | ||
| 661 | DEFINE_CLK_SMD_RPM(msm8998, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0); | ||
| 662 | DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, div_clk1, div_clk1_a, 0xb); | ||
| 663 | DEFINE_CLK_SMD_RPM(msm8998, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0); | ||
| 664 | DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, ln_bb_clk1, ln_bb_clk1_a, 1); | ||
| 665 | DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, ln_bb_clk2, ln_bb_clk2_a, 2); | ||
| 666 | DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, ln_bb_clk3_pin, ln_bb_clk3_a_pin, | ||
| 667 | 3); | ||
| 668 | DEFINE_CLK_SMD_RPM(msm8998, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk, | ||
| 669 | QCOM_SMD_RPM_MMAXI_CLK, 0); | ||
| 670 | DEFINE_CLK_SMD_RPM(msm8998, aggre1_noc_clk, aggre1_noc_a_clk, | ||
| 671 | QCOM_SMD_RPM_AGGR_CLK, 1); | ||
| 672 | DEFINE_CLK_SMD_RPM(msm8998, aggre2_noc_clk, aggre2_noc_a_clk, | ||
| 673 | QCOM_SMD_RPM_AGGR_CLK, 2); | ||
| 674 | DEFINE_CLK_SMD_RPM_QDSS(msm8998, qdss_clk, qdss_a_clk, | ||
| 675 | QCOM_SMD_RPM_MISC_CLK, 1); | ||
| 676 | DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk1, rf_clk1_a, 4); | ||
| 677 | DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk2_pin, rf_clk2_a_pin, 5); | ||
| 678 | DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6); | ||
| 679 | DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6); | ||
| 680 | static struct clk_smd_rpm *msm8998_clks[] = { | ||
| 681 | [RPM_SMD_SNOC_CLK] = &msm8998_snoc_clk, | ||
| 682 | [RPM_SMD_SNOC_A_CLK] = &msm8998_snoc_a_clk, | ||
| 683 | [RPM_SMD_CNOC_CLK] = &msm8998_cnoc_clk, | ||
| 684 | [RPM_SMD_CNOC_A_CLK] = &msm8998_cnoc_a_clk, | ||
| 685 | [RPM_SMD_CE1_CLK] = &msm8998_ce1_clk, | ||
| 686 | [RPM_SMD_CE1_A_CLK] = &msm8998_ce1_a_clk, | ||
| 687 | [RPM_SMD_DIV_CLK1] = &msm8998_div_clk1, | ||
| 688 | [RPM_SMD_DIV_A_CLK1] = &msm8998_div_clk1_a, | ||
| 689 | [RPM_SMD_IPA_CLK] = &msm8998_ipa_clk, | ||
| 690 | [RPM_SMD_IPA_A_CLK] = &msm8998_ipa_a_clk, | ||
| 691 | [RPM_SMD_LN_BB_CLK1] = &msm8998_ln_bb_clk1, | ||
| 692 | [RPM_SMD_LN_BB_CLK1_A] = &msm8998_ln_bb_clk1_a, | ||
| 693 | [RPM_SMD_LN_BB_CLK2] = &msm8998_ln_bb_clk2, | ||
| 694 | [RPM_SMD_LN_BB_CLK2_A] = &msm8998_ln_bb_clk2_a, | ||
| 695 | [RPM_SMD_LN_BB_CLK3_PIN] = &msm8998_ln_bb_clk3_pin, | ||
| 696 | [RPM_SMD_LN_BB_CLK3_A_PIN] = &msm8998_ln_bb_clk3_a_pin, | ||
| 697 | [RPM_SMD_MMAXI_CLK] = &msm8998_mmssnoc_axi_rpm_clk, | ||
| 698 | [RPM_SMD_MMAXI_A_CLK] = &msm8998_mmssnoc_axi_rpm_a_clk, | ||
| 699 | [RPM_SMD_AGGR1_NOC_CLK] = &msm8998_aggre1_noc_clk, | ||
| 700 | [RPM_SMD_AGGR1_NOC_A_CLK] = &msm8998_aggre1_noc_a_clk, | ||
| 701 | [RPM_SMD_AGGR2_NOC_CLK] = &msm8998_aggre2_noc_clk, | ||
| 702 | [RPM_SMD_AGGR2_NOC_A_CLK] = &msm8998_aggre2_noc_a_clk, | ||
| 703 | [RPM_SMD_QDSS_CLK] = &msm8998_qdss_clk, | ||
| 704 | [RPM_SMD_QDSS_A_CLK] = &msm8998_qdss_a_clk, | ||
| 705 | [RPM_SMD_RF_CLK1] = &msm8998_rf_clk1, | ||
| 706 | [RPM_SMD_RF_CLK1_A] = &msm8998_rf_clk1_a, | ||
| 707 | [RPM_SMD_RF_CLK2_PIN] = &msm8998_rf_clk2_pin, | ||
| 708 | [RPM_SMD_RF_CLK2_A_PIN] = &msm8998_rf_clk2_a_pin, | ||
| 709 | [RPM_SMD_RF_CLK3] = &msm8998_rf_clk3, | ||
| 710 | [RPM_SMD_RF_CLK3_A] = &msm8998_rf_clk3_a, | ||
| 711 | [RPM_SMD_RF_CLK3_PIN] = &msm8998_rf_clk3_pin, | ||
| 712 | [RPM_SMD_RF_CLK3_A_PIN] = &msm8998_rf_clk3_a_pin, | ||
| 713 | }; | ||
| 714 | |||
| 715 | static const struct rpm_smd_clk_desc rpm_clk_msm8998 = { | ||
| 716 | .clks = msm8998_clks, | ||
| 717 | .num_clks = ARRAY_SIZE(msm8998_clks), | ||
| 718 | }; | ||
| 719 | |||
| 658 | static const struct of_device_id rpm_smd_clk_match_table[] = { | 720 | static const struct of_device_id rpm_smd_clk_match_table[] = { |
| 659 | { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, | 721 | { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, |
| 660 | { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 }, | 722 | { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 }, |
| 661 | { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 }, | 723 | { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 }, |
| 724 | { .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 }, | ||
| 662 | { .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 }, | 725 | { .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 }, |
| 663 | { } | 726 | { } |
| 664 | }; | 727 | }; |
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c index 0a48ed56833b..a6b2f86112d8 100644 --- a/drivers/clk/qcom/common.c +++ b/drivers/clk/qcom/common.c | |||
| @@ -231,6 +231,8 @@ int qcom_cc_really_probe(struct platform_device *pdev, | |||
| 231 | struct gdsc_desc *scd; | 231 | struct gdsc_desc *scd; |
| 232 | size_t num_clks = desc->num_clks; | 232 | size_t num_clks = desc->num_clks; |
| 233 | struct clk_regmap **rclks = desc->clks; | 233 | struct clk_regmap **rclks = desc->clks; |
| 234 | size_t num_clk_hws = desc->num_clk_hws; | ||
| 235 | struct clk_hw **clk_hws = desc->clk_hws; | ||
| 234 | 236 | ||
| 235 | cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL); | 237 | cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL); |
| 236 | if (!cc) | 238 | if (!cc) |
| @@ -269,6 +271,12 @@ int qcom_cc_really_probe(struct platform_device *pdev, | |||
| 269 | 271 | ||
| 270 | qcom_cc_drop_protected(dev, cc); | 272 | qcom_cc_drop_protected(dev, cc); |
| 271 | 273 | ||
| 274 | for (i = 0; i < num_clk_hws; i++) { | ||
| 275 | ret = devm_clk_hw_register(dev, clk_hws[i]); | ||
| 276 | if (ret) | ||
| 277 | return ret; | ||
| 278 | } | ||
| 279 | |||
| 272 | for (i = 0; i < num_clks; i++) { | 280 | for (i = 0; i < num_clks; i++) { |
| 273 | if (!rclks[i]) | 281 | if (!rclks[i]) |
| 274 | continue; | 282 | continue; |
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h index 4aa33ee70bae..1e2a8bdac55a 100644 --- a/drivers/clk/qcom/common.h +++ b/drivers/clk/qcom/common.h | |||
| @@ -27,6 +27,8 @@ struct qcom_cc_desc { | |||
| 27 | size_t num_resets; | 27 | size_t num_resets; |
| 28 | struct gdsc **gdscs; | 28 | struct gdsc **gdscs; |
| 29 | size_t num_gdscs; | 29 | size_t num_gdscs; |
| 30 | struct clk_hw **clk_hws; | ||
| 31 | size_t num_clk_hws; | ||
| 30 | }; | 32 | }; |
| 31 | 33 | ||
| 32 | /** | 34 | /** |
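The new clk_hws/num_clk_hws members let qcom_cc_really_probe() register a controller's plain clk_hw helpers (fixed-rate XO models, fixed-factor dividers) itself, which is what allows the hand-rolled devm_clk_hw_register() loops to be dropped from the gcc/mmcc probe functions below. A minimal sketch of how a driver describes those helpers after this change; the gcc_example_* identifiers are placeholders, not names from the patch:

static struct clk_hw *gcc_example_hws[] = {
	&xo.hw,				/* e.g. a fixed-rate model of the board XO */
};

static const struct qcom_cc_desc gcc_example_desc = {
	.config		= &gcc_example_regmap_config,
	.clks		= gcc_example_clocks,
	.num_clks	= ARRAY_SIZE(gcc_example_clocks),
	.clk_hws	= gcc_example_hws,
	.num_clk_hws	= ARRAY_SIZE(gcc_example_hws),
};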
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index 505c6263141d..0e32892b438c 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c | |||
| @@ -4715,18 +4715,12 @@ static const struct qcom_cc_desc gcc_ipq8074_desc = { | |||
| 4715 | .num_clks = ARRAY_SIZE(gcc_ipq8074_clks), | 4715 | .num_clks = ARRAY_SIZE(gcc_ipq8074_clks), |
| 4716 | .resets = gcc_ipq8074_resets, | 4716 | .resets = gcc_ipq8074_resets, |
| 4717 | .num_resets = ARRAY_SIZE(gcc_ipq8074_resets), | 4717 | .num_resets = ARRAY_SIZE(gcc_ipq8074_resets), |
| 4718 | .clk_hws = gcc_ipq8074_hws, | ||
| 4719 | .num_clk_hws = ARRAY_SIZE(gcc_ipq8074_hws), | ||
| 4718 | }; | 4720 | }; |
| 4719 | 4721 | ||
| 4720 | static int gcc_ipq8074_probe(struct platform_device *pdev) | 4722 | static int gcc_ipq8074_probe(struct platform_device *pdev) |
| 4721 | { | 4723 | { |
| 4722 | int ret, i; | ||
| 4723 | |||
| 4724 | for (i = 0; i < ARRAY_SIZE(gcc_ipq8074_hws); i++) { | ||
| 4725 | ret = devm_clk_hw_register(&pdev->dev, gcc_ipq8074_hws[i]); | ||
| 4726 | if (ret) | ||
| 4727 | return ret; | ||
| 4728 | } | ||
| 4729 | |||
| 4730 | return qcom_cc_probe(pdev, &gcc_ipq8074_desc); | 4724 | return qcom_cc_probe(pdev, &gcc_ipq8074_desc); |
| 4731 | } | 4725 | } |
| 4732 | 4726 | ||
diff --git a/drivers/clk/qcom/gcc-mdm9615.c b/drivers/clk/qcom/gcc-mdm9615.c index 849046fbed6d..8c6d93144b9c 100644 --- a/drivers/clk/qcom/gcc-mdm9615.c +++ b/drivers/clk/qcom/gcc-mdm9615.c | |||
| @@ -1702,6 +1702,8 @@ static const struct qcom_cc_desc gcc_mdm9615_desc = { | |||
| 1702 | .num_clks = ARRAY_SIZE(gcc_mdm9615_clks), | 1702 | .num_clks = ARRAY_SIZE(gcc_mdm9615_clks), |
| 1703 | .resets = gcc_mdm9615_resets, | 1703 | .resets = gcc_mdm9615_resets, |
| 1704 | .num_resets = ARRAY_SIZE(gcc_mdm9615_resets), | 1704 | .num_resets = ARRAY_SIZE(gcc_mdm9615_resets), |
| 1705 | .clk_hws = gcc_mdm9615_hws, | ||
| 1706 | .num_clk_hws = ARRAY_SIZE(gcc_mdm9615_hws), | ||
| 1705 | }; | 1707 | }; |
| 1706 | 1708 | ||
| 1707 | static const struct of_device_id gcc_mdm9615_match_table[] = { | 1709 | static const struct of_device_id gcc_mdm9615_match_table[] = { |
| @@ -1712,21 +1714,12 @@ MODULE_DEVICE_TABLE(of, gcc_mdm9615_match_table); | |||
| 1712 | 1714 | ||
| 1713 | static int gcc_mdm9615_probe(struct platform_device *pdev) | 1715 | static int gcc_mdm9615_probe(struct platform_device *pdev) |
| 1714 | { | 1716 | { |
| 1715 | struct device *dev = &pdev->dev; | ||
| 1716 | struct regmap *regmap; | 1717 | struct regmap *regmap; |
| 1717 | int ret; | ||
| 1718 | int i; | ||
| 1719 | 1718 | ||
| 1720 | regmap = qcom_cc_map(pdev, &gcc_mdm9615_desc); | 1719 | regmap = qcom_cc_map(pdev, &gcc_mdm9615_desc); |
| 1721 | if (IS_ERR(regmap)) | 1720 | if (IS_ERR(regmap)) |
| 1722 | return PTR_ERR(regmap); | 1721 | return PTR_ERR(regmap); |
| 1723 | 1722 | ||
| 1724 | for (i = 0; i < ARRAY_SIZE(gcc_mdm9615_hws); i++) { | ||
| 1725 | ret = devm_clk_hw_register(dev, gcc_mdm9615_hws[i]); | ||
| 1726 | if (ret) | ||
| 1727 | return ret; | ||
| 1728 | } | ||
| 1729 | |||
| 1730 | return qcom_cc_really_probe(pdev, &gcc_mdm9615_desc, regmap); | 1723 | return qcom_cc_really_probe(pdev, &gcc_mdm9615_desc, regmap); |
| 1731 | } | 1724 | } |
| 1732 | 1725 | ||
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c index 9d136172c27c..4632b9272b7f 100644 --- a/drivers/clk/qcom/gcc-msm8996.c +++ b/drivers/clk/qcom/gcc-msm8996.c | |||
| @@ -3656,6 +3656,8 @@ static const struct qcom_cc_desc gcc_msm8996_desc = { | |||
| 3656 | .num_resets = ARRAY_SIZE(gcc_msm8996_resets), | 3656 | .num_resets = ARRAY_SIZE(gcc_msm8996_resets), |
| 3657 | .gdscs = gcc_msm8996_gdscs, | 3657 | .gdscs = gcc_msm8996_gdscs, |
| 3658 | .num_gdscs = ARRAY_SIZE(gcc_msm8996_gdscs), | 3658 | .num_gdscs = ARRAY_SIZE(gcc_msm8996_gdscs), |
| 3659 | .clk_hws = gcc_msm8996_hws, | ||
| 3660 | .num_clk_hws = ARRAY_SIZE(gcc_msm8996_hws), | ||
| 3659 | }; | 3661 | }; |
| 3660 | 3662 | ||
| 3661 | static const struct of_device_id gcc_msm8996_match_table[] = { | 3663 | static const struct of_device_id gcc_msm8996_match_table[] = { |
| @@ -3666,8 +3668,6 @@ MODULE_DEVICE_TABLE(of, gcc_msm8996_match_table); | |||
| 3666 | 3668 | ||
| 3667 | static int gcc_msm8996_probe(struct platform_device *pdev) | 3669 | static int gcc_msm8996_probe(struct platform_device *pdev) |
| 3668 | { | 3670 | { |
| 3669 | struct device *dev = &pdev->dev; | ||
| 3670 | int i, ret; | ||
| 3671 | struct regmap *regmap; | 3671 | struct regmap *regmap; |
| 3672 | 3672 | ||
| 3673 | regmap = qcom_cc_map(pdev, &gcc_msm8996_desc); | 3673 | regmap = qcom_cc_map(pdev, &gcc_msm8996_desc); |
| @@ -3680,12 +3680,6 @@ static int gcc_msm8996_probe(struct platform_device *pdev) | |||
| 3680 | */ | 3680 | */ |
| 3681 | regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21)); | 3681 | regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21)); |
| 3682 | 3682 | ||
| 3683 | for (i = 0; i < ARRAY_SIZE(gcc_msm8996_hws); i++) { | ||
| 3684 | ret = devm_clk_hw_register(dev, gcc_msm8996_hws[i]); | ||
| 3685 | if (ret) | ||
| 3686 | return ret; | ||
| 3687 | } | ||
| 3688 | |||
| 3689 | return qcom_cc_really_probe(pdev, &gcc_msm8996_desc, regmap); | 3683 | return qcom_cc_really_probe(pdev, &gcc_msm8996_desc, regmap); |
| 3690 | } | 3684 | } |
| 3691 | 3685 | ||
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c index 1b779396e04f..c240fba794c7 100644 --- a/drivers/clk/qcom/gcc-msm8998.c +++ b/drivers/clk/qcom/gcc-msm8998.c | |||
| @@ -1112,6 +1112,7 @@ static struct clk_rcg2 ufs_axi_clk_src = { | |||
| 1112 | 1112 | ||
| 1113 | static const struct freq_tbl ftbl_usb30_master_clk_src[] = { | 1113 | static const struct freq_tbl ftbl_usb30_master_clk_src[] = { |
| 1114 | F(19200000, P_XO, 1, 0, 0), | 1114 | F(19200000, P_XO, 1, 0, 0), |
| 1115 | F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0), | ||
| 1115 | F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0), | 1116 | F(120000000, P_GPLL0_OUT_MAIN, 5, 0, 0), |
| 1116 | F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), | 1117 | F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), |
| 1117 | { } | 1118 | { } |
| @@ -1189,6 +1190,7 @@ static struct clk_branch gcc_aggre1_ufs_axi_clk = { | |||
| 1189 | "ufs_axi_clk_src", | 1190 | "ufs_axi_clk_src", |
| 1190 | }, | 1191 | }, |
| 1191 | .num_parents = 1, | 1192 | .num_parents = 1, |
| 1193 | .flags = CLK_SET_RATE_PARENT, | ||
| 1192 | .ops = &clk_branch2_ops, | 1194 | .ops = &clk_branch2_ops, |
| 1193 | }, | 1195 | }, |
| 1194 | }, | 1196 | }, |
| @@ -1206,6 +1208,7 @@ static struct clk_branch gcc_aggre1_usb3_axi_clk = { | |||
| 1206 | "usb30_master_clk_src", | 1208 | "usb30_master_clk_src", |
| 1207 | }, | 1209 | }, |
| 1208 | .num_parents = 1, | 1210 | .num_parents = 1, |
| 1211 | .flags = CLK_SET_RATE_PARENT, | ||
| 1209 | .ops = &clk_branch2_ops, | 1212 | .ops = &clk_branch2_ops, |
| 1210 | }, | 1213 | }, |
| 1211 | }, | 1214 | }, |
| @@ -1288,6 +1291,7 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = { | |||
| 1288 | "blsp1_qup1_i2c_apps_clk_src", | 1291 | "blsp1_qup1_i2c_apps_clk_src", |
| 1289 | }, | 1292 | }, |
| 1290 | .num_parents = 1, | 1293 | .num_parents = 1, |
| 1294 | .flags = CLK_SET_RATE_PARENT, | ||
| 1291 | .ops = &clk_branch2_ops, | 1295 | .ops = &clk_branch2_ops, |
| 1292 | }, | 1296 | }, |
| 1293 | }, | 1297 | }, |
| @@ -1305,6 +1309,7 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = { | |||
| 1305 | "blsp1_qup1_spi_apps_clk_src", | 1309 | "blsp1_qup1_spi_apps_clk_src", |
| 1306 | }, | 1310 | }, |
| 1307 | .num_parents = 1, | 1311 | .num_parents = 1, |
| 1312 | .flags = CLK_SET_RATE_PARENT, | ||
| 1308 | .ops = &clk_branch2_ops, | 1313 | .ops = &clk_branch2_ops, |
| 1309 | }, | 1314 | }, |
| 1310 | }, | 1315 | }, |
| @@ -1322,6 +1327,7 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = { | |||
| 1322 | "blsp1_qup2_i2c_apps_clk_src", | 1327 | "blsp1_qup2_i2c_apps_clk_src", |
| 1323 | }, | 1328 | }, |
| 1324 | .num_parents = 1, | 1329 | .num_parents = 1, |
| 1330 | .flags = CLK_SET_RATE_PARENT, | ||
| 1325 | .ops = &clk_branch2_ops, | 1331 | .ops = &clk_branch2_ops, |
| 1326 | }, | 1332 | }, |
| 1327 | }, | 1333 | }, |
| @@ -1339,6 +1345,7 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = { | |||
| 1339 | "blsp1_qup2_spi_apps_clk_src", | 1345 | "blsp1_qup2_spi_apps_clk_src", |
| 1340 | }, | 1346 | }, |
| 1341 | .num_parents = 1, | 1347 | .num_parents = 1, |
| 1348 | .flags = CLK_SET_RATE_PARENT, | ||
| 1342 | .ops = &clk_branch2_ops, | 1349 | .ops = &clk_branch2_ops, |
| 1343 | }, | 1350 | }, |
| 1344 | }, | 1351 | }, |
| @@ -1356,6 +1363,7 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = { | |||
| 1356 | "blsp1_qup3_i2c_apps_clk_src", | 1363 | "blsp1_qup3_i2c_apps_clk_src", |
| 1357 | }, | 1364 | }, |
| 1358 | .num_parents = 1, | 1365 | .num_parents = 1, |
| 1366 | .flags = CLK_SET_RATE_PARENT, | ||
| 1359 | .ops = &clk_branch2_ops, | 1367 | .ops = &clk_branch2_ops, |
| 1360 | }, | 1368 | }, |
| 1361 | }, | 1369 | }, |
| @@ -1373,6 +1381,7 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = { | |||
| 1373 | "blsp1_qup3_spi_apps_clk_src", | 1381 | "blsp1_qup3_spi_apps_clk_src", |
| 1374 | }, | 1382 | }, |
| 1375 | .num_parents = 1, | 1383 | .num_parents = 1, |
| 1384 | .flags = CLK_SET_RATE_PARENT, | ||
| 1376 | .ops = &clk_branch2_ops, | 1385 | .ops = &clk_branch2_ops, |
| 1377 | }, | 1386 | }, |
| 1378 | }, | 1387 | }, |
| @@ -1390,6 +1399,7 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = { | |||
| 1390 | "blsp1_qup4_i2c_apps_clk_src", | 1399 | "blsp1_qup4_i2c_apps_clk_src", |
| 1391 | }, | 1400 | }, |
| 1392 | .num_parents = 1, | 1401 | .num_parents = 1, |
| 1402 | .flags = CLK_SET_RATE_PARENT, | ||
| 1393 | .ops = &clk_branch2_ops, | 1403 | .ops = &clk_branch2_ops, |
| 1394 | }, | 1404 | }, |
| 1395 | }, | 1405 | }, |
| @@ -1407,6 +1417,7 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = { | |||
| 1407 | "blsp1_qup4_spi_apps_clk_src", | 1417 | "blsp1_qup4_spi_apps_clk_src", |
| 1408 | }, | 1418 | }, |
| 1409 | .num_parents = 1, | 1419 | .num_parents = 1, |
| 1420 | .flags = CLK_SET_RATE_PARENT, | ||
| 1410 | .ops = &clk_branch2_ops, | 1421 | .ops = &clk_branch2_ops, |
| 1411 | }, | 1422 | }, |
| 1412 | }, | 1423 | }, |
| @@ -1424,6 +1435,7 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = { | |||
| 1424 | "blsp1_qup5_i2c_apps_clk_src", | 1435 | "blsp1_qup5_i2c_apps_clk_src", |
| 1425 | }, | 1436 | }, |
| 1426 | .num_parents = 1, | 1437 | .num_parents = 1, |
| 1438 | .flags = CLK_SET_RATE_PARENT, | ||
| 1427 | .ops = &clk_branch2_ops, | 1439 | .ops = &clk_branch2_ops, |
| 1428 | }, | 1440 | }, |
| 1429 | }, | 1441 | }, |
| @@ -1441,6 +1453,7 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = { | |||
| 1441 | "blsp1_qup5_spi_apps_clk_src", | 1453 | "blsp1_qup5_spi_apps_clk_src", |
| 1442 | }, | 1454 | }, |
| 1443 | .num_parents = 1, | 1455 | .num_parents = 1, |
| 1456 | .flags = CLK_SET_RATE_PARENT, | ||
| 1444 | .ops = &clk_branch2_ops, | 1457 | .ops = &clk_branch2_ops, |
| 1445 | }, | 1458 | }, |
| 1446 | }, | 1459 | }, |
| @@ -1458,6 +1471,7 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = { | |||
| 1458 | "blsp1_qup6_i2c_apps_clk_src", | 1471 | "blsp1_qup6_i2c_apps_clk_src", |
| 1459 | }, | 1472 | }, |
| 1460 | .num_parents = 1, | 1473 | .num_parents = 1, |
| 1474 | .flags = CLK_SET_RATE_PARENT, | ||
| 1461 | .ops = &clk_branch2_ops, | 1475 | .ops = &clk_branch2_ops, |
| 1462 | }, | 1476 | }, |
| 1463 | }, | 1477 | }, |
| @@ -1475,6 +1489,7 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = { | |||
| 1475 | "blsp1_qup6_spi_apps_clk_src", | 1489 | "blsp1_qup6_spi_apps_clk_src", |
| 1476 | }, | 1490 | }, |
| 1477 | .num_parents = 1, | 1491 | .num_parents = 1, |
| 1492 | .flags = CLK_SET_RATE_PARENT, | ||
| 1478 | .ops = &clk_branch2_ops, | 1493 | .ops = &clk_branch2_ops, |
| 1479 | }, | 1494 | }, |
| 1480 | }, | 1495 | }, |
| @@ -1505,6 +1520,7 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = { | |||
| 1505 | "blsp1_uart1_apps_clk_src", | 1520 | "blsp1_uart1_apps_clk_src", |
| 1506 | }, | 1521 | }, |
| 1507 | .num_parents = 1, | 1522 | .num_parents = 1, |
| 1523 | .flags = CLK_SET_RATE_PARENT, | ||
| 1508 | .ops = &clk_branch2_ops, | 1524 | .ops = &clk_branch2_ops, |
| 1509 | }, | 1525 | }, |
| 1510 | }, | 1526 | }, |
| @@ -1522,6 +1538,7 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = { | |||
| 1522 | "blsp1_uart2_apps_clk_src", | 1538 | "blsp1_uart2_apps_clk_src", |
| 1523 | }, | 1539 | }, |
| 1524 | .num_parents = 1, | 1540 | .num_parents = 1, |
| 1541 | .flags = CLK_SET_RATE_PARENT, | ||
| 1525 | .ops = &clk_branch2_ops, | 1542 | .ops = &clk_branch2_ops, |
| 1526 | }, | 1543 | }, |
| 1527 | }, | 1544 | }, |
| @@ -1539,6 +1556,7 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = { | |||
| 1539 | "blsp1_uart3_apps_clk_src", | 1556 | "blsp1_uart3_apps_clk_src", |
| 1540 | }, | 1557 | }, |
| 1541 | .num_parents = 1, | 1558 | .num_parents = 1, |
| 1559 | .flags = CLK_SET_RATE_PARENT, | ||
| 1542 | .ops = &clk_branch2_ops, | 1560 | .ops = &clk_branch2_ops, |
| 1543 | }, | 1561 | }, |
| 1544 | }, | 1562 | }, |
| @@ -1569,6 +1587,7 @@ static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = { | |||
| 1569 | "blsp2_qup1_i2c_apps_clk_src", | 1587 | "blsp2_qup1_i2c_apps_clk_src", |
| 1570 | }, | 1588 | }, |
| 1571 | .num_parents = 1, | 1589 | .num_parents = 1, |
| 1590 | .flags = CLK_SET_RATE_PARENT, | ||
| 1572 | .ops = &clk_branch2_ops, | 1591 | .ops = &clk_branch2_ops, |
| 1573 | }, | 1592 | }, |
| 1574 | }, | 1593 | }, |
| @@ -1586,6 +1605,7 @@ static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = { | |||
| 1586 | "blsp2_qup1_spi_apps_clk_src", | 1605 | "blsp2_qup1_spi_apps_clk_src", |
| 1587 | }, | 1606 | }, |
| 1588 | .num_parents = 1, | 1607 | .num_parents = 1, |
| 1608 | .flags = CLK_SET_RATE_PARENT, | ||
| 1589 | .ops = &clk_branch2_ops, | 1609 | .ops = &clk_branch2_ops, |
| 1590 | }, | 1610 | }, |
| 1591 | }, | 1611 | }, |
| @@ -1603,6 +1623,7 @@ static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = { | |||
| 1603 | "blsp2_qup2_i2c_apps_clk_src", | 1623 | "blsp2_qup2_i2c_apps_clk_src", |
| 1604 | }, | 1624 | }, |
| 1605 | .num_parents = 1, | 1625 | .num_parents = 1, |
| 1626 | .flags = CLK_SET_RATE_PARENT, | ||
| 1606 | .ops = &clk_branch2_ops, | 1627 | .ops = &clk_branch2_ops, |
| 1607 | }, | 1628 | }, |
| 1608 | }, | 1629 | }, |
| @@ -1620,6 +1641,7 @@ static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = { | |||
| 1620 | "blsp2_qup2_spi_apps_clk_src", | 1641 | "blsp2_qup2_spi_apps_clk_src", |
| 1621 | }, | 1642 | }, |
| 1622 | .num_parents = 1, | 1643 | .num_parents = 1, |
| 1644 | .flags = CLK_SET_RATE_PARENT, | ||
| 1623 | .ops = &clk_branch2_ops, | 1645 | .ops = &clk_branch2_ops, |
| 1624 | }, | 1646 | }, |
| 1625 | }, | 1647 | }, |
| @@ -1637,6 +1659,7 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = { | |||
| 1637 | "blsp2_qup3_i2c_apps_clk_src", | 1659 | "blsp2_qup3_i2c_apps_clk_src", |
| 1638 | }, | 1660 | }, |
| 1639 | .num_parents = 1, | 1661 | .num_parents = 1, |
| 1662 | .flags = CLK_SET_RATE_PARENT, | ||
| 1640 | .ops = &clk_branch2_ops, | 1663 | .ops = &clk_branch2_ops, |
| 1641 | }, | 1664 | }, |
| 1642 | }, | 1665 | }, |
| @@ -1654,6 +1677,7 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = { | |||
| 1654 | "blsp2_qup3_spi_apps_clk_src", | 1677 | "blsp2_qup3_spi_apps_clk_src", |
| 1655 | }, | 1678 | }, |
| 1656 | .num_parents = 1, | 1679 | .num_parents = 1, |
| 1680 | .flags = CLK_SET_RATE_PARENT, | ||
| 1657 | .ops = &clk_branch2_ops, | 1681 | .ops = &clk_branch2_ops, |
| 1658 | }, | 1682 | }, |
| 1659 | }, | 1683 | }, |
| @@ -1671,6 +1695,7 @@ static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = { | |||
| 1671 | "blsp2_qup4_i2c_apps_clk_src", | 1695 | "blsp2_qup4_i2c_apps_clk_src", |
| 1672 | }, | 1696 | }, |
| 1673 | .num_parents = 1, | 1697 | .num_parents = 1, |
| 1698 | .flags = CLK_SET_RATE_PARENT, | ||
| 1674 | .ops = &clk_branch2_ops, | 1699 | .ops = &clk_branch2_ops, |
| 1675 | }, | 1700 | }, |
| 1676 | }, | 1701 | }, |
| @@ -1688,6 +1713,7 @@ static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = { | |||
| 1688 | "blsp2_qup4_spi_apps_clk_src", | 1713 | "blsp2_qup4_spi_apps_clk_src", |
| 1689 | }, | 1714 | }, |
| 1690 | .num_parents = 1, | 1715 | .num_parents = 1, |
| 1716 | .flags = CLK_SET_RATE_PARENT, | ||
| 1691 | .ops = &clk_branch2_ops, | 1717 | .ops = &clk_branch2_ops, |
| 1692 | }, | 1718 | }, |
| 1693 | }, | 1719 | }, |
| @@ -1705,6 +1731,7 @@ static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = { | |||
| 1705 | "blsp2_qup5_i2c_apps_clk_src", | 1731 | "blsp2_qup5_i2c_apps_clk_src", |
| 1706 | }, | 1732 | }, |
| 1707 | .num_parents = 1, | 1733 | .num_parents = 1, |
| 1734 | .flags = CLK_SET_RATE_PARENT, | ||
| 1708 | .ops = &clk_branch2_ops, | 1735 | .ops = &clk_branch2_ops, |
| 1709 | }, | 1736 | }, |
| 1710 | }, | 1737 | }, |
| @@ -1722,6 +1749,7 @@ static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = { | |||
| 1722 | "blsp2_qup5_spi_apps_clk_src", | 1749 | "blsp2_qup5_spi_apps_clk_src", |
| 1723 | }, | 1750 | }, |
| 1724 | .num_parents = 1, | 1751 | .num_parents = 1, |
| 1752 | .flags = CLK_SET_RATE_PARENT, | ||
| 1725 | .ops = &clk_branch2_ops, | 1753 | .ops = &clk_branch2_ops, |
| 1726 | }, | 1754 | }, |
| 1727 | }, | 1755 | }, |
| @@ -1739,6 +1767,7 @@ static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = { | |||
| 1739 | "blsp2_qup6_i2c_apps_clk_src", | 1767 | "blsp2_qup6_i2c_apps_clk_src", |
| 1740 | }, | 1768 | }, |
| 1741 | .num_parents = 1, | 1769 | .num_parents = 1, |
| 1770 | .flags = CLK_SET_RATE_PARENT, | ||
| 1742 | .ops = &clk_branch2_ops, | 1771 | .ops = &clk_branch2_ops, |
| 1743 | }, | 1772 | }, |
| 1744 | }, | 1773 | }, |
| @@ -1756,6 +1785,7 @@ static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = { | |||
| 1756 | "blsp2_qup6_spi_apps_clk_src", | 1785 | "blsp2_qup6_spi_apps_clk_src", |
| 1757 | }, | 1786 | }, |
| 1758 | .num_parents = 1, | 1787 | .num_parents = 1, |
| 1788 | .flags = CLK_SET_RATE_PARENT, | ||
| 1759 | .ops = &clk_branch2_ops, | 1789 | .ops = &clk_branch2_ops, |
| 1760 | }, | 1790 | }, |
| 1761 | }, | 1791 | }, |
| @@ -1786,6 +1816,7 @@ static struct clk_branch gcc_blsp2_uart1_apps_clk = { | |||
| 1786 | "blsp2_uart1_apps_clk_src", | 1816 | "blsp2_uart1_apps_clk_src", |
| 1787 | }, | 1817 | }, |
| 1788 | .num_parents = 1, | 1818 | .num_parents = 1, |
| 1819 | .flags = CLK_SET_RATE_PARENT, | ||
| 1789 | .ops = &clk_branch2_ops, | 1820 | .ops = &clk_branch2_ops, |
| 1790 | }, | 1821 | }, |
| 1791 | }, | 1822 | }, |
| @@ -1803,6 +1834,7 @@ static struct clk_branch gcc_blsp2_uart2_apps_clk = { | |||
| 1803 | "blsp2_uart2_apps_clk_src", | 1834 | "blsp2_uart2_apps_clk_src", |
| 1804 | }, | 1835 | }, |
| 1805 | .num_parents = 1, | 1836 | .num_parents = 1, |
| 1837 | .flags = CLK_SET_RATE_PARENT, | ||
| 1806 | .ops = &clk_branch2_ops, | 1838 | .ops = &clk_branch2_ops, |
| 1807 | }, | 1839 | }, |
| 1808 | }, | 1840 | }, |
| @@ -1820,6 +1852,7 @@ static struct clk_branch gcc_blsp2_uart3_apps_clk = { | |||
| 1820 | "blsp2_uart3_apps_clk_src", | 1852 | "blsp2_uart3_apps_clk_src", |
| 1821 | }, | 1853 | }, |
| 1822 | .num_parents = 1, | 1854 | .num_parents = 1, |
| 1855 | .flags = CLK_SET_RATE_PARENT, | ||
| 1823 | .ops = &clk_branch2_ops, | 1856 | .ops = &clk_branch2_ops, |
| 1824 | }, | 1857 | }, |
| 1825 | }, | 1858 | }, |
| @@ -1837,6 +1870,7 @@ static struct clk_branch gcc_cfg_noc_usb3_axi_clk = { | |||
| 1837 | "usb30_master_clk_src", | 1870 | "usb30_master_clk_src", |
| 1838 | }, | 1871 | }, |
| 1839 | .num_parents = 1, | 1872 | .num_parents = 1, |
| 1873 | .flags = CLK_SET_RATE_PARENT, | ||
| 1840 | .ops = &clk_branch2_ops, | 1874 | .ops = &clk_branch2_ops, |
| 1841 | }, | 1875 | }, |
| 1842 | }, | 1876 | }, |
| @@ -1854,6 +1888,7 @@ static struct clk_branch gcc_gp1_clk = { | |||
| 1854 | "gp1_clk_src", | 1888 | "gp1_clk_src", |
| 1855 | }, | 1889 | }, |
| 1856 | .num_parents = 1, | 1890 | .num_parents = 1, |
| 1891 | .flags = CLK_SET_RATE_PARENT, | ||
| 1857 | .ops = &clk_branch2_ops, | 1892 | .ops = &clk_branch2_ops, |
| 1858 | }, | 1893 | }, |
| 1859 | }, | 1894 | }, |
| @@ -1871,6 +1906,7 @@ static struct clk_branch gcc_gp2_clk = { | |||
| 1871 | "gp2_clk_src", | 1906 | "gp2_clk_src", |
| 1872 | }, | 1907 | }, |
| 1873 | .num_parents = 1, | 1908 | .num_parents = 1, |
| 1909 | .flags = CLK_SET_RATE_PARENT, | ||
| 1874 | .ops = &clk_branch2_ops, | 1910 | .ops = &clk_branch2_ops, |
| 1875 | }, | 1911 | }, |
| 1876 | }, | 1912 | }, |
| @@ -1888,6 +1924,7 @@ static struct clk_branch gcc_gp3_clk = { | |||
| 1888 | "gp3_clk_src", | 1924 | "gp3_clk_src", |
| 1889 | }, | 1925 | }, |
| 1890 | .num_parents = 1, | 1926 | .num_parents = 1, |
| 1927 | .flags = CLK_SET_RATE_PARENT, | ||
| 1891 | .ops = &clk_branch2_ops, | 1928 | .ops = &clk_branch2_ops, |
| 1892 | }, | 1929 | }, |
| 1893 | }, | 1930 | }, |
| @@ -1957,6 +1994,7 @@ static struct clk_branch gcc_hmss_ahb_clk = { | |||
| 1957 | "hmss_ahb_clk_src", | 1994 | "hmss_ahb_clk_src", |
| 1958 | }, | 1995 | }, |
| 1959 | .num_parents = 1, | 1996 | .num_parents = 1, |
| 1997 | .flags = CLK_SET_RATE_PARENT, | ||
| 1960 | .ops = &clk_branch2_ops, | 1998 | .ops = &clk_branch2_ops, |
| 1961 | }, | 1999 | }, |
| 1962 | }, | 2000 | }, |
| @@ -1987,6 +2025,7 @@ static struct clk_branch gcc_hmss_rbcpr_clk = { | |||
| 1987 | "hmss_rbcpr_clk_src", | 2025 | "hmss_rbcpr_clk_src", |
| 1988 | }, | 2026 | }, |
| 1989 | .num_parents = 1, | 2027 | .num_parents = 1, |
| 2028 | .flags = CLK_SET_RATE_PARENT, | ||
| 1990 | .ops = &clk_branch2_ops, | 2029 | .ops = &clk_branch2_ops, |
| 1991 | }, | 2030 | }, |
| 1992 | }, | 2031 | }, |
| @@ -2088,6 +2127,7 @@ static struct clk_branch gcc_pcie_0_aux_clk = { | |||
| 2088 | "pcie_aux_clk_src", | 2127 | "pcie_aux_clk_src", |
| 2089 | }, | 2128 | }, |
| 2090 | .num_parents = 1, | 2129 | .num_parents = 1, |
| 2130 | .flags = CLK_SET_RATE_PARENT, | ||
| 2091 | .ops = &clk_branch2_ops, | 2131 | .ops = &clk_branch2_ops, |
| 2092 | }, | 2132 | }, |
| 2093 | }, | 2133 | }, |
| @@ -2157,6 +2197,7 @@ static struct clk_branch gcc_pcie_phy_aux_clk = { | |||
| 2157 | "pcie_aux_clk_src", | 2197 | "pcie_aux_clk_src", |
| 2158 | }, | 2198 | }, |
| 2159 | .num_parents = 1, | 2199 | .num_parents = 1, |
| 2200 | .flags = CLK_SET_RATE_PARENT, | ||
| 2160 | .ops = &clk_branch2_ops, | 2201 | .ops = &clk_branch2_ops, |
| 2161 | }, | 2202 | }, |
| 2162 | }, | 2203 | }, |
| @@ -2174,6 +2215,7 @@ static struct clk_branch gcc_pdm2_clk = { | |||
| 2174 | "pdm2_clk_src", | 2215 | "pdm2_clk_src", |
| 2175 | }, | 2216 | }, |
| 2176 | .num_parents = 1, | 2217 | .num_parents = 1, |
| 2218 | .flags = CLK_SET_RATE_PARENT, | ||
| 2177 | .ops = &clk_branch2_ops, | 2219 | .ops = &clk_branch2_ops, |
| 2178 | }, | 2220 | }, |
| 2179 | }, | 2221 | }, |
| @@ -2243,6 +2285,7 @@ static struct clk_branch gcc_sdcc2_apps_clk = { | |||
| 2243 | "sdcc2_apps_clk_src", | 2285 | "sdcc2_apps_clk_src", |
| 2244 | }, | 2286 | }, |
| 2245 | .num_parents = 1, | 2287 | .num_parents = 1, |
| 2288 | .flags = CLK_SET_RATE_PARENT, | ||
| 2246 | .ops = &clk_branch2_ops, | 2289 | .ops = &clk_branch2_ops, |
| 2247 | }, | 2290 | }, |
| 2248 | }, | 2291 | }, |
| @@ -2273,6 +2316,7 @@ static struct clk_branch gcc_sdcc4_apps_clk = { | |||
| 2273 | "sdcc4_apps_clk_src", | 2316 | "sdcc4_apps_clk_src", |
| 2274 | }, | 2317 | }, |
| 2275 | .num_parents = 1, | 2318 | .num_parents = 1, |
| 2319 | .flags = CLK_SET_RATE_PARENT, | ||
| 2276 | .ops = &clk_branch2_ops, | 2320 | .ops = &clk_branch2_ops, |
| 2277 | }, | 2321 | }, |
| 2278 | }, | 2322 | }, |
| @@ -2316,6 +2360,7 @@ static struct clk_branch gcc_tsif_ref_clk = { | |||
| 2316 | "tsif_ref_clk_src", | 2360 | "tsif_ref_clk_src", |
| 2317 | }, | 2361 | }, |
| 2318 | .num_parents = 1, | 2362 | .num_parents = 1, |
| 2363 | .flags = CLK_SET_RATE_PARENT, | ||
| 2319 | .ops = &clk_branch2_ops, | 2364 | .ops = &clk_branch2_ops, |
| 2320 | }, | 2365 | }, |
| 2321 | }, | 2366 | }, |
| @@ -2346,6 +2391,7 @@ static struct clk_branch gcc_ufs_axi_clk = { | |||
| 2346 | "ufs_axi_clk_src", | 2391 | "ufs_axi_clk_src", |
| 2347 | }, | 2392 | }, |
| 2348 | .num_parents = 1, | 2393 | .num_parents = 1, |
| 2394 | .flags = CLK_SET_RATE_PARENT, | ||
| 2349 | .ops = &clk_branch2_ops, | 2395 | .ops = &clk_branch2_ops, |
| 2350 | }, | 2396 | }, |
| 2351 | }, | 2397 | }, |
| @@ -2441,6 +2487,7 @@ static struct clk_branch gcc_usb30_master_clk = { | |||
| 2441 | "usb30_master_clk_src", | 2487 | "usb30_master_clk_src", |
| 2442 | }, | 2488 | }, |
| 2443 | .num_parents = 1, | 2489 | .num_parents = 1, |
| 2490 | .flags = CLK_SET_RATE_PARENT, | ||
| 2444 | .ops = &clk_branch2_ops, | 2491 | .ops = &clk_branch2_ops, |
| 2445 | }, | 2492 | }, |
| 2446 | }, | 2493 | }, |
| @@ -2458,6 +2505,7 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = { | |||
| 2458 | "usb30_mock_utmi_clk_src", | 2505 | "usb30_mock_utmi_clk_src", |
| 2459 | }, | 2506 | }, |
| 2460 | .num_parents = 1, | 2507 | .num_parents = 1, |
| 2508 | .flags = CLK_SET_RATE_PARENT, | ||
| 2461 | .ops = &clk_branch2_ops, | 2509 | .ops = &clk_branch2_ops, |
| 2462 | }, | 2510 | }, |
| 2463 | }, | 2511 | }, |
| @@ -2488,6 +2536,7 @@ static struct clk_branch gcc_usb3_phy_aux_clk = { | |||
| 2488 | "usb3_phy_aux_clk_src", | 2536 | "usb3_phy_aux_clk_src", |
| 2489 | }, | 2537 | }, |
| 2490 | .num_parents = 1, | 2538 | .num_parents = 1, |
| 2539 | .flags = CLK_SET_RATE_PARENT, | ||
| 2491 | .ops = &clk_branch2_ops, | 2540 | .ops = &clk_branch2_ops, |
| 2492 | }, | 2541 | }, |
| 2493 | }, | 2542 | }, |
| @@ -2495,7 +2544,7 @@ static struct clk_branch gcc_usb3_phy_aux_clk = { | |||
| 2495 | 2544 | ||
| 2496 | static struct clk_branch gcc_usb3_phy_pipe_clk = { | 2545 | static struct clk_branch gcc_usb3_phy_pipe_clk = { |
| 2497 | .halt_reg = 0x50004, | 2546 | .halt_reg = 0x50004, |
| 2498 | .halt_check = BRANCH_HALT, | 2547 | .halt_check = BRANCH_HALT_SKIP, |
| 2499 | .clkr = { | 2548 | .clkr = { |
| 2500 | .enable_reg = 0x50004, | 2549 | .enable_reg = 0x50004, |
| 2501 | .enable_mask = BIT(0), | 2550 | .enable_mask = BIT(0), |
| @@ -2910,6 +2959,10 @@ static const struct regmap_config gcc_msm8998_regmap_config = { | |||
| 2910 | .fast_io = true, | 2959 | .fast_io = true, |
| 2911 | }; | 2960 | }; |
| 2912 | 2961 | ||
| 2962 | static struct clk_hw *gcc_msm8998_hws[] = { | ||
| 2963 | &xo.hw, | ||
| 2964 | }; | ||
| 2965 | |||
| 2913 | static const struct qcom_cc_desc gcc_msm8998_desc = { | 2966 | static const struct qcom_cc_desc gcc_msm8998_desc = { |
| 2914 | .config = &gcc_msm8998_regmap_config, | 2967 | .config = &gcc_msm8998_regmap_config, |
| 2915 | .clks = gcc_msm8998_clocks, | 2968 | .clks = gcc_msm8998_clocks, |
| @@ -2918,6 +2971,8 @@ static const struct qcom_cc_desc gcc_msm8998_desc = { | |||
| 2918 | .num_resets = ARRAY_SIZE(gcc_msm8998_resets), | 2971 | .num_resets = ARRAY_SIZE(gcc_msm8998_resets), |
| 2919 | .gdscs = gcc_msm8998_gdscs, | 2972 | .gdscs = gcc_msm8998_gdscs, |
| 2920 | .num_gdscs = ARRAY_SIZE(gcc_msm8998_gdscs), | 2973 | .num_gdscs = ARRAY_SIZE(gcc_msm8998_gdscs), |
| 2974 | .clk_hws = gcc_msm8998_hws, | ||
| 2975 | .num_clk_hws = ARRAY_SIZE(gcc_msm8998_hws), | ||
| 2921 | }; | 2976 | }; |
| 2922 | 2977 | ||
| 2923 | static int gcc_msm8998_probe(struct platform_device *pdev) | 2978 | static int gcc_msm8998_probe(struct platform_device *pdev) |
| @@ -2937,10 +2992,6 @@ static int gcc_msm8998_probe(struct platform_device *pdev) | |||
| 2937 | if (ret) | 2992 | if (ret) |
| 2938 | return ret; | 2993 | return ret; |
| 2939 | 2994 | ||
| 2940 | ret = devm_clk_hw_register(&pdev->dev, &xo.hw); | ||
| 2941 | if (ret) | ||
| 2942 | return ret; | ||
| 2943 | |||
| 2944 | return qcom_cc_really_probe(pdev, &gcc_msm8998_desc, regmap); | 2995 | return qcom_cc_really_probe(pdev, &gcc_msm8998_desc, regmap); |
| 2945 | } | 2996 | } |
| 2946 | 2997 | ||
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c index 64da032bb9ed..5a62f64ada93 100644 --- a/drivers/clk/qcom/gcc-qcs404.c +++ b/drivers/clk/qcom/gcc-qcs404.c | |||
| @@ -678,6 +678,7 @@ static struct clk_rcg2 blsp1_uart3_apps_clk_src = { | |||
| 678 | .cmd_rcgr = 0x4014, | 678 | .cmd_rcgr = 0x4014, |
| 679 | .mnd_width = 16, | 679 | .mnd_width = 16, |
| 680 | .hid_width = 5, | 680 | .hid_width = 5, |
| 681 | .cfg_off = 0x20, | ||
| 681 | .parent_map = gcc_parent_map_0, | 682 | .parent_map = gcc_parent_map_0, |
| 682 | .freq_tbl = ftbl_blsp1_uart0_apps_clk_src, | 683 | .freq_tbl = ftbl_blsp1_uart0_apps_clk_src, |
| 683 | .clkr.hw.init = &(struct clk_init_data){ | 684 | .clkr.hw.init = &(struct clk_init_data){ |
| @@ -2692,6 +2693,8 @@ static const struct qcom_cc_desc gcc_qcs404_desc = { | |||
| 2692 | .num_clks = ARRAY_SIZE(gcc_qcs404_clocks), | 2693 | .num_clks = ARRAY_SIZE(gcc_qcs404_clocks), |
| 2693 | .resets = gcc_qcs404_resets, | 2694 | .resets = gcc_qcs404_resets, |
| 2694 | .num_resets = ARRAY_SIZE(gcc_qcs404_resets), | 2695 | .num_resets = ARRAY_SIZE(gcc_qcs404_resets), |
| 2696 | .clk_hws = gcc_qcs404_hws, | ||
| 2697 | .num_clk_hws = ARRAY_SIZE(gcc_qcs404_hws), | ||
| 2695 | }; | 2698 | }; |
| 2696 | 2699 | ||
| 2697 | static const struct of_device_id gcc_qcs404_match_table[] = { | 2700 | static const struct of_device_id gcc_qcs404_match_table[] = { |
| @@ -2703,7 +2706,6 @@ MODULE_DEVICE_TABLE(of, gcc_qcs404_match_table); | |||
| 2703 | static int gcc_qcs404_probe(struct platform_device *pdev) | 2706 | static int gcc_qcs404_probe(struct platform_device *pdev) |
| 2704 | { | 2707 | { |
| 2705 | struct regmap *regmap; | 2708 | struct regmap *regmap; |
| 2706 | int ret, i; | ||
| 2707 | 2709 | ||
| 2708 | regmap = qcom_cc_map(pdev, &gcc_qcs404_desc); | 2710 | regmap = qcom_cc_map(pdev, &gcc_qcs404_desc); |
| 2709 | if (IS_ERR(regmap)) | 2711 | if (IS_ERR(regmap)) |
| @@ -2711,12 +2713,6 @@ static int gcc_qcs404_probe(struct platform_device *pdev) | |||
| 2711 | 2713 | ||
| 2712 | clk_alpha_pll_configure(&gpll3_out_main, regmap, &gpll3_config); | 2714 | clk_alpha_pll_configure(&gpll3_out_main, regmap, &gpll3_config); |
| 2713 | 2715 | ||
| 2714 | for (i = 0; i < ARRAY_SIZE(gcc_qcs404_hws); i++) { | ||
| 2715 | ret = devm_clk_hw_register(&pdev->dev, gcc_qcs404_hws[i]); | ||
| 2716 | if (ret) | ||
| 2717 | return ret; | ||
| 2718 | } | ||
| 2719 | |||
| 2720 | return qcom_cc_really_probe(pdev, &gcc_qcs404_desc, regmap); | 2716 | return qcom_cc_really_probe(pdev, &gcc_qcs404_desc, regmap); |
| 2721 | } | 2717 | } |
| 2722 | 2718 | ||
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c index ba239ea4c842..8827db23066f 100644 --- a/drivers/clk/qcom/gcc-sdm660.c +++ b/drivers/clk/qcom/gcc-sdm660.c | |||
| @@ -2420,6 +2420,8 @@ static const struct qcom_cc_desc gcc_sdm660_desc = { | |||
| 2420 | .num_resets = ARRAY_SIZE(gcc_sdm660_resets), | 2420 | .num_resets = ARRAY_SIZE(gcc_sdm660_resets), |
| 2421 | .gdscs = gcc_sdm660_gdscs, | 2421 | .gdscs = gcc_sdm660_gdscs, |
| 2422 | .num_gdscs = ARRAY_SIZE(gcc_sdm660_gdscs), | 2422 | .num_gdscs = ARRAY_SIZE(gcc_sdm660_gdscs), |
| 2423 | .clk_hws = gcc_sdm660_hws, | ||
| 2424 | .num_clk_hws = ARRAY_SIZE(gcc_sdm660_hws), | ||
| 2423 | }; | 2425 | }; |
| 2424 | 2426 | ||
| 2425 | static const struct of_device_id gcc_sdm660_match_table[] = { | 2427 | static const struct of_device_id gcc_sdm660_match_table[] = { |
| @@ -2431,7 +2433,7 @@ MODULE_DEVICE_TABLE(of, gcc_sdm660_match_table); | |||
| 2431 | 2433 | ||
| 2432 | static int gcc_sdm660_probe(struct platform_device *pdev) | 2434 | static int gcc_sdm660_probe(struct platform_device *pdev) |
| 2433 | { | 2435 | { |
| 2434 | int i, ret; | 2436 | int ret; |
| 2435 | struct regmap *regmap; | 2437 | struct regmap *regmap; |
| 2436 | 2438 | ||
| 2437 | regmap = qcom_cc_map(pdev, &gcc_sdm660_desc); | 2439 | regmap = qcom_cc_map(pdev, &gcc_sdm660_desc); |
| @@ -2446,13 +2448,6 @@ static int gcc_sdm660_probe(struct platform_device *pdev) | |||
| 2446 | if (ret) | 2448 | if (ret) |
| 2447 | return ret; | 2449 | return ret; |
| 2448 | 2450 | ||
| 2449 | /* Register the hws */ | ||
| 2450 | for (i = 0; i < ARRAY_SIZE(gcc_sdm660_hws); i++) { | ||
| 2451 | ret = devm_clk_hw_register(&pdev->dev, gcc_sdm660_hws[i]); | ||
| 2452 | if (ret) | ||
| 2453 | return ret; | ||
| 2454 | } | ||
| 2455 | |||
| 2456 | return qcom_cc_really_probe(pdev, &gcc_sdm660_desc, regmap); | 2451 | return qcom_cc_really_probe(pdev, &gcc_sdm660_desc, regmap); |
| 2457 | } | 2452 | } |
| 2458 | 2453 | ||
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index 58fa5c247af1..7131dcf9b060 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c | |||
| @@ -1703,6 +1703,9 @@ static struct clk_branch gcc_pcie_0_pipe_clk = { | |||
| 1703 | .enable_mask = BIT(4), | 1703 | .enable_mask = BIT(4), |
| 1704 | .hw.init = &(struct clk_init_data){ | 1704 | .hw.init = &(struct clk_init_data){ |
| 1705 | .name = "gcc_pcie_0_pipe_clk", | 1705 | .name = "gcc_pcie_0_pipe_clk", |
| 1706 | .parent_names = (const char *[]){ "pcie_0_pipe_clk" }, | ||
| 1707 | .num_parents = 1, | ||
| 1708 | .flags = CLK_SET_RATE_PARENT, | ||
| 1706 | .ops = &clk_branch2_ops, | 1709 | .ops = &clk_branch2_ops, |
| 1707 | }, | 1710 | }, |
| 1708 | }, | 1711 | }, |
| @@ -1802,6 +1805,8 @@ static struct clk_branch gcc_pcie_1_pipe_clk = { | |||
| 1802 | .enable_mask = BIT(30), | 1805 | .enable_mask = BIT(30), |
| 1803 | .hw.init = &(struct clk_init_data){ | 1806 | .hw.init = &(struct clk_init_data){ |
| 1804 | .name = "gcc_pcie_1_pipe_clk", | 1807 | .name = "gcc_pcie_1_pipe_clk", |
| 1808 | .parent_names = (const char *[]){ "pcie_1_pipe_clk" }, | ||
| 1809 | .num_parents = 1, | ||
| 1805 | .ops = &clk_branch2_ops, | 1810 | .ops = &clk_branch2_ops, |
| 1806 | }, | 1811 | }, |
| 1807 | }, | 1812 | }, |
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c index 7d4ee109435c..7235510eac94 100644 --- a/drivers/clk/qcom/mmcc-msm8996.c +++ b/drivers/clk/qcom/mmcc-msm8996.c | |||
| @@ -3347,6 +3347,8 @@ static const struct qcom_cc_desc mmcc_msm8996_desc = { | |||
| 3347 | .num_resets = ARRAY_SIZE(mmcc_msm8996_resets), | 3347 | .num_resets = ARRAY_SIZE(mmcc_msm8996_resets), |
| 3348 | .gdscs = mmcc_msm8996_gdscs, | 3348 | .gdscs = mmcc_msm8996_gdscs, |
| 3349 | .num_gdscs = ARRAY_SIZE(mmcc_msm8996_gdscs), | 3349 | .num_gdscs = ARRAY_SIZE(mmcc_msm8996_gdscs), |
| 3350 | .clk_hws = mmcc_msm8996_hws, | ||
| 3351 | .num_clk_hws = ARRAY_SIZE(mmcc_msm8996_hws), | ||
| 3350 | }; | 3352 | }; |
| 3351 | 3353 | ||
| 3352 | static const struct of_device_id mmcc_msm8996_match_table[] = { | 3354 | static const struct of_device_id mmcc_msm8996_match_table[] = { |
| @@ -3357,8 +3359,6 @@ MODULE_DEVICE_TABLE(of, mmcc_msm8996_match_table); | |||
| 3357 | 3359 | ||
| 3358 | static int mmcc_msm8996_probe(struct platform_device *pdev) | 3360 | static int mmcc_msm8996_probe(struct platform_device *pdev) |
| 3359 | { | 3361 | { |
| 3360 | struct device *dev = &pdev->dev; | ||
| 3361 | int i, ret; | ||
| 3362 | struct regmap *regmap; | 3362 | struct regmap *regmap; |
| 3363 | 3363 | ||
| 3364 | regmap = qcom_cc_map(pdev, &mmcc_msm8996_desc); | 3364 | regmap = qcom_cc_map(pdev, &mmcc_msm8996_desc); |
| @@ -3370,12 +3370,6 @@ static int mmcc_msm8996_probe(struct platform_device *pdev) | |||
| 3370 | /* Disable the NoC FSM for mmss_mmagic_cfg_ahb_clk */ | 3370 | /* Disable the NoC FSM for mmss_mmagic_cfg_ahb_clk */ |
| 3371 | regmap_update_bits(regmap, 0x5054, BIT(15), 0); | 3371 | regmap_update_bits(regmap, 0x5054, BIT(15), 0); |
| 3372 | 3372 | ||
| 3373 | for (i = 0; i < ARRAY_SIZE(mmcc_msm8996_hws); i++) { | ||
| 3374 | ret = devm_clk_hw_register(dev, mmcc_msm8996_hws[i]); | ||
| 3375 | if (ret) | ||
| 3376 | return ret; | ||
| 3377 | } | ||
| 3378 | |||
| 3379 | return qcom_cc_really_probe(pdev, &mmcc_msm8996_desc, regmap); | 3373 | return qcom_cc_really_probe(pdev, &mmcc_msm8996_desc, regmap); |
| 3380 | } | 3374 | } |
| 3381 | 3375 | ||
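The three Qualcomm hunks above (gcc-qcs404, gcc-sdm660, mmcc-msm8996) follow one pattern: the per-driver devm_clk_hw_register() loops are dropped from probe and the clk_hw arrays are instead handed to the common descriptor through the new clk_hws/num_clk_hws fields, so the shared qcom_cc_really_probe() path registers them. A minimal sketch of what that shared loop presumably looks like in drivers/clk/qcom/common.c (the helper name below is hypothetical, not taken from the patch):

	static int qcom_cc_register_hws(struct device *dev,
					const struct qcom_cc_desc *desc)
	{
		size_t i;
		int ret;

		/* Register every bare clk_hw listed in the descriptor. */
		for (i = 0; i < desc->num_clk_hws; i++) {
			ret = devm_clk_hw_register(dev, desc->clk_hws[i]);
			if (ret)
				return ret;
		}

		return 0;
	}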
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c index 10e852518870..4d92b27a6153 100644 --- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c | |||
| @@ -21,7 +21,7 @@ | |||
| 21 | 21 | ||
| 22 | enum clk_ids { | 22 | enum clk_ids { |
| 23 | /* Core Clock Outputs exported to DT */ | 23 | /* Core Clock Outputs exported to DT */ |
| 24 | LAST_DT_CORE_CLK = R8A774A1_CLK_OSC, | 24 | LAST_DT_CORE_CLK = R8A774A1_CLK_CANFD, |
| 25 | 25 | ||
| 26 | /* External Input Clocks */ | 26 | /* External Input Clocks */ |
| 27 | CLK_EXTAL, | 27 | CLK_EXTAL, |
| @@ -102,6 +102,7 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = { | |||
| 102 | DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1), | 102 | DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1), |
| 103 | DEF_FIXED("cpex", R8A774A1_CLK_CPEX, CLK_EXTAL, 2, 1), | 103 | DEF_FIXED("cpex", R8A774A1_CLK_CPEX, CLK_EXTAL, 2, 1), |
| 104 | 104 | ||
| 105 | DEF_DIV6P1("canfd", R8A774A1_CLK_CANFD, CLK_PLL1_DIV4, 0x244), | ||
| 105 | DEF_DIV6P1("csi0", R8A774A1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c), | 106 | DEF_DIV6P1("csi0", R8A774A1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c), |
| 106 | DEF_DIV6P1("mso", R8A774A1_CLK_MSO, CLK_PLL1_DIV4, 0x014), | 107 | DEF_DIV6P1("mso", R8A774A1_CLK_MSO, CLK_PLL1_DIV4, 0x014), |
| 107 | DEF_DIV6P1("hdmi", R8A774A1_CLK_HDMI, CLK_PLL1_DIV4, 0x250), | 108 | DEF_DIV6P1("hdmi", R8A774A1_CLK_HDMI, CLK_PLL1_DIV4, 0x250), |
| @@ -191,6 +192,7 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = { | |||
| 191 | DEF_MOD("gpio2", 910, R8A774A1_CLK_S3D4), | 192 | DEF_MOD("gpio2", 910, R8A774A1_CLK_S3D4), |
| 192 | DEF_MOD("gpio1", 911, R8A774A1_CLK_S3D4), | 193 | DEF_MOD("gpio1", 911, R8A774A1_CLK_S3D4), |
| 193 | DEF_MOD("gpio0", 912, R8A774A1_CLK_S3D4), | 194 | DEF_MOD("gpio0", 912, R8A774A1_CLK_S3D4), |
| 195 | DEF_MOD("can-fd", 914, R8A774A1_CLK_S3D2), | ||
| 194 | DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4), | 196 | DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4), |
| 195 | DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4), | 197 | DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4), |
| 196 | DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6), | 198 | DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6), |
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c index 10b96895d452..34e274f2a273 100644 --- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | 22 | ||
| 23 | enum clk_ids { | 23 | enum clk_ids { |
| 24 | /* Core Clock Outputs exported to DT */ | 24 | /* Core Clock Outputs exported to DT */ |
| 25 | LAST_DT_CORE_CLK = R8A774C0_CLK_CPEX, | 25 | LAST_DT_CORE_CLK = R8A774C0_CLK_CANFD, |
| 26 | 26 | ||
| 27 | /* External Input Clocks */ | 27 | /* External Input Clocks */ |
| 28 | CLK_EXTAL, | 28 | CLK_EXTAL, |
| @@ -33,6 +33,7 @@ enum clk_ids { | |||
| 33 | CLK_PLL1, | 33 | CLK_PLL1, |
| 34 | CLK_PLL3, | 34 | CLK_PLL3, |
| 35 | CLK_PLL0D4, | 35 | CLK_PLL0D4, |
| 36 | CLK_PLL0D6, | ||
| 36 | CLK_PLL0D8, | 37 | CLK_PLL0D8, |
| 37 | CLK_PLL0D20, | 38 | CLK_PLL0D20, |
| 38 | CLK_PLL0D24, | 39 | CLK_PLL0D24, |
| @@ -61,6 +62,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = { | |||
| 61 | 62 | ||
| 62 | DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 1, 100), | 63 | DEF_FIXED(".pll0", CLK_PLL0, CLK_MAIN, 1, 100), |
| 63 | DEF_FIXED(".pll0d4", CLK_PLL0D4, CLK_PLL0, 4, 1), | 64 | DEF_FIXED(".pll0d4", CLK_PLL0D4, CLK_PLL0, 4, 1), |
| 65 | DEF_FIXED(".pll0d6", CLK_PLL0D6, CLK_PLL0, 6, 1), | ||
| 64 | DEF_FIXED(".pll0d8", CLK_PLL0D8, CLK_PLL0, 8, 1), | 66 | DEF_FIXED(".pll0d8", CLK_PLL0D8, CLK_PLL0, 8, 1), |
| 65 | DEF_FIXED(".pll0d20", CLK_PLL0D20, CLK_PLL0, 20, 1), | 67 | DEF_FIXED(".pll0d20", CLK_PLL0D20, CLK_PLL0, 20, 1), |
| 66 | DEF_FIXED(".pll0d24", CLK_PLL0D24, CLK_PLL0, 24, 1), | 68 | DEF_FIXED(".pll0d24", CLK_PLL0D24, CLK_PLL0, 24, 1), |
| @@ -112,6 +114,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = { | |||
| 112 | DEF_GEN3_PE("s3d2c", R8A774C0_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2), | 114 | DEF_GEN3_PE("s3d2c", R8A774C0_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2), |
| 113 | DEF_GEN3_PE("s3d4c", R8A774C0_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4), | 115 | DEF_GEN3_PE("s3d4c", R8A774C0_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4), |
| 114 | 116 | ||
| 117 | DEF_DIV6P1("canfd", R8A774C0_CLK_CANFD, CLK_PLL0D6, 0x244), | ||
| 115 | DEF_DIV6P1("csi0", R8A774C0_CLK_CSI0, CLK_PLL1D2, 0x00c), | 118 | DEF_DIV6P1("csi0", R8A774C0_CLK_CSI0, CLK_PLL1D2, 0x00c), |
| 116 | DEF_DIV6P1("mso", R8A774C0_CLK_MSO, CLK_PLL1D2, 0x014), | 119 | DEF_DIV6P1("mso", R8A774C0_CLK_MSO, CLK_PLL1D2, 0x014), |
| 117 | 120 | ||
| @@ -119,6 +122,11 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = { | |||
| 119 | }; | 122 | }; |
| 120 | 123 | ||
| 121 | static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = { | 124 | static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = { |
| 125 | DEF_MOD("tmu4", 121, R8A774C0_CLK_S0D6C), | ||
| 126 | DEF_MOD("tmu3", 122, R8A774C0_CLK_S3D2C), | ||
| 127 | DEF_MOD("tmu2", 123, R8A774C0_CLK_S3D2C), | ||
| 128 | DEF_MOD("tmu1", 124, R8A774C0_CLK_S3D2C), | ||
| 129 | DEF_MOD("tmu0", 125, R8A774C0_CLK_CP), | ||
| 122 | DEF_MOD("scif5", 202, R8A774C0_CLK_S3D4C), | 130 | DEF_MOD("scif5", 202, R8A774C0_CLK_S3D4C), |
| 123 | DEF_MOD("scif4", 203, R8A774C0_CLK_S3D4C), | 131 | DEF_MOD("scif4", 203, R8A774C0_CLK_S3D4C), |
| 124 | DEF_MOD("scif3", 204, R8A774C0_CLK_S3D4C), | 132 | DEF_MOD("scif3", 204, R8A774C0_CLK_S3D4C), |
| @@ -172,8 +180,8 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = { | |||
| 172 | DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4), | 180 | DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4), |
| 173 | DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4), | 181 | DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4), |
| 174 | DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0), | 182 | DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0), |
| 175 | DEF_MOD("du1", 723, R8A774C0_CLK_S2D1), | 183 | DEF_MOD("du1", 723, R8A774C0_CLK_S1D1), |
| 176 | DEF_MOD("du0", 724, R8A774C0_CLK_S2D1), | 184 | DEF_MOD("du0", 724, R8A774C0_CLK_S1D1), |
| 177 | DEF_MOD("lvds", 727, R8A774C0_CLK_S2D1), | 185 | DEF_MOD("lvds", 727, R8A774C0_CLK_S2D1), |
| 178 | 186 | ||
| 179 | DEF_MOD("vin5", 806, R8A774C0_CLK_S1D2), | 187 | DEF_MOD("vin5", 806, R8A774C0_CLK_S1D2), |
| @@ -187,6 +195,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = { | |||
| 187 | DEF_MOD("gpio2", 910, R8A774C0_CLK_S3D4), | 195 | DEF_MOD("gpio2", 910, R8A774C0_CLK_S3D4), |
| 188 | DEF_MOD("gpio1", 911, R8A774C0_CLK_S3D4), | 196 | DEF_MOD("gpio1", 911, R8A774C0_CLK_S3D4), |
| 189 | DEF_MOD("gpio0", 912, R8A774C0_CLK_S3D4), | 197 | DEF_MOD("gpio0", 912, R8A774C0_CLK_S3D4), |
| 198 | DEF_MOD("can-fd", 914, R8A774C0_CLK_S3D2), | ||
| 190 | DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4), | 199 | DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4), |
| 191 | DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4), | 200 | DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4), |
| 192 | DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2), | 201 | DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2), |
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c index 25a3083b6764..f9e07fcc0d96 100644 --- a/drivers/clk/renesas/r8a77980-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c | |||
| @@ -41,6 +41,7 @@ enum clk_ids { | |||
| 41 | CLK_S2, | 41 | CLK_S2, |
| 42 | CLK_S3, | 42 | CLK_S3, |
| 43 | CLK_SDSRC, | 43 | CLK_SDSRC, |
| 44 | CLK_RPCSRC, | ||
| 44 | CLK_OCO, | 45 | CLK_OCO, |
| 45 | 46 | ||
| 46 | /* Module Clocks */ | 47 | /* Module Clocks */ |
| @@ -65,8 +66,14 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = { | |||
| 65 | DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), | 66 | DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), |
| 66 | DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), | 67 | DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), |
| 67 | DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), | 68 | DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), |
| 69 | DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), | ||
| 68 | DEF_RATE(".oco", CLK_OCO, 32768), | 70 | DEF_RATE(".oco", CLK_OCO, 32768), |
| 69 | 71 | ||
| 72 | DEF_BASE("rpc", R8A77980_CLK_RPC, CLK_TYPE_GEN3_RPC, | ||
| 73 | CLK_RPCSRC), | ||
| 74 | DEF_BASE("rpcd2", R8A77980_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, | ||
| 75 | R8A77980_CLK_RPC), | ||
| 76 | |||
| 70 | /* Core Clock Outputs */ | 77 | /* Core Clock Outputs */ |
| 71 | DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), | 78 | DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), |
| 72 | DEF_FIXED("ztrd2", R8A77980_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), | 79 | DEF_FIXED("ztrd2", R8A77980_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), |
| @@ -164,6 +171,7 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = { | |||
| 164 | DEF_MOD("gpio1", 911, R8A77980_CLK_CP), | 171 | DEF_MOD("gpio1", 911, R8A77980_CLK_CP), |
| 165 | DEF_MOD("gpio0", 912, R8A77980_CLK_CP), | 172 | DEF_MOD("gpio0", 912, R8A77980_CLK_CP), |
| 166 | DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2), | 173 | DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2), |
| 174 | DEF_MOD("rpc-if", 917, R8A77980_CLK_RPC), | ||
| 167 | DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6), | 175 | DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6), |
| 168 | DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6), | 176 | DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6), |
| 169 | DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2), | 177 | DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2), |
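For orientation, the RPC clock topology these r8a77980 entries describe (implemented by the rcar-gen3-cpg.c and rcar-gen3-cpg.h hunks further below) is roughly:

	pll1 --[RPCCKCR bits 4:3, /5 or /6]--> .rpcsrc
	.rpcsrc --[RPCCKCR bits 2:0 divider, gate bit 8]--> rpc
	rpc --[fixed /2, gate bit 9]--> rpcd2

so the new "rpc-if" module clock 917 runs from the gated RPC output, and RPCD2 is its half-rate companion.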
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c index be2ccbd6d623..9a8071a8114d 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.c +++ b/drivers/clk/renesas/rcar-gen3-cpg.c | |||
| @@ -30,6 +30,21 @@ | |||
| 30 | 30 | ||
| 31 | #define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */ | 31 | #define CPG_RCKCR_CKSEL BIT(15) /* RCLK Clock Source Select */ |
| 32 | 32 | ||
| 33 | static spinlock_t cpg_lock; | ||
| 34 | |||
| 35 | static void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set) | ||
| 36 | { | ||
| 37 | unsigned long flags; | ||
| 38 | u32 val; | ||
| 39 | |||
| 40 | spin_lock_irqsave(&cpg_lock, flags); | ||
| 41 | val = readl(reg); | ||
| 42 | val &= ~clear; | ||
| 43 | val |= set; | ||
| 44 | writel(val, reg); | ||
| 45 | spin_unlock_irqrestore(&cpg_lock, flags); | ||
| 46 | }; | ||
| 47 | |||
| 33 | struct cpg_simple_notifier { | 48 | struct cpg_simple_notifier { |
| 34 | struct notifier_block nb; | 49 | struct notifier_block nb; |
| 35 | void __iomem *reg; | 50 | void __iomem *reg; |
| @@ -118,7 +133,6 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 118 | struct cpg_z_clk *zclk = to_z_clk(hw); | 133 | struct cpg_z_clk *zclk = to_z_clk(hw); |
| 119 | unsigned int mult; | 134 | unsigned int mult; |
| 120 | unsigned int i; | 135 | unsigned int i; |
| 121 | u32 val, kick; | ||
| 122 | 136 | ||
| 123 | /* Factor of 2 is for fixed divider */ | 137 | /* Factor of 2 is for fixed divider */ |
| 124 | mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate); | 138 | mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate); |
| @@ -127,17 +141,14 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 127 | if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK) | 141 | if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK) |
| 128 | return -EBUSY; | 142 | return -EBUSY; |
| 129 | 143 | ||
| 130 | val = readl(zclk->reg) & ~zclk->mask; | 144 | cpg_reg_modify(zclk->reg, zclk->mask, |
| 131 | val |= ((32 - mult) << __ffs(zclk->mask)) & zclk->mask; | 145 | ((32 - mult) << __ffs(zclk->mask)) & zclk->mask); |
| 132 | writel(val, zclk->reg); | ||
| 133 | 146 | ||
| 134 | /* | 147 | /* |
| 135 | * Set KICK bit in FRQCRB to update hardware setting and wait for | 148 | * Set KICK bit in FRQCRB to update hardware setting and wait for |
| 136 | * clock change completion. | 149 | * clock change completion. |
| 137 | */ | 150 | */ |
| 138 | kick = readl(zclk->kick_reg); | 151 | cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK); |
| 139 | kick |= CPG_FRQCRB_KICK; | ||
| 140 | writel(kick, zclk->kick_reg); | ||
| 141 | 152 | ||
| 142 | /* | 153 | /* |
| 143 | * Note: There is no HW information about the worst case latency. | 154 | * Note: There is no HW information about the worst case latency. |
| @@ -266,12 +277,10 @@ static const struct sd_div_table cpg_sd_div_table[] = { | |||
| 266 | static int cpg_sd_clock_enable(struct clk_hw *hw) | 277 | static int cpg_sd_clock_enable(struct clk_hw *hw) |
| 267 | { | 278 | { |
| 268 | struct sd_clock *clock = to_sd_clock(hw); | 279 | struct sd_clock *clock = to_sd_clock(hw); |
| 269 | u32 val = readl(clock->csn.reg); | ||
| 270 | |||
| 271 | val &= ~(CPG_SD_STP_MASK); | ||
| 272 | val |= clock->div_table[clock->cur_div_idx].val & CPG_SD_STP_MASK; | ||
| 273 | 280 | ||
| 274 | writel(val, clock->csn.reg); | 281 | cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK, |
| 282 | clock->div_table[clock->cur_div_idx].val & | ||
| 283 | CPG_SD_STP_MASK); | ||
| 275 | 284 | ||
| 276 | return 0; | 285 | return 0; |
| 277 | } | 286 | } |
| @@ -280,7 +289,7 @@ static void cpg_sd_clock_disable(struct clk_hw *hw) | |||
| 280 | { | 289 | { |
| 281 | struct sd_clock *clock = to_sd_clock(hw); | 290 | struct sd_clock *clock = to_sd_clock(hw); |
| 282 | 291 | ||
| 283 | writel(readl(clock->csn.reg) | CPG_SD_STP_MASK, clock->csn.reg); | 292 | cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK); |
| 284 | } | 293 | } |
| 285 | 294 | ||
| 286 | static int cpg_sd_clock_is_enabled(struct clk_hw *hw) | 295 | static int cpg_sd_clock_is_enabled(struct clk_hw *hw) |
| @@ -327,7 +336,6 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 327 | { | 336 | { |
| 328 | struct sd_clock *clock = to_sd_clock(hw); | 337 | struct sd_clock *clock = to_sd_clock(hw); |
| 329 | unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate); | 338 | unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate); |
| 330 | u32 val; | ||
| 331 | unsigned int i; | 339 | unsigned int i; |
| 332 | 340 | ||
| 333 | for (i = 0; i < clock->div_num; i++) | 341 | for (i = 0; i < clock->div_num; i++) |
| @@ -339,10 +347,9 @@ static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 339 | 347 | ||
| 340 | clock->cur_div_idx = i; | 348 | clock->cur_div_idx = i; |
| 341 | 349 | ||
| 342 | val = readl(clock->csn.reg); | 350 | cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK, |
| 343 | val &= ~(CPG_SD_STP_MASK | CPG_SD_FC_MASK); | 351 | clock->div_table[i].val & |
| 344 | val |= clock->div_table[i].val & (CPG_SD_STP_MASK | CPG_SD_FC_MASK); | 352 | (CPG_SD_STP_MASK | CPG_SD_FC_MASK)); |
| 345 | writel(val, clock->csn.reg); | ||
| 346 | 353 | ||
| 347 | return 0; | 354 | return 0; |
| 348 | } | 355 | } |
| @@ -415,6 +422,92 @@ free_clock: | |||
| 415 | return clk; | 422 | return clk; |
| 416 | } | 423 | } |
| 417 | 424 | ||
| 425 | struct rpc_clock { | ||
| 426 | struct clk_divider div; | ||
| 427 | struct clk_gate gate; | ||
| 428 | /* | ||
| 429 | * One notifier covers both RPC and RPCD2 clocks as they are both | ||
| 430 | * controlled by the same RPCCKCR register... | ||
| 431 | */ | ||
| 432 | struct cpg_simple_notifier csn; | ||
| 433 | }; | ||
| 434 | |||
| 435 | static const struct clk_div_table cpg_rpcsrc_div_table[] = { | ||
| 436 | { 2, 5 }, { 3, 6 }, { 0, 0 }, | ||
| 437 | }; | ||
| 438 | |||
| 439 | static const struct clk_div_table cpg_rpc_div_table[] = { | ||
| 440 | { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 }, | ||
| 441 | }; | ||
| 442 | |||
| 443 | static struct clk * __init cpg_rpc_clk_register(const char *name, | ||
| 444 | void __iomem *base, const char *parent_name, | ||
| 445 | struct raw_notifier_head *notifiers) | ||
| 446 | { | ||
| 447 | struct rpc_clock *rpc; | ||
| 448 | struct clk *clk; | ||
| 449 | |||
| 450 | rpc = kzalloc(sizeof(*rpc), GFP_KERNEL); | ||
| 451 | if (!rpc) | ||
| 452 | return ERR_PTR(-ENOMEM); | ||
| 453 | |||
| 454 | rpc->div.reg = base + CPG_RPCCKCR; | ||
| 455 | rpc->div.width = 3; | ||
| 456 | rpc->div.table = cpg_rpc_div_table; | ||
| 457 | rpc->div.lock = &cpg_lock; | ||
| 458 | |||
| 459 | rpc->gate.reg = base + CPG_RPCCKCR; | ||
| 460 | rpc->gate.bit_idx = 8; | ||
| 461 | rpc->gate.flags = CLK_GATE_SET_TO_DISABLE; | ||
| 462 | rpc->gate.lock = &cpg_lock; | ||
| 463 | |||
| 464 | rpc->csn.reg = base + CPG_RPCCKCR; | ||
| 465 | |||
| 466 | clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL, | ||
| 467 | &rpc->div.hw, &clk_divider_ops, | ||
| 468 | &rpc->gate.hw, &clk_gate_ops, 0); | ||
| 469 | if (IS_ERR(clk)) { | ||
| 470 | kfree(rpc); | ||
| 471 | return clk; | ||
| 472 | } | ||
| 473 | |||
| 474 | cpg_simple_notifier_register(notifiers, &rpc->csn); | ||
| 475 | return clk; | ||
| 476 | } | ||
| 477 | |||
| 478 | struct rpcd2_clock { | ||
| 479 | struct clk_fixed_factor fixed; | ||
| 480 | struct clk_gate gate; | ||
| 481 | }; | ||
| 482 | |||
| 483 | static struct clk * __init cpg_rpcd2_clk_register(const char *name, | ||
| 484 | void __iomem *base, | ||
| 485 | const char *parent_name) | ||
| 486 | { | ||
| 487 | struct rpcd2_clock *rpcd2; | ||
| 488 | struct clk *clk; | ||
| 489 | |||
| 490 | rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL); | ||
| 491 | if (!rpcd2) | ||
| 492 | return ERR_PTR(-ENOMEM); | ||
| 493 | |||
| 494 | rpcd2->fixed.mult = 1; | ||
| 495 | rpcd2->fixed.div = 2; | ||
| 496 | |||
| 497 | rpcd2->gate.reg = base + CPG_RPCCKCR; | ||
| 498 | rpcd2->gate.bit_idx = 9; | ||
| 499 | rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE; | ||
| 500 | rpcd2->gate.lock = &cpg_lock; | ||
| 501 | |||
| 502 | clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL, | ||
| 503 | &rpcd2->fixed.hw, &clk_fixed_factor_ops, | ||
| 504 | &rpcd2->gate.hw, &clk_gate_ops, 0); | ||
| 505 | if (IS_ERR(clk)) | ||
| 506 | kfree(rpcd2); | ||
| 507 | |||
| 508 | return clk; | ||
| 509 | } | ||
| 510 | |||
| 418 | 511 | ||
| 419 | static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata; | 512 | static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata; |
| 420 | static unsigned int cpg_clk_extalr __initdata; | 513 | static unsigned int cpg_clk_extalr __initdata; |
| @@ -593,6 +686,21 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, | |||
| 593 | } | 686 | } |
| 594 | break; | 687 | break; |
| 595 | 688 | ||
| 689 | case CLK_TYPE_GEN3_RPCSRC: | ||
| 690 | return clk_register_divider_table(NULL, core->name, | ||
| 691 | __clk_get_name(parent), 0, | ||
| 692 | base + CPG_RPCCKCR, 3, 2, 0, | ||
| 693 | cpg_rpcsrc_div_table, | ||
| 694 | &cpg_lock); | ||
| 695 | |||
| 696 | case CLK_TYPE_GEN3_RPC: | ||
| 697 | return cpg_rpc_clk_register(core->name, base, | ||
| 698 | __clk_get_name(parent), notifiers); | ||
| 699 | |||
| 700 | case CLK_TYPE_GEN3_RPCD2: | ||
| 701 | return cpg_rpcd2_clk_register(core->name, base, | ||
| 702 | __clk_get_name(parent)); | ||
| 703 | |||
| 596 | default: | 704 | default: |
| 597 | return ERR_PTR(-EINVAL); | 705 | return ERR_PTR(-EINVAL); |
| 598 | } | 706 | } |
| @@ -613,5 +721,8 @@ int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config, | |||
| 613 | if (attr) | 721 | if (attr) |
| 614 | cpg_quirks = (uintptr_t)attr->data; | 722 | cpg_quirks = (uintptr_t)attr->data; |
| 615 | pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks); | 723 | pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks); |
| 724 | |||
| 725 | spin_lock_init(&cpg_lock); | ||
| 726 | |||
| 616 | return 0; | 727 | return 0; |
| 617 | } | 728 | } |
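One note on the rcar-gen3-cpg.c hunks above: cpg_reg_modify(reg, clear, set) is a locked read-modify-write that first clears the bits in 'clear' and then sets the bits in 'set', so the SD, Z and new RPC clock paths can all update shared CPG registers without losing each other's bits. The calls below are lifted from the hunks above and shown side by side purely for illustration:

	/* Gate the SD clock: set the STP bits, leave the divider field alone. */
	cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);

	/* Reprogram the Z clock multiplier field without touching other bits. */
	cpg_reg_modify(zclk->reg, zclk->mask,
		       ((32 - mult) << __ffs(zclk->mask)) & zclk->mask);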
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h index f4fb6cf16688..eac1b057455a 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.h +++ b/drivers/clk/renesas/rcar-gen3-cpg.h | |||
| @@ -23,6 +23,9 @@ enum rcar_gen3_clk_types { | |||
| 23 | CLK_TYPE_GEN3_Z2, | 23 | CLK_TYPE_GEN3_Z2, |
| 24 | CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */ | 24 | CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */ |
| 25 | CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */ | 25 | CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */ |
| 26 | CLK_TYPE_GEN3_RPCSRC, | ||
| 27 | CLK_TYPE_GEN3_RPC, | ||
| 28 | CLK_TYPE_GEN3_RPCD2, | ||
| 26 | 29 | ||
| 27 | /* SoC specific definitions start here */ | 30 | /* SoC specific definitions start here */ |
| 28 | CLK_TYPE_GEN3_SOC_BASE, | 31 | CLK_TYPE_GEN3_SOC_BASE, |
| @@ -57,6 +60,7 @@ struct rcar_gen3_cpg_pll_config { | |||
| 57 | u8 osc_prediv; | 60 | u8 osc_prediv; |
| 58 | }; | 61 | }; |
| 59 | 62 | ||
| 63 | #define CPG_RPCCKCR 0x238 | ||
| 60 | #define CPG_RCKCR 0x240 | 64 | #define CPG_RCKCR 0x240 |
| 61 | 65 | ||
| 62 | struct clk *rcar_gen3_cpg_clk_register(struct device *dev, | 66 | struct clk *rcar_gen3_cpg_clk_register(struct device *dev, |
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c index 7ea20341e870..5ecf28854876 100644 --- a/drivers/clk/rockchip/clk-rk3188.c +++ b/drivers/clk/rockchip/clk-rk3188.c | |||
| @@ -586,12 +586,12 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = { | |||
| 586 | COMPOSITE(0, "dclk_lcdc0_src", mux_pll_src_cpll_gpll_p, 0, | 586 | COMPOSITE(0, "dclk_lcdc0_src", mux_pll_src_cpll_gpll_p, 0, |
| 587 | RK2928_CLKSEL_CON(27), 0, 1, MFLAGS, 8, 8, DFLAGS, | 587 | RK2928_CLKSEL_CON(27), 0, 1, MFLAGS, 8, 8, DFLAGS, |
| 588 | RK2928_CLKGATE_CON(3), 1, GFLAGS), | 588 | RK2928_CLKGATE_CON(3), 1, GFLAGS), |
| 589 | MUX(DCLK_LCDC0, "dclk_lcdc0", mux_rk3066_lcdc0_p, 0, | 589 | MUX(DCLK_LCDC0, "dclk_lcdc0", mux_rk3066_lcdc0_p, CLK_SET_RATE_PARENT, |
| 590 | RK2928_CLKSEL_CON(27), 4, 1, MFLAGS), | 590 | RK2928_CLKSEL_CON(27), 4, 1, MFLAGS), |
| 591 | COMPOSITE(0, "dclk_lcdc1_src", mux_pll_src_cpll_gpll_p, 0, | 591 | COMPOSITE(0, "dclk_lcdc1_src", mux_pll_src_cpll_gpll_p, 0, |
| 592 | RK2928_CLKSEL_CON(28), 0, 1, MFLAGS, 8, 8, DFLAGS, | 592 | RK2928_CLKSEL_CON(28), 0, 1, MFLAGS, 8, 8, DFLAGS, |
| 593 | RK2928_CLKGATE_CON(3), 2, GFLAGS), | 593 | RK2928_CLKGATE_CON(3), 2, GFLAGS), |
| 594 | MUX(DCLK_LCDC1, "dclk_lcdc1", mux_rk3066_lcdc1_p, 0, | 594 | MUX(DCLK_LCDC1, "dclk_lcdc1", mux_rk3066_lcdc1_p, CLK_SET_RATE_PARENT, |
| 595 | RK2928_CLKSEL_CON(28), 4, 1, MFLAGS), | 595 | RK2928_CLKSEL_CON(28), 4, 1, MFLAGS), |
| 596 | 596 | ||
| 597 | COMPOSITE_NOMUX(0, "cif1_pre", "cif_src", 0, | 597 | COMPOSITE_NOMUX(0, "cif1_pre", "cif_src", 0, |
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index faa94adb2a37..65ab5c2f48b0 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c | |||
| @@ -78,17 +78,17 @@ static struct rockchip_pll_rate_table rk3328_pll_rates[] = { | |||
| 78 | 78 | ||
| 79 | static struct rockchip_pll_rate_table rk3328_pll_frac_rates[] = { | 79 | static struct rockchip_pll_rate_table rk3328_pll_frac_rates[] = { |
| 80 | /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */ | 80 | /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */ |
| 81 | RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134217), | 81 | RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134218), |
| 82 | /* vco = 1016064000 */ | 82 | /* vco = 1016064000 */ |
| 83 | RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671088), | 83 | RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671089), |
| 84 | /* vco = 983040000 */ | 84 | /* vco = 983040000 */ |
| 85 | RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671088), | 85 | RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671089), |
| 86 | /* vco = 983040000 */ | 86 | /* vco = 983040000 */ |
| 87 | RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671088), | 87 | RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671089), |
| 88 | /* vco = 860156000 */ | 88 | /* vco = 860156000 */ |
| 89 | RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797894), | 89 | RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797895), |
| 90 | /* vco = 903168000 */ | 90 | /* vco = 903168000 */ |
| 91 | RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066329), | 91 | RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066330), |
| 92 | /* vco = 819200000 */ | 92 | /* vco = 819200000 */ |
| 93 | { /* sentinel */ }, | 93 | { /* sentinel */ }, |
| 94 | }; | 94 | }; |
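The rk3328 table fix above switches the _frac values from truncation to rounding. As a worked check of the first entry, assuming the usual 24 MHz reference on this SoC: 24 MHz / refdiv 3 = 8 MHz per feedback step, so fbdiv + frac/2^24 must equal 1016064000 / 8000000 = 127.008; the fractional part is 0.008 * 2^24 = 134217.728, which rounds to 134218 rather than truncating to 134217 (postdiv1 = postdiv2 = 1, so no further division applies).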
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 59d4d46667ce..54066e6508d3 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c | |||
| @@ -1028,6 +1028,7 @@ static unsigned long __init exynos4_get_xom(void) | |||
| 1028 | xom = readl(chipid_base + 8); | 1028 | xom = readl(chipid_base + 8); |
| 1029 | 1029 | ||
| 1030 | iounmap(chipid_base); | 1030 | iounmap(chipid_base); |
| 1031 | of_node_put(np); | ||
| 1031 | } | 1032 | } |
| 1032 | 1033 | ||
| 1033 | return xom; | 1034 | return xom; |
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c index 93306283d764..8ae44b5db4c2 100644 --- a/drivers/clk/samsung/clk-exynos5-subcmu.c +++ b/drivers/clk/samsung/clk-exynos5-subcmu.c | |||
| @@ -136,15 +136,20 @@ static int __init exynos5_clk_register_subcmu(struct device *parent, | |||
| 136 | { | 136 | { |
| 137 | struct of_phandle_args genpdspec = { .np = pd_node }; | 137 | struct of_phandle_args genpdspec = { .np = pd_node }; |
| 138 | struct platform_device *pdev; | 138 | struct platform_device *pdev; |
| 139 | int ret; | ||
| 140 | |||
| 141 | pdev = platform_device_alloc("exynos5-subcmu", PLATFORM_DEVID_AUTO); | ||
| 142 | if (!pdev) | ||
| 143 | return -ENOMEM; | ||
| 139 | 144 | ||
| 140 | pdev = platform_device_alloc(info->pd_name, -1); | ||
| 141 | pdev->dev.parent = parent; | 145 | pdev->dev.parent = parent; |
| 142 | pdev->driver_override = "exynos5-subcmu"; | ||
| 143 | platform_set_drvdata(pdev, (void *)info); | 146 | platform_set_drvdata(pdev, (void *)info); |
| 144 | of_genpd_add_device(&genpdspec, &pdev->dev); | 147 | of_genpd_add_device(&genpdspec, &pdev->dev); |
| 145 | platform_device_add(pdev); | 148 | ret = platform_device_add(pdev); |
| 149 | if (ret) | ||
| 150 | platform_device_put(pdev); | ||
| 146 | 151 | ||
| 147 | return 0; | 152 | return ret; |
| 148 | } | 153 | } |
| 149 | 154 | ||
| 150 | static int __init exynos5_clk_probe(struct platform_device *pdev) | 155 | static int __init exynos5_clk_probe(struct platform_device *pdev) |
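The clk-exynos5-subcmu.c fix above also shows the general platform-device registration rule: once platform_device_alloc() has succeeded the device is refcounted, so a failed platform_device_add() must be unwound with platform_device_put(), never kfree(). A minimal sketch of that pattern with illustrative names (the device name and data are not taken from the driver):

	static int register_child(struct device *parent, void *data)
	{
		struct platform_device *pdev;
		int ret;

		pdev = platform_device_alloc("example-subdev", PLATFORM_DEVID_AUTO);
		if (!pdev)
			return -ENOMEM;

		pdev->dev.parent = parent;
		platform_set_drvdata(pdev, data);

		ret = platform_device_add(pdev);
		if (ret)
			platform_device_put(pdev);	/* drops the allocation too */

		return ret;
	}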
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index 751e2c4fb65b..dae1c96de933 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c | |||
| @@ -559,7 +559,7 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = { | |||
| 559 | /* ENABLE_ACLK_TOP */ | 559 | /* ENABLE_ACLK_TOP */ |
| 560 | GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400", | 560 | GATE(CLK_ACLK_G3D_400, "aclk_g3d_400", "div_aclk_g3d_400", |
| 561 | ENABLE_ACLK_TOP, 30, CLK_IS_CRITICAL, 0), | 561 | ENABLE_ACLK_TOP, 30, CLK_IS_CRITICAL, 0), |
| 562 | GATE(CLK_ACLK_IMEM_SSX_266, "aclk_imem_ssx_266", | 562 | GATE(CLK_ACLK_IMEM_SSSX_266, "aclk_imem_sssx_266", |
| 563 | "div_aclk_imem_sssx_266", ENABLE_ACLK_TOP, | 563 | "div_aclk_imem_sssx_266", ENABLE_ACLK_TOP, |
| 564 | 29, CLK_IGNORE_UNUSED, 0), | 564 | 29, CLK_IGNORE_UNUSED, 0), |
| 565 | GATE(CLK_ACLK_BUS0_400, "aclk_bus0_400", "div_aclk_bus0_400", | 565 | GATE(CLK_ACLK_BUS0_400, "aclk_bus0_400", "div_aclk_bus0_400", |
| @@ -568,10 +568,10 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = { | |||
| 568 | GATE(CLK_ACLK_BUS1_400, "aclk_bus1_400", "div_aclk_bus1_400", | 568 | GATE(CLK_ACLK_BUS1_400, "aclk_bus1_400", "div_aclk_bus1_400", |
| 569 | ENABLE_ACLK_TOP, 25, | 569 | ENABLE_ACLK_TOP, 25, |
| 570 | CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), | 570 | CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), |
| 571 | GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_266", | 571 | GATE(CLK_ACLK_IMEM_200, "aclk_imem_200", "div_aclk_imem_200", |
| 572 | ENABLE_ACLK_TOP, 24, | 572 | ENABLE_ACLK_TOP, 24, |
| 573 | CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), | 573 | CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, 0), |
| 574 | GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_200", | 574 | GATE(CLK_ACLK_IMEM_266, "aclk_imem_266", "div_aclk_imem_266", |
| 575 | ENABLE_ACLK_TOP, 23, | 575 | ENABLE_ACLK_TOP, 23, |
| 576 | CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), | 576 | CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), |
| 577 | GATE(CLK_ACLK_PERIC_66, "aclk_peric_66", "div_aclk_peric_66_b", | 577 | GATE(CLK_ACLK_PERIC_66, "aclk_peric_66", "div_aclk_peric_66_b", |
| @@ -5467,6 +5467,35 @@ static const struct samsung_cmu_info cam1_cmu_info __initconst = { | |||
| 5467 | .clk_name = "aclk_cam1_400", | 5467 | .clk_name = "aclk_cam1_400", |
| 5468 | }; | 5468 | }; |
| 5469 | 5469 | ||
| 5470 | /* | ||
| 5471 | * Register offset definitions for CMU_IMEM | ||
| 5472 | */ | ||
| 5473 | #define ENABLE_ACLK_IMEM_SLIMSSS 0x080c | ||
| 5474 | #define ENABLE_PCLK_IMEM_SLIMSSS 0x0908 | ||
| 5475 | |||
| 5476 | static const unsigned long imem_clk_regs[] __initconst = { | ||
| 5477 | ENABLE_ACLK_IMEM_SLIMSSS, | ||
| 5478 | ENABLE_PCLK_IMEM_SLIMSSS, | ||
| 5479 | }; | ||
| 5480 | |||
| 5481 | static const struct samsung_gate_clock imem_gate_clks[] __initconst = { | ||
| 5482 | /* ENABLE_ACLK_IMEM_SLIMSSS */ | ||
| 5483 | GATE(CLK_ACLK_SLIMSSS, "aclk_slimsss", "aclk_imem_sssx_266", | ||
| 5484 | ENABLE_ACLK_IMEM_SLIMSSS, 0, CLK_IGNORE_UNUSED, 0), | ||
| 5485 | |||
| 5486 | /* ENABLE_PCLK_IMEM_SLIMSSS */ | ||
| 5487 | GATE(CLK_PCLK_SLIMSSS, "pclk_slimsss", "aclk_imem_200", | ||
| 5488 | ENABLE_PCLK_IMEM_SLIMSSS, 0, CLK_IGNORE_UNUSED, 0), | ||
| 5489 | }; | ||
| 5490 | |||
| 5491 | static const struct samsung_cmu_info imem_cmu_info __initconst = { | ||
| 5492 | .gate_clks = imem_gate_clks, | ||
| 5493 | .nr_gate_clks = ARRAY_SIZE(imem_gate_clks), | ||
| 5494 | .nr_clk_ids = IMEM_NR_CLK, | ||
| 5495 | .clk_regs = imem_clk_regs, | ||
| 5496 | .nr_clk_regs = ARRAY_SIZE(imem_clk_regs), | ||
| 5497 | .clk_name = "aclk_imem_200", | ||
| 5498 | }; | ||
| 5470 | 5499 | ||
| 5471 | struct exynos5433_cmu_data { | 5500 | struct exynos5433_cmu_data { |
| 5472 | struct samsung_clk_reg_dump *clk_save; | 5501 | struct samsung_clk_reg_dump *clk_save; |
| @@ -5655,6 +5684,9 @@ static const struct of_device_id exynos5433_cmu_of_match[] = { | |||
| 5655 | .compatible = "samsung,exynos5433-cmu-mscl", | 5684 | .compatible = "samsung,exynos5433-cmu-mscl", |
| 5656 | .data = &mscl_cmu_info, | 5685 | .data = &mscl_cmu_info, |
| 5657 | }, { | 5686 | }, { |
| 5687 | .compatible = "samsung,exynos5433-cmu-imem", | ||
| 5688 | .data = &imem_cmu_info, | ||
| 5689 | }, { | ||
| 5658 | }, | 5690 | }, |
| 5659 | }; | 5691 | }; |
| 5660 | 5692 | ||
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c index 884067e4f1a1..f38f0e24e3b6 100644 --- a/drivers/clk/samsung/clk-s3c2443.c +++ b/drivers/clk/samsung/clk-s3c2443.c | |||
| @@ -389,7 +389,7 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f, | |||
| 389 | ARRAY_SIZE(s3c2450_gates)); | 389 | ARRAY_SIZE(s3c2450_gates)); |
| 390 | samsung_clk_register_alias(ctx, s3c2450_aliases, | 390 | samsung_clk_register_alias(ctx, s3c2450_aliases, |
| 391 | ARRAY_SIZE(s3c2450_aliases)); | 391 | ARRAY_SIZE(s3c2450_aliases)); |
| 392 | /* fall through, as s3c2450 extends the s3c2416 clocks */ | 392 | /* fall through - as s3c2450 extends the s3c2416 clocks */ |
| 393 | case S3C2416: | 393 | case S3C2416: |
| 394 | samsung_clk_register_div(ctx, s3c2416_dividers, | 394 | samsung_clk_register_div(ctx, s3c2416_dividers, |
| 395 | ARRAY_SIZE(s3c2416_dividers)); | 395 | ARRAY_SIZE(s3c2416_dividers)); |
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h index c3f309d7100d..9cfaca5fbcdb 100644 --- a/drivers/clk/samsung/clk.h +++ b/drivers/clk/samsung/clk.h | |||
| @@ -26,7 +26,7 @@ struct samsung_clk_provider { | |||
| 26 | void __iomem *reg_base; | 26 | void __iomem *reg_base; |
| 27 | struct device *dev; | 27 | struct device *dev; |
| 28 | spinlock_t lock; | 28 | spinlock_t lock; |
| 29 | /* clk_data must be the last entry due to variable lenght 'hws' array */ | 29 | /* clk_data must be the last entry due to variable length 'hws' array */ |
| 30 | struct clk_hw_onecell_data clk_data; | 30 | struct clk_hw_onecell_data clk_data; |
| 31 | }; | 31 | }; |
| 32 | 32 | ||
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index aa7a6e6a15b6..73e03328d5c5 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ b/drivers/clk/socfpga/clk-gate.c | |||
| @@ -176,8 +176,7 @@ static struct clk_ops gateclk_ops = { | |||
| 176 | .set_parent = socfpga_clk_set_parent, | 176 | .set_parent = socfpga_clk_set_parent, |
| 177 | }; | 177 | }; |
| 178 | 178 | ||
| 179 | static void __init __socfpga_gate_init(struct device_node *node, | 179 | void __init socfpga_gate_init(struct device_node *node) |
| 180 | const struct clk_ops *ops) | ||
| 181 | { | 180 | { |
| 182 | u32 clk_gate[2]; | 181 | u32 clk_gate[2]; |
| 183 | u32 div_reg[3]; | 182 | u32 div_reg[3]; |
| @@ -188,12 +187,17 @@ static void __init __socfpga_gate_init(struct device_node *node, | |||
| 188 | const char *clk_name = node->name; | 187 | const char *clk_name = node->name; |
| 189 | const char *parent_name[SOCFPGA_MAX_PARENTS]; | 188 | const char *parent_name[SOCFPGA_MAX_PARENTS]; |
| 190 | struct clk_init_data init; | 189 | struct clk_init_data init; |
| 190 | struct clk_ops *ops; | ||
| 191 | int rc; | 191 | int rc; |
| 192 | 192 | ||
| 193 | socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL); | 193 | socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL); |
| 194 | if (WARN_ON(!socfpga_clk)) | 194 | if (WARN_ON(!socfpga_clk)) |
| 195 | return; | 195 | return; |
| 196 | 196 | ||
| 197 | ops = kmemdup(&gateclk_ops, sizeof(gateclk_ops), GFP_KERNEL); | ||
| 198 | if (WARN_ON(!ops)) | ||
| 199 | return; | ||
| 200 | |||
| 197 | rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); | 201 | rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); |
| 198 | if (rc) | 202 | if (rc) |
| 199 | clk_gate[0] = 0; | 203 | clk_gate[0] = 0; |
| @@ -202,8 +206,8 @@ static void __init __socfpga_gate_init(struct device_node *node, | |||
| 202 | socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0]; | 206 | socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0]; |
| 203 | socfpga_clk->hw.bit_idx = clk_gate[1]; | 207 | socfpga_clk->hw.bit_idx = clk_gate[1]; |
| 204 | 208 | ||
| 205 | gateclk_ops.enable = clk_gate_ops.enable; | 209 | ops->enable = clk_gate_ops.enable; |
| 206 | gateclk_ops.disable = clk_gate_ops.disable; | 210 | ops->disable = clk_gate_ops.disable; |
| 207 | } | 211 | } |
| 208 | 212 | ||
| 209 | rc = of_property_read_u32(node, "fixed-divider", &fixed_div); | 213 | rc = of_property_read_u32(node, "fixed-divider", &fixed_div); |
| @@ -234,6 +238,11 @@ static void __init __socfpga_gate_init(struct device_node *node, | |||
| 234 | init.flags = 0; | 238 | init.flags = 0; |
| 235 | 239 | ||
| 236 | init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS); | 240 | init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS); |
| 241 | if (init.num_parents < 2) { | ||
| 242 | ops->get_parent = NULL; | ||
| 243 | ops->set_parent = NULL; | ||
| 244 | } | ||
| 245 | |||
| 237 | init.parent_names = parent_name; | 246 | init.parent_names = parent_name; |
| 238 | socfpga_clk->hw.hw.init = &init; | 247 | socfpga_clk->hw.hw.init = &init; |
| 239 | 248 | ||
| @@ -246,8 +255,3 @@ static void __init __socfpga_gate_init(struct device_node *node, | |||
| 246 | if (WARN_ON(rc)) | 255 | if (WARN_ON(rc)) |
| 247 | return; | 256 | return; |
| 248 | } | 257 | } |
| 249 | |||
| 250 | void __init socfpga_gate_init(struct device_node *node) | ||
| 251 | { | ||
| 252 | __socfpga_gate_init(node, &gateclk_ops); | ||
| 253 | } | ||
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c index 35fabe1a32c3..269467e8e07e 100644 --- a/drivers/clk/socfpga/clk-pll-a10.c +++ b/drivers/clk/socfpga/clk-pll-a10.c | |||
| @@ -95,6 +95,7 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node, | |||
| 95 | 95 | ||
| 96 | clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); | 96 | clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); |
| 97 | clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0); | 97 | clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0); |
| 98 | of_node_put(clkmgr_np); | ||
| 98 | BUG_ON(!clk_mgr_a10_base_addr); | 99 | BUG_ON(!clk_mgr_a10_base_addr); |
| 99 | pll_clk->hw.reg = clk_mgr_a10_base_addr + reg; | 100 | pll_clk->hw.reg = clk_mgr_a10_base_addr + reg; |
| 100 | 101 | ||
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c index c7f463172e4b..b4b44e9b5901 100644 --- a/drivers/clk/socfpga/clk-pll.c +++ b/drivers/clk/socfpga/clk-pll.c | |||
| @@ -100,6 +100,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node, | |||
| 100 | 100 | ||
| 101 | clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); | 101 | clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); |
| 102 | clk_mgr_base_addr = of_iomap(clkmgr_np, 0); | 102 | clk_mgr_base_addr = of_iomap(clkmgr_np, 0); |
| 103 | of_node_put(clkmgr_np); | ||
| 103 | BUG_ON(!clk_mgr_base_addr); | 104 | BUG_ON(!clk_mgr_base_addr); |
| 104 | pll_clk->hw.reg = clk_mgr_base_addr + reg; | 105 | pll_clk->hw.reg = clk_mgr_base_addr + reg; |
| 105 | 106 | ||
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c index a4fa2945f230..4b5f8f4e4ab8 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c | |||
| @@ -144,7 +144,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi", | |||
| 144 | 8, 4, /* N */ | 144 | 8, 4, /* N */ |
| 145 | 4, 2, /* K */ | 145 | 4, 2, /* K */ |
| 146 | 0, 4, /* M */ | 146 | 0, 4, /* M */ |
| 147 | BIT(31), /* gate */ | 147 | BIT(31) | BIT(23) | BIT(22), /* gate */ |
| 148 | BIT(28), /* lock */ | 148 | BIT(28), /* lock */ |
| 149 | CLK_SET_RATE_UNGATE); | 149 | CLK_SET_RATE_UNGATE); |
| 150 | 150 | ||
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c index 0400e5b1d627..1fc71baae13b 100644 --- a/drivers/clk/tegra/clk-dfll.c +++ b/drivers/clk/tegra/clk-dfll.c | |||
| @@ -1293,8 +1293,8 @@ static int attr_enable_set(void *data, u64 val) | |||
| 1293 | 1293 | ||
| 1294 | return val ? dfll_enable(td) : dfll_disable(td); | 1294 | return val ? dfll_enable(td) : dfll_disable(td); |
| 1295 | } | 1295 | } |
| 1296 | DEFINE_SIMPLE_ATTRIBUTE(enable_fops, attr_enable_get, attr_enable_set, | 1296 | DEFINE_DEBUGFS_ATTRIBUTE(enable_fops, attr_enable_get, attr_enable_set, |
| 1297 | "%llu\n"); | 1297 | "%llu\n"); |
| 1298 | 1298 | ||
| 1299 | static int attr_lock_get(void *data, u64 *val) | 1299 | static int attr_lock_get(void *data, u64 *val) |
| 1300 | { | 1300 | { |
| @@ -1310,8 +1310,7 @@ static int attr_lock_set(void *data, u64 val) | |||
| 1310 | 1310 | ||
| 1311 | return val ? dfll_lock(td) : dfll_unlock(td); | 1311 | return val ? dfll_lock(td) : dfll_unlock(td); |
| 1312 | } | 1312 | } |
| 1313 | DEFINE_SIMPLE_ATTRIBUTE(lock_fops, attr_lock_get, attr_lock_set, | 1313 | DEFINE_DEBUGFS_ATTRIBUTE(lock_fops, attr_lock_get, attr_lock_set, "%llu\n"); |
| 1314 | "%llu\n"); | ||
| 1315 | 1314 | ||
| 1316 | static int attr_rate_get(void *data, u64 *val) | 1315 | static int attr_rate_get(void *data, u64 *val) |
| 1317 | { | 1316 | { |
| @@ -1328,7 +1327,7 @@ static int attr_rate_set(void *data, u64 val) | |||
| 1328 | 1327 | ||
| 1329 | return dfll_request_rate(td, val); | 1328 | return dfll_request_rate(td, val); |
| 1330 | } | 1329 | } |
| 1331 | DEFINE_SIMPLE_ATTRIBUTE(rate_fops, attr_rate_get, attr_rate_set, "%llu\n"); | 1330 | DEFINE_DEBUGFS_ATTRIBUTE(rate_fops, attr_rate_get, attr_rate_set, "%llu\n"); |
| 1332 | 1331 | ||
| 1333 | static int attr_registers_show(struct seq_file *s, void *data) | 1332 | static int attr_registers_show(struct seq_file *s, void *data) |
| 1334 | { | 1333 | { |
| @@ -1379,10 +1378,11 @@ static void dfll_debug_init(struct tegra_dfll *td) | |||
| 1379 | root = debugfs_create_dir("tegra_dfll_fcpu", NULL); | 1378 | root = debugfs_create_dir("tegra_dfll_fcpu", NULL); |
| 1380 | td->debugfs_dir = root; | 1379 | td->debugfs_dir = root; |
| 1381 | 1380 | ||
| 1382 | debugfs_create_file("enable", S_IRUGO | S_IWUSR, root, td, &enable_fops); | 1381 | debugfs_create_file_unsafe("enable", 0644, root, td, |
| 1383 | debugfs_create_file("lock", S_IRUGO, root, td, &lock_fops); | 1382 | &enable_fops); |
| 1384 | debugfs_create_file("rate", S_IRUGO, root, td, &rate_fops); | 1383 | debugfs_create_file_unsafe("lock", 0444, root, td, &lock_fops); |
| 1385 | debugfs_create_file("registers", S_IRUGO, root, td, &attr_registers_fops); | 1384 | debugfs_create_file_unsafe("rate", 0444, root, td, &rate_fops); |
| 1385 | debugfs_create_file("registers", 0444, root, td, &attr_registers_fops); | ||
| 1386 | } | 1386 | } |
| 1387 | 1387 | ||
| 1388 | #else | 1388 | #else |
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c index 688e403333b9..0c210984765a 100644 --- a/drivers/clk/ti/adpll.c +++ b/drivers/clk/ti/adpll.c | |||
| @@ -614,7 +614,7 @@ static int ti_adpll_init_clkout(struct ti_adpll_data *d, | |||
| 614 | 614 | ||
| 615 | init.name = child_name; | 615 | init.name = child_name; |
| 616 | init.ops = ops; | 616 | init.ops = ops; |
| 617 | init.flags = CLK_IS_BASIC; | 617 | init.flags = 0; |
| 618 | co->hw.init = &init; | 618 | co->hw.init = &init; |
| 619 | parent_names[0] = __clk_get_name(clk0); | 619 | parent_names[0] = __clk_get_name(clk0); |
| 620 | parent_names[1] = __clk_get_name(clk1); | 620 | parent_names[1] = __clk_get_name(clk1); |
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c index 222f68bc3f2a..015a657d3382 100644 --- a/drivers/clk/ti/apll.c +++ b/drivers/clk/ti/apll.c | |||
| @@ -165,7 +165,7 @@ static void __init omap_clk_register_apll(void *user, | |||
| 165 | 165 | ||
| 166 | ad->clk_bypass = __clk_get_hw(clk); | 166 | ad->clk_bypass = __clk_get_hw(clk); |
| 167 | 167 | ||
| 168 | clk = ti_clk_register(NULL, &clk_hw->hw, node->name); | 168 | clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name); |
| 169 | if (!IS_ERR(clk)) { | 169 | if (!IS_ERR(clk)) { |
| 170 | of_clk_add_provider(node, of_clk_src_simple_get, clk); | 170 | of_clk_add_provider(node, of_clk_src_simple_get, clk); |
| 171 | kfree(clk_hw->hw.init->parent_names); | 171 | kfree(clk_hw->hw.init->parent_names); |
| @@ -402,7 +402,7 @@ static void __init of_omap2_apll_setup(struct device_node *node) | |||
| 402 | if (ret) | 402 | if (ret) |
| 403 | goto cleanup; | 403 | goto cleanup; |
| 404 | 404 | ||
| 405 | clk = clk_register(NULL, &clk_hw->hw); | 405 | clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name); |
| 406 | if (!IS_ERR(clk)) { | 406 | if (!IS_ERR(clk)) { |
| 407 | of_clk_add_provider(node, of_clk_src_simple_get, clk); | 407 | of_clk_add_provider(node, of_clk_src_simple_get, clk); |
| 408 | kfree(init); | 408 | kfree(init); |
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c index 7bb9afbe4058..1cae226759dd 100644 --- a/drivers/clk/ti/autoidle.c +++ b/drivers/clk/ti/autoidle.c | |||
| @@ -35,7 +35,44 @@ struct clk_ti_autoidle { | |||
| 35 | #define AUTOIDLE_LOW 0x1 | 35 | #define AUTOIDLE_LOW 0x1 |
| 36 | 36 | ||
| 37 | static LIST_HEAD(autoidle_clks); | 37 | static LIST_HEAD(autoidle_clks); |
| 38 | static LIST_HEAD(clk_hw_omap_clocks); | 38 | |
| 39 | /* | ||
| 40 | * we have some non-atomic read/write | ||
| 41 | * operations behind it, so let's | ||
| 42 | * take one lock for handling autoidle | ||
| 43 | * of all clocks | ||
| 44 | */ | ||
| 45 | static DEFINE_SPINLOCK(autoidle_spinlock); | ||
| 46 | |||
| 47 | static int _omap2_clk_deny_idle(struct clk_hw_omap *clk) | ||
| 48 | { | ||
| 49 | if (clk->ops && clk->ops->deny_idle) { | ||
| 50 | unsigned long irqflags; | ||
| 51 | |||
| 52 | spin_lock_irqsave(&autoidle_spinlock, irqflags); | ||
| 53 | clk->autoidle_count++; | ||
| 54 | if (clk->autoidle_count == 1) | ||
| 55 | clk->ops->deny_idle(clk); | ||
| 56 | |||
| 57 | spin_unlock_irqrestore(&autoidle_spinlock, irqflags); | ||
| 58 | } | ||
| 59 | return 0; | ||
| 60 | } | ||
| 61 | |||
| 62 | static int _omap2_clk_allow_idle(struct clk_hw_omap *clk) | ||
| 63 | { | ||
| 64 | if (clk->ops && clk->ops->allow_idle) { | ||
| 65 | unsigned long irqflags; | ||
| 66 | |||
| 67 | spin_lock_irqsave(&autoidle_spinlock, irqflags); | ||
| 68 | clk->autoidle_count--; | ||
| 69 | if (clk->autoidle_count == 0) | ||
| 70 | clk->ops->allow_idle(clk); | ||
| 71 | |||
| 72 | spin_unlock_irqrestore(&autoidle_spinlock, irqflags); | ||
| 73 | } | ||
| 74 | return 0; | ||
| 75 | } | ||
| 39 | 76 | ||
| 40 | /** | 77 | /** |
| 41 | * omap2_clk_deny_idle - disable autoidle on an OMAP clock | 78 | * omap2_clk_deny_idle - disable autoidle on an OMAP clock |
| @@ -45,12 +82,15 @@ static LIST_HEAD(clk_hw_omap_clocks); | |||
| 45 | */ | 82 | */ |
| 46 | int omap2_clk_deny_idle(struct clk *clk) | 83 | int omap2_clk_deny_idle(struct clk *clk) |
| 47 | { | 84 | { |
| 48 | struct clk_hw_omap *c; | 85 | struct clk_hw *hw = __clk_get_hw(clk); |
| 49 | 86 | ||
| 50 | c = to_clk_hw_omap(__clk_get_hw(clk)); | 87 | if (omap2_clk_is_hw_omap(hw)) { |
| 51 | if (c->ops && c->ops->deny_idle) | 88 | struct clk_hw_omap *c = to_clk_hw_omap(hw); |
| 52 | c->ops->deny_idle(c); | 89 | |
| 53 | return 0; | 90 | return _omap2_clk_deny_idle(c); |
| 91 | } | ||
| 92 | |||
| 93 | return -EINVAL; | ||
| 54 | } | 94 | } |
| 55 | 95 | ||
| 56 | /** | 96 | /** |
| @@ -61,12 +101,15 @@ int omap2_clk_deny_idle(struct clk *clk) | |||
| 61 | */ | 101 | */ |
| 62 | int omap2_clk_allow_idle(struct clk *clk) | 102 | int omap2_clk_allow_idle(struct clk *clk) |
| 63 | { | 103 | { |
| 64 | struct clk_hw_omap *c; | 104 | struct clk_hw *hw = __clk_get_hw(clk); |
| 65 | 105 | ||
| 66 | c = to_clk_hw_omap(__clk_get_hw(clk)); | 106 | if (omap2_clk_is_hw_omap(hw)) { |
| 67 | if (c->ops && c->ops->allow_idle) | 107 | struct clk_hw_omap *c = to_clk_hw_omap(hw); |
| 68 | c->ops->allow_idle(c); | 108 | |
| 69 | return 0; | 109 | return _omap2_clk_allow_idle(c); |
| 110 | } | ||
| 111 | |||
| 112 | return -EINVAL; | ||
| 70 | } | 113 | } |
| 71 | 114 | ||
| 72 | static void _allow_autoidle(struct clk_ti_autoidle *clk) | 115 | static void _allow_autoidle(struct clk_ti_autoidle *clk) |
| @@ -168,26 +211,6 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node) | |||
| 168 | } | 211 | } |
| 169 | 212 | ||
| 170 | /** | 213 | /** |
| 171 | * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock | ||
| 172 | * @hw: struct clk_hw * to initialize | ||
| 173 | * | ||
| 174 | * Add an OMAP clock @clk to the internal list of OMAP clocks. Used | ||
| 175 | * temporarily for autoidle handling, until this support can be | ||
| 176 | * integrated into the common clock framework code in some way. No | ||
| 177 | * return value. | ||
| 178 | */ | ||
| 179 | void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw) | ||
| 180 | { | ||
| 181 | struct clk_hw_omap *c; | ||
| 182 | |||
| 183 | if (clk_hw_get_flags(hw) & CLK_IS_BASIC) | ||
| 184 | return; | ||
| 185 | |||
| 186 | c = to_clk_hw_omap(hw); | ||
| 187 | list_add(&c->node, &clk_hw_omap_clocks); | ||
| 188 | } | ||
| 189 | |||
| 190 | /** | ||
| 191 | * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that | 214 | * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that |
| 192 | * support it | 215 | * support it |
| 193 | * | 216 | * |
| @@ -198,11 +221,11 @@ void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw) | |||
| 198 | */ | 221 | */ |
| 199 | int omap2_clk_enable_autoidle_all(void) | 222 | int omap2_clk_enable_autoidle_all(void) |
| 200 | { | 223 | { |
| 201 | struct clk_hw_omap *c; | 224 | int ret; |
| 202 | 225 | ||
| 203 | list_for_each_entry(c, &clk_hw_omap_clocks, node) | 226 | ret = omap2_clk_for_each(_omap2_clk_allow_idle); |
| 204 | if (c->ops && c->ops->allow_idle) | 227 | if (ret) |
| 205 | c->ops->allow_idle(c); | 228 | return ret; |
| 206 | 229 | ||
| 207 | _clk_generic_allow_autoidle_all(); | 230 | _clk_generic_allow_autoidle_all(); |
| 208 | 231 | ||
| @@ -220,11 +243,11 @@ int omap2_clk_enable_autoidle_all(void) | |||
| 220 | */ | 243 | */ |
| 221 | int omap2_clk_disable_autoidle_all(void) | 244 | int omap2_clk_disable_autoidle_all(void) |
| 222 | { | 245 | { |
| 223 | struct clk_hw_omap *c; | 246 | int ret; |
| 224 | 247 | ||
| 225 | list_for_each_entry(c, &clk_hw_omap_clocks, node) | 248 | ret = omap2_clk_for_each(_omap2_clk_deny_idle); |
| 226 | if (c->ops && c->ops->deny_idle) | 249 | if (ret) |
| 227 | c->ops->deny_idle(c); | 250 | return ret; |
| 228 | 251 | ||
| 229 | _clk_generic_deny_autoidle_all(); | 252 | _clk_generic_deny_autoidle_all(); |
| 230 | 253 | ||
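A short usage note on the refcounted helpers added to autoidle.c above: _omap2_clk_deny_idle() only invokes the hardware deny_idle hook when the count goes from 0 to 1, and _omap2_clk_allow_idle() only re-enables autoidle when it drops back to 0, so callers may nest. Illustrative sequence (clk assumed to be a valid OMAP clock obtained elsewhere):

	omap2_clk_deny_idle(clk);	/* count 0 -> 1: ops->deny_idle() runs   */
	omap2_clk_deny_idle(clk);	/* count 1 -> 2: no hardware access      */
	omap2_clk_allow_idle(clk);	/* count 2 -> 1: no hardware access      */
	omap2_clk_allow_idle(clk);	/* count 1 -> 0: ops->allow_idle() runs  */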
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 5d7fb2eecce4..ba17cc5bd04b 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | #undef pr_fmt | 31 | #undef pr_fmt |
| 32 | #define pr_fmt(fmt) "%s: " fmt, __func__ | 32 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
| 33 | 33 | ||
| 34 | static LIST_HEAD(clk_hw_omap_clocks); | ||
| 34 | struct ti_clk_ll_ops *ti_clk_ll_ops; | 35 | struct ti_clk_ll_ops *ti_clk_ll_ops; |
| 35 | static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS]; | 36 | static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS]; |
| 36 | 37 | ||
| @@ -191,9 +192,13 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) | |||
| 191 | clkdev_add(&c->lk); | 192 | clkdev_add(&c->lk); |
| 192 | } else { | 193 | } else { |
| 193 | if (num_args && !has_clkctrl_data) { | 194 | if (num_args && !has_clkctrl_data) { |
| 194 | if (of_find_compatible_node(NULL, NULL, | 195 | struct device_node *np; |
| 195 | "ti,clkctrl")) { | 196 | |
| 197 | np = of_find_compatible_node(NULL, NULL, | ||
| 198 | "ti,clkctrl"); | ||
| 199 | if (np) { | ||
| 196 | has_clkctrl_data = true; | 200 | has_clkctrl_data = true; |
| 201 | of_node_put(np); | ||
| 197 | } else { | 202 | } else { |
| 198 | clkctrl_nodes_missing = true; | 203 | clkctrl_nodes_missing = true; |
| 199 | 204 | ||
| @@ -520,3 +525,74 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw, | |||
| 520 | 525 | ||
| 521 | return clk; | 526 | return clk; |
| 522 | } | 527 | } |
| 528 | |||
| 529 | /** | ||
| 530 | * ti_clk_register_omap_hw - register a clk_hw_omap to the clock framework | ||
| 531 | * @dev: device for this clock | ||
| 532 | * @hw: hardware clock handle | ||
| 533 | * @con: connection ID for this clock | ||
| 534 | * | ||
| 535 | * Registers a clk_hw_omap clock to the clock framework, adds a clock alias | ||
| 536 | * for it, and adds it to the list of available clk_hw_omap type clocks. | ||
| 537 | * Returns a handle to the registered clock if successful, or an ERR_PTR | ||
| 538 | * value on failure. | ||
| 539 | */ | ||
| 540 | struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw, | ||
| 541 | const char *con) | ||
| 542 | { | ||
| 543 | struct clk *clk; | ||
| 544 | struct clk_hw_omap *oclk; | ||
| 545 | |||
| 546 | clk = ti_clk_register(dev, hw, con); | ||
| 547 | if (IS_ERR(clk)) | ||
| 548 | return clk; | ||
| 549 | |||
| 550 | oclk = to_clk_hw_omap(hw); | ||
| 551 | |||
| 552 | list_add(&oclk->node, &clk_hw_omap_clocks); | ||
| 553 | |||
| 554 | return clk; | ||
| 555 | } | ||
| 556 | |||
| 557 | /** | ||
| 558 | * omap2_clk_for_each - call function for each registered clk_hw_omap | ||
| 559 | * @fn: pointer to a callback function | ||
| 560 | * | ||
| 561 | * Call @fn for each registered clk_hw_omap, passing @hw to each | ||
| 562 | * function. @fn must return 0 for success or any other value for | ||
| 563 | * failure. If @fn returns non-zero, the iteration across clocks | ||
| 564 | * will stop and the non-zero return value will be passed to the | ||
| 565 | * caller of omap2_clk_for_each(). | ||
| 566 | */ | ||
| 567 | int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw)) | ||
| 568 | { | ||
| 569 | int ret = 0; | ||
| 570 | struct clk_hw_omap *hw; | ||
| 571 | |||
| 572 | list_for_each_entry(hw, &clk_hw_omap_clocks, node) { | ||
| 573 | ret = (*fn)(hw); | ||
| 574 | if (ret) | ||
| 575 | break; | ||
| 576 | } | ||
| 577 | |||
| 578 | return ret; | ||
| 579 | } | ||
| 580 | |||
| 581 | /** | ||
| 582 | * omap2_clk_is_hw_omap - check if the provided clk_hw is an OMAP clock | ||
| 583 | * @hw: clk_hw to check if it is an omap clock or not | ||
| 584 | * | ||
| 585 | * Checks if the provided clk_hw is an OMAP clock or not. Returns true if | ||
| 586 | * it is, false otherwise. | ||
| 587 | */ | ||
| 588 | bool omap2_clk_is_hw_omap(struct clk_hw *hw) | ||
| 589 | { | ||
| 590 | struct clk_hw_omap *oclk; | ||
| 591 | |||
| 592 | list_for_each_entry(oclk, &clk_hw_omap_clocks, node) { | ||
| 593 | if (&oclk->hw == hw) | ||
| 594 | return true; | ||
| 595 | } | ||
| 596 | |||
| 597 | return false; | ||
| 598 | } | ||
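A minimal sketch (not part of the patch) of how the new clk.c helpers are meant to be used: ti_clk_register_omap_hw() puts each OMAP-type clock on clk_hw_omap_clocks, and omap2_clk_for_each()/omap2_clk_is_hw_omap() walk that list. The callback name below is hypothetical, mirroring the deny-idle walk in omap2_clk_disable_autoidle_all() above:

    /* Hypothetical callback; the real callers live in the OMAP clock code. */
    static int example_deny_idle(struct clk_hw_omap *hw)
    {
            if (hw->ops && hw->ops->deny_idle)
                    hw->ops->deny_idle(hw);

            return 0;       /* a non-zero return stops the iteration */
    }

    static int example_walk_all(void)
    {
            /* Clocks are added to the list by ti_clk_register_omap_hw(). */
            return omap2_clk_for_each(example_deny_idle);
    }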
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c index 40630eb950fc..639f515e08f0 100644 --- a/drivers/clk/ti/clkctrl.c +++ b/drivers/clk/ti/clkctrl.c | |||
| @@ -276,7 +276,7 @@ _ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider, | |||
| 276 | init.parent_names = parents; | 276 | init.parent_names = parents; |
| 277 | init.num_parents = num_parents; | 277 | init.num_parents = num_parents; |
| 278 | init.ops = ops; | 278 | init.ops = ops; |
| 279 | init.flags = CLK_IS_BASIC; | 279 | init.flags = 0; |
| 280 | 280 | ||
| 281 | clk = ti_clk_register(NULL, clk_hw, init.name); | 281 | clk = ti_clk_register(NULL, clk_hw, init.name); |
| 282 | if (IS_ERR_OR_NULL(clk)) { | 282 | if (IS_ERR_OR_NULL(clk)) { |
| @@ -530,7 +530,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node) | |||
| 530 | * Create default clkdm name, replace _cm from end of parent | 530 | * Create default clkdm name, replace _cm from end of parent |
| 531 | * node name with _clkdm | 531 | * node name with _clkdm |
| 532 | */ | 532 | */ |
| 533 | provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0; | 533 | provider->clkdm_name[strlen(provider->clkdm_name) - 2] = 0; |
| 534 | } else { | 534 | } else { |
| 535 | provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node); | 535 | provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node); |
| 536 | if (!provider->clkdm_name) { | 536 | if (!provider->clkdm_name) { |
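The ti_dt_clocks_register() hunk above also fixes a device-node reference leak: of_find_compatible_node() returns its result with an elevated refcount, so the caller has to drop it once the check is done. A minimal sketch of the pattern, detached from the driver code:

    struct device_node *np;

    np = of_find_compatible_node(NULL, NULL, "ti,clkctrl");
    if (np) {
            /* clkctrl data is present in the device tree */
            of_node_put(np);    /* balance the reference taken by the lookup */
    }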
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h index 9f312a219510..1c0fac59d809 100644 --- a/drivers/clk/ti/clock.h +++ b/drivers/clk/ti/clock.h | |||
| @@ -203,6 +203,8 @@ typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *); | |||
| 203 | 203 | ||
| 204 | struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw, | 204 | struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw, |
| 205 | const char *con); | 205 | const char *con); |
| 206 | struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw, | ||
| 207 | const char *con); | ||
| 206 | int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con); | 208 | int ti_clk_add_alias(struct device *dev, struct clk *clk, const char *con); |
| 207 | void ti_clk_add_aliases(void); | 209 | void ti_clk_add_aliases(void); |
| 208 | 210 | ||
| @@ -221,7 +223,6 @@ int ti_clk_retry_init(struct device_node *node, void *user, | |||
| 221 | ti_of_clk_init_cb_t func); | 223 | ti_of_clk_init_cb_t func); |
| 222 | int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type); | 224 | int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type); |
| 223 | 225 | ||
| 224 | void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw); | ||
| 225 | int of_ti_clk_autoidle_setup(struct device_node *node); | 226 | int of_ti_clk_autoidle_setup(struct device_node *node); |
| 226 | void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); | 227 | void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); |
| 227 | 228 | ||
| @@ -301,6 +302,8 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, | |||
| 301 | unsigned long *parent_rate); | 302 | unsigned long *parent_rate); |
| 302 | int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, | 303 | int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, |
| 303 | struct clk_rate_request *req); | 304 | struct clk_rate_request *req); |
| 305 | int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw)); | ||
| 306 | bool omap2_clk_is_hw_omap(struct clk_hw *hw); | ||
| 304 | 307 | ||
| 305 | extern struct ti_clk_ll_ops *ti_clk_ll_ops; | 308 | extern struct ti_clk_ll_ops *ti_clk_ll_ops; |
| 306 | 309 | ||
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c index 07a805125e98..423a99b9f10c 100644 --- a/drivers/clk/ti/clockdomain.c +++ b/drivers/clk/ti/clockdomain.c | |||
| @@ -143,7 +143,7 @@ static void __init of_ti_clockdomain_setup(struct device_node *node) | |||
| 143 | continue; | 143 | continue; |
| 144 | } | 144 | } |
| 145 | clk_hw = __clk_get_hw(clk); | 145 | clk_hw = __clk_get_hw(clk); |
| 146 | if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) { | 146 | if (!omap2_clk_is_hw_omap(clk_hw)) { |
| 147 | pr_warn("can't setup clkdm for basic clk %s\n", | 147 | pr_warn("can't setup clkdm for basic clk %s\n", |
| 148 | __clk_get_name(clk)); | 148 | __clk_get_name(clk)); |
| 149 | continue; | 149 | continue; |
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index 0241450f3eb3..4786e0ebc2e8 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c | |||
| @@ -336,7 +336,7 @@ static struct clk *_register_divider(struct device *dev, const char *name, | |||
| 336 | 336 | ||
| 337 | init.name = name; | 337 | init.name = name; |
| 338 | init.ops = &ti_clk_divider_ops; | 338 | init.ops = &ti_clk_divider_ops; |
| 339 | init.flags = flags | CLK_IS_BASIC; | 339 | init.flags = flags; |
| 340 | init.parent_names = (parent_name ? &parent_name : NULL); | 340 | init.parent_names = (parent_name ? &parent_name : NULL); |
| 341 | init.num_parents = (parent_name ? 1 : 0); | 341 | init.num_parents = (parent_name ? 1 : 0); |
| 342 | 342 | ||
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index 6c3329bc116f..659dadb23279 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c | |||
| @@ -192,10 +192,9 @@ static void __init _register_dpll(void *user, | |||
| 192 | dd->clk_bypass = __clk_get_hw(clk); | 192 | dd->clk_bypass = __clk_get_hw(clk); |
| 193 | 193 | ||
| 194 | /* register the clock */ | 194 | /* register the clock */ |
| 195 | clk = ti_clk_register(NULL, &clk_hw->hw, node->name); | 195 | clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name); |
| 196 | 196 | ||
| 197 | if (!IS_ERR(clk)) { | 197 | if (!IS_ERR(clk)) { |
| 198 | omap2_init_clk_hw_omap_clocks(&clk_hw->hw); | ||
| 199 | of_clk_add_provider(node, of_clk_src_simple_get, clk); | 198 | of_clk_add_provider(node, of_clk_src_simple_get, clk); |
| 200 | kfree(clk_hw->hw.init->parent_names); | 199 | kfree(clk_hw->hw.init->parent_names); |
| 201 | kfree(clk_hw->hw.init); | 200 | kfree(clk_hw->hw.init); |
| @@ -265,14 +264,12 @@ static void _register_dpll_x2(struct device_node *node, | |||
| 265 | #endif | 264 | #endif |
| 266 | 265 | ||
| 267 | /* register the clock */ | 266 | /* register the clock */ |
| 268 | clk = ti_clk_register(NULL, &clk_hw->hw, name); | 267 | clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name); |
| 269 | 268 | ||
| 270 | if (IS_ERR(clk)) { | 269 | if (IS_ERR(clk)) |
| 271 | kfree(clk_hw); | 270 | kfree(clk_hw); |
| 272 | } else { | 271 | else |
| 273 | omap2_init_clk_hw_omap_clocks(&clk_hw->hw); | ||
| 274 | of_clk_add_provider(node, of_clk_src_simple_get, clk); | 272 | of_clk_add_provider(node, of_clk_src_simple_get, clk); |
| 275 | } | ||
| 276 | } | 273 | } |
| 277 | #endif | 274 | #endif |
| 278 | 275 | ||
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c index 44b6b6403753..3dde6c8c3354 100644 --- a/drivers/clk/ti/dpll3xxx.c +++ b/drivers/clk/ti/dpll3xxx.c | |||
| @@ -731,7 +731,7 @@ static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw) | |||
| 731 | do { | 731 | do { |
| 732 | do { | 732 | do { |
| 733 | hw = clk_hw_get_parent(hw); | 733 | hw = clk_hw_get_parent(hw); |
| 734 | } while (hw && (clk_hw_get_flags(hw) & CLK_IS_BASIC)); | 734 | } while (hw && (!omap2_clk_is_hw_omap(hw))); |
| 735 | if (!hw) | 735 | if (!hw) |
| 736 | break; | 736 | break; |
| 737 | pclk = to_clk_hw_omap(hw); | 737 | pclk = to_clk_hw_omap(hw); |
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c index 1c78fff5513c..504c0e91cdc7 100644 --- a/drivers/clk/ti/gate.c +++ b/drivers/clk/ti/gate.c | |||
| @@ -123,7 +123,7 @@ static struct clk *_register_gate(struct device *dev, const char *name, | |||
| 123 | 123 | ||
| 124 | init.flags = flags; | 124 | init.flags = flags; |
| 125 | 125 | ||
| 126 | clk = ti_clk_register(NULL, &clk_hw->hw, name); | 126 | clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name); |
| 127 | 127 | ||
| 128 | if (IS_ERR(clk)) | 128 | if (IS_ERR(clk)) |
| 129 | kfree(clk_hw); | 129 | kfree(clk_hw); |
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c index 87e00c2ee957..83e34429d3b1 100644 --- a/drivers/clk/ti/interface.c +++ b/drivers/clk/ti/interface.c | |||
| @@ -57,12 +57,10 @@ static struct clk *_register_interface(struct device *dev, const char *name, | |||
| 57 | init.num_parents = 1; | 57 | init.num_parents = 1; |
| 58 | init.parent_names = &parent_name; | 58 | init.parent_names = &parent_name; |
| 59 | 59 | ||
| 60 | clk = ti_clk_register(NULL, &clk_hw->hw, name); | 60 | clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, name); |
| 61 | 61 | ||
| 62 | if (IS_ERR(clk)) | 62 | if (IS_ERR(clk)) |
| 63 | kfree(clk_hw); | 63 | kfree(clk_hw); |
| 64 | else | ||
| 65 | omap2_init_clk_hw_omap_clocks(&clk_hw->hw); | ||
| 66 | 64 | ||
| 67 | return clk; | 65 | return clk; |
| 68 | } | 66 | } |
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c index 883bdde94d04..b7f9a4f068bf 100644 --- a/drivers/clk/ti/mux.c +++ b/drivers/clk/ti/mux.c | |||
| @@ -143,7 +143,7 @@ static struct clk *_register_mux(struct device *dev, const char *name, | |||
| 143 | 143 | ||
| 144 | init.name = name; | 144 | init.name = name; |
| 145 | init.ops = &ti_clk_mux_ops; | 145 | init.ops = &ti_clk_mux_ops; |
| 146 | init.flags = flags | CLK_IS_BASIC; | 146 | init.flags = flags; |
| 147 | init.parent_names = parent_names; | 147 | init.parent_names = parent_names; |
| 148 | init.num_parents = num_parents; | 148 | init.num_parents = num_parents; |
| 149 | 149 | ||
diff --git a/drivers/clk/uniphier/clk-uniphier-cpugear.c b/drivers/clk/uniphier/clk-uniphier-cpugear.c index ec11f55594ad..5d2d42b7e182 100644 --- a/drivers/clk/uniphier/clk-uniphier-cpugear.c +++ b/drivers/clk/uniphier/clk-uniphier-cpugear.c | |||
| @@ -47,7 +47,7 @@ static int uniphier_clk_cpugear_set_parent(struct clk_hw *hw, u8 index) | |||
| 47 | return ret; | 47 | return ret; |
| 48 | 48 | ||
| 49 | ret = regmap_write_bits(gear->regmap, | 49 | ret = regmap_write_bits(gear->regmap, |
| 50 | gear->regbase + UNIPHIER_CLK_CPUGEAR_SET, | 50 | gear->regbase + UNIPHIER_CLK_CPUGEAR_UPD, |
| 51 | UNIPHIER_CLK_CPUGEAR_UPD_BIT, | 51 | UNIPHIER_CLK_CPUGEAR_UPD_BIT, |
| 52 | UNIPHIER_CLK_CPUGEAR_UPD_BIT); | 52 | UNIPHIER_CLK_CPUGEAR_UPD_BIT); |
| 53 | if (ret) | 53 | if (ret) |
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c index 6b40eb89ae19..68bd3abaef2c 100644 --- a/drivers/clk/x86/clk-lpt.c +++ b/drivers/clk/x86/clk-lpt.c | |||
| @@ -13,7 +13,7 @@ | |||
| 13 | #include <linux/clk-provider.h> | 13 | #include <linux/clk-provider.h> |
| 14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
| 15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 16 | #include <linux/platform_data/clk-lpss.h> | 16 | #include <linux/platform_data/x86/clk-lpss.h> |
| 17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
| 18 | 18 | ||
| 19 | static int lpt_clk_probe(struct platform_device *pdev) | 19 | static int lpt_clk_probe(struct platform_device *pdev) |
diff --git a/drivers/clk/x86/clk-st.c b/drivers/clk/x86/clk-st.c index 3a0996f2d556..25d4b97aff9b 100644 --- a/drivers/clk/x86/clk-st.c +++ b/drivers/clk/x86/clk-st.c | |||
| @@ -52,7 +52,8 @@ static int st_clk_probe(struct platform_device *pdev) | |||
| 52 | 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, | 52 | 0, st_data->base + MISCCLKCNTL1, OSCCLKENB, |
| 53 | CLK_GATE_SET_TO_DISABLE, NULL); | 53 | CLK_GATE_SET_TO_DISABLE, NULL); |
| 54 | 54 | ||
| 55 | clk_hw_register_clkdev(hws[ST_CLK_GATE], "oscout1", NULL); | 55 | devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE], "oscout1", |
| 56 | NULL); | ||
| 56 | 57 | ||
| 57 | return 0; | 58 | return 0; |
| 58 | } | 59 | } |
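clk-st.c switches to the managed clkdev helper so the "oscout1" lookup is released automatically when the platform device is unbound. A hedged sketch of the two forms (error handling omitted for brevity):

    /* Unmanaged: the lookup persists until it is removed by hand. */
    clk_hw_register_clkdev(hws[ST_CLK_GATE], "oscout1", NULL);

    /* Managed: tied to &pdev->dev and dropped automatically on unbind. */
    devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE], "oscout1", NULL);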
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 0e626b00053b..e10922709d13 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -206,17 +206,15 @@ unsigned int cpufreq_generic_get(unsigned int cpu) | |||
| 206 | EXPORT_SYMBOL_GPL(cpufreq_generic_get); | 206 | EXPORT_SYMBOL_GPL(cpufreq_generic_get); |
| 207 | 207 | ||
| 208 | /** | 208 | /** |
| 209 | * cpufreq_cpu_get: returns policy for a cpu and marks it busy. | 209 | * cpufreq_cpu_get - Return policy for a CPU and mark it as busy. |
| 210 | * @cpu: CPU to find the policy for. | ||
| 210 | * | 211 | * |
| 211 | * @cpu: cpu to find policy for. | 212 | * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment |
| 213 | * the kobject reference counter of that policy. Return a valid policy on | ||
| 214 | * success or NULL on failure. | ||
| 212 | * | 215 | * |
| 213 | * This returns policy for 'cpu', returns NULL if it doesn't exist. | 216 | * The policy returned by this function has to be released with the help of |
| 214 | * It also increments the kobject reference count to mark it busy and so would | 217 | * cpufreq_cpu_put() to balance its kobject reference counter properly. |
| 215 | * require a corresponding call to cpufreq_cpu_put() to decrement it back. | ||
| 216 | * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be | ||
| 217 | * freed as that depends on the kobj count. | ||
| 218 | * | ||
| 219 | * Return: A valid policy on success, otherwise NULL on failure. | ||
| 220 | */ | 218 | */ |
| 221 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) | 219 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) |
| 222 | { | 220 | { |
| @@ -243,12 +241,8 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) | |||
| 243 | EXPORT_SYMBOL_GPL(cpufreq_cpu_get); | 241 | EXPORT_SYMBOL_GPL(cpufreq_cpu_get); |
| 244 | 242 | ||
| 245 | /** | 243 | /** |
| 246 | * cpufreq_cpu_put: Decrements the usage count of a policy | 244 | * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy. |
| 247 | * | 245 | * @policy: cpufreq policy returned by cpufreq_cpu_get(). |
| 248 | * @policy: policy earlier returned by cpufreq_cpu_get(). | ||
| 249 | * | ||
| 250 | * This decrements the kobject reference count incremented earlier by calling | ||
| 251 | * cpufreq_cpu_get(). | ||
| 252 | */ | 246 | */ |
| 253 | void cpufreq_cpu_put(struct cpufreq_policy *policy) | 247 | void cpufreq_cpu_put(struct cpufreq_policy *policy) |
| 254 | { | 248 | { |
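The reworded kerneldoc describes a get/put pair around the policy's kobject reference. A minimal usage sketch, illustrative only and not taken from the patch:

    struct cpufreq_policy *policy;

    policy = cpufreq_cpu_get(cpu);          /* takes a kobject reference */
    if (!policy)
            return -ENODEV;

    /* ... inspect policy->cur / policy->min / policy->max ... */

    cpufreq_cpu_put(policy);                /* balance the reference */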
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 002f5169d4eb..e22f0dbaebb1 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -1762,7 +1762,7 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time, | |||
| 1762 | /* Start over if the CPU may have been idle. */ | 1762 | /* Start over if the CPU may have been idle. */ |
| 1763 | if (delta_ns > TICK_NSEC) { | 1763 | if (delta_ns > TICK_NSEC) { |
| 1764 | cpu->iowait_boost = ONE_EIGHTH_FP; | 1764 | cpu->iowait_boost = ONE_EIGHTH_FP; |
| 1765 | } else if (cpu->iowait_boost) { | 1765 | } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) { |
| 1766 | cpu->iowait_boost <<= 1; | 1766 | cpu->iowait_boost <<= 1; |
| 1767 | if (cpu->iowait_boost > int_tofp(1)) | 1767 | if (cpu->iowait_boost > int_tofp(1)) |
| 1768 | cpu->iowait_boost = int_tofp(1); | 1768 | cpu->iowait_boost = int_tofp(1); |
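The intel_pstate hunk tightens the ramp-up condition so that only a boost already at or above the 1/8 floor is doubled. With the driver's fixed-point scale (FRAC_BITS of 8, so int_tofp(1) is 256 and ONE_EIGHTH_FP is 32, stated here as an assumption about the surrounding driver code), the ramp looks like:

    /*
     * iowait_boost ramp, assuming FRAC_BITS == 8:
     *
     *   32 -> 64 -> 128 -> 256   (capped at int_tofp(1))
     *
     * With ">= ONE_EIGHTH_FP", a value below the 1/8 floor is no longer
     * shifted up; it has to be reset to ONE_EIGHTH_FP first.
     */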
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c index 46254e583982..74e0e0c20c46 100644 --- a/drivers/cpufreq/pxa2xx-cpufreq.c +++ b/drivers/cpufreq/pxa2xx-cpufreq.c | |||
| @@ -143,7 +143,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq) | |||
| 143 | return ret; | 143 | return ret; |
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | static void __init pxa_cpufreq_init_voltages(void) | 146 | static void pxa_cpufreq_init_voltages(void) |
| 147 | { | 147 | { |
| 148 | vcc_core = regulator_get(NULL, "vcc_core"); | 148 | vcc_core = regulator_get(NULL, "vcc_core"); |
| 149 | if (IS_ERR(vcc_core)) { | 149 | if (IS_ERR(vcc_core)) { |
| @@ -159,7 +159,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq) | |||
| 159 | return 0; | 159 | return 0; |
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | static void __init pxa_cpufreq_init_voltages(void) { } | 162 | static void pxa_cpufreq_init_voltages(void) { } |
| 163 | #endif | 163 | #endif |
| 164 | 164 | ||
| 165 | static void find_freq_tables(struct cpufreq_frequency_table **freq_table, | 165 | static void find_freq_tables(struct cpufreq_frequency_table **freq_table, |
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index bb93e5cf6a4a..9fddf828a76f 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c | |||
| @@ -89,6 +89,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) | |||
| 89 | mutex_lock(&cpuidle_lock); | 89 | mutex_lock(&cpuidle_lock); |
| 90 | if (__cpuidle_find_governor(gov->name) == NULL) { | 90 | if (__cpuidle_find_governor(gov->name) == NULL) { |
| 91 | ret = 0; | 91 | ret = 0; |
| 92 | list_add_tail(&gov->governor_list, &cpuidle_governors); | ||
| 92 | if (!cpuidle_curr_governor || | 93 | if (!cpuidle_curr_governor || |
| 93 | !strncasecmp(param_governor, gov->name, CPUIDLE_NAME_LEN) || | 94 | !strncasecmp(param_governor, gov->name, CPUIDLE_NAME_LEN) || |
| 94 | (cpuidle_curr_governor->rating < gov->rating && | 95 | (cpuidle_curr_governor->rating < gov->rating && |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 61316fc51548..5951604e7d5c 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
| @@ -186,7 +186,7 @@ static unsigned int get_typical_interval(struct menu_device *data, | |||
| 186 | unsigned int min, max, thresh, avg; | 186 | unsigned int min, max, thresh, avg; |
| 187 | uint64_t sum, variance; | 187 | uint64_t sum, variance; |
| 188 | 188 | ||
| 189 | thresh = UINT_MAX; /* Discard outliers above this value */ | 189 | thresh = INT_MAX; /* Discard outliers above this value */ |
| 190 | 190 | ||
| 191 | again: | 191 | again: |
| 192 | 192 | ||
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 9eac5099098e..579578498deb 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
| @@ -3455,7 +3455,6 @@ static int __init caam_algapi_init(void) | |||
| 3455 | { | 3455 | { |
| 3456 | struct device_node *dev_node; | 3456 | struct device_node *dev_node; |
| 3457 | struct platform_device *pdev; | 3457 | struct platform_device *pdev; |
| 3458 | struct device *ctrldev; | ||
| 3459 | struct caam_drv_private *priv; | 3458 | struct caam_drv_private *priv; |
| 3460 | int i = 0, err = 0; | 3459 | int i = 0, err = 0; |
| 3461 | u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; | 3460 | u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; |
| @@ -3476,16 +3475,17 @@ static int __init caam_algapi_init(void) | |||
| 3476 | return -ENODEV; | 3475 | return -ENODEV; |
| 3477 | } | 3476 | } |
| 3478 | 3477 | ||
| 3479 | ctrldev = &pdev->dev; | 3478 | priv = dev_get_drvdata(&pdev->dev); |
| 3480 | priv = dev_get_drvdata(ctrldev); | ||
| 3481 | of_node_put(dev_node); | 3479 | of_node_put(dev_node); |
| 3482 | 3480 | ||
| 3483 | /* | 3481 | /* |
| 3484 | * If priv is NULL, it's probably because the caam driver wasn't | 3482 | * If priv is NULL, it's probably because the caam driver wasn't |
| 3485 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | 3483 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. |
| 3486 | */ | 3484 | */ |
| 3487 | if (!priv) | 3485 | if (!priv) { |
| 3488 | return -ENODEV; | 3486 | err = -ENODEV; |
| 3487 | goto out_put_dev; | ||
| 3488 | } | ||
| 3489 | 3489 | ||
| 3490 | 3490 | ||
| 3491 | /* | 3491 | /* |
| @@ -3626,6 +3626,8 @@ static int __init caam_algapi_init(void) | |||
| 3626 | if (registered) | 3626 | if (registered) |
| 3627 | pr_info("caam algorithms registered in /proc/crypto\n"); | 3627 | pr_info("caam algorithms registered in /proc/crypto\n"); |
| 3628 | 3628 | ||
| 3629 | out_put_dev: | ||
| 3630 | put_device(&pdev->dev); | ||
| 3629 | return err; | 3631 | return err; |
| 3630 | } | 3632 | } |
| 3631 | 3633 | ||
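All of the caam init-path hunks in this series apply the same fix: of_find_device_by_node() takes a reference on the platform device, so every exit path has to drop it with put_device(). A hedged sketch of the shape of the fix, detached from the caam specifics:

    static int example_init(struct device_node *dev_node)
    {
            struct platform_device *pdev;
            void *priv;
            int err = 0;

            pdev = of_find_device_by_node(dev_node);
            if (!pdev)
                    return -ENODEV;

            priv = dev_get_drvdata(&pdev->dev);
            if (!priv) {                    /* controller not fully initialized */
                    err = -ENODEV;
                    goto out_put_dev;
            }

            /* ... register algorithms ... */

    out_put_dev:
            put_device(&pdev->dev);         /* balance of_find_device_by_node() */
            return err;
    }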
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index a15ce9213310..c61921d32489 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c | |||
| @@ -2492,12 +2492,15 @@ static int __init caam_qi_algapi_init(void) | |||
| 2492 | * If priv is NULL, it's probably because the caam driver wasn't | 2492 | * If priv is NULL, it's probably because the caam driver wasn't |
| 2493 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | 2493 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. |
| 2494 | */ | 2494 | */ |
| 2495 | if (!priv || !priv->qi_present) | 2495 | if (!priv || !priv->qi_present) { |
| 2496 | return -ENODEV; | 2496 | err = -ENODEV; |
| 2497 | goto out_put_dev; | ||
| 2498 | } | ||
| 2497 | 2499 | ||
| 2498 | if (caam_dpaa2) { | 2500 | if (caam_dpaa2) { |
| 2499 | dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); | 2501 | dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); |
| 2500 | return -ENODEV; | 2502 | err = -ENODEV; |
| 2503 | goto out_put_dev; | ||
| 2501 | } | 2504 | } |
| 2502 | 2505 | ||
| 2503 | /* | 2506 | /* |
| @@ -2610,6 +2613,8 @@ static int __init caam_qi_algapi_init(void) | |||
| 2610 | if (registered) | 2613 | if (registered) |
| 2611 | dev_info(priv->qidev, "algorithms registered in /proc/crypto\n"); | 2614 | dev_info(priv->qidev, "algorithms registered in /proc/crypto\n"); |
| 2612 | 2615 | ||
| 2616 | out_put_dev: | ||
| 2617 | put_device(ctrldev); | ||
| 2613 | return err; | 2618 | return err; |
| 2614 | } | 2619 | } |
| 2615 | 2620 | ||
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index d7483e4d0ce2..b1eadc6652b5 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
| @@ -1993,7 +1993,6 @@ static int __init caam_algapi_hash_init(void) | |||
| 1993 | { | 1993 | { |
| 1994 | struct device_node *dev_node; | 1994 | struct device_node *dev_node; |
| 1995 | struct platform_device *pdev; | 1995 | struct platform_device *pdev; |
| 1996 | struct device *ctrldev; | ||
| 1997 | int i = 0, err = 0; | 1996 | int i = 0, err = 0; |
| 1998 | struct caam_drv_private *priv; | 1997 | struct caam_drv_private *priv; |
| 1999 | unsigned int md_limit = SHA512_DIGEST_SIZE; | 1998 | unsigned int md_limit = SHA512_DIGEST_SIZE; |
| @@ -2012,16 +2011,17 @@ static int __init caam_algapi_hash_init(void) | |||
| 2012 | return -ENODEV; | 2011 | return -ENODEV; |
| 2013 | } | 2012 | } |
| 2014 | 2013 | ||
| 2015 | ctrldev = &pdev->dev; | 2014 | priv = dev_get_drvdata(&pdev->dev); |
| 2016 | priv = dev_get_drvdata(ctrldev); | ||
| 2017 | of_node_put(dev_node); | 2015 | of_node_put(dev_node); |
| 2018 | 2016 | ||
| 2019 | /* | 2017 | /* |
| 2020 | * If priv is NULL, it's probably because the caam driver wasn't | 2018 | * If priv is NULL, it's probably because the caam driver wasn't |
| 2021 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | 2019 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. |
| 2022 | */ | 2020 | */ |
| 2023 | if (!priv) | 2021 | if (!priv) { |
| 2024 | return -ENODEV; | 2022 | err = -ENODEV; |
| 2023 | goto out_put_dev; | ||
| 2024 | } | ||
| 2025 | 2025 | ||
| 2026 | /* | 2026 | /* |
| 2027 | * Register crypto algorithms the device supports. First, identify | 2027 | * Register crypto algorithms the device supports. First, identify |
| @@ -2043,8 +2043,10 @@ static int __init caam_algapi_hash_init(void) | |||
| 2043 | * Skip registration of any hashing algorithms if MD block | 2043 | * Skip registration of any hashing algorithms if MD block |
| 2044 | * is not present. | 2044 | * is not present. |
| 2045 | */ | 2045 | */ |
| 2046 | if (!md_inst) | 2046 | if (!md_inst) { |
| 2047 | return -ENODEV; | 2047 | err = -ENODEV; |
| 2048 | goto out_put_dev; | ||
| 2049 | } | ||
| 2048 | 2050 | ||
| 2049 | /* Limit digest size based on LP256 */ | 2051 | /* Limit digest size based on LP256 */ |
| 2050 | if (md_vid == CHA_VER_VID_MD_LP256) | 2052 | if (md_vid == CHA_VER_VID_MD_LP256) |
| @@ -2101,6 +2103,8 @@ static int __init caam_algapi_hash_init(void) | |||
| 2101 | list_add_tail(&t_alg->entry, &hash_list); | 2103 | list_add_tail(&t_alg->entry, &hash_list); |
| 2102 | } | 2104 | } |
| 2103 | 2105 | ||
| 2106 | out_put_dev: | ||
| 2107 | put_device(&pdev->dev); | ||
| 2104 | return err; | 2108 | return err; |
| 2105 | } | 2109 | } |
| 2106 | 2110 | ||
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 77ab28a2811a..58285642306e 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c | |||
| @@ -1042,8 +1042,10 @@ static int __init caam_pkc_init(void) | |||
| 1042 | * If priv is NULL, it's probably because the caam driver wasn't | 1042 | * If priv is NULL, it's probably because the caam driver wasn't |
| 1043 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | 1043 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. |
| 1044 | */ | 1044 | */ |
| 1045 | if (!priv) | 1045 | if (!priv) { |
| 1046 | return -ENODEV; | 1046 | err = -ENODEV; |
| 1047 | goto out_put_dev; | ||
| 1048 | } | ||
| 1047 | 1049 | ||
| 1048 | /* Determine public key hardware accelerator presence. */ | 1050 | /* Determine public key hardware accelerator presence. */ |
| 1049 | if (priv->era < 10) | 1051 | if (priv->era < 10) |
| @@ -1053,8 +1055,10 @@ static int __init caam_pkc_init(void) | |||
| 1053 | pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; | 1055 | pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; |
| 1054 | 1056 | ||
| 1055 | /* Do not register algorithms if PKHA is not present. */ | 1057 | /* Do not register algorithms if PKHA is not present. */ |
| 1056 | if (!pk_inst) | 1058 | if (!pk_inst) { |
| 1057 | return -ENODEV; | 1059 | err = -ENODEV; |
| 1060 | goto out_put_dev; | ||
| 1061 | } | ||
| 1058 | 1062 | ||
| 1059 | err = crypto_register_akcipher(&caam_rsa); | 1063 | err = crypto_register_akcipher(&caam_rsa); |
| 1060 | if (err) | 1064 | if (err) |
| @@ -1063,6 +1067,8 @@ static int __init caam_pkc_init(void) | |||
| 1063 | else | 1067 | else |
| 1064 | dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); | 1068 | dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); |
| 1065 | 1069 | ||
| 1070 | out_put_dev: | ||
| 1071 | put_device(ctrldev); | ||
| 1066 | return err; | 1072 | return err; |
| 1067 | } | 1073 | } |
| 1068 | 1074 | ||
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index a387c8d49a62..95eb5402c59f 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c | |||
| @@ -308,7 +308,6 @@ static int __init caam_rng_init(void) | |||
| 308 | struct device *dev; | 308 | struct device *dev; |
| 309 | struct device_node *dev_node; | 309 | struct device_node *dev_node; |
| 310 | struct platform_device *pdev; | 310 | struct platform_device *pdev; |
| 311 | struct device *ctrldev; | ||
| 312 | struct caam_drv_private *priv; | 311 | struct caam_drv_private *priv; |
| 313 | u32 rng_inst; | 312 | u32 rng_inst; |
| 314 | int err; | 313 | int err; |
| @@ -326,16 +325,17 @@ static int __init caam_rng_init(void) | |||
| 326 | return -ENODEV; | 325 | return -ENODEV; |
| 327 | } | 326 | } |
| 328 | 327 | ||
| 329 | ctrldev = &pdev->dev; | 328 | priv = dev_get_drvdata(&pdev->dev); |
| 330 | priv = dev_get_drvdata(ctrldev); | ||
| 331 | of_node_put(dev_node); | 329 | of_node_put(dev_node); |
| 332 | 330 | ||
| 333 | /* | 331 | /* |
| 334 | * If priv is NULL, it's probably because the caam driver wasn't | 332 | * If priv is NULL, it's probably because the caam driver wasn't |
| 335 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. | 333 | * properly initialized (e.g. RNG4 init failed). Thus, bail out here. |
| 336 | */ | 334 | */ |
| 337 | if (!priv) | 335 | if (!priv) { |
| 338 | return -ENODEV; | 336 | err = -ENODEV; |
| 337 | goto out_put_dev; | ||
| 338 | } | ||
| 339 | 339 | ||
| 340 | /* Check for an instantiated RNG before registration */ | 340 | /* Check for an instantiated RNG before registration */ |
| 341 | if (priv->era < 10) | 341 | if (priv->era < 10) |
| @@ -344,13 +344,16 @@ static int __init caam_rng_init(void) | |||
| 344 | else | 344 | else |
| 345 | rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK; | 345 | rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK; |
| 346 | 346 | ||
| 347 | if (!rng_inst) | 347 | if (!rng_inst) { |
| 348 | return -ENODEV; | 348 | err = -ENODEV; |
| 349 | goto out_put_dev; | ||
| 350 | } | ||
| 349 | 351 | ||
| 350 | dev = caam_jr_alloc(); | 352 | dev = caam_jr_alloc(); |
| 351 | if (IS_ERR(dev)) { | 353 | if (IS_ERR(dev)) { |
| 352 | pr_err("Job Ring Device allocation for transform failed\n"); | 354 | pr_err("Job Ring Device allocation for transform failed\n"); |
| 353 | return PTR_ERR(dev); | 355 | err = PTR_ERR(dev); |
| 356 | goto out_put_dev; | ||
| 354 | } | 357 | } |
| 355 | rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL); | 358 | rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL); |
| 356 | if (!rng_ctx) { | 359 | if (!rng_ctx) { |
| @@ -361,6 +364,7 @@ static int __init caam_rng_init(void) | |||
| 361 | if (err) | 364 | if (err) |
| 362 | goto free_rng_ctx; | 365 | goto free_rng_ctx; |
| 363 | 366 | ||
| 367 | put_device(&pdev->dev); | ||
| 364 | dev_info(dev, "registering rng-caam\n"); | 368 | dev_info(dev, "registering rng-caam\n"); |
| 365 | return hwrng_register(&caam_rng); | 369 | return hwrng_register(&caam_rng); |
| 366 | 370 | ||
| @@ -368,6 +372,8 @@ free_rng_ctx: | |||
| 368 | kfree(rng_ctx); | 372 | kfree(rng_ctx); |
| 369 | free_caam_alloc: | 373 | free_caam_alloc: |
| 370 | caam_jr_free(dev); | 374 | caam_jr_free(dev); |
| 375 | out_put_dev: | ||
| 376 | put_device(&pdev->dev); | ||
| 371 | return err; | 377 | return err; |
| 372 | } | 378 | } |
| 373 | 379 | ||
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index f4e625cf53ca..1afdcb81d8ed 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c | |||
| @@ -241,7 +241,7 @@ | |||
| 241 | struct samsung_aes_variant { | 241 | struct samsung_aes_variant { |
| 242 | unsigned int aes_offset; | 242 | unsigned int aes_offset; |
| 243 | unsigned int hash_offset; | 243 | unsigned int hash_offset; |
| 244 | const char *clk_names[]; | 244 | const char *clk_names[2]; |
| 245 | }; | 245 | }; |
| 246 | 246 | ||
| 247 | struct s5p_aes_reqctx { | 247 | struct s5p_aes_reqctx { |
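The s5p-sss change gives clk_names a fixed bound. One plausible reason (stated as an assumption, not taken from the patch) is that the samsung_aes_variant instances are statically initialized, and initializing a flexible array member that way relies on a GCC extension. A minimal sketch of the distinction, with hypothetical names and values:

    struct variant {
            unsigned int aes_offset;
            const char *clk_names[2];   /* fixed bound; unused slots stay NULL */
    };

    static const struct variant example_variant = {
            .aes_offset = 0x200,
            .clk_names  = { "secss", }, /* fine with a sized array */
    };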
diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 6e928f37d084..0cb8c30ea278 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c | |||
| @@ -86,12 +86,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) | |||
| 86 | { | 86 | { |
| 87 | struct dax_device *dax_dev; | 87 | struct dax_device *dax_dev; |
| 88 | bool dax_enabled = false; | 88 | bool dax_enabled = false; |
| 89 | pgoff_t pgoff, pgoff_end; | ||
| 89 | struct request_queue *q; | 90 | struct request_queue *q; |
| 90 | pgoff_t pgoff; | ||
| 91 | int err, id; | ||
| 92 | pfn_t pfn; | ||
| 93 | long len; | ||
| 94 | char buf[BDEVNAME_SIZE]; | 91 | char buf[BDEVNAME_SIZE]; |
| 92 | void *kaddr, *end_kaddr; | ||
| 93 | pfn_t pfn, end_pfn; | ||
| 94 | sector_t last_page; | ||
| 95 | long len, len2; | ||
| 96 | int err, id; | ||
| 95 | 97 | ||
| 96 | if (blocksize != PAGE_SIZE) { | 98 | if (blocksize != PAGE_SIZE) { |
| 97 | pr_debug("%s: error: unsupported blocksize for dax\n", | 99 | pr_debug("%s: error: unsupported blocksize for dax\n", |
| @@ -113,6 +115,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) | |||
| 113 | return false; | 115 | return false; |
| 114 | } | 116 | } |
| 115 | 117 | ||
| 118 | last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8; | ||
| 119 | err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end); | ||
| 120 | if (err) { | ||
| 121 | pr_debug("%s: error: unaligned partition for dax\n", | ||
| 122 | bdevname(bdev, buf)); | ||
| 123 | return false; | ||
| 124 | } | ||
| 125 | |||
| 116 | dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); | 126 | dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); |
| 117 | if (!dax_dev) { | 127 | if (!dax_dev) { |
| 118 | pr_debug("%s: error: device does not support dax\n", | 128 | pr_debug("%s: error: device does not support dax\n", |
| @@ -121,14 +131,15 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) | |||
| 121 | } | 131 | } |
| 122 | 132 | ||
| 123 | id = dax_read_lock(); | 133 | id = dax_read_lock(); |
| 124 | len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn); | 134 | len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); |
| 135 | len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn); | ||
| 125 | dax_read_unlock(id); | 136 | dax_read_unlock(id); |
| 126 | 137 | ||
| 127 | put_dax(dax_dev); | 138 | put_dax(dax_dev); |
| 128 | 139 | ||
| 129 | if (len < 1) { | 140 | if (len < 1 || len2 < 1) { |
| 130 | pr_debug("%s: error: dax access failed (%ld)\n", | 141 | pr_debug("%s: error: dax access failed (%ld)\n", |
| 131 | bdevname(bdev, buf), len); | 142 | bdevname(bdev, buf), len < 1 ? len : len2); |
| 132 | return false; | 143 | return false; |
| 133 | } | 144 | } |
| 134 | 145 | ||
| @@ -143,13 +154,20 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) | |||
| 143 | */ | 154 | */ |
| 144 | WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)); | 155 | WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)); |
| 145 | dax_enabled = true; | 156 | dax_enabled = true; |
| 146 | } else if (pfn_t_devmap(pfn)) { | 157 | } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) { |
| 147 | struct dev_pagemap *pgmap; | 158 | struct dev_pagemap *pgmap, *end_pgmap; |
| 148 | 159 | ||
| 149 | pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL); | 160 | pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL); |
| 150 | if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX) | 161 | end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL); |
| 162 | if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX | ||
| 163 | && pfn_t_to_page(pfn)->pgmap == pgmap | ||
| 164 | && pfn_t_to_page(end_pfn)->pgmap == pgmap | ||
| 165 | && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr)) | ||
| 166 | && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr))) | ||
| 151 | dax_enabled = true; | 167 | dax_enabled = true; |
| 152 | put_dev_pagemap(pgmap); | 168 | put_dev_pagemap(pgmap); |
| 169 | put_dev_pagemap(end_pgmap); | ||
| 170 | |||
| 153 | } | 171 | } |
| 154 | 172 | ||
| 155 | if (!dax_enabled) { | 173 | if (!dax_enabled) { |
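__bdev_dax_supported() now probes both the first and the last page of the block device, so a partition whose end is not page-aligned, or whose pages are backed by different pagemaps, is refused. A hedged arithmetic sketch of the last_page computation, assuming 512-byte sectors and 4 KiB pages:

    /*
     * last_page = PFN_DOWN(size - 1) * 8;
     *
     * e.g. size = 1 GiB = 0x40000000 bytes:
     *   PFN_DOWN(0x3fffffff) = 0x3ffff   (page holding the last byte)
     *   0x3ffff * 8          = 0x1ffff8  (first 512-byte sector of that page)
     *
     * bdev_dax_pgoff() then translates that sector relative to the partition
     * start and fails if the resulting offset is not page aligned.
     */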
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d2286c7f7222..0b1dfb5bf2d9 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
| @@ -218,6 +218,20 @@ config FSL_EDMA | |||
| 218 | multiplexing capability for DMA request sources(slot). | 218 | multiplexing capability for DMA request sources(slot). |
| 219 | This module can be found on Freescale Vybrid and LS-1 SoCs. | 219 | This module can be found on Freescale Vybrid and LS-1 SoCs. |
| 220 | 220 | ||
| 221 | config FSL_QDMA | ||
| 222 | tristate "NXP Layerscape qDMA engine support" | ||
| 223 | depends on ARM || ARM64 | ||
| 224 | select DMA_ENGINE | ||
| 225 | select DMA_VIRTUAL_CHANNELS | ||
| 226 | select DMA_ENGINE_RAID | ||
| 227 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | ||
| 228 | help | ||
| 229 | Support the NXP Layerscape qDMA engine with command queue and legacy mode. | ||
| 230 | Channel virtualization is supported through enqueuing of DMA jobs to, | ||
| 231 | or dequeuing DMA jobs from, different work queues. | ||
| 232 | This module can be found on NXP Layerscape SoCs. | ||
| 233 | The qDMA driver only works on SoCs with a DPAA hardware block. | ||
| 234 | |||
| 221 | config FSL_RAID | 235 | config FSL_RAID |
| 222 | tristate "Freescale RAID engine Support" | 236 | tristate "Freescale RAID engine Support" |
| 223 | depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH | 237 | depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 09571a81353d..6126e1c3a875 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
| @@ -33,6 +33,7 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o | |||
| 33 | obj-$(CONFIG_FSL_DMA) += fsldma.o | 33 | obj-$(CONFIG_FSL_DMA) += fsldma.o |
| 34 | obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o | 34 | obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o |
| 35 | obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o | 35 | obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o |
| 36 | obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o | ||
| 36 | obj-$(CONFIG_FSL_RAID) += fsl_raid.o | 37 | obj-$(CONFIG_FSL_RAID) += fsl_raid.o |
| 37 | obj-$(CONFIG_HSU_DMA) += hsu/ | 38 | obj-$(CONFIG_HSU_DMA) += hsu/ |
| 38 | obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o | 39 | obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 01d936c9fe89..a0a9cd76c1d4 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
| @@ -134,7 +134,6 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) | |||
| 134 | struct at_desc *ret = NULL; | 134 | struct at_desc *ret = NULL; |
| 135 | unsigned long flags; | 135 | unsigned long flags; |
| 136 | unsigned int i = 0; | 136 | unsigned int i = 0; |
| 137 | LIST_HEAD(tmp_list); | ||
| 138 | 137 | ||
| 139 | spin_lock_irqsave(&atchan->lock, flags); | 138 | spin_lock_irqsave(&atchan->lock, flags); |
| 140 | list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { | 139 | list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { |
| @@ -1387,8 +1386,6 @@ static int atc_pause(struct dma_chan *chan) | |||
| 1387 | int chan_id = atchan->chan_common.chan_id; | 1386 | int chan_id = atchan->chan_common.chan_id; |
| 1388 | unsigned long flags; | 1387 | unsigned long flags; |
| 1389 | 1388 | ||
| 1390 | LIST_HEAD(list); | ||
| 1391 | |||
| 1392 | dev_vdbg(chan2dev(chan), "%s\n", __func__); | 1389 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
| 1393 | 1390 | ||
| 1394 | spin_lock_irqsave(&atchan->lock, flags); | 1391 | spin_lock_irqsave(&atchan->lock, flags); |
| @@ -1408,8 +1405,6 @@ static int atc_resume(struct dma_chan *chan) | |||
| 1408 | int chan_id = atchan->chan_common.chan_id; | 1405 | int chan_id = atchan->chan_common.chan_id; |
| 1409 | unsigned long flags; | 1406 | unsigned long flags; |
| 1410 | 1407 | ||
| 1411 | LIST_HEAD(list); | ||
| 1412 | |||
| 1413 | dev_vdbg(chan2dev(chan), "%s\n", __func__); | 1408 | dev_vdbg(chan2dev(chan), "%s\n", __func__); |
| 1414 | 1409 | ||
| 1415 | if (!atc_chan_is_paused(atchan)) | 1410 | if (!atc_chan_is_paused(atchan)) |
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index ae10f5614f95..ec8a291d62ba 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c | |||
| @@ -2,9 +2,6 @@ | |||
| 2 | /* | 2 | /* |
| 3 | * BCM2835 DMA engine support | 3 | * BCM2835 DMA engine support |
| 4 | * | 4 | * |
| 5 | * This driver only supports cyclic DMA transfers | ||
| 6 | * as needed for the I2S module. | ||
| 7 | * | ||
| 8 | * Author: Florian Meier <florian.meier@koalo.de> | 5 | * Author: Florian Meier <florian.meier@koalo.de> |
| 9 | * Copyright 2013 | 6 | * Copyright 2013 |
| 10 | * | 7 | * |
| @@ -42,7 +39,6 @@ | |||
| 42 | 39 | ||
| 43 | struct bcm2835_dmadev { | 40 | struct bcm2835_dmadev { |
| 44 | struct dma_device ddev; | 41 | struct dma_device ddev; |
| 45 | spinlock_t lock; | ||
| 46 | void __iomem *base; | 42 | void __iomem *base; |
| 47 | struct device_dma_parameters dma_parms; | 43 | struct device_dma_parameters dma_parms; |
| 48 | }; | 44 | }; |
| @@ -64,7 +60,6 @@ struct bcm2835_cb_entry { | |||
| 64 | 60 | ||
| 65 | struct bcm2835_chan { | 61 | struct bcm2835_chan { |
| 66 | struct virt_dma_chan vc; | 62 | struct virt_dma_chan vc; |
| 67 | struct list_head node; | ||
| 68 | 63 | ||
| 69 | struct dma_slave_config cfg; | 64 | struct dma_slave_config cfg; |
| 70 | unsigned int dreq; | 65 | unsigned int dreq; |
| @@ -312,8 +307,7 @@ static struct bcm2835_desc *bcm2835_dma_create_cb_chain( | |||
| 312 | return NULL; | 307 | return NULL; |
| 313 | 308 | ||
| 314 | /* allocate and setup the descriptor. */ | 309 | /* allocate and setup the descriptor. */ |
| 315 | d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry), | 310 | d = kzalloc(struct_size(d, cb_list, frames), gfp); |
| 316 | gfp); | ||
| 317 | if (!d) | 311 | if (!d) |
| 318 | return NULL; | 312 | return NULL; |
| 319 | 313 | ||
| @@ -406,7 +400,7 @@ static void bcm2835_dma_fill_cb_chain_with_sg( | |||
| 406 | } | 400 | } |
| 407 | } | 401 | } |
| 408 | 402 | ||
| 409 | static int bcm2835_dma_abort(struct bcm2835_chan *c) | 403 | static void bcm2835_dma_abort(struct bcm2835_chan *c) |
| 410 | { | 404 | { |
| 411 | void __iomem *chan_base = c->chan_base; | 405 | void __iomem *chan_base = c->chan_base; |
| 412 | long int timeout = 10000; | 406 | long int timeout = 10000; |
| @@ -416,7 +410,7 @@ static int bcm2835_dma_abort(struct bcm2835_chan *c) | |||
| 416 | * (The ACTIVE flag in the CS register is not a reliable indicator.) | 410 | * (The ACTIVE flag in the CS register is not a reliable indicator.) |
| 417 | */ | 411 | */ |
| 418 | if (!readl(chan_base + BCM2835_DMA_ADDR)) | 412 | if (!readl(chan_base + BCM2835_DMA_ADDR)) |
| 419 | return 0; | 413 | return; |
| 420 | 414 | ||
| 421 | /* Write 0 to the active bit - Pause the DMA */ | 415 | /* Write 0 to the active bit - Pause the DMA */ |
| 422 | writel(0, chan_base + BCM2835_DMA_CS); | 416 | writel(0, chan_base + BCM2835_DMA_CS); |
| @@ -432,7 +426,6 @@ static int bcm2835_dma_abort(struct bcm2835_chan *c) | |||
| 432 | "failed to complete outstanding writes\n"); | 426 | "failed to complete outstanding writes\n"); |
| 433 | 427 | ||
| 434 | writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); | 428 | writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); |
| 435 | return 0; | ||
| 436 | } | 429 | } |
| 437 | 430 | ||
| 438 | static void bcm2835_dma_start_desc(struct bcm2835_chan *c) | 431 | static void bcm2835_dma_start_desc(struct bcm2835_chan *c) |
| @@ -504,8 +497,12 @@ static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) | |||
| 504 | 497 | ||
| 505 | dev_dbg(dev, "Allocating DMA channel %d\n", c->ch); | 498 | dev_dbg(dev, "Allocating DMA channel %d\n", c->ch); |
| 506 | 499 | ||
| 500 | /* | ||
| 501 | * Control blocks are 256 bit in length and must start at a 256 bit | ||
| 502 | * (32 byte) aligned address (BCM2835 ARM Peripherals, sec. 4.2.1.1). | ||
| 503 | */ | ||
| 507 | c->cb_pool = dma_pool_create(dev_name(dev), dev, | 504 | c->cb_pool = dma_pool_create(dev_name(dev), dev, |
| 508 | sizeof(struct bcm2835_dma_cb), 0, 0); | 505 | sizeof(struct bcm2835_dma_cb), 32, 0); |
| 509 | if (!c->cb_pool) { | 506 | if (!c->cb_pool) { |
| 510 | dev_err(dev, "unable to allocate descriptor pool\n"); | 507 | dev_err(dev, "unable to allocate descriptor pool\n"); |
| 511 | return -ENOMEM; | 508 | return -ENOMEM; |
| @@ -774,17 +771,11 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan, | |||
| 774 | static int bcm2835_dma_terminate_all(struct dma_chan *chan) | 771 | static int bcm2835_dma_terminate_all(struct dma_chan *chan) |
| 775 | { | 772 | { |
| 776 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | 773 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); |
| 777 | struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); | ||
| 778 | unsigned long flags; | 774 | unsigned long flags; |
| 779 | LIST_HEAD(head); | 775 | LIST_HEAD(head); |
| 780 | 776 | ||
| 781 | spin_lock_irqsave(&c->vc.lock, flags); | 777 | spin_lock_irqsave(&c->vc.lock, flags); |
| 782 | 778 | ||
| 783 | /* Prevent this channel being scheduled */ | ||
| 784 | spin_lock(&d->lock); | ||
| 785 | list_del_init(&c->node); | ||
| 786 | spin_unlock(&d->lock); | ||
| 787 | |||
| 788 | /* stop DMA activity */ | 779 | /* stop DMA activity */ |
| 789 | if (c->desc) { | 780 | if (c->desc) { |
| 790 | vchan_terminate_vdesc(&c->desc->vd); | 781 | vchan_terminate_vdesc(&c->desc->vd); |
| @@ -817,7 +808,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, | |||
| 817 | 808 | ||
| 818 | c->vc.desc_free = bcm2835_dma_desc_free; | 809 | c->vc.desc_free = bcm2835_dma_desc_free; |
| 819 | vchan_init(&c->vc, &d->ddev); | 810 | vchan_init(&c->vc, &d->ddev); |
| 820 | INIT_LIST_HEAD(&c->node); | ||
| 821 | 811 | ||
| 822 | c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); | 812 | c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); |
| 823 | c->ch = chan_id; | 813 | c->ch = chan_id; |
| @@ -920,7 +910,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev) | |||
| 920 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | 910 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
| 921 | od->ddev.dev = &pdev->dev; | 911 | od->ddev.dev = &pdev->dev; |
| 922 | INIT_LIST_HEAD(&od->ddev.channels); | 912 | INIT_LIST_HEAD(&od->ddev.channels); |
| 923 | spin_lock_init(&od->lock); | ||
| 924 | 913 | ||
| 925 | platform_set_drvdata(pdev, od); | 914 | platform_set_drvdata(pdev, od); |
| 926 | 915 | ||
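The bcm2835-dma hunk documents and enforces the hardware alignment rule for control blocks: they are 256 bits wide and must start on a 256-bit (32-byte) boundary, so the DMA pool is now created with a 32-byte alignment. A short sketch of the call, where the final boundary argument of 0 means no crossing restriction:

    c->cb_pool = dma_pool_create(dev_name(dev), dev,
                                 sizeof(struct bcm2835_dma_cb),
                                 32,    /* align: 256-bit control blocks */
                                 0);    /* boundary: none */
    if (!c->cb_pool)
            return -ENOMEM;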
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 15b2453d2647..ffc0adc2f6ce 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c | |||
| @@ -367,8 +367,7 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs) | |||
| 367 | struct axi_dmac_desc *desc; | 367 | struct axi_dmac_desc *desc; |
| 368 | unsigned int i; | 368 | unsigned int i; |
| 369 | 369 | ||
| 370 | desc = kzalloc(sizeof(struct axi_dmac_desc) + | 370 | desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT); |
| 371 | sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT); | ||
| 372 | if (!desc) | 371 | if (!desc) |
| 373 | return NULL; | 372 | return NULL; |
| 374 | 373 | ||
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index a8b6225faa12..9ce0a386225b 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c | |||
| @@ -838,9 +838,8 @@ static int jz4780_dma_probe(struct platform_device *pdev) | |||
| 838 | if (!soc_data) | 838 | if (!soc_data) |
| 839 | return -EINVAL; | 839 | return -EINVAL; |
| 840 | 840 | ||
| 841 | jzdma = devm_kzalloc(dev, sizeof(*jzdma) | 841 | jzdma = devm_kzalloc(dev, struct_size(jzdma, chan, |
| 842 | + sizeof(*jzdma->chan) * soc_data->nb_channels, | 842 | soc_data->nb_channels), GFP_KERNEL); |
| 843 | GFP_KERNEL); | ||
| 844 | if (!jzdma) | 843 | if (!jzdma) |
| 845 | return -ENOMEM; | 844 | return -ENOMEM; |
| 846 | 845 | ||
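Several allocation sites in this series (bcm2835-dma, dma-axi-dmac, dma-jz4780) switch from open-coded "sizeof(*p) + n * sizeof(elem)" arithmetic to struct_size(), which computes the same size with overflow checking. A minimal sketch of the idiom, using a hypothetical struct:

    #include <linux/overflow.h>

    struct example_desc {
            unsigned int num_sgs;
            struct scatterlist sg[];    /* trailing flexible array */
    };

    struct example_desc *d;

    /* Before: manual arithmetic, which can silently overflow. */
    d = kzalloc(sizeof(*d) + num_sgs * sizeof(struct scatterlist), GFP_NOWAIT);

    /* After: struct_size() saturates to SIZE_MAX on overflow, so the
     * allocation simply fails instead of being undersized. */
    d = kzalloc(struct_size(d, sg, num_sgs), GFP_NOWAIT);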
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 6511928b4cdf..b96814a7dceb 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
| @@ -200,15 +200,20 @@ struct dmatest_done { | |||
| 200 | wait_queue_head_t *wait; | 200 | wait_queue_head_t *wait; |
| 201 | }; | 201 | }; |
| 202 | 202 | ||
| 203 | struct dmatest_data { | ||
| 204 | u8 **raw; | ||
| 205 | u8 **aligned; | ||
| 206 | unsigned int cnt; | ||
| 207 | unsigned int off; | ||
| 208 | }; | ||
| 209 | |||
| 203 | struct dmatest_thread { | 210 | struct dmatest_thread { |
| 204 | struct list_head node; | 211 | struct list_head node; |
| 205 | struct dmatest_info *info; | 212 | struct dmatest_info *info; |
| 206 | struct task_struct *task; | 213 | struct task_struct *task; |
| 207 | struct dma_chan *chan; | 214 | struct dma_chan *chan; |
| 208 | u8 **srcs; | 215 | struct dmatest_data src; |
| 209 | u8 **usrcs; | 216 | struct dmatest_data dst; |
| 210 | u8 **dsts; | ||
| 211 | u8 **udsts; | ||
| 212 | enum dma_transaction_type type; | 217 | enum dma_transaction_type type; |
| 213 | wait_queue_head_t done_wait; | 218 | wait_queue_head_t done_wait; |
| 214 | struct dmatest_done test_done; | 219 | struct dmatest_done test_done; |
| @@ -481,6 +486,53 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) | |||
| 481 | return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10)); | 486 | return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10)); |
| 482 | } | 487 | } |
| 483 | 488 | ||
| 489 | static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt) | ||
| 490 | { | ||
| 491 | unsigned int i; | ||
| 492 | |||
| 493 | for (i = 0; i < cnt; i++) | ||
| 494 | kfree(d->raw[i]); | ||
| 495 | |||
| 496 | kfree(d->aligned); | ||
| 497 | kfree(d->raw); | ||
| 498 | } | ||
| 499 | |||
| 500 | static void dmatest_free_test_data(struct dmatest_data *d) | ||
| 501 | { | ||
| 502 | __dmatest_free_test_data(d, d->cnt); | ||
| 503 | } | ||
| 504 | |||
| 505 | static int dmatest_alloc_test_data(struct dmatest_data *d, | ||
| 506 | unsigned int buf_size, u8 align) | ||
| 507 | { | ||
| 508 | unsigned int i = 0; | ||
| 509 | |||
| 510 | d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL); | ||
| 511 | if (!d->raw) | ||
| 512 | return -ENOMEM; | ||
| 513 | |||
| 514 | d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL); | ||
| 515 | if (!d->aligned) | ||
| 516 | goto err; | ||
| 517 | |||
| 518 | for (i = 0; i < d->cnt; i++) { | ||
| 519 | d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL); | ||
| 520 | if (!d->raw[i]) | ||
| 521 | goto err; | ||
| 522 | |||
| 523 | /* align to alignment restriction */ | ||
| 524 | if (align) | ||
| 525 | d->aligned[i] = PTR_ALIGN(d->raw[i], align); | ||
| 526 | else | ||
| 527 | d->aligned[i] = d->raw[i]; | ||
| 528 | } | ||
| 529 | |||
| 530 | return 0; | ||
| 531 | err: | ||
| 532 | __dmatest_free_test_data(d, i); | ||
| 533 | return -ENOMEM; | ||
| 534 | } | ||
| 535 | |||
| 484 | /* | 536 | /* |
| 485 | * This function repeatedly tests DMA transfers of various lengths and | 537 | * This function repeatedly tests DMA transfers of various lengths and |
| 486 | * offsets for a given operation type until it is told to exit by | 538 | * offsets for a given operation type until it is told to exit by |
| @@ -511,8 +563,9 @@ static int dmatest_func(void *data) | |||
| 511 | enum dma_ctrl_flags flags; | 563 | enum dma_ctrl_flags flags; |
| 512 | u8 *pq_coefs = NULL; | 564 | u8 *pq_coefs = NULL; |
| 513 | int ret; | 565 | int ret; |
| 514 | int src_cnt; | 566 | unsigned int buf_size; |
| 515 | int dst_cnt; | 567 | struct dmatest_data *src; |
| 568 | struct dmatest_data *dst; | ||
| 516 | int i; | 569 | int i; |
| 517 | ktime_t ktime, start, diff; | 570 | ktime_t ktime, start, diff; |
| 518 | ktime_t filltime = 0; | 571 | ktime_t filltime = 0; |
| @@ -535,25 +588,27 @@ static int dmatest_func(void *data) | |||
| 535 | params = &info->params; | 588 | params = &info->params; |
| 536 | chan = thread->chan; | 589 | chan = thread->chan; |
| 537 | dev = chan->device; | 590 | dev = chan->device; |
| 591 | src = &thread->src; | ||
| 592 | dst = &thread->dst; | ||
| 538 | if (thread->type == DMA_MEMCPY) { | 593 | if (thread->type == DMA_MEMCPY) { |
| 539 | align = params->alignment < 0 ? dev->copy_align : | 594 | align = params->alignment < 0 ? dev->copy_align : |
| 540 | params->alignment; | 595 | params->alignment; |
| 541 | src_cnt = dst_cnt = 1; | 596 | src->cnt = dst->cnt = 1; |
| 542 | } else if (thread->type == DMA_MEMSET) { | 597 | } else if (thread->type == DMA_MEMSET) { |
| 543 | align = params->alignment < 0 ? dev->fill_align : | 598 | align = params->alignment < 0 ? dev->fill_align : |
| 544 | params->alignment; | 599 | params->alignment; |
| 545 | src_cnt = dst_cnt = 1; | 600 | src->cnt = dst->cnt = 1; |
| 546 | is_memset = true; | 601 | is_memset = true; |
| 547 | } else if (thread->type == DMA_XOR) { | 602 | } else if (thread->type == DMA_XOR) { |
| 548 | /* force odd to ensure dst = src */ | 603 | /* force odd to ensure dst = src */ |
| 549 | src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); | 604 | src->cnt = min_odd(params->xor_sources | 1, dev->max_xor); |
| 550 | dst_cnt = 1; | 605 | dst->cnt = 1; |
| 551 | align = params->alignment < 0 ? dev->xor_align : | 606 | align = params->alignment < 0 ? dev->xor_align : |
| 552 | params->alignment; | 607 | params->alignment; |
| 553 | } else if (thread->type == DMA_PQ) { | 608 | } else if (thread->type == DMA_PQ) { |
| 554 | /* force odd to ensure dst = src */ | 609 | /* force odd to ensure dst = src */ |
| 555 | src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); | 610 | src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); |
| 556 | dst_cnt = 2; | 611 | dst->cnt = 2; |
| 557 | align = params->alignment < 0 ? dev->pq_align : | 612 | align = params->alignment < 0 ? dev->pq_align : |
| 558 | params->alignment; | 613 | params->alignment; |
| 559 | 614 | ||
| @@ -561,75 +616,38 @@ static int dmatest_func(void *data) | |||
| 561 | if (!pq_coefs) | 616 | if (!pq_coefs) |
| 562 | goto err_thread_type; | 617 | goto err_thread_type; |
| 563 | 618 | ||
| 564 | for (i = 0; i < src_cnt; i++) | 619 | for (i = 0; i < src->cnt; i++) |
| 565 | pq_coefs[i] = 1; | 620 | pq_coefs[i] = 1; |
| 566 | } else | 621 | } else |
| 567 | goto err_thread_type; | 622 | goto err_thread_type; |
| 568 | 623 | ||
| 569 | /* Check if buffer count fits into map count variable (u8) */ | 624 | /* Check if buffer count fits into map count variable (u8) */ |
| 570 | if ((src_cnt + dst_cnt) >= 255) { | 625 | if ((src->cnt + dst->cnt) >= 255) { |
| 571 | pr_err("too many buffers (%d of 255 supported)\n", | 626 | pr_err("too many buffers (%d of 255 supported)\n", |
| 572 | src_cnt + dst_cnt); | 627 | src->cnt + dst->cnt); |
| 573 | goto err_free_coefs; | 628 | goto err_free_coefs; |
| 574 | } | 629 | } |
| 575 | 630 | ||
| 576 | if (1 << align > params->buf_size) { | 631 | buf_size = params->buf_size; |
| 632 | if (1 << align > buf_size) { | ||
| 577 | pr_err("%u-byte buffer too small for %d-byte alignment\n", | 633 | pr_err("%u-byte buffer too small for %d-byte alignment\n", |
| 578 | params->buf_size, 1 << align); | 634 | buf_size, 1 << align); |
| 579 | goto err_free_coefs; | 635 | goto err_free_coefs; |
| 580 | } | 636 | } |
| 581 | 637 | ||
| 582 | thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL); | 638 | if (dmatest_alloc_test_data(src, buf_size, align) < 0) |
| 583 | if (!thread->srcs) | ||
| 584 | goto err_free_coefs; | 639 | goto err_free_coefs; |
| 585 | 640 | ||
| 586 | thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL); | 641 | if (dmatest_alloc_test_data(dst, buf_size, align) < 0) |
| 587 | if (!thread->usrcs) | 642 | goto err_src; |
| 588 | goto err_usrcs; | ||
| 589 | |||
| 590 | for (i = 0; i < src_cnt; i++) { | ||
| 591 | thread->usrcs[i] = kmalloc(params->buf_size + align, | ||
| 592 | GFP_KERNEL); | ||
| 593 | if (!thread->usrcs[i]) | ||
| 594 | goto err_srcbuf; | ||
| 595 | |||
| 596 | /* align srcs to alignment restriction */ | ||
| 597 | if (align) | ||
| 598 | thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align); | ||
| 599 | else | ||
| 600 | thread->srcs[i] = thread->usrcs[i]; | ||
| 601 | } | ||
| 602 | thread->srcs[i] = NULL; | ||
| 603 | |||
| 604 | thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL); | ||
| 605 | if (!thread->dsts) | ||
| 606 | goto err_dsts; | ||
| 607 | |||
| 608 | thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL); | ||
| 609 | if (!thread->udsts) | ||
| 610 | goto err_udsts; | ||
| 611 | |||
| 612 | for (i = 0; i < dst_cnt; i++) { | ||
| 613 | thread->udsts[i] = kmalloc(params->buf_size + align, | ||
| 614 | GFP_KERNEL); | ||
| 615 | if (!thread->udsts[i]) | ||
| 616 | goto err_dstbuf; | ||
| 617 | |||
| 618 | /* align dsts to alignment restriction */ | ||
| 619 | if (align) | ||
| 620 | thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align); | ||
| 621 | else | ||
| 622 | thread->dsts[i] = thread->udsts[i]; | ||
| 623 | } | ||
| 624 | thread->dsts[i] = NULL; | ||
| 625 | 643 | ||
| 626 | set_user_nice(current, 10); | 644 | set_user_nice(current, 10); |
| 627 | 645 | ||
| 628 | srcs = kcalloc(src_cnt, sizeof(dma_addr_t), GFP_KERNEL); | 646 | srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL); |
| 629 | if (!srcs) | 647 | if (!srcs) |
| 630 | goto err_dstbuf; | 648 | goto err_dst; |
| 631 | 649 | ||
| 632 | dma_pq = kcalloc(dst_cnt, sizeof(dma_addr_t), GFP_KERNEL); | 650 | dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL); |
| 633 | if (!dma_pq) | 651 | if (!dma_pq) |
| 634 | goto err_srcs_array; | 652 | goto err_srcs_array; |
| 635 | 653 | ||
| @@ -644,21 +662,21 @@ static int dmatest_func(void *data) | |||
| 644 | struct dma_async_tx_descriptor *tx = NULL; | 662 | struct dma_async_tx_descriptor *tx = NULL; |
| 645 | struct dmaengine_unmap_data *um; | 663 | struct dmaengine_unmap_data *um; |
| 646 | dma_addr_t *dsts; | 664 | dma_addr_t *dsts; |
| 647 | unsigned int src_off, dst_off, len; | 665 | unsigned int len; |
| 648 | 666 | ||
| 649 | total_tests++; | 667 | total_tests++; |
| 650 | 668 | ||
| 651 | if (params->transfer_size) { | 669 | if (params->transfer_size) { |
| 652 | if (params->transfer_size >= params->buf_size) { | 670 | if (params->transfer_size >= buf_size) { |
| 653 | pr_err("%u-byte transfer size must be lower than %u-buffer size\n", | 671 | pr_err("%u-byte transfer size must be lower than %u-buffer size\n", |
| 654 | params->transfer_size, params->buf_size); | 672 | params->transfer_size, buf_size); |
| 655 | break; | 673 | break; |
| 656 | } | 674 | } |
| 657 | len = params->transfer_size; | 675 | len = params->transfer_size; |
| 658 | } else if (params->norandom) { | 676 | } else if (params->norandom) { |
| 659 | len = params->buf_size; | 677 | len = buf_size; |
| 660 | } else { | 678 | } else { |
| 661 | len = dmatest_random() % params->buf_size + 1; | 679 | len = dmatest_random() % buf_size + 1; |
| 662 | } | 680 | } |
| 663 | 681 | ||
| 664 | /* Do not alter transfer size explicitly defined by user */ | 682 | /* Do not alter transfer size explicitly defined by user */ |
| @@ -670,57 +688,57 @@ static int dmatest_func(void *data) | |||
| 670 | total_len += len; | 688 | total_len += len; |
| 671 | 689 | ||
| 672 | if (params->norandom) { | 690 | if (params->norandom) { |
| 673 | src_off = 0; | 691 | src->off = 0; |
| 674 | dst_off = 0; | 692 | dst->off = 0; |
| 675 | } else { | 693 | } else { |
| 676 | src_off = dmatest_random() % (params->buf_size - len + 1); | 694 | src->off = dmatest_random() % (buf_size - len + 1); |
| 677 | dst_off = dmatest_random() % (params->buf_size - len + 1); | 695 | dst->off = dmatest_random() % (buf_size - len + 1); |
| 678 | 696 | ||
| 679 | src_off = (src_off >> align) << align; | 697 | src->off = (src->off >> align) << align; |
| 680 | dst_off = (dst_off >> align) << align; | 698 | dst->off = (dst->off >> align) << align; |
| 681 | } | 699 | } |
| 682 | 700 | ||
| 683 | if (!params->noverify) { | 701 | if (!params->noverify) { |
| 684 | start = ktime_get(); | 702 | start = ktime_get(); |
| 685 | dmatest_init_srcs(thread->srcs, src_off, len, | 703 | dmatest_init_srcs(src->aligned, src->off, len, |
| 686 | params->buf_size, is_memset); | 704 | buf_size, is_memset); |
| 687 | dmatest_init_dsts(thread->dsts, dst_off, len, | 705 | dmatest_init_dsts(dst->aligned, dst->off, len, |
| 688 | params->buf_size, is_memset); | 706 | buf_size, is_memset); |
| 689 | 707 | ||
| 690 | diff = ktime_sub(ktime_get(), start); | 708 | diff = ktime_sub(ktime_get(), start); |
| 691 | filltime = ktime_add(filltime, diff); | 709 | filltime = ktime_add(filltime, diff); |
| 692 | } | 710 | } |
| 693 | 711 | ||
| 694 | um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt, | 712 | um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt, |
| 695 | GFP_KERNEL); | 713 | GFP_KERNEL); |
| 696 | if (!um) { | 714 | if (!um) { |
| 697 | failed_tests++; | 715 | failed_tests++; |
| 698 | result("unmap data NULL", total_tests, | 716 | result("unmap data NULL", total_tests, |
| 699 | src_off, dst_off, len, ret); | 717 | src->off, dst->off, len, ret); |
| 700 | continue; | 718 | continue; |
| 701 | } | 719 | } |
| 702 | 720 | ||
| 703 | um->len = params->buf_size; | 721 | um->len = buf_size; |
| 704 | for (i = 0; i < src_cnt; i++) { | 722 | for (i = 0; i < src->cnt; i++) { |
| 705 | void *buf = thread->srcs[i]; | 723 | void *buf = src->aligned[i]; |
| 706 | struct page *pg = virt_to_page(buf); | 724 | struct page *pg = virt_to_page(buf); |
| 707 | unsigned long pg_off = offset_in_page(buf); | 725 | unsigned long pg_off = offset_in_page(buf); |
| 708 | 726 | ||
| 709 | um->addr[i] = dma_map_page(dev->dev, pg, pg_off, | 727 | um->addr[i] = dma_map_page(dev->dev, pg, pg_off, |
| 710 | um->len, DMA_TO_DEVICE); | 728 | um->len, DMA_TO_DEVICE); |
| 711 | srcs[i] = um->addr[i] + src_off; | 729 | srcs[i] = um->addr[i] + src->off; |
| 712 | ret = dma_mapping_error(dev->dev, um->addr[i]); | 730 | ret = dma_mapping_error(dev->dev, um->addr[i]); |
| 713 | if (ret) { | 731 | if (ret) { |
| 714 | result("src mapping error", total_tests, | 732 | result("src mapping error", total_tests, |
| 715 | src_off, dst_off, len, ret); | 733 | src->off, dst->off, len, ret); |
| 716 | goto error_unmap_continue; | 734 | goto error_unmap_continue; |
| 717 | } | 735 | } |
| 718 | um->to_cnt++; | 736 | um->to_cnt++; |
| 719 | } | 737 | } |
| 720 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ | 738 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ |
| 721 | dsts = &um->addr[src_cnt]; | 739 | dsts = &um->addr[src->cnt]; |
| 722 | for (i = 0; i < dst_cnt; i++) { | 740 | for (i = 0; i < dst->cnt; i++) { |
| 723 | void *buf = thread->dsts[i]; | 741 | void *buf = dst->aligned[i]; |
| 724 | struct page *pg = virt_to_page(buf); | 742 | struct page *pg = virt_to_page(buf); |
| 725 | unsigned long pg_off = offset_in_page(buf); | 743 | unsigned long pg_off = offset_in_page(buf); |
| 726 | 744 | ||
| @@ -729,7 +747,7 @@ static int dmatest_func(void *data) | |||
| 729 | ret = dma_mapping_error(dev->dev, dsts[i]); | 747 | ret = dma_mapping_error(dev->dev, dsts[i]); |
| 730 | if (ret) { | 748 | if (ret) { |
| 731 | result("dst mapping error", total_tests, | 749 | result("dst mapping error", total_tests, |
| 732 | src_off, dst_off, len, ret); | 750 | src->off, dst->off, len, ret); |
| 733 | goto error_unmap_continue; | 751 | goto error_unmap_continue; |
| 734 | } | 752 | } |
| 735 | um->bidi_cnt++; | 753 | um->bidi_cnt++; |
| @@ -737,29 +755,29 @@ static int dmatest_func(void *data) | |||
| 737 | 755 | ||
| 738 | if (thread->type == DMA_MEMCPY) | 756 | if (thread->type == DMA_MEMCPY) |
| 739 | tx = dev->device_prep_dma_memcpy(chan, | 757 | tx = dev->device_prep_dma_memcpy(chan, |
| 740 | dsts[0] + dst_off, | 758 | dsts[0] + dst->off, |
| 741 | srcs[0], len, flags); | 759 | srcs[0], len, flags); |
| 742 | else if (thread->type == DMA_MEMSET) | 760 | else if (thread->type == DMA_MEMSET) |
| 743 | tx = dev->device_prep_dma_memset(chan, | 761 | tx = dev->device_prep_dma_memset(chan, |
| 744 | dsts[0] + dst_off, | 762 | dsts[0] + dst->off, |
| 745 | *(thread->srcs[0] + src_off), | 763 | *(src->aligned[0] + src->off), |
| 746 | len, flags); | 764 | len, flags); |
| 747 | else if (thread->type == DMA_XOR) | 765 | else if (thread->type == DMA_XOR) |
| 748 | tx = dev->device_prep_dma_xor(chan, | 766 | tx = dev->device_prep_dma_xor(chan, |
| 749 | dsts[0] + dst_off, | 767 | dsts[0] + dst->off, |
| 750 | srcs, src_cnt, | 768 | srcs, src->cnt, |
| 751 | len, flags); | 769 | len, flags); |
| 752 | else if (thread->type == DMA_PQ) { | 770 | else if (thread->type == DMA_PQ) { |
| 753 | for (i = 0; i < dst_cnt; i++) | 771 | for (i = 0; i < dst->cnt; i++) |
| 754 | dma_pq[i] = dsts[i] + dst_off; | 772 | dma_pq[i] = dsts[i] + dst->off; |
| 755 | tx = dev->device_prep_dma_pq(chan, dma_pq, srcs, | 773 | tx = dev->device_prep_dma_pq(chan, dma_pq, srcs, |
| 756 | src_cnt, pq_coefs, | 774 | src->cnt, pq_coefs, |
| 757 | len, flags); | 775 | len, flags); |
| 758 | } | 776 | } |
| 759 | 777 | ||
| 760 | if (!tx) { | 778 | if (!tx) { |
| 761 | result("prep error", total_tests, src_off, | 779 | result("prep error", total_tests, src->off, |
| 762 | dst_off, len, ret); | 780 | dst->off, len, ret); |
| 763 | msleep(100); | 781 | msleep(100); |
| 764 | goto error_unmap_continue; | 782 | goto error_unmap_continue; |
| 765 | } | 783 | } |
| @@ -770,8 +788,8 @@ static int dmatest_func(void *data) | |||
| 770 | cookie = tx->tx_submit(tx); | 788 | cookie = tx->tx_submit(tx); |
| 771 | 789 | ||
| 772 | if (dma_submit_error(cookie)) { | 790 | if (dma_submit_error(cookie)) { |
| 773 | result("submit error", total_tests, src_off, | 791 | result("submit error", total_tests, src->off, |
| 774 | dst_off, len, ret); | 792 | dst->off, len, ret); |
| 775 | msleep(100); | 793 | msleep(100); |
| 776 | goto error_unmap_continue; | 794 | goto error_unmap_continue; |
| 777 | } | 795 | } |
| @@ -783,58 +801,58 @@ static int dmatest_func(void *data) | |||
| 783 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 801 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
| 784 | 802 | ||
| 785 | if (!done->done) { | 803 | if (!done->done) { |
| 786 | result("test timed out", total_tests, src_off, dst_off, | 804 | result("test timed out", total_tests, src->off, dst->off, |
| 787 | len, 0); | 805 | len, 0); |
| 788 | goto error_unmap_continue; | 806 | goto error_unmap_continue; |
| 789 | } else if (status != DMA_COMPLETE) { | 807 | } else if (status != DMA_COMPLETE) { |
| 790 | result(status == DMA_ERROR ? | 808 | result(status == DMA_ERROR ? |
| 791 | "completion error status" : | 809 | "completion error status" : |
| 792 | "completion busy status", total_tests, src_off, | 810 | "completion busy status", total_tests, src->off, |
| 793 | dst_off, len, ret); | 811 | dst->off, len, ret); |
| 794 | goto error_unmap_continue; | 812 | goto error_unmap_continue; |
| 795 | } | 813 | } |
| 796 | 814 | ||
| 797 | dmaengine_unmap_put(um); | 815 | dmaengine_unmap_put(um); |
| 798 | 816 | ||
| 799 | if (params->noverify) { | 817 | if (params->noverify) { |
| 800 | verbose_result("test passed", total_tests, src_off, | 818 | verbose_result("test passed", total_tests, src->off, |
| 801 | dst_off, len, 0); | 819 | dst->off, len, 0); |
| 802 | continue; | 820 | continue; |
| 803 | } | 821 | } |
| 804 | 822 | ||
| 805 | start = ktime_get(); | 823 | start = ktime_get(); |
| 806 | pr_debug("%s: verifying source buffer...\n", current->comm); | 824 | pr_debug("%s: verifying source buffer...\n", current->comm); |
| 807 | error_count = dmatest_verify(thread->srcs, 0, src_off, | 825 | error_count = dmatest_verify(src->aligned, 0, src->off, |
| 808 | 0, PATTERN_SRC, true, is_memset); | 826 | 0, PATTERN_SRC, true, is_memset); |
| 809 | error_count += dmatest_verify(thread->srcs, src_off, | 827 | error_count += dmatest_verify(src->aligned, src->off, |
| 810 | src_off + len, src_off, | 828 | src->off + len, src->off, |
| 811 | PATTERN_SRC | PATTERN_COPY, true, is_memset); | 829 | PATTERN_SRC | PATTERN_COPY, true, is_memset); |
| 812 | error_count += dmatest_verify(thread->srcs, src_off + len, | 830 | error_count += dmatest_verify(src->aligned, src->off + len, |
| 813 | params->buf_size, src_off + len, | 831 | buf_size, src->off + len, |
| 814 | PATTERN_SRC, true, is_memset); | 832 | PATTERN_SRC, true, is_memset); |
| 815 | 833 | ||
| 816 | pr_debug("%s: verifying dest buffer...\n", current->comm); | 834 | pr_debug("%s: verifying dest buffer...\n", current->comm); |
| 817 | error_count += dmatest_verify(thread->dsts, 0, dst_off, | 835 | error_count += dmatest_verify(dst->aligned, 0, dst->off, |
| 818 | 0, PATTERN_DST, false, is_memset); | 836 | 0, PATTERN_DST, false, is_memset); |
| 819 | 837 | ||
| 820 | error_count += dmatest_verify(thread->dsts, dst_off, | 838 | error_count += dmatest_verify(dst->aligned, dst->off, |
| 821 | dst_off + len, src_off, | 839 | dst->off + len, src->off, |
| 822 | PATTERN_SRC | PATTERN_COPY, false, is_memset); | 840 | PATTERN_SRC | PATTERN_COPY, false, is_memset); |
| 823 | 841 | ||
| 824 | error_count += dmatest_verify(thread->dsts, dst_off + len, | 842 | error_count += dmatest_verify(dst->aligned, dst->off + len, |
| 825 | params->buf_size, dst_off + len, | 843 | buf_size, dst->off + len, |
| 826 | PATTERN_DST, false, is_memset); | 844 | PATTERN_DST, false, is_memset); |
| 827 | 845 | ||
| 828 | diff = ktime_sub(ktime_get(), start); | 846 | diff = ktime_sub(ktime_get(), start); |
| 829 | comparetime = ktime_add(comparetime, diff); | 847 | comparetime = ktime_add(comparetime, diff); |
| 830 | 848 | ||
| 831 | if (error_count) { | 849 | if (error_count) { |
| 832 | result("data error", total_tests, src_off, dst_off, | 850 | result("data error", total_tests, src->off, dst->off, |
| 833 | len, error_count); | 851 | len, error_count); |
| 834 | failed_tests++; | 852 | failed_tests++; |
| 835 | } else { | 853 | } else { |
| 836 | verbose_result("test passed", total_tests, src_off, | 854 | verbose_result("test passed", total_tests, src->off, |
| 837 | dst_off, len, 0); | 855 | dst->off, len, 0); |
| 838 | } | 856 | } |
| 839 | 857 | ||
| 840 | continue; | 858 | continue; |
| @@ -852,19 +870,10 @@ error_unmap_continue: | |||
| 852 | kfree(dma_pq); | 870 | kfree(dma_pq); |
| 853 | err_srcs_array: | 871 | err_srcs_array: |
| 854 | kfree(srcs); | 872 | kfree(srcs); |
| 855 | err_dstbuf: | 873 | err_dst: |
| 856 | for (i = 0; thread->udsts[i]; i++) | 874 | dmatest_free_test_data(dst); |
| 857 | kfree(thread->udsts[i]); | 875 | err_src: |
| 858 | kfree(thread->udsts); | 876 | dmatest_free_test_data(src); |
| 859 | err_udsts: | ||
| 860 | kfree(thread->dsts); | ||
| 861 | err_dsts: | ||
| 862 | err_srcbuf: | ||
| 863 | for (i = 0; thread->usrcs[i]; i++) | ||
| 864 | kfree(thread->usrcs[i]); | ||
| 865 | kfree(thread->usrcs); | ||
| 866 | err_usrcs: | ||
| 867 | kfree(thread->srcs); | ||
| 868 | err_free_coefs: | 877 | err_free_coefs: |
| 869 | kfree(pq_coefs); | 878 | kfree(pq_coefs); |
| 870 | err_thread_type: | 879 | err_thread_type: |
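
Note on the dmatest.c hunk above: the open-coded source/destination buffer setup (thread->srcs/usrcs, thread->dsts/udsts and the err_srcbuf/err_dstbuf labels) is replaced by dmatest_alloc_test_data()/dmatest_free_test_data() operating on a per-direction descriptor referenced as src->cnt, src->off and src->aligned. Those helpers and the descriptor type are introduced earlier in the same patch and are not visible in this hunk; the sketch below reconstructs their likely shape from the removed loops, so the struct name and the "raw" member are assumptions rather than quotes from the patch.

struct dmatest_data {
        u8              **raw;          /* kmalloc'ed buffers (was thread->usrcs / thread->udsts) */
        u8              **aligned;      /* PTR_ALIGN()'ed views used for DMA (was thread->srcs / thread->dsts) */
        unsigned int    cnt;            /* number of buffers (was src_cnt / dst_cnt) */
        unsigned int    off;            /* per-iteration offset (was src_off / dst_off) */
};

static void dmatest_free_test_data(struct dmatest_data *d)
{
        unsigned int i;

        if (!d->raw)
                return;

        for (i = 0; i < d->cnt; i++)
                kfree(d->raw[i]);

        kfree(d->aligned);
        kfree(d->raw);
}

static int dmatest_alloc_test_data(struct dmatest_data *d,
                                   unsigned int buf_size, u8 align)
{
        unsigned int i;

        d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
        if (!d->raw)
                return -ENOMEM;

        d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
        if (!d->aligned)
                goto err;

        for (i = 0; i < d->cnt; i++) {
                d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
                if (!d->raw[i])
                        goto err;

                /* align to the device's restriction, as the removed loops did */
                if (align)
                        d->aligned[i] = PTR_ALIGN(d->raw[i], align);
                else
                        d->aligned[i] = d->raw[i];
        }

        return 0;
err:
        dmatest_free_test_data(d);
        return -ENOMEM;
}

With helpers of that shape, the unwinding above collapses to the two labels err_dst and err_src shown in the new right-hand column.
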
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h index f8888dc0b8dc..18b6014cf9b4 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h | |||
| @@ -75,7 +75,7 @@ struct __packed axi_dma_lli { | |||
| 75 | __le32 sstat; | 75 | __le32 sstat; |
| 76 | __le32 dstat; | 76 | __le32 dstat; |
| 77 | __le32 status_lo; | 77 | __le32 status_lo; |
| 78 | __le32 ststus_hi; | 78 | __le32 status_hi; |
| 79 | __le32 reserved_lo; | 79 | __le32 reserved_lo; |
| 80 | __le32 reserved_hi; | 80 | __le32 reserved_hi; |
| 81 | }; | 81 | }; |
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index 04b9728c1d26..e5162690de8f 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig | |||
| @@ -1,3 +1,5 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | ||
| 2 | |||
| 1 | # | 3 | # |
| 2 | # DMA engine configuration for dw | 4 | # DMA engine configuration for dw |
| 3 | # | 5 | # |
diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile index 2b949c2e4504..63ed895c09aa 100644 --- a/drivers/dma/dw/Makefile +++ b/drivers/dma/dw/Makefile | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
| 2 | obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o | 2 | obj-$(CONFIG_DW_DMAC_CORE) += dw_dmac_core.o |
| 3 | dw_dmac_core-objs := core.o | 3 | dw_dmac_core-objs := core.o dw.o idma32.o |
| 4 | 4 | ||
| 5 | obj-$(CONFIG_DW_DMAC) += dw_dmac.o | 5 | obj-$(CONFIG_DW_DMAC) += dw_dmac.o |
| 6 | dw_dmac-objs := platform.o | 6 | dw_dmac-objs := platform.o |
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index dc053e62f894..21cb2a58dbd2 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
| @@ -1,13 +1,10 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * Core driver for the Synopsys DesignWare DMA Controller | 3 | * Core driver for the Synopsys DesignWare DMA Controller |
| 3 | * | 4 | * |
| 4 | * Copyright (C) 2007-2008 Atmel Corporation | 5 | * Copyright (C) 2007-2008 Atmel Corporation |
| 5 | * Copyright (C) 2010-2011 ST Microelectronics | 6 | * Copyright (C) 2010-2011 ST Microelectronics |
| 6 | * Copyright (C) 2013 Intel Corporation | 7 | * Copyright (C) 2013 Intel Corporation |
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | */ | 8 | */ |
| 12 | 9 | ||
| 13 | #include <linux/bitops.h> | 10 | #include <linux/bitops.h> |
| @@ -37,27 +34,6 @@ | |||
| 37 | * support descriptor writeback. | 34 | * support descriptor writeback. |
| 38 | */ | 35 | */ |
| 39 | 36 | ||
| 40 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ | ||
| 41 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ | ||
| 42 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ | ||
| 43 | bool _is_slave = is_slave_direction(_dwc->direction); \ | ||
| 44 | u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ | ||
| 45 | DW_DMA_MSIZE_16; \ | ||
| 46 | u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ | ||
| 47 | DW_DMA_MSIZE_16; \ | ||
| 48 | u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \ | ||
| 49 | _dwc->dws.p_master : _dwc->dws.m_master; \ | ||
| 50 | u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \ | ||
| 51 | _dwc->dws.p_master : _dwc->dws.m_master; \ | ||
| 52 | \ | ||
| 53 | (DWC_CTLL_DST_MSIZE(_dmsize) \ | ||
| 54 | | DWC_CTLL_SRC_MSIZE(_smsize) \ | ||
| 55 | | DWC_CTLL_LLP_D_EN \ | ||
| 56 | | DWC_CTLL_LLP_S_EN \ | ||
| 57 | | DWC_CTLL_DMS(_dms) \ | ||
| 58 | | DWC_CTLL_SMS(_sms)); \ | ||
| 59 | }) | ||
| 60 | |||
| 61 | /* The set of bus widths supported by the DMA controller */ | 37 | /* The set of bus widths supported by the DMA controller */ |
| 62 | #define DW_DMA_BUSWIDTHS \ | 38 | #define DW_DMA_BUSWIDTHS \ |
| 63 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ | 39 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ |
| @@ -138,44 +114,6 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | |||
| 138 | dwc->descs_allocated--; | 114 | dwc->descs_allocated--; |
| 139 | } | 115 | } |
| 140 | 116 | ||
| 141 | static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc) | ||
| 142 | { | ||
| 143 | u32 cfghi = 0; | ||
| 144 | u32 cfglo = 0; | ||
| 145 | |||
| 146 | /* Set default burst alignment */ | ||
| 147 | cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN; | ||
| 148 | |||
| 149 | /* Low 4 bits of the request lines */ | ||
| 150 | cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf); | ||
| 151 | cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf); | ||
| 152 | |||
| 153 | /* Request line extension (2 bits) */ | ||
| 154 | cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3); | ||
| 155 | cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3); | ||
| 156 | |||
| 157 | channel_writel(dwc, CFG_LO, cfglo); | ||
| 158 | channel_writel(dwc, CFG_HI, cfghi); | ||
| 159 | } | ||
| 160 | |||
| 161 | static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc) | ||
| 162 | { | ||
| 163 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
| 164 | u32 cfghi = DWC_CFGH_FIFO_MODE; | ||
| 165 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); | ||
| 166 | bool hs_polarity = dwc->dws.hs_polarity; | ||
| 167 | |||
| 168 | cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id); | ||
| 169 | cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id); | ||
| 170 | cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl); | ||
| 171 | |||
| 172 | /* Set polarity of handshake interface */ | ||
| 173 | cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0; | ||
| 174 | |||
| 175 | channel_writel(dwc, CFG_LO, cfglo); | ||
| 176 | channel_writel(dwc, CFG_HI, cfghi); | ||
| 177 | } | ||
| 178 | |||
| 179 | static void dwc_initialize(struct dw_dma_chan *dwc) | 117 | static void dwc_initialize(struct dw_dma_chan *dwc) |
| 180 | { | 118 | { |
| 181 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 119 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
| @@ -183,10 +121,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
| 183 | if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) | 121 | if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags)) |
| 184 | return; | 122 | return; |
| 185 | 123 | ||
| 186 | if (dw->pdata->is_idma32) | 124 | dw->initialize_chan(dwc); |
| 187 | dwc_initialize_chan_idma32(dwc); | ||
| 188 | else | ||
| 189 | dwc_initialize_chan_dw(dwc); | ||
| 190 | 125 | ||
| 191 | /* Enable interrupts */ | 126 | /* Enable interrupts */ |
| 192 | channel_set_bit(dw, MASK.XFER, dwc->mask); | 127 | channel_set_bit(dw, MASK.XFER, dwc->mask); |
| @@ -215,37 +150,6 @@ static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
| 215 | cpu_relax(); | 150 | cpu_relax(); |
| 216 | } | 151 | } |
| 217 | 152 | ||
| 218 | static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes, | ||
| 219 | unsigned int width, size_t *len) | ||
| 220 | { | ||
| 221 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
| 222 | u32 block; | ||
| 223 | |||
| 224 | /* Always in bytes for iDMA 32-bit */ | ||
| 225 | if (dw->pdata->is_idma32) | ||
| 226 | width = 0; | ||
| 227 | |||
| 228 | if ((bytes >> width) > dwc->block_size) { | ||
| 229 | block = dwc->block_size; | ||
| 230 | *len = block << width; | ||
| 231 | } else { | ||
| 232 | block = bytes >> width; | ||
| 233 | *len = bytes; | ||
| 234 | } | ||
| 235 | |||
| 236 | return block; | ||
| 237 | } | ||
| 238 | |||
| 239 | static size_t block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width) | ||
| 240 | { | ||
| 241 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
| 242 | |||
| 243 | if (dw->pdata->is_idma32) | ||
| 244 | return IDMA32C_CTLH_BLOCK_TS(block); | ||
| 245 | |||
| 246 | return DWC_CTLH_BLOCK_TS(block) << width; | ||
| 247 | } | ||
| 248 | |||
| 249 | /*----------------------------------------------------------------------*/ | 153 | /*----------------------------------------------------------------------*/ |
| 250 | 154 | ||
| 251 | /* Perform single block transfer */ | 155 | /* Perform single block transfer */ |
| @@ -391,10 +295,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
| 391 | /* Returns how many bytes were already received from source */ | 295 | /* Returns how many bytes were already received from source */ |
| 392 | static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) | 296 | static inline u32 dwc_get_sent(struct dw_dma_chan *dwc) |
| 393 | { | 297 | { |
| 298 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
| 394 | u32 ctlhi = channel_readl(dwc, CTL_HI); | 299 | u32 ctlhi = channel_readl(dwc, CTL_HI); |
| 395 | u32 ctllo = channel_readl(dwc, CTL_LO); | 300 | u32 ctllo = channel_readl(dwc, CTL_LO); |
| 396 | 301 | ||
| 397 | return block2bytes(dwc, ctlhi, ctllo >> 4 & 7); | 302 | return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7); |
| 398 | } | 303 | } |
| 399 | 304 | ||
| 400 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | 305 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) |
| @@ -651,7 +556,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
| 651 | unsigned int src_width; | 556 | unsigned int src_width; |
| 652 | unsigned int dst_width; | 557 | unsigned int dst_width; |
| 653 | unsigned int data_width = dw->pdata->data_width[m_master]; | 558 | unsigned int data_width = dw->pdata->data_width[m_master]; |
| 654 | u32 ctllo; | 559 | u32 ctllo, ctlhi; |
| 655 | u8 lms = DWC_LLP_LMS(m_master); | 560 | u8 lms = DWC_LLP_LMS(m_master); |
| 656 | 561 | ||
| 657 | dev_vdbg(chan2dev(chan), | 562 | dev_vdbg(chan2dev(chan), |
| @@ -667,7 +572,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
| 667 | 572 | ||
| 668 | src_width = dst_width = __ffs(data_width | src | dest | len); | 573 | src_width = dst_width = __ffs(data_width | src | dest | len); |
| 669 | 574 | ||
| 670 | ctllo = DWC_DEFAULT_CTLLO(chan) | 575 | ctllo = dw->prepare_ctllo(dwc) |
| 671 | | DWC_CTLL_DST_WIDTH(dst_width) | 576 | | DWC_CTLL_DST_WIDTH(dst_width) |
| 672 | | DWC_CTLL_SRC_WIDTH(src_width) | 577 | | DWC_CTLL_SRC_WIDTH(src_width) |
| 673 | | DWC_CTLL_DST_INC | 578 | | DWC_CTLL_DST_INC |
| @@ -680,10 +585,12 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
| 680 | if (!desc) | 585 | if (!desc) |
| 681 | goto err_desc_get; | 586 | goto err_desc_get; |
| 682 | 587 | ||
| 588 | ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count); | ||
| 589 | |||
| 683 | lli_write(desc, sar, src + offset); | 590 | lli_write(desc, sar, src + offset); |
| 684 | lli_write(desc, dar, dest + offset); | 591 | lli_write(desc, dar, dest + offset); |
| 685 | lli_write(desc, ctllo, ctllo); | 592 | lli_write(desc, ctllo, ctllo); |
| 686 | lli_write(desc, ctlhi, bytes2block(dwc, len - offset, src_width, &xfer_count)); | 593 | lli_write(desc, ctlhi, ctlhi); |
| 687 | desc->len = xfer_count; | 594 | desc->len = xfer_count; |
| 688 | 595 | ||
| 689 | if (!first) { | 596 | if (!first) { |
| @@ -721,7 +628,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 721 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; | 628 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
| 722 | struct dw_desc *prev; | 629 | struct dw_desc *prev; |
| 723 | struct dw_desc *first; | 630 | struct dw_desc *first; |
| 724 | u32 ctllo; | 631 | u32 ctllo, ctlhi; |
| 725 | u8 m_master = dwc->dws.m_master; | 632 | u8 m_master = dwc->dws.m_master; |
| 726 | u8 lms = DWC_LLP_LMS(m_master); | 633 | u8 lms = DWC_LLP_LMS(m_master); |
| 727 | dma_addr_t reg; | 634 | dma_addr_t reg; |
| @@ -745,10 +652,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 745 | case DMA_MEM_TO_DEV: | 652 | case DMA_MEM_TO_DEV: |
| 746 | reg_width = __ffs(sconfig->dst_addr_width); | 653 | reg_width = __ffs(sconfig->dst_addr_width); |
| 747 | reg = sconfig->dst_addr; | 654 | reg = sconfig->dst_addr; |
| 748 | ctllo = (DWC_DEFAULT_CTLLO(chan) | 655 | ctllo = dw->prepare_ctllo(dwc) |
| 749 | | DWC_CTLL_DST_WIDTH(reg_width) | 656 | | DWC_CTLL_DST_WIDTH(reg_width) |
| 750 | | DWC_CTLL_DST_FIX | 657 | | DWC_CTLL_DST_FIX |
| 751 | | DWC_CTLL_SRC_INC); | 658 | | DWC_CTLL_SRC_INC; |
| 752 | 659 | ||
| 753 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | 660 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : |
| 754 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | 661 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); |
| @@ -768,9 +675,11 @@ slave_sg_todev_fill_desc: | |||
| 768 | if (!desc) | 675 | if (!desc) |
| 769 | goto err_desc_get; | 676 | goto err_desc_get; |
| 770 | 677 | ||
| 678 | ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen); | ||
| 679 | |||
| 771 | lli_write(desc, sar, mem); | 680 | lli_write(desc, sar, mem); |
| 772 | lli_write(desc, dar, reg); | 681 | lli_write(desc, dar, reg); |
| 773 | lli_write(desc, ctlhi, bytes2block(dwc, len, mem_width, &dlen)); | 682 | lli_write(desc, ctlhi, ctlhi); |
| 774 | lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); | 683 | lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width)); |
| 775 | desc->len = dlen; | 684 | desc->len = dlen; |
| 776 | 685 | ||
| @@ -793,10 +702,10 @@ slave_sg_todev_fill_desc: | |||
| 793 | case DMA_DEV_TO_MEM: | 702 | case DMA_DEV_TO_MEM: |
| 794 | reg_width = __ffs(sconfig->src_addr_width); | 703 | reg_width = __ffs(sconfig->src_addr_width); |
| 795 | reg = sconfig->src_addr; | 704 | reg = sconfig->src_addr; |
| 796 | ctllo = (DWC_DEFAULT_CTLLO(chan) | 705 | ctllo = dw->prepare_ctllo(dwc) |
| 797 | | DWC_CTLL_SRC_WIDTH(reg_width) | 706 | | DWC_CTLL_SRC_WIDTH(reg_width) |
| 798 | | DWC_CTLL_DST_INC | 707 | | DWC_CTLL_DST_INC |
| 799 | | DWC_CTLL_SRC_FIX); | 708 | | DWC_CTLL_SRC_FIX; |
| 800 | 709 | ||
| 801 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | 710 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : |
| 802 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | 711 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); |
| @@ -814,9 +723,11 @@ slave_sg_fromdev_fill_desc: | |||
| 814 | if (!desc) | 723 | if (!desc) |
| 815 | goto err_desc_get; | 724 | goto err_desc_get; |
| 816 | 725 | ||
| 726 | ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen); | ||
| 727 | |||
| 817 | lli_write(desc, sar, reg); | 728 | lli_write(desc, sar, reg); |
| 818 | lli_write(desc, dar, mem); | 729 | lli_write(desc, dar, mem); |
| 819 | lli_write(desc, ctlhi, bytes2block(dwc, len, reg_width, &dlen)); | 730 | lli_write(desc, ctlhi, ctlhi); |
| 820 | mem_width = __ffs(data_width | mem | dlen); | 731 | mem_width = __ffs(data_width | mem | dlen); |
| 821 | lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); | 732 | lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width)); |
| 822 | desc->len = dlen; | 733 | desc->len = dlen; |
| @@ -876,22 +787,12 @@ EXPORT_SYMBOL_GPL(dw_dma_filter); | |||
| 876 | static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | 787 | static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) |
| 877 | { | 788 | { |
| 878 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 789 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
| 879 | struct dma_slave_config *sc = &dwc->dma_sconfig; | ||
| 880 | struct dw_dma *dw = to_dw_dma(chan->device); | 790 | struct dw_dma *dw = to_dw_dma(chan->device); |
| 881 | /* | ||
| 882 | * Fix sconfig's burst size according to dw_dmac. We need to convert | ||
| 883 | * them as: | ||
| 884 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | ||
| 885 | * | ||
| 886 | * NOTE: burst size 2 is not supported by DesignWare controller. | ||
| 887 | * iDMA 32-bit supports it. | ||
| 888 | */ | ||
| 889 | u32 s = dw->pdata->is_idma32 ? 1 : 2; | ||
| 890 | 791 | ||
| 891 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); | 792 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); |
| 892 | 793 | ||
| 893 | sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0; | 794 | dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst); |
| 894 | sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0; | 795 | dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst); |
| 895 | 796 | ||
| 896 | return 0; | 797 | return 0; |
| 897 | } | 798 | } |
| @@ -900,16 +801,9 @@ static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain) | |||
| 900 | { | 801 | { |
| 901 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 802 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
| 902 | unsigned int count = 20; /* timeout iterations */ | 803 | unsigned int count = 20; /* timeout iterations */ |
| 903 | u32 cfglo; | ||
| 904 | 804 | ||
| 905 | cfglo = channel_readl(dwc, CFG_LO); | 805 | dw->suspend_chan(dwc, drain); |
| 906 | if (dw->pdata->is_idma32) { | 806 | |
| 907 | if (drain) | ||
| 908 | cfglo |= IDMA32C_CFGL_CH_DRAIN; | ||
| 909 | else | ||
| 910 | cfglo &= ~IDMA32C_CFGL_CH_DRAIN; | ||
| 911 | } | ||
| 912 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); | ||
| 913 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) | 807 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) |
| 914 | udelay(2); | 808 | udelay(2); |
| 915 | 809 | ||
| @@ -928,11 +822,11 @@ static int dwc_pause(struct dma_chan *chan) | |||
| 928 | return 0; | 822 | return 0; |
| 929 | } | 823 | } |
| 930 | 824 | ||
| 931 | static inline void dwc_chan_resume(struct dw_dma_chan *dwc) | 825 | static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain) |
| 932 | { | 826 | { |
| 933 | u32 cfglo = channel_readl(dwc, CFG_LO); | 827 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
| 934 | 828 | ||
| 935 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | 829 | dw->resume_chan(dwc, drain); |
| 936 | 830 | ||
| 937 | clear_bit(DW_DMA_IS_PAUSED, &dwc->flags); | 831 | clear_bit(DW_DMA_IS_PAUSED, &dwc->flags); |
| 938 | } | 832 | } |
| @@ -945,7 +839,7 @@ static int dwc_resume(struct dma_chan *chan) | |||
| 945 | spin_lock_irqsave(&dwc->lock, flags); | 839 | spin_lock_irqsave(&dwc->lock, flags); |
| 946 | 840 | ||
| 947 | if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags)) | 841 | if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags)) |
| 948 | dwc_chan_resume(dwc); | 842 | dwc_chan_resume(dwc, false); |
| 949 | 843 | ||
| 950 | spin_unlock_irqrestore(&dwc->lock, flags); | 844 | spin_unlock_irqrestore(&dwc->lock, flags); |
| 951 | 845 | ||
| @@ -968,7 +862,7 @@ static int dwc_terminate_all(struct dma_chan *chan) | |||
| 968 | 862 | ||
| 969 | dwc_chan_disable(dw, dwc); | 863 | dwc_chan_disable(dw, dwc); |
| 970 | 864 | ||
| 971 | dwc_chan_resume(dwc); | 865 | dwc_chan_resume(dwc, true); |
| 972 | 866 | ||
| 973 | /* active_list entries will end up before queued entries */ | 867 | /* active_list entries will end up before queued entries */ |
| 974 | list_splice_init(&dwc->queue, &list); | 868 | list_splice_init(&dwc->queue, &list); |
| @@ -1058,33 +952,7 @@ static void dwc_issue_pending(struct dma_chan *chan) | |||
| 1058 | 952 | ||
| 1059 | /*----------------------------------------------------------------------*/ | 953 | /*----------------------------------------------------------------------*/ |
| 1060 | 954 | ||
| 1061 | /* | 955 | void do_dw_dma_off(struct dw_dma *dw) |
| 1062 | * Program FIFO size of channels. | ||
| 1063 | * | ||
| 1064 | * By default full FIFO (512 bytes) is assigned to channel 0. Here we | ||
| 1065 | * slice FIFO on equal parts between channels. | ||
| 1066 | */ | ||
| 1067 | static void idma32_fifo_partition(struct dw_dma *dw) | ||
| 1068 | { | ||
| 1069 | u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) | | ||
| 1070 | IDMA32C_FP_UPDATE; | ||
| 1071 | u64 fifo_partition = 0; | ||
| 1072 | |||
| 1073 | if (!dw->pdata->is_idma32) | ||
| 1074 | return; | ||
| 1075 | |||
| 1076 | /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */ | ||
| 1077 | fifo_partition |= value << 0; | ||
| 1078 | |||
| 1079 | /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */ | ||
| 1080 | fifo_partition |= value << 32; | ||
| 1081 | |||
| 1082 | /* Program FIFO Partition registers - 64 bytes per channel */ | ||
| 1083 | idma32_writeq(dw, FIFO_PARTITION1, fifo_partition); | ||
| 1084 | idma32_writeq(dw, FIFO_PARTITION0, fifo_partition); | ||
| 1085 | } | ||
| 1086 | |||
| 1087 | static void dw_dma_off(struct dw_dma *dw) | ||
| 1088 | { | 956 | { |
| 1089 | unsigned int i; | 957 | unsigned int i; |
| 1090 | 958 | ||
| @@ -1103,7 +971,7 @@ static void dw_dma_off(struct dw_dma *dw) | |||
| 1103 | clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); | 971 | clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags); |
| 1104 | } | 972 | } |
| 1105 | 973 | ||
| 1106 | static void dw_dma_on(struct dw_dma *dw) | 974 | void do_dw_dma_on(struct dw_dma *dw) |
| 1107 | { | 975 | { |
| 1108 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 976 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
| 1109 | } | 977 | } |
| @@ -1139,7 +1007,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
| 1139 | 1007 | ||
| 1140 | /* Enable controller here if needed */ | 1008 | /* Enable controller here if needed */ |
| 1141 | if (!dw->in_use) | 1009 | if (!dw->in_use) |
| 1142 | dw_dma_on(dw); | 1010 | do_dw_dma_on(dw); |
| 1143 | dw->in_use |= dwc->mask; | 1011 | dw->in_use |= dwc->mask; |
| 1144 | 1012 | ||
| 1145 | return 0; | 1013 | return 0; |
| @@ -1150,7 +1018,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
| 1150 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1018 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
| 1151 | struct dw_dma *dw = to_dw_dma(chan->device); | 1019 | struct dw_dma *dw = to_dw_dma(chan->device); |
| 1152 | unsigned long flags; | 1020 | unsigned long flags; |
| 1153 | LIST_HEAD(list); | ||
| 1154 | 1021 | ||
| 1155 | dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, | 1022 | dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, |
| 1156 | dwc->descs_allocated); | 1023 | dwc->descs_allocated); |
| @@ -1177,30 +1044,25 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
| 1177 | /* Disable controller in case it was a last user */ | 1044 | /* Disable controller in case it was a last user */ |
| 1178 | dw->in_use &= ~dwc->mask; | 1045 | dw->in_use &= ~dwc->mask; |
| 1179 | if (!dw->in_use) | 1046 | if (!dw->in_use) |
| 1180 | dw_dma_off(dw); | 1047 | do_dw_dma_off(dw); |
| 1181 | 1048 | ||
| 1182 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); | 1049 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
| 1183 | } | 1050 | } |
| 1184 | 1051 | ||
| 1185 | int dw_dma_probe(struct dw_dma_chip *chip) | 1052 | int do_dma_probe(struct dw_dma_chip *chip) |
| 1186 | { | 1053 | { |
| 1054 | struct dw_dma *dw = chip->dw; | ||
| 1187 | struct dw_dma_platform_data *pdata; | 1055 | struct dw_dma_platform_data *pdata; |
| 1188 | struct dw_dma *dw; | ||
| 1189 | bool autocfg = false; | 1056 | bool autocfg = false; |
| 1190 | unsigned int dw_params; | 1057 | unsigned int dw_params; |
| 1191 | unsigned int i; | 1058 | unsigned int i; |
| 1192 | int err; | 1059 | int err; |
| 1193 | 1060 | ||
| 1194 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); | ||
| 1195 | if (!dw) | ||
| 1196 | return -ENOMEM; | ||
| 1197 | |||
| 1198 | dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL); | 1061 | dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL); |
| 1199 | if (!dw->pdata) | 1062 | if (!dw->pdata) |
| 1200 | return -ENOMEM; | 1063 | return -ENOMEM; |
| 1201 | 1064 | ||
| 1202 | dw->regs = chip->regs; | 1065 | dw->regs = chip->regs; |
| 1203 | chip->dw = dw; | ||
| 1204 | 1066 | ||
| 1205 | pm_runtime_get_sync(chip->dev); | 1067 | pm_runtime_get_sync(chip->dev); |
| 1206 | 1068 | ||
| @@ -1227,8 +1089,6 @@ int dw_dma_probe(struct dw_dma_chip *chip) | |||
| 1227 | pdata->block_size = dma_readl(dw, MAX_BLK_SIZE); | 1089 | pdata->block_size = dma_readl(dw, MAX_BLK_SIZE); |
| 1228 | 1090 | ||
| 1229 | /* Fill platform data with the default values */ | 1091 | /* Fill platform data with the default values */ |
| 1230 | pdata->is_private = true; | ||
| 1231 | pdata->is_memcpy = true; | ||
| 1232 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; | 1092 | pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; |
| 1233 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; | 1093 | pdata->chan_priority = CHAN_PRIORITY_ASCENDING; |
| 1234 | } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { | 1094 | } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { |
| @@ -1252,15 +1112,10 @@ int dw_dma_probe(struct dw_dma_chip *chip) | |||
| 1252 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | 1112 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; |
| 1253 | 1113 | ||
| 1254 | /* Force dma off, just in case */ | 1114 | /* Force dma off, just in case */ |
| 1255 | dw_dma_off(dw); | 1115 | dw->disable(dw); |
| 1256 | |||
| 1257 | idma32_fifo_partition(dw); | ||
| 1258 | 1116 | ||
| 1259 | /* Device and instance ID for IRQ and DMA pool */ | 1117 | /* Device and instance ID for IRQ and DMA pool */ |
| 1260 | if (pdata->is_idma32) | 1118 | dw->set_device_name(dw, chip->id); |
| 1261 | snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", chip->id); | ||
| 1262 | else | ||
| 1263 | snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id); | ||
| 1264 | 1119 | ||
| 1265 | /* Create a pool of consistent memory blocks for hardware descriptors */ | 1120 | /* Create a pool of consistent memory blocks for hardware descriptors */ |
| 1266 | dw->desc_pool = dmam_pool_create(dw->name, chip->dev, | 1121 | dw->desc_pool = dmam_pool_create(dw->name, chip->dev, |
| @@ -1340,10 +1195,8 @@ int dw_dma_probe(struct dw_dma_chip *chip) | |||
| 1340 | 1195 | ||
| 1341 | /* Set capabilities */ | 1196 | /* Set capabilities */ |
| 1342 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | 1197 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
| 1343 | if (pdata->is_private) | 1198 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); |
| 1344 | dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); | 1199 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); |
| 1345 | if (pdata->is_memcpy) | ||
| 1346 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | ||
| 1347 | 1200 | ||
| 1348 | dw->dma.dev = chip->dev; | 1201 | dw->dma.dev = chip->dev; |
| 1349 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; | 1202 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; |
| @@ -1384,16 +1237,15 @@ err_pdata: | |||
| 1384 | pm_runtime_put_sync_suspend(chip->dev); | 1237 | pm_runtime_put_sync_suspend(chip->dev); |
| 1385 | return err; | 1238 | return err; |
| 1386 | } | 1239 | } |
| 1387 | EXPORT_SYMBOL_GPL(dw_dma_probe); | ||
| 1388 | 1240 | ||
| 1389 | int dw_dma_remove(struct dw_dma_chip *chip) | 1241 | int do_dma_remove(struct dw_dma_chip *chip) |
| 1390 | { | 1242 | { |
| 1391 | struct dw_dma *dw = chip->dw; | 1243 | struct dw_dma *dw = chip->dw; |
| 1392 | struct dw_dma_chan *dwc, *_dwc; | 1244 | struct dw_dma_chan *dwc, *_dwc; |
| 1393 | 1245 | ||
| 1394 | pm_runtime_get_sync(chip->dev); | 1246 | pm_runtime_get_sync(chip->dev); |
| 1395 | 1247 | ||
| 1396 | dw_dma_off(dw); | 1248 | do_dw_dma_off(dw); |
| 1397 | dma_async_device_unregister(&dw->dma); | 1249 | dma_async_device_unregister(&dw->dma); |
| 1398 | 1250 | ||
| 1399 | free_irq(chip->irq, dw); | 1251 | free_irq(chip->irq, dw); |
| @@ -1408,27 +1260,24 @@ int dw_dma_remove(struct dw_dma_chip *chip) | |||
| 1408 | pm_runtime_put_sync_suspend(chip->dev); | 1260 | pm_runtime_put_sync_suspend(chip->dev); |
| 1409 | return 0; | 1261 | return 0; |
| 1410 | } | 1262 | } |
| 1411 | EXPORT_SYMBOL_GPL(dw_dma_remove); | ||
| 1412 | 1263 | ||
| 1413 | int dw_dma_disable(struct dw_dma_chip *chip) | 1264 | int do_dw_dma_disable(struct dw_dma_chip *chip) |
| 1414 | { | 1265 | { |
| 1415 | struct dw_dma *dw = chip->dw; | 1266 | struct dw_dma *dw = chip->dw; |
| 1416 | 1267 | ||
| 1417 | dw_dma_off(dw); | 1268 | dw->disable(dw); |
| 1418 | return 0; | 1269 | return 0; |
| 1419 | } | 1270 | } |
| 1420 | EXPORT_SYMBOL_GPL(dw_dma_disable); | 1271 | EXPORT_SYMBOL_GPL(do_dw_dma_disable); |
| 1421 | 1272 | ||
| 1422 | int dw_dma_enable(struct dw_dma_chip *chip) | 1273 | int do_dw_dma_enable(struct dw_dma_chip *chip) |
| 1423 | { | 1274 | { |
| 1424 | struct dw_dma *dw = chip->dw; | 1275 | struct dw_dma *dw = chip->dw; |
| 1425 | 1276 | ||
| 1426 | idma32_fifo_partition(dw); | 1277 | dw->enable(dw); |
| 1427 | |||
| 1428 | dw_dma_on(dw); | ||
| 1429 | return 0; | 1278 | return 0; |
| 1430 | } | 1279 | } |
| 1431 | EXPORT_SYMBOL_GPL(dw_dma_enable); | 1280 | EXPORT_SYMBOL_GPL(do_dw_dma_enable); |
| 1432 | 1281 | ||
| 1433 | MODULE_LICENSE("GPL v2"); | 1282 | MODULE_LICENSE("GPL v2"); |
| 1434 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); | 1283 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver"); |
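
Note on the core.c changes above: every pdata->is_idma32 branch is gone, and core.c now dispatches through per-controller callbacks hung off struct dw_dma (dw->initialize_chan(), dw->suspend_chan(), dw->bytes2block(), and so on). The member declarations live in regs.h, which is not part of this excerpt; the sketch below only restates the operations as they can be inferred from the call sites above and from the assignments in dw.c and idma32.c further down, so treat the exact layout as an assumption.

struct dw_dma {
        /* ... existing members (dma, name, regs, desc_pool, chan, pdata, ...) ... */

        /* Channel operations */
        void    (*initialize_chan)(struct dw_dma_chan *dwc);
        void    (*suspend_chan)(struct dw_dma_chan *dwc, bool drain);
        void    (*resume_chan)(struct dw_dma_chan *dwc, bool drain);
        u32     (*prepare_ctllo)(struct dw_dma_chan *dwc);
        void    (*encode_maxburst)(struct dw_dma_chan *dwc, u32 *maxburst);
        u32     (*bytes2block)(struct dw_dma_chan *dwc, size_t bytes,
                               unsigned int width, size_t *len);
        size_t  (*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width);

        /* Device operations */
        void    (*set_device_name)(struct dw_dma *dw, int id);
        void    (*disable)(struct dw_dma *dw);
        void    (*enable)(struct dw_dma *dw);
};

dw_dma_probe() and idma32_dma_probe() allocate the struct, fill these pointers and then call the shared do_dma_probe(), which is why core.c no longer allocates struct dw_dma itself.
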
diff --git a/drivers/dma/dw/dw.c b/drivers/dma/dw/dw.c new file mode 100644 index 000000000000..7a085b3c1854 --- /dev/null +++ b/drivers/dma/dw/dw.c | |||
| @@ -0,0 +1,138 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2007-2008 Atmel Corporation | ||
| 3 | // Copyright (C) 2010-2011 ST Microelectronics | ||
| 4 | // Copyright (C) 2013,2018 Intel Corporation | ||
| 5 | |||
| 6 | #include <linux/bitops.h> | ||
| 7 | #include <linux/dmaengine.h> | ||
| 8 | #include <linux/errno.h> | ||
| 9 | #include <linux/slab.h> | ||
| 10 | #include <linux/types.h> | ||
| 11 | |||
| 12 | #include "internal.h" | ||
| 13 | |||
| 14 | static void dw_dma_initialize_chan(struct dw_dma_chan *dwc) | ||
| 15 | { | ||
| 16 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
| 17 | u32 cfghi = DWC_CFGH_FIFO_MODE; | ||
| 18 | u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); | ||
| 19 | bool hs_polarity = dwc->dws.hs_polarity; | ||
| 20 | |||
| 21 | cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id); | ||
| 22 | cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id); | ||
| 23 | cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl); | ||
| 24 | |||
| 25 | /* Set polarity of handshake interface */ | ||
| 26 | cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0; | ||
| 27 | |||
| 28 | channel_writel(dwc, CFG_LO, cfglo); | ||
| 29 | channel_writel(dwc, CFG_HI, cfghi); | ||
| 30 | } | ||
| 31 | |||
| 32 | static void dw_dma_suspend_chan(struct dw_dma_chan *dwc, bool drain) | ||
| 33 | { | ||
| 34 | u32 cfglo = channel_readl(dwc, CFG_LO); | ||
| 35 | |||
| 36 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); | ||
| 37 | } | ||
| 38 | |||
| 39 | static void dw_dma_resume_chan(struct dw_dma_chan *dwc, bool drain) | ||
| 40 | { | ||
| 41 | u32 cfglo = channel_readl(dwc, CFG_LO); | ||
| 42 | |||
| 43 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | ||
| 44 | } | ||
| 45 | |||
| 46 | static u32 dw_dma_bytes2block(struct dw_dma_chan *dwc, | ||
| 47 | size_t bytes, unsigned int width, size_t *len) | ||
| 48 | { | ||
| 49 | u32 block; | ||
| 50 | |||
| 51 | if ((bytes >> width) > dwc->block_size) { | ||
| 52 | block = dwc->block_size; | ||
| 53 | *len = dwc->block_size << width; | ||
| 54 | } else { | ||
| 55 | block = bytes >> width; | ||
| 56 | *len = bytes; | ||
| 57 | } | ||
| 58 | |||
| 59 | return block; | ||
| 60 | } | ||
| 61 | |||
| 62 | static size_t dw_dma_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width) | ||
| 63 | { | ||
| 64 | return DWC_CTLH_BLOCK_TS(block) << width; | ||
| 65 | } | ||
| 66 | |||
| 67 | static u32 dw_dma_prepare_ctllo(struct dw_dma_chan *dwc) | ||
| 68 | { | ||
| 69 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; | ||
| 70 | bool is_slave = is_slave_direction(dwc->direction); | ||
| 71 | u8 smsize = is_slave ? sconfig->src_maxburst : DW_DMA_MSIZE_16; | ||
| 72 | u8 dmsize = is_slave ? sconfig->dst_maxburst : DW_DMA_MSIZE_16; | ||
| 73 | u8 p_master = dwc->dws.p_master; | ||
| 74 | u8 m_master = dwc->dws.m_master; | ||
| 75 | u8 dms = (dwc->direction == DMA_MEM_TO_DEV) ? p_master : m_master; | ||
| 76 | u8 sms = (dwc->direction == DMA_DEV_TO_MEM) ? p_master : m_master; | ||
| 77 | |||
| 78 | return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN | | ||
| 79 | DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize) | | ||
| 80 | DWC_CTLL_DMS(dms) | DWC_CTLL_SMS(sms); | ||
| 81 | } | ||
| 82 | |||
| 83 | static void dw_dma_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst) | ||
| 84 | { | ||
| 85 | /* | ||
| 86 | * Fix burst size according to dw_dmac. We need to convert them as: | ||
| 87 | * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. | ||
| 88 | */ | ||
| 89 | *maxburst = *maxburst > 1 ? fls(*maxburst) - 2 : 0; | ||
| 90 | } | ||
| 91 | |||
| 92 | static void dw_dma_set_device_name(struct dw_dma *dw, int id) | ||
| 93 | { | ||
| 94 | snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", id); | ||
| 95 | } | ||
| 96 | |||
| 97 | static void dw_dma_disable(struct dw_dma *dw) | ||
| 98 | { | ||
| 99 | do_dw_dma_off(dw); | ||
| 100 | } | ||
| 101 | |||
| 102 | static void dw_dma_enable(struct dw_dma *dw) | ||
| 103 | { | ||
| 104 | do_dw_dma_on(dw); | ||
| 105 | } | ||
| 106 | |||
| 107 | int dw_dma_probe(struct dw_dma_chip *chip) | ||
| 108 | { | ||
| 109 | struct dw_dma *dw; | ||
| 110 | |||
| 111 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); | ||
| 112 | if (!dw) | ||
| 113 | return -ENOMEM; | ||
| 114 | |||
| 115 | /* Channel operations */ | ||
| 116 | dw->initialize_chan = dw_dma_initialize_chan; | ||
| 117 | dw->suspend_chan = dw_dma_suspend_chan; | ||
| 118 | dw->resume_chan = dw_dma_resume_chan; | ||
| 119 | dw->prepare_ctllo = dw_dma_prepare_ctllo; | ||
| 120 | dw->encode_maxburst = dw_dma_encode_maxburst; | ||
| 121 | dw->bytes2block = dw_dma_bytes2block; | ||
| 122 | dw->block2bytes = dw_dma_block2bytes; | ||
| 123 | |||
| 124 | /* Device operations */ | ||
| 125 | dw->set_device_name = dw_dma_set_device_name; | ||
| 126 | dw->disable = dw_dma_disable; | ||
| 127 | dw->enable = dw_dma_enable; | ||
| 128 | |||
| 129 | chip->dw = dw; | ||
| 130 | return do_dma_probe(chip); | ||
| 131 | } | ||
| 132 | EXPORT_SYMBOL_GPL(dw_dma_probe); | ||
| 133 | |||
| 134 | int dw_dma_remove(struct dw_dma_chip *chip) | ||
| 135 | { | ||
| 136 | return do_dma_remove(chip); | ||
| 137 | } | ||
| 138 | EXPORT_SYMBOL_GPL(dw_dma_remove); | ||
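
dw_dma_encode_maxburst() above keeps the conversion that used to live in dwc_config(): the DesignWare controller encodes burst lengths as 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3 (a burst of 2 is not supported), i.e. fls(maxburst) - 2 for values above 1, while the iDMA 32-bit variant in the next file subtracts only 1 because it does support a burst of 2. A minimal user-space check of that arithmetic; fls_demo() is a stand-in for the kernel's fls() and is an assumption of this sketch, not driver code:

#include <stdio.h>

static unsigned int fls_demo(unsigned int x)    /* mimics the kernel's fls() */
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int bursts[] = { 1, 4, 8, 16 };
        unsigned int i;

        for (i = 0; i < 4; i++) {
                unsigned int b = bursts[i];
                unsigned int dw  = b > 1 ? fls_demo(b) - 2 : 0; /* dw_dma_encode_maxburst()     */
                unsigned int i32 = b > 1 ? fls_demo(b) - 1 : 0; /* idma32_encode_maxburst()     */

                printf("maxburst %2u -> dw %u, idma32 %u\n", b, dw, i32);
        }
        return 0;
}

This prints dw encodings 0, 1, 2, 3 for bursts of 1, 4, 8, 16, matching the table in the comment, and one-larger values for iDMA 32-bit.
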
diff --git a/drivers/dma/dw/idma32.c b/drivers/dma/dw/idma32.c new file mode 100644 index 000000000000..f00657308811 --- /dev/null +++ b/drivers/dma/dw/idma32.c | |||
| @@ -0,0 +1,160 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (C) 2013,2018 Intel Corporation | ||
| 3 | |||
| 4 | #include <linux/bitops.h> | ||
| 5 | #include <linux/dmaengine.h> | ||
| 6 | #include <linux/errno.h> | ||
| 7 | #include <linux/slab.h> | ||
| 8 | #include <linux/types.h> | ||
| 9 | |||
| 10 | #include "internal.h" | ||
| 11 | |||
| 12 | static void idma32_initialize_chan(struct dw_dma_chan *dwc) | ||
| 13 | { | ||
| 14 | u32 cfghi = 0; | ||
| 15 | u32 cfglo = 0; | ||
| 16 | |||
| 17 | /* Set default burst alignment */ | ||
| 18 | cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN; | ||
| 19 | |||
| 20 | /* Low 4 bits of the request lines */ | ||
| 21 | cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf); | ||
| 22 | cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf); | ||
| 23 | |||
| 24 | /* Request line extension (2 bits) */ | ||
| 25 | cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3); | ||
| 26 | cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3); | ||
| 27 | |||
| 28 | channel_writel(dwc, CFG_LO, cfglo); | ||
| 29 | channel_writel(dwc, CFG_HI, cfghi); | ||
| 30 | } | ||
| 31 | |||
| 32 | static void idma32_suspend_chan(struct dw_dma_chan *dwc, bool drain) | ||
| 33 | { | ||
| 34 | u32 cfglo = channel_readl(dwc, CFG_LO); | ||
| 35 | |||
| 36 | if (drain) | ||
| 37 | cfglo |= IDMA32C_CFGL_CH_DRAIN; | ||
| 38 | |||
| 39 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); | ||
| 40 | } | ||
| 41 | |||
| 42 | static void idma32_resume_chan(struct dw_dma_chan *dwc, bool drain) | ||
| 43 | { | ||
| 44 | u32 cfglo = channel_readl(dwc, CFG_LO); | ||
| 45 | |||
| 46 | if (drain) | ||
| 47 | cfglo &= ~IDMA32C_CFGL_CH_DRAIN; | ||
| 48 | |||
| 49 | channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP); | ||
| 50 | } | ||
| 51 | |||
| 52 | static u32 idma32_bytes2block(struct dw_dma_chan *dwc, | ||
| 53 | size_t bytes, unsigned int width, size_t *len) | ||
| 54 | { | ||
| 55 | u32 block; | ||
| 56 | |||
| 57 | if (bytes > dwc->block_size) { | ||
| 58 | block = dwc->block_size; | ||
| 59 | *len = dwc->block_size; | ||
| 60 | } else { | ||
| 61 | block = bytes; | ||
| 62 | *len = bytes; | ||
| 63 | } | ||
| 64 | |||
| 65 | return block; | ||
| 66 | } | ||
| 67 | |||
| 68 | static size_t idma32_block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width) | ||
| 69 | { | ||
| 70 | return IDMA32C_CTLH_BLOCK_TS(block); | ||
| 71 | } | ||
| 72 | |||
| 73 | static u32 idma32_prepare_ctllo(struct dw_dma_chan *dwc) | ||
| 74 | { | ||
| 75 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; | ||
| 76 | bool is_slave = is_slave_direction(dwc->direction); | ||
| 77 | u8 smsize = is_slave ? sconfig->src_maxburst : IDMA32_MSIZE_8; | ||
| 78 | u8 dmsize = is_slave ? sconfig->dst_maxburst : IDMA32_MSIZE_8; | ||
| 79 | |||
| 80 | return DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN | | ||
| 81 | DWC_CTLL_DST_MSIZE(dmsize) | DWC_CTLL_SRC_MSIZE(smsize); | ||
| 82 | } | ||
| 83 | |||
| 84 | static void idma32_encode_maxburst(struct dw_dma_chan *dwc, u32 *maxburst) | ||
| 85 | { | ||
| 86 | *maxburst = *maxburst > 1 ? fls(*maxburst) - 1 : 0; | ||
| 87 | } | ||
| 88 | |||
| 89 | static void idma32_set_device_name(struct dw_dma *dw, int id) | ||
| 90 | { | ||
| 91 | snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", id); | ||
| 92 | } | ||
| 93 | |||
| 94 | /* | ||
| 95 | * Program FIFO size of channels. | ||
| 96 | * | ||
| 97 | * By default full FIFO (512 bytes) is assigned to channel 0. Here we | ||
| 98 | * slice FIFO on equal parts between channels. | ||
| 99 | */ | ||
| 100 | static void idma32_fifo_partition(struct dw_dma *dw) | ||
| 101 | { | ||
| 102 | u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) | | ||
| 103 | IDMA32C_FP_UPDATE; | ||
| 104 | u64 fifo_partition = 0; | ||
| 105 | |||
| 106 | /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */ | ||
| 107 | fifo_partition |= value << 0; | ||
| 108 | |||
| 109 | /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */ | ||
| 110 | fifo_partition |= value << 32; | ||
| 111 | |||
| 112 | /* Program FIFO Partition registers - 64 bytes per channel */ | ||
| 113 | idma32_writeq(dw, FIFO_PARTITION1, fifo_partition); | ||
| 114 | idma32_writeq(dw, FIFO_PARTITION0, fifo_partition); | ||
| 115 | } | ||
| 116 | |||
| 117 | static void idma32_disable(struct dw_dma *dw) | ||
| 118 | { | ||
| 119 | do_dw_dma_off(dw); | ||
| 120 | idma32_fifo_partition(dw); | ||
| 121 | } | ||
| 122 | |||
| 123 | static void idma32_enable(struct dw_dma *dw) | ||
| 124 | { | ||
| 125 | idma32_fifo_partition(dw); | ||
| 126 | do_dw_dma_on(dw); | ||
| 127 | } | ||
| 128 | |||
| 129 | int idma32_dma_probe(struct dw_dma_chip *chip) | ||
| 130 | { | ||
| 131 | struct dw_dma *dw; | ||
| 132 | |||
| 133 | dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL); | ||
| 134 | if (!dw) | ||
| 135 | return -ENOMEM; | ||
| 136 | |||
| 137 | /* Channel operations */ | ||
| 138 | dw->initialize_chan = idma32_initialize_chan; | ||
| 139 | dw->suspend_chan = idma32_suspend_chan; | ||
| 140 | dw->resume_chan = idma32_resume_chan; | ||
| 141 | dw->prepare_ctllo = idma32_prepare_ctllo; | ||
| 142 | dw->encode_maxburst = idma32_encode_maxburst; | ||
| 143 | dw->bytes2block = idma32_bytes2block; | ||
| 144 | dw->block2bytes = idma32_block2bytes; | ||
| 145 | |||
| 146 | /* Device operations */ | ||
| 147 | dw->set_device_name = idma32_set_device_name; | ||
| 148 | dw->disable = idma32_disable; | ||
| 149 | dw->enable = idma32_enable; | ||
| 150 | |||
| 151 | chip->dw = dw; | ||
| 152 | return do_dma_probe(chip); | ||
| 153 | } | ||
| 154 | EXPORT_SYMBOL_GPL(idma32_dma_probe); | ||
| 155 | |||
| 156 | int idma32_dma_remove(struct dw_dma_chip *chip) | ||
| 157 | { | ||
| 158 | return do_dma_remove(chip); | ||
| 159 | } | ||
| 160 | EXPORT_SYMBOL_GPL(idma32_dma_remove); | ||
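
Besides the CFG_LO/CFG_HI programming and the FIFO partitioning, the main behavioural split captured above is in bytes2block(): the DesignWare controller counts a block in transfer-width units (and block2bytes() shifts back by width), whereas iDMA 32-bit always counts bytes and ignores the width. A small self-contained illustration of that difference; the 4095-entry limit is a made-up value for the example, the real limit comes from the platform data or autoconfiguration:

#include <stdio.h>
#include <stddef.h>

#define BLOCK_LIMIT 4095u       /* hypothetical per-descriptor limit */

/* mirrors dw_dma_bytes2block(): block counted in 2^width-byte items */
static unsigned int dw_bytes2block(size_t bytes, unsigned int width, size_t *len)
{
        if ((bytes >> width) > BLOCK_LIMIT) {
                *len = (size_t)BLOCK_LIMIT << width;
                return BLOCK_LIMIT;
        }
        *len = bytes;
        return bytes >> width;
}

/* mirrors idma32_bytes2block(): block always counted in bytes */
static unsigned int idma32_bytes2block(size_t bytes, size_t *len)
{
        if (bytes > BLOCK_LIMIT) {
                *len = BLOCK_LIMIT;
                return BLOCK_LIMIT;
        }
        *len = bytes;
        return bytes;
}

int main(void)
{
        size_t len;
        unsigned int block;

        block = dw_bytes2block(10000, 2, &len);         /* 32-bit transfer width */
        printf("dw:     block=%u, len=%zu\n", block, len);      /* block=2500, len=10000 */

        block = idma32_bytes2block(10000, &len);
        printf("idma32: block=%u, len=%zu\n", block, len);      /* block=4095, len=4095  */

        return 0;
}

For the same 10000-byte request, the DesignWare path fits it into a single descriptor of 2500 32-bit items, while the iDMA 32-bit path caps the descriptor at 4095 bytes and leaves the remainder for the next one.
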
diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h index 41439732ff6b..1dd7a4e6dd23 100644 --- a/drivers/dma/dw/internal.h +++ b/drivers/dma/dw/internal.h | |||
| @@ -1,11 +1,8 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* | 2 | /* |
| 2 | * Driver for the Synopsys DesignWare DMA Controller | 3 | * Driver for the Synopsys DesignWare DMA Controller |
| 3 | * | 4 | * |
| 4 | * Copyright (C) 2013 Intel Corporation | 5 | * Copyright (C) 2013 Intel Corporation |
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | 6 | */ |
| 10 | 7 | ||
| 11 | #ifndef _DMA_DW_INTERNAL_H | 8 | #ifndef _DMA_DW_INTERNAL_H |
| @@ -15,8 +12,14 @@ | |||
| 15 | 12 | ||
| 16 | #include "regs.h" | 13 | #include "regs.h" |
| 17 | 14 | ||
| 18 | int dw_dma_disable(struct dw_dma_chip *chip); | 15 | int do_dma_probe(struct dw_dma_chip *chip); |
| 19 | int dw_dma_enable(struct dw_dma_chip *chip); | 16 | int do_dma_remove(struct dw_dma_chip *chip); |
| 17 | |||
| 18 | void do_dw_dma_on(struct dw_dma *dw); | ||
| 19 | void do_dw_dma_off(struct dw_dma *dw); | ||
| 20 | |||
| 21 | int do_dw_dma_disable(struct dw_dma_chip *chip); | ||
| 22 | int do_dw_dma_enable(struct dw_dma_chip *chip); | ||
| 20 | 23 | ||
| 21 | extern bool dw_dma_filter(struct dma_chan *chan, void *param); | 24 | extern bool dw_dma_filter(struct dma_chan *chan, void *param); |
| 22 | 25 | ||
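
With dw_dma_disable()/dw_dma_enable() renamed to the internal do_dw_dma_disable()/do_dw_dma_enable() and the probe/remove pair split into do_dma_probe()/do_dma_remove(), glue drivers no longer branch on is_idma32; they pick a probe routine up front. The PCI glue below does this through struct dw_dma_pci_data. A hedged sketch of how a match-table entry would carry that data; the device ID and table name here are purely illustrative, not taken from the patch:

static const struct pci_device_id dw_pci_id_table[] = {
        /* hypothetical iDMA 32-bit instance, e.g. a Merrifield-class engine */
        { PCI_VDEVICE(INTEL, 0x1234), (kernel_ulong_t)&idma32_pci_data },
        /* plain DesignWare instances would point at dw_pci_data instead */
        { }
};
MODULE_DEVICE_TABLE(pci, dw_pci_id_table);

dw_pci_probe() then casts pid->driver_data back to struct dw_dma_pci_data and calls data->probe(chip), as shown in the hunk below.
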
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index 7778ed705a1a..e79a75db0852 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c | |||
| @@ -1,12 +1,9 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * PCI driver for the Synopsys DesignWare DMA Controller | 3 | * PCI driver for the Synopsys DesignWare DMA Controller |
| 3 | * | 4 | * |
| 4 | * Copyright (C) 2013 Intel Corporation | 5 | * Copyright (C) 2013 Intel Corporation |
| 5 | * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | 6 | * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> |
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | */ | 7 | */ |
| 11 | 8 | ||
| 12 | #include <linux/module.h> | 9 | #include <linux/module.h> |
| @@ -15,21 +12,33 @@ | |||
| 15 | 12 | ||
| 16 | #include "internal.h" | 13 | #include "internal.h" |
| 17 | 14 | ||
| 18 | static struct dw_dma_platform_data mrfld_pdata = { | 15 | struct dw_dma_pci_data { |
| 16 | const struct dw_dma_platform_data *pdata; | ||
| 17 | int (*probe)(struct dw_dma_chip *chip); | ||
| 18 | }; | ||
| 19 | |||
| 20 | static const struct dw_dma_pci_data dw_pci_data = { | ||
| 21 | .probe = dw_dma_probe, | ||
| 22 | }; | ||
| 23 | |||
| 24 | static const struct dw_dma_platform_data idma32_pdata = { | ||
| 19 | .nr_channels = 8, | 25 | .nr_channels = 8, |
| 20 | .is_private = true, | ||
| 21 | .is_memcpy = true, | ||
| 22 | .is_idma32 = true, | ||
| 23 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, | 26 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, |
| 24 | .chan_priority = CHAN_PRIORITY_ASCENDING, | 27 | .chan_priority = CHAN_PRIORITY_ASCENDING, |
| 25 | .block_size = 131071, | 28 | .block_size = 131071, |
| 26 | .nr_masters = 1, | 29 | .nr_masters = 1, |
| 27 | .data_width = {4}, | 30 | .data_width = {4}, |
| 31 | .multi_block = {1, 1, 1, 1, 1, 1, 1, 1}, | ||
| 32 | }; | ||
| 33 | |||
| 34 | static const struct dw_dma_pci_data idma32_pci_data = { | ||
| 35 | .pdata = &idma32_pdata, | ||
| 36 | .probe = idma32_dma_probe, | ||
| 28 | }; | 37 | }; |
| 29 | 38 | ||
| 30 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | 39 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) |
| 31 | { | 40 | { |
| 32 | const struct dw_dma_platform_data *pdata = (void *)pid->driver_data; | 41 | const struct dw_dma_pci_data *data = (void *)pid->driver_data; |
| 33 | struct dw_dma_chip *chip; | 42 | struct dw_dma_chip *chip; |
| 34 | int ret; | 43 | int ret; |
| 35 | 44 | ||
| @@ -62,9 +71,9 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
| 62 | chip->id = pdev->devfn; | 71 | chip->id = pdev->devfn; |
| 63 | chip->regs = pcim_iomap_table(pdev)[0]; | 72 | chip->regs = pcim_iomap_table(pdev)[0]; |
| 64 | chip->irq = pdev->irq; | 73 | chip->irq = pdev->irq; |
| 65 | chip->pdata = pdata; | 74 | chip->pdata = data->pdata; |
| 66 | 75 | ||
| 67 | ret = dw_dma_probe(chip); | 76 | ret = data->probe(chip); |
| 68 | if (ret) | 77 | if (ret) |
| 69 | return ret; | 78 | return ret; |
| 70 | 79 | ||
| @@ -90,7 +99,7 @@ static int dw_pci_suspend_late(struct device *dev) | |||
| 90 | struct pci_dev *pci = to_pci_dev(dev); | 99 | struct pci_dev *pci = to_pci_dev(dev); |
| 91 | struct dw_dma_chip *chip = pci_get_drvdata(pci); | 100 | struct dw_dma_chip *chip = pci_get_drvdata(pci); |
| 92 | 101 | ||
| 93 | return dw_dma_disable(chip); | 102 | return do_dw_dma_disable(chip); |
| 94 | }; | 103 | }; |
| 95 | 104 | ||
| 96 | static int dw_pci_resume_early(struct device *dev) | 105 | static int dw_pci_resume_early(struct device *dev) |
| @@ -98,7 +107,7 @@ static int dw_pci_resume_early(struct device *dev) | |||
| 98 | struct pci_dev *pci = to_pci_dev(dev); | 107 | struct pci_dev *pci = to_pci_dev(dev); |
| 99 | struct dw_dma_chip *chip = pci_get_drvdata(pci); | 108 | struct dw_dma_chip *chip = pci_get_drvdata(pci); |
| 100 | 109 | ||
| 101 | return dw_dma_enable(chip); | 110 | return do_dw_dma_enable(chip); |
| 102 | }; | 111 | }; |
| 103 | 112 | ||
| 104 | #endif /* CONFIG_PM_SLEEP */ | 113 | #endif /* CONFIG_PM_SLEEP */ |
| @@ -109,24 +118,24 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = { | |||
| 109 | 118 | ||
| 110 | static const struct pci_device_id dw_pci_id_table[] = { | 119 | static const struct pci_device_id dw_pci_id_table[] = { |
| 111 | /* Medfield (GPDMA) */ | 120 | /* Medfield (GPDMA) */ |
| 112 | { PCI_VDEVICE(INTEL, 0x0827) }, | 121 | { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_data }, |
| 113 | 122 | ||
| 114 | /* BayTrail */ | 123 | /* BayTrail */ |
| 115 | { PCI_VDEVICE(INTEL, 0x0f06) }, | 124 | { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_data }, |
| 116 | { PCI_VDEVICE(INTEL, 0x0f40) }, | 125 | { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_data }, |
| 117 | 126 | ||
| 118 | /* Merrifield iDMA 32-bit (GPDMA) */ | 127 | /* Merrifield */ |
| 119 | { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&mrfld_pdata }, | 128 | { PCI_VDEVICE(INTEL, 0x11a2), (kernel_ulong_t)&idma32_pci_data }, |
| 120 | 129 | ||
| 121 | /* Braswell */ | 130 | /* Braswell */ |
| 122 | { PCI_VDEVICE(INTEL, 0x2286) }, | 131 | { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data }, |
| 123 | { PCI_VDEVICE(INTEL, 0x22c0) }, | 132 | { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data }, |
| 124 | 133 | ||
| 125 | /* Haswell */ | 134 | /* Haswell */ |
| 126 | { PCI_VDEVICE(INTEL, 0x9c60) }, | 135 | { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data }, |
| 127 | 136 | ||
| 128 | /* Broadwell */ | 137 | /* Broadwell */ |
| 129 | { PCI_VDEVICE(INTEL, 0x9ce0) }, | 138 | { PCI_VDEVICE(INTEL, 0x9ce0), (kernel_ulong_t)&dw_pci_data }, |
| 130 | 139 | ||
| 131 | { } | 140 | { } |
| 132 | }; | 141 | }; |
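The id-table change above is the usual driver_data round-trip: a pointer to per-device data is stored as a kernel_ulong_t in the match entry and cast back in probe. A minimal, self-contained sketch of the pattern follows; the example struct, the 0x1234 device ID and the function names are illustrative assumptions, not part of the patch.

#include <linux/pci.h>

/* Mirrors the shape of dw_dma_pci_data in drivers/dma/dw/pci.c. */
struct example_pci_data {
	const void *pdata;
	int (*probe)(void *chip);
};

static int example_hw_probe(void *chip)
{
	return 0;
}

static const struct example_pci_data example_data = {
	.probe = example_hw_probe,
};

static const struct pci_device_id example_ids[] = {
	{ PCI_VDEVICE(INTEL, 0x1234), (kernel_ulong_t)&example_data },
	{ }
};

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	/* driver_data survives matching as a kernel_ulong_t; cast it back. */
	const struct example_pci_data *data = (void *)pid->driver_data;

	return data->probe(NULL);
}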
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 31ff8113c3de..382dfd9e9600 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * Platform driver for the Synopsys DesignWare DMA Controller | 3 | * Platform driver for the Synopsys DesignWare DMA Controller |
| 3 | * | 4 | * |
| @@ -6,10 +7,6 @@ | |||
| 6 | * Copyright (C) 2013 Intel Corporation | 7 | * Copyright (C) 2013 Intel Corporation |
| 7 | * | 8 | * |
| 8 | * Some parts of this driver are derived from the original dw_dmac. | 9 | * Some parts of this driver are derived from the original dw_dmac. |
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License version 2 as | ||
| 12 | * published by the Free Software Foundation. | ||
| 13 | */ | 10 | */ |
| 14 | 11 | ||
| 15 | #include <linux/module.h> | 12 | #include <linux/module.h> |
| @@ -128,15 +125,6 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
| 128 | pdata->nr_masters = nr_masters; | 125 | pdata->nr_masters = nr_masters; |
| 129 | pdata->nr_channels = nr_channels; | 126 | pdata->nr_channels = nr_channels; |
| 130 | 127 | ||
| 131 | if (of_property_read_bool(np, "is_private")) | ||
| 132 | pdata->is_private = true; | ||
| 133 | |||
| 134 | /* | ||
| 135 | * All known devices, which use DT for configuration, support | ||
| 136 | * memory-to-memory transfers. So enable it by default. | ||
| 137 | */ | ||
| 138 | pdata->is_memcpy = true; | ||
| 139 | |||
| 140 | if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) | 128 | if (!of_property_read_u32(np, "chan_allocation_order", &tmp)) |
| 141 | pdata->chan_allocation_order = (unsigned char)tmp; | 129 | pdata->chan_allocation_order = (unsigned char)tmp; |
| 142 | 130 | ||
| @@ -264,7 +252,7 @@ static void dw_shutdown(struct platform_device *pdev) | |||
| 264 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); | 252 | struct dw_dma_chip *chip = platform_get_drvdata(pdev); |
| 265 | 253 | ||
| 266 | /* | 254 | /* |
| 267 | * We have to call dw_dma_disable() to stop any ongoing transfer. On | 255 | * We have to call do_dw_dma_disable() to stop any ongoing transfer. On |
| 268 | * some platforms we can't do that since DMA device is powered off. | 256 | * some platforms we can't do that since DMA device is powered off. |
| 269 | * Moreover we have no possibility to check if the platform is affected | 257 | * Moreover we have no possibility to check if the platform is affected |
| 270 | * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put() | 258 | * or not. That's why we call pm_runtime_get_sync() / pm_runtime_put() |
| @@ -273,7 +261,7 @@ static void dw_shutdown(struct platform_device *pdev) | |||
| 273 | * used by the driver. | 261 | * used by the driver. |
| 274 | */ | 262 | */ |
| 275 | pm_runtime_get_sync(chip->dev); | 263 | pm_runtime_get_sync(chip->dev); |
| 276 | dw_dma_disable(chip); | 264 | do_dw_dma_disable(chip); |
| 277 | pm_runtime_put_sync_suspend(chip->dev); | 265 | pm_runtime_put_sync_suspend(chip->dev); |
| 278 | 266 | ||
| 279 | clk_disable_unprepare(chip->clk); | 267 | clk_disable_unprepare(chip->clk); |
| @@ -303,7 +291,7 @@ static int dw_suspend_late(struct device *dev) | |||
| 303 | { | 291 | { |
| 304 | struct dw_dma_chip *chip = dev_get_drvdata(dev); | 292 | struct dw_dma_chip *chip = dev_get_drvdata(dev); |
| 305 | 293 | ||
| 306 | dw_dma_disable(chip); | 294 | do_dw_dma_disable(chip); |
| 307 | clk_disable_unprepare(chip->clk); | 295 | clk_disable_unprepare(chip->clk); |
| 308 | 296 | ||
| 309 | return 0; | 297 | return 0; |
| @@ -318,7 +306,7 @@ static int dw_resume_early(struct device *dev) | |||
| 318 | if (ret) | 306 | if (ret) |
| 319 | return ret; | 307 | return ret; |
| 320 | 308 | ||
| 321 | return dw_dma_enable(chip); | 309 | return do_dw_dma_enable(chip); |
| 322 | } | 310 | } |
| 323 | 311 | ||
| 324 | #endif /* CONFIG_PM_SLEEP */ | 312 | #endif /* CONFIG_PM_SLEEP */ |
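The comment in dw_shutdown() above captures the reasoning: the block may already be runtime-suspended, so any register access has to be bracketed by a runtime-PM reference. A minimal sketch of that bracket, assuming my_hw_disable() as an illustrative placeholder for the controller-specific disable:

#include <linux/pm_runtime.h>

static void my_hw_disable(struct device *dev)
{
	/* Illustrative placeholder: stop any ongoing transfer here. */
}

static void example_shutdown(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* make sure the block is powered */
	my_hw_disable(dev);		/* registers are now safe to touch */
	pm_runtime_put_sync_suspend(dev);
}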
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 646c9c960c07..3fce66ecee7a 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h | |||
| @@ -1,13 +1,10 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* | 2 | /* |
| 2 | * Driver for the Synopsys DesignWare AHB DMA Controller | 3 | * Driver for the Synopsys DesignWare AHB DMA Controller |
| 3 | * | 4 | * |
| 4 | * Copyright (C) 2005-2007 Atmel Corporation | 5 | * Copyright (C) 2005-2007 Atmel Corporation |
| 5 | * Copyright (C) 2010-2011 ST Microelectronics | 6 | * Copyright (C) 2010-2011 ST Microelectronics |
| 6 | * Copyright (C) 2016 Intel Corporation | 7 | * Copyright (C) 2016 Intel Corporation |
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | */ | 8 | */ |
| 12 | 9 | ||
| 13 | #include <linux/bitops.h> | 10 | #include <linux/bitops.h> |
| @@ -222,6 +219,16 @@ enum dw_dma_msize { | |||
| 222 | 219 | ||
| 223 | /* iDMA 32-bit support */ | 220 | /* iDMA 32-bit support */ |
| 224 | 221 | ||
| 222 | /* bursts size */ | ||
| 223 | enum idma32_msize { | ||
| 224 | IDMA32_MSIZE_1, | ||
| 225 | IDMA32_MSIZE_2, | ||
| 226 | IDMA32_MSIZE_4, | ||
| 227 | IDMA32_MSIZE_8, | ||
| 228 | IDMA32_MSIZE_16, | ||
| 229 | IDMA32_MSIZE_32, | ||
| 230 | }; | ||
| 231 | |||
| 225 | /* Bitfields in CTL_HI */ | 232 | /* Bitfields in CTL_HI */ |
| 226 | #define IDMA32C_CTLH_BLOCK_TS_MASK GENMASK(16, 0) | 233 | #define IDMA32C_CTLH_BLOCK_TS_MASK GENMASK(16, 0) |
| 227 | #define IDMA32C_CTLH_BLOCK_TS(x) ((x) & IDMA32C_CTLH_BLOCK_TS_MASK) | 234 | #define IDMA32C_CTLH_BLOCK_TS(x) ((x) & IDMA32C_CTLH_BLOCK_TS_MASK) |
| @@ -312,6 +319,21 @@ struct dw_dma { | |||
| 312 | u8 all_chan_mask; | 319 | u8 all_chan_mask; |
| 313 | u8 in_use; | 320 | u8 in_use; |
| 314 | 321 | ||
| 322 | /* Channel operations */ | ||
| 323 | void (*initialize_chan)(struct dw_dma_chan *dwc); | ||
| 324 | void (*suspend_chan)(struct dw_dma_chan *dwc, bool drain); | ||
| 325 | void (*resume_chan)(struct dw_dma_chan *dwc, bool drain); | ||
| 326 | u32 (*prepare_ctllo)(struct dw_dma_chan *dwc); | ||
| 327 | void (*encode_maxburst)(struct dw_dma_chan *dwc, u32 *maxburst); | ||
| 328 | u32 (*bytes2block)(struct dw_dma_chan *dwc, size_t bytes, | ||
| 329 | unsigned int width, size_t *len); | ||
| 330 | size_t (*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width); | ||
| 331 | |||
| 332 | /* Device operations */ | ||
| 333 | void (*set_device_name)(struct dw_dma *dw, int id); | ||
| 334 | void (*disable)(struct dw_dma *dw); | ||
| 335 | void (*enable)(struct dw_dma *dw); | ||
| 336 | |||
| 315 | /* platform data */ | 337 | /* platform data */ |
| 316 | struct dw_dma_platform_data *pdata; | 338 | struct dw_dma_platform_data *pdata; |
| 317 | }; | 339 | }; |
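With the callbacks added to struct dw_dma above, the common code no longer needs an is_idma32 flag and run-time branches; it simply dispatches through the controller-specific operations wired up in probe. A minimal, illustrative sketch of such a call site (not taken from core.c):

static u32 example_setup_channel(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	/* Per-channel setup, DW or iDMA 32-bit flavour as set in probe. */
	dw->initialize_chan(dwc);

	/* Controller-specific CTL_LO bits for the next descriptor. */
	return dw->prepare_ctllo(dwc);
}

static void example_power_cycle(struct dw_dma *dw)
{
	dw->disable(dw);
	dw->enable(dw);
}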
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 8876c4c1bb2c..680b2a00a953 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #include <linux/dmapool.h> | 6 | #include <linux/dmapool.h> |
| 7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
| 8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
| 9 | #include <linux/dma-mapping.h> | ||
| 9 | 10 | ||
| 10 | #include "fsl-edma-common.h" | 11 | #include "fsl-edma-common.h" |
| 11 | 12 | ||
| @@ -173,12 +174,62 @@ int fsl_edma_resume(struct dma_chan *chan) | |||
| 173 | } | 174 | } |
| 174 | EXPORT_SYMBOL_GPL(fsl_edma_resume); | 175 | EXPORT_SYMBOL_GPL(fsl_edma_resume); |
| 175 | 176 | ||
| 177 | static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan) | ||
| 178 | { | ||
| 179 | if (fsl_chan->dma_dir != DMA_NONE) | ||
| 180 | dma_unmap_resource(fsl_chan->vchan.chan.device->dev, | ||
| 181 | fsl_chan->dma_dev_addr, | ||
| 182 | fsl_chan->dma_dev_size, | ||
| 183 | fsl_chan->dma_dir, 0); | ||
| 184 | fsl_chan->dma_dir = DMA_NONE; | ||
| 185 | } | ||
| 186 | |||
| 187 | static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan, | ||
| 188 | enum dma_transfer_direction dir) | ||
| 189 | { | ||
| 190 | struct device *dev = fsl_chan->vchan.chan.device->dev; | ||
| 191 | enum dma_data_direction dma_dir; | ||
| 192 | phys_addr_t addr = 0; | ||
| 193 | u32 size = 0; | ||
| 194 | |||
| 195 | switch (dir) { | ||
| 196 | case DMA_MEM_TO_DEV: | ||
| 197 | dma_dir = DMA_FROM_DEVICE; | ||
| 198 | addr = fsl_chan->cfg.dst_addr; | ||
| 199 | size = fsl_chan->cfg.dst_maxburst; | ||
| 200 | break; | ||
| 201 | case DMA_DEV_TO_MEM: | ||
| 202 | dma_dir = DMA_TO_DEVICE; | ||
| 203 | addr = fsl_chan->cfg.src_addr; | ||
| 204 | size = fsl_chan->cfg.src_maxburst; | ||
| 205 | break; | ||
| 206 | default: | ||
| 207 | dma_dir = DMA_NONE; | ||
| 208 | break; | ||
| 209 | } | ||
| 210 | |||
| 211 | /* Already mapped for this config? */ | ||
| 212 | if (fsl_chan->dma_dir == dma_dir) | ||
| 213 | return true; | ||
| 214 | |||
| 215 | fsl_edma_unprep_slave_dma(fsl_chan); | ||
| 216 | |||
| 217 | fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0); | ||
| 218 | if (dma_mapping_error(dev, fsl_chan->dma_dev_addr)) | ||
| 219 | return false; | ||
| 220 | fsl_chan->dma_dev_size = size; | ||
| 221 | fsl_chan->dma_dir = dma_dir; | ||
| 222 | |||
| 223 | return true; | ||
| 224 | } | ||
| 225 | |||
| 176 | int fsl_edma_slave_config(struct dma_chan *chan, | 226 | int fsl_edma_slave_config(struct dma_chan *chan, |
| 177 | struct dma_slave_config *cfg) | 227 | struct dma_slave_config *cfg) |
| 178 | { | 228 | { |
| 179 | struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); | 229 | struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); |
| 180 | 230 | ||
| 181 | memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg)); | 231 | memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg)); |
| 232 | fsl_edma_unprep_slave_dma(fsl_chan); | ||
| 182 | 233 | ||
| 183 | return 0; | 234 | return 0; |
| 184 | } | 235 | } |
| @@ -339,9 +390,7 @@ static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan, | |||
| 339 | struct fsl_edma_desc *fsl_desc; | 390 | struct fsl_edma_desc *fsl_desc; |
| 340 | int i; | 391 | int i; |
| 341 | 392 | ||
| 342 | fsl_desc = kzalloc(sizeof(*fsl_desc) + | 393 | fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT); |
| 343 | sizeof(struct fsl_edma_sw_tcd) * | ||
| 344 | sg_len, GFP_NOWAIT); | ||
| 345 | if (!fsl_desc) | 394 | if (!fsl_desc) |
| 346 | return NULL; | 395 | return NULL; |
| 347 | 396 | ||
| @@ -378,6 +427,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic( | |||
| 378 | if (!is_slave_direction(direction)) | 427 | if (!is_slave_direction(direction)) |
| 379 | return NULL; | 428 | return NULL; |
| 380 | 429 | ||
| 430 | if (!fsl_edma_prep_slave_dma(fsl_chan, direction)) | ||
| 431 | return NULL; | ||
| 432 | |||
| 381 | sg_len = buf_len / period_len; | 433 | sg_len = buf_len / period_len; |
| 382 | fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); | 434 | fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); |
| 383 | if (!fsl_desc) | 435 | if (!fsl_desc) |
| @@ -409,11 +461,11 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic( | |||
| 409 | 461 | ||
| 410 | if (direction == DMA_MEM_TO_DEV) { | 462 | if (direction == DMA_MEM_TO_DEV) { |
| 411 | src_addr = dma_buf_next; | 463 | src_addr = dma_buf_next; |
| 412 | dst_addr = fsl_chan->cfg.dst_addr; | 464 | dst_addr = fsl_chan->dma_dev_addr; |
| 413 | soff = fsl_chan->cfg.dst_addr_width; | 465 | soff = fsl_chan->cfg.dst_addr_width; |
| 414 | doff = 0; | 466 | doff = 0; |
| 415 | } else { | 467 | } else { |
| 416 | src_addr = fsl_chan->cfg.src_addr; | 468 | src_addr = fsl_chan->dma_dev_addr; |
| 417 | dst_addr = dma_buf_next; | 469 | dst_addr = dma_buf_next; |
| 418 | soff = 0; | 470 | soff = 0; |
| 419 | doff = fsl_chan->cfg.src_addr_width; | 471 | doff = fsl_chan->cfg.src_addr_width; |
| @@ -444,6 +496,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg( | |||
| 444 | if (!is_slave_direction(direction)) | 496 | if (!is_slave_direction(direction)) |
| 445 | return NULL; | 497 | return NULL; |
| 446 | 498 | ||
| 499 | if (!fsl_edma_prep_slave_dma(fsl_chan, direction)) | ||
| 500 | return NULL; | ||
| 501 | |||
| 447 | fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); | 502 | fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); |
| 448 | if (!fsl_desc) | 503 | if (!fsl_desc) |
| 449 | return NULL; | 504 | return NULL; |
| @@ -468,11 +523,11 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg( | |||
| 468 | 523 | ||
| 469 | if (direction == DMA_MEM_TO_DEV) { | 524 | if (direction == DMA_MEM_TO_DEV) { |
| 470 | src_addr = sg_dma_address(sg); | 525 | src_addr = sg_dma_address(sg); |
| 471 | dst_addr = fsl_chan->cfg.dst_addr; | 526 | dst_addr = fsl_chan->dma_dev_addr; |
| 472 | soff = fsl_chan->cfg.dst_addr_width; | 527 | soff = fsl_chan->cfg.dst_addr_width; |
| 473 | doff = 0; | 528 | doff = 0; |
| 474 | } else { | 529 | } else { |
| 475 | src_addr = fsl_chan->cfg.src_addr; | 530 | src_addr = fsl_chan->dma_dev_addr; |
| 476 | dst_addr = sg_dma_address(sg); | 531 | dst_addr = sg_dma_address(sg); |
| 477 | soff = 0; | 532 | soff = 0; |
| 478 | doff = fsl_chan->cfg.src_addr_width; | 533 | doff = fsl_chan->cfg.src_addr_width; |
| @@ -555,6 +610,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan) | |||
| 555 | fsl_edma_chan_mux(fsl_chan, 0, false); | 610 | fsl_edma_chan_mux(fsl_chan, 0, false); |
| 556 | fsl_chan->edesc = NULL; | 611 | fsl_chan->edesc = NULL; |
| 557 | vchan_get_all_descriptors(&fsl_chan->vchan, &head); | 612 | vchan_get_all_descriptors(&fsl_chan->vchan, &head); |
| 613 | fsl_edma_unprep_slave_dma(fsl_chan); | ||
| 558 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | 614 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); |
| 559 | 615 | ||
| 560 | vchan_dma_desc_free_list(&fsl_chan->vchan, &head); | 616 | vchan_dma_desc_free_list(&fsl_chan->vchan, &head); |
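fsl_edma_prep_slave_dma() above maps the peripheral FIFO address once per direction with dma_map_resource() and reuses the mapping for every descriptor until the channel is reconfigured or its resources are freed. A standalone sketch of that mapping pattern, where dev, fifo_phys and fifo_len are illustrative placeholders:

#include <linux/dma-mapping.h>

static dma_addr_t example_map_fifo(struct device *dev, phys_addr_t fifo_phys,
				   size_t fifo_len)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_resource(dev, fifo_phys, fifo_len,
				    DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(dev, dma_addr))
		return 0;	/* caller treats 0 as "not mapped" */

	return dma_addr;
}

static void example_unmap_fifo(struct device *dev, dma_addr_t dma_addr,
			       size_t fifo_len)
{
	dma_unmap_resource(dev, dma_addr, fifo_len, DMA_FROM_DEVICE, 0);
}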
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 8917e8865959..b435d8e1e3a1 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #ifndef _FSL_EDMA_COMMON_H_ | 6 | #ifndef _FSL_EDMA_COMMON_H_ |
| 7 | #define _FSL_EDMA_COMMON_H_ | 7 | #define _FSL_EDMA_COMMON_H_ |
| 8 | 8 | ||
| 9 | #include <linux/dma-direction.h> | ||
| 9 | #include "virt-dma.h" | 10 | #include "virt-dma.h" |
| 10 | 11 | ||
| 11 | #define EDMA_CR_EDBG BIT(1) | 12 | #define EDMA_CR_EDBG BIT(1) |
| @@ -120,6 +121,9 @@ struct fsl_edma_chan { | |||
| 120 | struct dma_slave_config cfg; | 121 | struct dma_slave_config cfg; |
| 121 | u32 attr; | 122 | u32 attr; |
| 122 | struct dma_pool *tcd_pool; | 123 | struct dma_pool *tcd_pool; |
| 124 | dma_addr_t dma_dev_addr; | ||
| 125 | u32 dma_dev_size; | ||
| 126 | enum dma_data_direction dma_dir; | ||
| 123 | }; | 127 | }; |
| 124 | 128 | ||
| 125 | struct fsl_edma_desc { | 129 | struct fsl_edma_desc { |
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 34d70112fcc9..75e8a7ba3a22 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c | |||
| @@ -254,6 +254,7 @@ static int fsl_edma_probe(struct platform_device *pdev) | |||
| 254 | fsl_chan->pm_state = RUNNING; | 254 | fsl_chan->pm_state = RUNNING; |
| 255 | fsl_chan->slave_id = 0; | 255 | fsl_chan->slave_id = 0; |
| 256 | fsl_chan->idle = true; | 256 | fsl_chan->idle = true; |
| 257 | fsl_chan->dma_dir = DMA_NONE; | ||
| 257 | fsl_chan->vchan.desc_free = fsl_edma_free_desc; | 258 | fsl_chan->vchan.desc_free = fsl_edma_free_desc; |
| 258 | vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); | 259 | vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); |
| 259 | 260 | ||
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c new file mode 100644 index 000000000000..aa1d0ae3d207 --- /dev/null +++ b/drivers/dma/fsl-qdma.c | |||
| @@ -0,0 +1,1259 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright 2014-2015 Freescale | ||
| 3 | // Copyright 2018 NXP | ||
| 4 | |||
| 5 | /* | ||
| 6 | * Driver for NXP Layerscape Queue Direct Memory Access Controller | ||
| 7 | * | ||
| 8 | * Author: | ||
| 9 | * Wen He <wen.he_1@nxp.com> | ||
| 10 | * Jiaheng Fan <jiaheng.fan@nxp.com> | ||
| 11 | * | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/delay.h> | ||
| 16 | #include <linux/of_irq.h> | ||
| 17 | #include <linux/of_platform.h> | ||
| 18 | #include <linux/of_dma.h> | ||
| 19 | #include <linux/dma-mapping.h> | ||
| 20 | |||
| 21 | #include "virt-dma.h" | ||
| 22 | #include "fsldma.h" | ||
| 23 | |||
| 24 | /* Register related definition */ | ||
| 25 | #define FSL_QDMA_DMR 0x0 | ||
| 26 | #define FSL_QDMA_DSR 0x4 | ||
| 27 | #define FSL_QDMA_DEIER 0xe00 | ||
| 28 | #define FSL_QDMA_DEDR 0xe04 | ||
| 29 | #define FSL_QDMA_DECFDW0R 0xe10 | ||
| 30 | #define FSL_QDMA_DECFDW1R 0xe14 | ||
| 31 | #define FSL_QDMA_DECFDW2R 0xe18 | ||
| 32 | #define FSL_QDMA_DECFDW3R 0xe1c | ||
| 33 | #define FSL_QDMA_DECFQIDR 0xe30 | ||
| 34 | #define FSL_QDMA_DECBR 0xe34 | ||
| 35 | |||
| 36 | #define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x)) | ||
| 37 | #define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x)) | ||
| 38 | #define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x)) | ||
| 39 | #define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x)) | ||
| 40 | #define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x)) | ||
| 41 | #define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x)) | ||
| 42 | #define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x)) | ||
| 43 | #define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x)) | ||
| 44 | |||
| 45 | #define FSL_QDMA_SQDPAR 0x80c | ||
| 46 | #define FSL_QDMA_SQEPAR 0x814 | ||
| 47 | #define FSL_QDMA_BSQMR 0x800 | ||
| 48 | #define FSL_QDMA_BSQSR 0x804 | ||
| 49 | #define FSL_QDMA_BSQICR 0x828 | ||
| 50 | #define FSL_QDMA_CQMR 0xa00 | ||
| 51 | #define FSL_QDMA_CQDSCR1 0xa08 | ||
| 52 | #define FSL_QDMA_CQDSCR2 0xa0c | ||
| 53 | #define FSL_QDMA_CQIER 0xa10 | ||
| 54 | #define FSL_QDMA_CQEDR 0xa14 | ||
| 55 | #define FSL_QDMA_SQCCMR 0xa20 | ||
| 56 | |||
| 57 | /* Registers for bit and genmask */ | ||
| 58 | #define FSL_QDMA_CQIDR_SQT BIT(15) | ||
| 59 | #define QDMA_CCDF_FOTMAT BIT(29) | ||
| 60 | #define QDMA_CCDF_SER BIT(30) | ||
| 61 | #define QDMA_SG_FIN BIT(30) | ||
| 62 | #define QDMA_SG_LEN_MASK GENMASK(29, 0) | ||
| 63 | #define QDMA_CCDF_MASK GENMASK(28, 20) | ||
| 64 | |||
| 65 | #define FSL_QDMA_DEDR_CLEAR GENMASK(31, 0) | ||
| 66 | #define FSL_QDMA_BCQIDR_CLEAR GENMASK(31, 0) | ||
| 67 | #define FSL_QDMA_DEIER_CLEAR GENMASK(31, 0) | ||
| 68 | |||
| 69 | #define FSL_QDMA_BCQIER_CQTIE BIT(15) | ||
| 70 | #define FSL_QDMA_BCQIER_CQPEIE BIT(23) | ||
| 71 | #define FSL_QDMA_BSQICR_ICEN BIT(31) | ||
| 72 | |||
| 73 | #define FSL_QDMA_BSQICR_ICST(x) ((x) << 16) | ||
| 74 | #define FSL_QDMA_CQIER_MEIE BIT(31) | ||
| 75 | #define FSL_QDMA_CQIER_TEIE BIT(0) | ||
| 76 | #define FSL_QDMA_SQCCMR_ENTER_WM BIT(21) | ||
| 77 | |||
| 78 | #define FSL_QDMA_BCQMR_EN BIT(31) | ||
| 79 | #define FSL_QDMA_BCQMR_EI BIT(30) | ||
| 80 | #define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20) | ||
| 81 | #define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16) | ||
| 82 | |||
| 83 | #define FSL_QDMA_BCQSR_QF BIT(16) | ||
| 84 | #define FSL_QDMA_BCQSR_XOFF BIT(0) | ||
| 85 | |||
| 86 | #define FSL_QDMA_BSQMR_EN BIT(31) | ||
| 87 | #define FSL_QDMA_BSQMR_DI BIT(30) | ||
| 88 | #define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16) | ||
| 89 | |||
| 90 | #define FSL_QDMA_BSQSR_QE BIT(17) | ||
| 91 | |||
| 92 | #define FSL_QDMA_DMR_DQD BIT(30) | ||
| 93 | #define FSL_QDMA_DSR_DB BIT(31) | ||
| 94 | |||
| 95 | /* Size related definition */ | ||
| 96 | #define FSL_QDMA_QUEUE_MAX 8 | ||
| 97 | #define FSL_QDMA_COMMAND_BUFFER_SIZE 64 | ||
| 98 | #define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32 | ||
| 99 | #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64 | ||
| 100 | #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384 | ||
| 101 | #define FSL_QDMA_QUEUE_NUM_MAX 8 | ||
| 102 | |||
| 103 | /* Field definition for CMD */ | ||
| 104 | #define FSL_QDMA_CMD_RWTTYPE 0x4 | ||
| 105 | #define FSL_QDMA_CMD_LWC 0x2 | ||
| 106 | #define FSL_QDMA_CMD_RWTTYPE_OFFSET 28 | ||
| 107 | #define FSL_QDMA_CMD_NS_OFFSET 27 | ||
| 108 | #define FSL_QDMA_CMD_DQOS_OFFSET 24 | ||
| 109 | #define FSL_QDMA_CMD_WTHROTL_OFFSET 20 | ||
| 110 | #define FSL_QDMA_CMD_DSEN_OFFSET 19 | ||
| 111 | #define FSL_QDMA_CMD_LWC_OFFSET 16 | ||
| 112 | |||
| 113 | /* Field definition for Descriptor offset */ | ||
| 114 | #define QDMA_CCDF_STATUS 20 | ||
| 115 | #define QDMA_CCDF_OFFSET 20 | ||
| 116 | |||
| 117 | /* Field definition for safe loop count */ | ||
| 118 | #define FSL_QDMA_HALT_COUNT 1500 | ||
| 119 | #define FSL_QDMA_MAX_SIZE 16385 | ||
| 120 | #define FSL_QDMA_COMP_TIMEOUT 1000 | ||
| 121 | #define FSL_COMMAND_QUEUE_OVERFLLOW 10 | ||
| 122 | |||
| 123 | #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \ | ||
| 124 | (((fsl_qdma_engine)->block_offset) * (x)) | ||
| 125 | |||
| 126 | /** | ||
| 127 | * struct fsl_qdma_format - This is the struct describing the compound | ||
| 128 | * descriptor format used with qDMA. | ||
| 129 | * @status: Command status and enqueue status notification. | ||
| 130 | * @cfg: Frame offset and frame format. | ||
| 131 | * @addr_lo: Lower 32 bits of the compound descriptor's | ||
| 132 | * 40-bit address in memory. | ||
| 133 | * @addr_hi: Upper 8 bits of the same 40-bit address | ||
| 134 | * in memory. | ||
| 135 | * @__reserved1: Reserved field. | ||
| 136 | * @cfg8b_w1: Compound descriptor command queue origin produced | ||
| 137 | * by qDMA and dynamic debug field. | ||
| 138 | * @data: Pointer to the 40-bit address in memory, describing | ||
| 139 | * the DMA source and destination information. | ||
| 140 | */ | ||
| 141 | struct fsl_qdma_format { | ||
| 142 | __le32 status; | ||
| 143 | __le32 cfg; | ||
| 144 | union { | ||
| 145 | struct { | ||
| 146 | __le32 addr_lo; | ||
| 147 | u8 addr_hi; | ||
| 148 | u8 __reserved1[2]; | ||
| 149 | u8 cfg8b_w1; | ||
| 150 | } __packed; | ||
| 151 | __le64 data; | ||
| 152 | }; | ||
| 153 | } __packed; | ||
| 154 | |||
| 155 | /* qDMA status notification pre information */ | ||
| 156 | struct fsl_pre_status { | ||
| 157 | u64 addr; | ||
| 158 | u8 queue; | ||
| 159 | }; | ||
| 160 | |||
| 161 | static DEFINE_PER_CPU(struct fsl_pre_status, pre); | ||
| 162 | |||
| 163 | struct fsl_qdma_chan { | ||
| 164 | struct virt_dma_chan vchan; | ||
| 165 | struct virt_dma_desc vdesc; | ||
| 166 | enum dma_status status; | ||
| 167 | struct fsl_qdma_engine *qdma; | ||
| 168 | struct fsl_qdma_queue *queue; | ||
| 169 | }; | ||
| 170 | |||
| 171 | struct fsl_qdma_queue { | ||
| 172 | struct fsl_qdma_format *virt_head; | ||
| 173 | struct fsl_qdma_format *virt_tail; | ||
| 174 | struct list_head comp_used; | ||
| 175 | struct list_head comp_free; | ||
| 176 | struct dma_pool *comp_pool; | ||
| 177 | struct dma_pool *desc_pool; | ||
| 178 | spinlock_t queue_lock; | ||
| 179 | dma_addr_t bus_addr; | ||
| 180 | u32 n_cq; | ||
| 181 | u32 id; | ||
| 182 | struct fsl_qdma_format *cq; | ||
| 183 | void __iomem *block_base; | ||
| 184 | }; | ||
| 185 | |||
| 186 | struct fsl_qdma_comp { | ||
| 187 | dma_addr_t bus_addr; | ||
| 188 | dma_addr_t desc_bus_addr; | ||
| 189 | struct fsl_qdma_format *virt_addr; | ||
| 190 | struct fsl_qdma_format *desc_virt_addr; | ||
| 191 | struct fsl_qdma_chan *qchan; | ||
| 192 | struct virt_dma_desc vdesc; | ||
| 193 | struct list_head list; | ||
| 194 | }; | ||
| 195 | |||
| 196 | struct fsl_qdma_engine { | ||
| 197 | struct dma_device dma_dev; | ||
| 198 | void __iomem *ctrl_base; | ||
| 199 | void __iomem *status_base; | ||
| 200 | void __iomem *block_base; | ||
| 201 | u32 n_chans; | ||
| 202 | u32 n_queues; | ||
| 203 | struct mutex fsl_qdma_mutex; | ||
| 204 | int error_irq; | ||
| 205 | int *queue_irq; | ||
| 206 | u32 feature; | ||
| 207 | struct fsl_qdma_queue *queue; | ||
| 208 | struct fsl_qdma_queue **status; | ||
| 209 | struct fsl_qdma_chan *chans; | ||
| 210 | int block_number; | ||
| 211 | int block_offset; | ||
| 212 | int irq_base; | ||
| 213 | int desc_allocated; | ||
| 214 | |||
| 215 | }; | ||
| 216 | |||
| 217 | static inline u64 | ||
| 218 | qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf) | ||
| 219 | { | ||
| 220 | return le64_to_cpu(ccdf->data) & (U64_MAX >> 24); | ||
| 221 | } | ||
| 222 | |||
| 223 | static inline void | ||
| 224 | qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr) | ||
| 225 | { | ||
| 226 | ccdf->addr_hi = upper_32_bits(addr); | ||
| 227 | ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr)); | ||
| 228 | } | ||
| 229 | |||
| 230 | static inline u8 | ||
| 231 | qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf) | ||
| 232 | { | ||
| 233 | return ccdf->cfg8b_w1 & U8_MAX; | ||
| 234 | } | ||
| 235 | |||
| 236 | static inline int | ||
| 237 | qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf) | ||
| 238 | { | ||
| 239 | return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET; | ||
| 240 | } | ||
| 241 | |||
| 242 | static inline void | ||
| 243 | qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset) | ||
| 244 | { | ||
| 245 | ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset); | ||
| 246 | } | ||
| 247 | |||
| 248 | static inline int | ||
| 249 | qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf) | ||
| 250 | { | ||
| 251 | return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS; | ||
| 252 | } | ||
| 253 | |||
| 254 | static inline void | ||
| 255 | qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status) | ||
| 256 | { | ||
| 257 | ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status); | ||
| 258 | } | ||
| 259 | |||
| 260 | static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len) | ||
| 261 | { | ||
| 262 | csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK); | ||
| 263 | } | ||
| 264 | |||
| 265 | static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len) | ||
| 266 | { | ||
| 267 | csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK)); | ||
| 268 | } | ||
| 269 | |||
| 270 | static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr) | ||
| 271 | { | ||
| 272 | return FSL_DMA_IN(qdma, addr, 32); | ||
| 273 | } | ||
| 274 | |||
| 275 | static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val, | ||
| 276 | void __iomem *addr) | ||
| 277 | { | ||
| 278 | FSL_DMA_OUT(qdma, addr, val, 32); | ||
| 279 | } | ||
| 280 | |||
| 281 | static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan) | ||
| 282 | { | ||
| 283 | return container_of(chan, struct fsl_qdma_chan, vchan.chan); | ||
| 284 | } | ||
| 285 | |||
| 286 | static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd) | ||
| 287 | { | ||
| 288 | return container_of(vd, struct fsl_qdma_comp, vdesc); | ||
| 289 | } | ||
| 290 | |||
| 291 | static void fsl_qdma_free_chan_resources(struct dma_chan *chan) | ||
| 292 | { | ||
| 293 | struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); | ||
| 294 | struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; | ||
| 295 | struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma; | ||
| 296 | struct fsl_qdma_comp *comp_temp, *_comp_temp; | ||
| 297 | unsigned long flags; | ||
| 298 | LIST_HEAD(head); | ||
| 299 | |||
| 300 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); | ||
| 301 | vchan_get_all_descriptors(&fsl_chan->vchan, &head); | ||
| 302 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | ||
| 303 | |||
| 304 | vchan_dma_desc_free_list(&fsl_chan->vchan, &head); | ||
| 305 | |||
| 306 | if (!fsl_queue->comp_pool && !fsl_queue->desc_pool) | ||
| 307 | return; | ||
| 308 | |||
| 309 | list_for_each_entry_safe(comp_temp, _comp_temp, | ||
| 310 | &fsl_queue->comp_used, list) { | ||
| 311 | dma_pool_free(fsl_queue->comp_pool, | ||
| 312 | comp_temp->virt_addr, | ||
| 313 | comp_temp->bus_addr); | ||
| 314 | dma_pool_free(fsl_queue->desc_pool, | ||
| 315 | comp_temp->desc_virt_addr, | ||
| 316 | comp_temp->desc_bus_addr); | ||
| 317 | list_del(&comp_temp->list); | ||
| 318 | kfree(comp_temp); | ||
| 319 | } | ||
| 320 | |||
| 321 | list_for_each_entry_safe(comp_temp, _comp_temp, | ||
| 322 | &fsl_queue->comp_free, list) { | ||
| 323 | dma_pool_free(fsl_queue->comp_pool, | ||
| 324 | comp_temp->virt_addr, | ||
| 325 | comp_temp->bus_addr); | ||
| 326 | dma_pool_free(fsl_queue->desc_pool, | ||
| 327 | comp_temp->desc_virt_addr, | ||
| 328 | comp_temp->desc_bus_addr); | ||
| 329 | list_del(&comp_temp->list); | ||
| 330 | kfree(comp_temp); | ||
| 331 | } | ||
| 332 | |||
| 333 | dma_pool_destroy(fsl_queue->comp_pool); | ||
| 334 | dma_pool_destroy(fsl_queue->desc_pool); | ||
| 335 | |||
| 336 | fsl_qdma->desc_allocated--; | ||
| 337 | fsl_queue->comp_pool = NULL; | ||
| 338 | fsl_queue->desc_pool = NULL; | ||
| 339 | } | ||
| 340 | |||
| 341 | static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, | ||
| 342 | dma_addr_t dst, dma_addr_t src, u32 len) | ||
| 343 | { | ||
| 344 | struct fsl_qdma_format *sdf, *ddf; | ||
| 345 | struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest; | ||
| 346 | |||
| 347 | ccdf = fsl_comp->virt_addr; | ||
| 348 | csgf_desc = fsl_comp->virt_addr + 1; | ||
| 349 | csgf_src = fsl_comp->virt_addr + 2; | ||
| 350 | csgf_dest = fsl_comp->virt_addr + 3; | ||
| 351 | sdf = fsl_comp->desc_virt_addr; | ||
| 352 | ddf = fsl_comp->desc_virt_addr + 1; | ||
| 353 | |||
| 354 | memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE); | ||
| 355 | memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE); | ||
| 356 | /* Head Command Descriptor(Frame Descriptor) */ | ||
| 357 | qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16); | ||
| 358 | qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf)); | ||
| 359 | qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf)); | ||
| 360 | /* Status notification is enqueued to status queue. */ | ||
| 361 | /* Compound Command Descriptor(Frame List Table) */ | ||
| 362 | qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr); | ||
| 363 | /* It must be 32 as Compound S/G Descriptor */ | ||
| 364 | qdma_csgf_set_len(csgf_desc, 32); | ||
| 365 | qdma_desc_addr_set64(csgf_src, src); | ||
| 366 | qdma_csgf_set_len(csgf_src, len); | ||
| 367 | qdma_desc_addr_set64(csgf_dest, dst); | ||
| 368 | qdma_csgf_set_len(csgf_dest, len); | ||
| 369 | /* This entry is the last entry. */ | ||
| 370 | qdma_csgf_set_f(csgf_dest, len); | ||
| 371 | /* Descriptor Buffer */ | ||
| 372 | sdf->data = | ||
| 373 | cpu_to_le64(FSL_QDMA_CMD_RWTTYPE << | ||
| 374 | FSL_QDMA_CMD_RWTTYPE_OFFSET); | ||
| 375 | ddf->data = | ||
| 376 | cpu_to_le64(FSL_QDMA_CMD_RWTTYPE << | ||
| 377 | FSL_QDMA_CMD_RWTTYPE_OFFSET); | ||
| 378 | ddf->data |= | ||
| 379 | cpu_to_le64(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); | ||
| 380 | } | ||
| 381 | |||
| 382 | /* | ||
| 383 | * Pre-request full command descriptor for enqueue. | ||
| 384 | */ | ||
| 385 | static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue) | ||
| 386 | { | ||
| 387 | int i; | ||
| 388 | struct fsl_qdma_comp *comp_temp, *_comp_temp; | ||
| 389 | |||
| 390 | for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLLOW; i++) { | ||
| 391 | comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL); | ||
| 392 | if (!comp_temp) | ||
| 393 | goto err_alloc; | ||
| 394 | comp_temp->virt_addr = | ||
| 395 | dma_pool_alloc(queue->comp_pool, GFP_KERNEL, | ||
| 396 | &comp_temp->bus_addr); | ||
| 397 | if (!comp_temp->virt_addr) | ||
| 398 | goto err_dma_alloc; | ||
| 399 | |||
| 400 | comp_temp->desc_virt_addr = | ||
| 401 | dma_pool_alloc(queue->desc_pool, GFP_KERNEL, | ||
| 402 | &comp_temp->desc_bus_addr); | ||
| 403 | if (!comp_temp->desc_virt_addr) | ||
| 404 | goto err_desc_dma_alloc; | ||
| 405 | |||
| 406 | list_add_tail(&comp_temp->list, &queue->comp_free); | ||
| 407 | } | ||
| 408 | |||
| 409 | return 0; | ||
| 410 | |||
| 411 | err_desc_dma_alloc: | ||
| 412 | dma_pool_free(queue->comp_pool, comp_temp->virt_addr, | ||
| 413 | comp_temp->bus_addr); | ||
| 414 | |||
| 415 | err_dma_alloc: | ||
| 416 | kfree(comp_temp); | ||
| 417 | |||
| 418 | err_alloc: | ||
| 419 | list_for_each_entry_safe(comp_temp, _comp_temp, | ||
| 420 | &queue->comp_free, list) { | ||
| 421 | if (comp_temp->virt_addr) | ||
| 422 | dma_pool_free(queue->comp_pool, | ||
| 423 | comp_temp->virt_addr, | ||
| 424 | comp_temp->bus_addr); | ||
| 425 | if (comp_temp->desc_virt_addr) | ||
| 426 | dma_pool_free(queue->desc_pool, | ||
| 427 | comp_temp->desc_virt_addr, | ||
| 428 | comp_temp->desc_bus_addr); | ||
| 429 | |||
| 430 | list_del(&comp_temp->list); | ||
| 431 | kfree(comp_temp); | ||
| 432 | } | ||
| 433 | |||
| 434 | return -ENOMEM; | ||
| 435 | } | ||
| 436 | |||
| 437 | /* | ||
| 438 | * Request a command descriptor for enqueue. | ||
| 439 | */ | ||
| 440 | static struct fsl_qdma_comp | ||
| 441 | *fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan) | ||
| 442 | { | ||
| 443 | unsigned long flags; | ||
| 444 | struct fsl_qdma_comp *comp_temp; | ||
| 445 | int timeout = FSL_QDMA_COMP_TIMEOUT; | ||
| 446 | struct fsl_qdma_queue *queue = fsl_chan->queue; | ||
| 447 | |||
| 448 | while (timeout--) { | ||
| 449 | spin_lock_irqsave(&queue->queue_lock, flags); | ||
| 450 | if (!list_empty(&queue->comp_free)) { | ||
| 451 | comp_temp = list_first_entry(&queue->comp_free, | ||
| 452 | struct fsl_qdma_comp, | ||
| 453 | list); | ||
| 454 | list_del(&comp_temp->list); | ||
| 455 | |||
| 456 | spin_unlock_irqrestore(&queue->queue_lock, flags); | ||
| 457 | comp_temp->qchan = fsl_chan; | ||
| 458 | return comp_temp; | ||
| 459 | } | ||
| 460 | spin_unlock_irqrestore(&queue->queue_lock, flags); | ||
| 461 | udelay(1); | ||
| 462 | } | ||
| 463 | |||
| 464 | return NULL; | ||
| 465 | } | ||
| 466 | |||
| 467 | static struct fsl_qdma_queue | ||
| 468 | *fsl_qdma_alloc_queue_resources(struct platform_device *pdev, | ||
| 469 | struct fsl_qdma_engine *fsl_qdma) | ||
| 470 | { | ||
| 471 | int ret, len, i, j; | ||
| 472 | int queue_num, block_number; | ||
| 473 | unsigned int queue_size[FSL_QDMA_QUEUE_MAX]; | ||
| 474 | struct fsl_qdma_queue *queue_head, *queue_temp; | ||
| 475 | |||
| 476 | queue_num = fsl_qdma->n_queues; | ||
| 477 | block_number = fsl_qdma->block_number; | ||
| 478 | |||
| 479 | if (queue_num > FSL_QDMA_QUEUE_MAX) | ||
| 480 | queue_num = FSL_QDMA_QUEUE_MAX; | ||
| 481 | len = sizeof(*queue_head) * queue_num * block_number; | ||
| 482 | queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); | ||
| 483 | if (!queue_head) | ||
| 484 | return NULL; | ||
| 485 | |||
| 486 | ret = device_property_read_u32_array(&pdev->dev, "queue-sizes", | ||
| 487 | queue_size, queue_num); | ||
| 488 | if (ret) { | ||
| 489 | dev_err(&pdev->dev, "Can't get queue-sizes.\n"); | ||
| 490 | return NULL; | ||
| 491 | } | ||
| 492 | for (j = 0; j < block_number; j++) { | ||
| 493 | for (i = 0; i < queue_num; i++) { | ||
| 494 | if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX || | ||
| 495 | queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) { | ||
| 496 | dev_err(&pdev->dev, | ||
| 497 | "Get wrong queue-sizes.\n"); | ||
| 498 | return NULL; | ||
| 499 | } | ||
| 500 | queue_temp = queue_head + i + (j * queue_num); | ||
| 501 | |||
| 502 | queue_temp->cq = | ||
| 503 | dma_alloc_coherent(&pdev->dev, | ||
| 504 | sizeof(struct fsl_qdma_format) * | ||
| 505 | queue_size[i], | ||
| 506 | &queue_temp->bus_addr, | ||
| 507 | GFP_KERNEL); | ||
| 508 | if (!queue_temp->cq) | ||
| 509 | return NULL; | ||
| 510 | queue_temp->block_base = fsl_qdma->block_base + | ||
| 511 | FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); | ||
| 512 | queue_temp->n_cq = queue_size[i]; | ||
| 513 | queue_temp->id = i; | ||
| 514 | queue_temp->virt_head = queue_temp->cq; | ||
| 515 | queue_temp->virt_tail = queue_temp->cq; | ||
| 516 | /* | ||
| 517 | * List for queue command buffer | ||
| 518 | */ | ||
| 519 | INIT_LIST_HEAD(&queue_temp->comp_used); | ||
| 520 | spin_lock_init(&queue_temp->queue_lock); | ||
| 521 | } | ||
| 522 | } | ||
| 523 | return queue_head; | ||
| 524 | } | ||
| 525 | |||
| 526 | static struct fsl_qdma_queue | ||
| 527 | *fsl_qdma_prep_status_queue(struct platform_device *pdev) | ||
| 528 | { | ||
| 529 | int ret; | ||
| 530 | unsigned int status_size; | ||
| 531 | struct fsl_qdma_queue *status_head; | ||
| 532 | struct device_node *np = pdev->dev.of_node; | ||
| 533 | |||
| 534 | ret = of_property_read_u32(np, "status-sizes", &status_size); | ||
| 535 | if (ret) { | ||
| 536 | dev_err(&pdev->dev, "Can't get status-sizes.\n"); | ||
| 537 | return NULL; | ||
| 538 | } | ||
| 539 | if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX || | ||
| 540 | status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) { | ||
| 541 | dev_err(&pdev->dev, "Get wrong status_size.\n"); | ||
| 542 | return NULL; | ||
| 543 | } | ||
| 544 | status_head = devm_kzalloc(&pdev->dev, | ||
| 545 | sizeof(*status_head), GFP_KERNEL); | ||
| 546 | if (!status_head) | ||
| 547 | return NULL; | ||
| 548 | |||
| 549 | /* | ||
| 550 | * Buffer for queue command | ||
| 551 | */ | ||
| 552 | status_head->cq = dma_alloc_coherent(&pdev->dev, | ||
| 553 | sizeof(struct fsl_qdma_format) * | ||
| 554 | status_size, | ||
| 555 | &status_head->bus_addr, | ||
| 556 | GFP_KERNEL); | ||
| 557 | if (!status_head->cq) { | ||
| 558 | devm_kfree(&pdev->dev, status_head); | ||
| 559 | return NULL; | ||
| 560 | } | ||
| 561 | status_head->n_cq = status_size; | ||
| 562 | status_head->virt_head = status_head->cq; | ||
| 563 | status_head->virt_tail = status_head->cq; | ||
| 564 | status_head->comp_pool = NULL; | ||
| 565 | |||
| 566 | return status_head; | ||
| 567 | } | ||
| 568 | |||
| 569 | static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma) | ||
| 570 | { | ||
| 571 | u32 reg; | ||
| 572 | int i, j, count = FSL_QDMA_HALT_COUNT; | ||
| 573 | void __iomem *block, *ctrl = fsl_qdma->ctrl_base; | ||
| 574 | |||
| 575 | /* Disable the command queue and wait for idle state. */ | ||
| 576 | reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); | ||
| 577 | reg |= FSL_QDMA_DMR_DQD; | ||
| 578 | qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); | ||
| 579 | for (j = 0; j < fsl_qdma->block_number; j++) { | ||
| 580 | block = fsl_qdma->block_base + | ||
| 581 | FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); | ||
| 582 | for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++) | ||
| 583 | qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i)); | ||
| 584 | } | ||
| 585 | while (1) { | ||
| 586 | reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR); | ||
| 587 | if (!(reg & FSL_QDMA_DSR_DB)) | ||
| 588 | break; | ||
| 589 | if (count-- < 0) | ||
| 590 | return -EBUSY; | ||
| 591 | udelay(100); | ||
| 592 | } | ||
| 593 | |||
| 594 | for (j = 0; j < fsl_qdma->block_number; j++) { | ||
| 595 | block = fsl_qdma->block_base + | ||
| 596 | FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); | ||
| 597 | |||
| 598 | /* Disable status queue. */ | ||
| 599 | qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR); | ||
| 600 | |||
| 601 | /* | ||
| 602 | * clear the command queue interrupt detect register for | ||
| 603 | * all queues. | ||
| 604 | */ | ||
| 605 | qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR, | ||
| 606 | block + FSL_QDMA_BCQIDR(0)); | ||
| 607 | } | ||
| 608 | |||
| 609 | return 0; | ||
| 610 | } | ||
| 611 | |||
| 612 | static int | ||
| 613 | fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma, | ||
| 614 | void *block, | ||
| 615 | int id) | ||
| 616 | { | ||
| 617 | bool duplicate; | ||
| 618 | u32 reg, i, count; | ||
| 619 | struct fsl_qdma_queue *temp_queue; | ||
| 620 | struct fsl_qdma_format *status_addr; | ||
| 621 | struct fsl_qdma_comp *fsl_comp = NULL; | ||
| 622 | struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue; | ||
| 623 | struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id]; | ||
| 624 | |||
| 625 | count = FSL_QDMA_MAX_SIZE; | ||
| 626 | |||
| 627 | while (count--) { | ||
| 628 | duplicate = 0; | ||
| 629 | reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR); | ||
| 630 | if (reg & FSL_QDMA_BSQSR_QE) | ||
| 631 | return 0; | ||
| 632 | |||
| 633 | status_addr = fsl_status->virt_head; | ||
| 634 | |||
| 635 | if (qdma_ccdf_get_queue(status_addr) == | ||
| 636 | __this_cpu_read(pre.queue) && | ||
| 637 | qdma_ccdf_addr_get64(status_addr) == | ||
| 638 | __this_cpu_read(pre.addr)) | ||
| 639 | duplicate = 1; | ||
| 640 | i = qdma_ccdf_get_queue(status_addr) + | ||
| 641 | id * fsl_qdma->n_queues; | ||
| 642 | __this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr)); | ||
| 643 | __this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr)); | ||
| 644 | temp_queue = fsl_queue + i; | ||
| 645 | |||
| 646 | spin_lock(&temp_queue->queue_lock); | ||
| 647 | if (list_empty(&temp_queue->comp_used)) { | ||
| 648 | if (!duplicate) { | ||
| 649 | spin_unlock(&temp_queue->queue_lock); | ||
| 650 | return -EAGAIN; | ||
| 651 | } | ||
| 652 | } else { | ||
| 653 | fsl_comp = list_first_entry(&temp_queue->comp_used, | ||
| 654 | struct fsl_qdma_comp, list); | ||
| 655 | if (fsl_comp->bus_addr + 16 != | ||
| 656 | __this_cpu_read(pre.addr)) { | ||
| 657 | if (!duplicate) { | ||
| 658 | spin_unlock(&temp_queue->queue_lock); | ||
| 659 | return -EAGAIN; | ||
| 660 | } | ||
| 661 | } | ||
| 662 | } | ||
| 663 | |||
| 664 | if (duplicate) { | ||
| 665 | reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); | ||
| 666 | reg |= FSL_QDMA_BSQMR_DI; | ||
| 667 | qdma_desc_addr_set64(status_addr, 0x0); | ||
| 668 | fsl_status->virt_head++; | ||
| 669 | if (fsl_status->virt_head == fsl_status->cq | ||
| 670 | + fsl_status->n_cq) | ||
| 671 | fsl_status->virt_head = fsl_status->cq; | ||
| 672 | qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); | ||
| 673 | spin_unlock(&temp_queue->queue_lock); | ||
| 674 | continue; | ||
| 675 | } | ||
| 676 | list_del(&fsl_comp->list); | ||
| 677 | |||
| 678 | reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); | ||
| 679 | reg |= FSL_QDMA_BSQMR_DI; | ||
| 680 | qdma_desc_addr_set64(status_addr, 0x0); | ||
| 681 | fsl_status->virt_head++; | ||
| 682 | if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq) | ||
| 683 | fsl_status->virt_head = fsl_status->cq; | ||
| 684 | qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); | ||
| 685 | spin_unlock(&temp_queue->queue_lock); | ||
| 686 | |||
| 687 | spin_lock(&fsl_comp->qchan->vchan.lock); | ||
| 688 | vchan_cookie_complete(&fsl_comp->vdesc); | ||
| 689 | fsl_comp->qchan->status = DMA_COMPLETE; | ||
| 690 | spin_unlock(&fsl_comp->qchan->vchan.lock); | ||
| 691 | } | ||
| 692 | |||
| 693 | return 0; | ||
| 694 | } | ||
| 695 | |||
| 696 | static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id) | ||
| 697 | { | ||
| 698 | unsigned int intr; | ||
| 699 | struct fsl_qdma_engine *fsl_qdma = dev_id; | ||
| 700 | void __iomem *status = fsl_qdma->status_base; | ||
| 701 | |||
| 702 | intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR); | ||
| 703 | |||
| 704 | if (intr) { | ||
| 705 | dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n"); | ||
| 706 | return IRQ_NONE; | ||
| 707 | } | ||
| 708 | |||
| 709 | qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR); | ||
| 710 | return IRQ_HANDLED; | ||
| 711 | } | ||
| 712 | |||
| 713 | static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id) | ||
| 714 | { | ||
| 715 | int id; | ||
| 716 | unsigned int intr, reg; | ||
| 717 | struct fsl_qdma_engine *fsl_qdma = dev_id; | ||
| 718 | void __iomem *block, *ctrl = fsl_qdma->ctrl_base; | ||
| 719 | |||
| 720 | id = irq - fsl_qdma->irq_base; | ||
| 721 | if (id < 0 || id >= fsl_qdma->block_number) { | ||
| 722 | dev_err(fsl_qdma->dma_dev.dev, | ||
| 723 | "irq %d is wrong, irq_base is %d\n", | ||
| 724 | irq, fsl_qdma->irq_base); | ||
| 725 | } | ||
| 726 | |||
| 727 | block = fsl_qdma->block_base + | ||
| 728 | FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id); | ||
| 729 | |||
| 730 | intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0)); | ||
| 731 | |||
| 732 | if ((intr & FSL_QDMA_CQIDR_SQT) != 0) | ||
| 733 | intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id); | ||
| 734 | |||
| 735 | if (intr != 0) { | ||
| 736 | reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); | ||
| 737 | reg |= FSL_QDMA_DMR_DQD; | ||
| 738 | qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); | ||
| 739 | qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0)); | ||
| 740 | dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n"); | ||
| 741 | } | ||
| 742 | |||
| 743 | /* Clear all detected events and interrupts. */ | ||
| 744 | qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR, | ||
| 745 | block + FSL_QDMA_BCQIDR(0)); | ||
| 746 | |||
| 747 | return IRQ_HANDLED; | ||
| 748 | } | ||
| 749 | |||
| 750 | static int | ||
| 751 | fsl_qdma_irq_init(struct platform_device *pdev, | ||
| 752 | struct fsl_qdma_engine *fsl_qdma) | ||
| 753 | { | ||
| 754 | int i; | ||
| 755 | int cpu; | ||
| 756 | int ret; | ||
| 757 | char irq_name[20]; | ||
| 758 | |||
| 759 | fsl_qdma->error_irq = | ||
| 760 | platform_get_irq_byname(pdev, "qdma-error"); | ||
| 761 | if (fsl_qdma->error_irq < 0) { | ||
| 762 | dev_err(&pdev->dev, "Can't get qdma controller irq.\n"); | ||
| 763 | return fsl_qdma->error_irq; | ||
| 764 | } | ||
| 765 | |||
| 766 | ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq, | ||
| 767 | fsl_qdma_error_handler, 0, | ||
| 768 | "qDMA error", fsl_qdma); | ||
| 769 | if (ret) { | ||
| 770 | dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n"); | ||
| 771 | return ret; | ||
| 772 | } | ||
| 773 | |||
| 774 | for (i = 0; i < fsl_qdma->block_number; i++) { | ||
| 775 | sprintf(irq_name, "qdma-queue%d", i); | ||
| 776 | fsl_qdma->queue_irq[i] = | ||
| 777 | platform_get_irq_byname(pdev, irq_name); | ||
| 778 | |||
| 779 | if (fsl_qdma->queue_irq[i] < 0) { | ||
| 780 | dev_err(&pdev->dev, | ||
| 781 | "Can't get qdma queue %d irq.\n", i); | ||
| 782 | return fsl_qdma->queue_irq[i]; | ||
| 783 | } | ||
| 784 | |||
| 785 | ret = devm_request_irq(&pdev->dev, | ||
| 786 | fsl_qdma->queue_irq[i], | ||
| 787 | fsl_qdma_queue_handler, | ||
| 788 | 0, | ||
| 789 | "qDMA queue", | ||
| 790 | fsl_qdma); | ||
| 791 | if (ret) { | ||
| 792 | dev_err(&pdev->dev, | ||
| 793 | "Can't register qDMA queue IRQ.\n"); | ||
| 794 | return ret; | ||
| 795 | } | ||
| 796 | |||
| 797 | cpu = i % num_online_cpus(); | ||
| 798 | ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i], | ||
| 799 | get_cpu_mask(cpu)); | ||
| 800 | if (ret) { | ||
| 801 | dev_err(&pdev->dev, | ||
| 802 | "Can't set cpu %d affinity to IRQ %d.\n", | ||
| 803 | cpu, | ||
| 804 | fsl_qdma->queue_irq[i]); | ||
| 805 | return ret; | ||
| 806 | } | ||
| 807 | } | ||
| 808 | |||
| 809 | return 0; | ||
| 810 | } | ||
| 811 | |||
| 812 | static void fsl_qdma_irq_exit(struct platform_device *pdev, | ||
| 813 | struct fsl_qdma_engine *fsl_qdma) | ||
| 814 | { | ||
| 815 | int i; | ||
| 816 | |||
| 817 | devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma); | ||
| 818 | for (i = 0; i < fsl_qdma->block_number; i++) | ||
| 819 | devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma); | ||
| 820 | } | ||
| 821 | |||
| 822 | static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma) | ||
| 823 | { | ||
| 824 | u32 reg; | ||
| 825 | int i, j, ret; | ||
| 826 | struct fsl_qdma_queue *temp; | ||
| 827 | void __iomem *status = fsl_qdma->status_base; | ||
| 828 | void __iomem *block, *ctrl = fsl_qdma->ctrl_base; | ||
| 829 | struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue; | ||
| 830 | |||
| 831 | /* Try to halt the qDMA engine first. */ | ||
| 832 | ret = fsl_qdma_halt(fsl_qdma); | ||
| 833 | if (ret) { | ||
| 834 | dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!"); | ||
| 835 | return ret; | ||
| 836 | } | ||
| 837 | |||
| 838 | for (i = 0; i < fsl_qdma->block_number; i++) { | ||
| 839 | /* | ||
| 840 | * Clear the command queue interrupt detect register for | ||
| 841 | * all queues. | ||
| 842 | */ | ||
| 843 | |||
| 844 | block = fsl_qdma->block_base + | ||
| 845 | FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i); | ||
| 846 | qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR, | ||
| 847 | block + FSL_QDMA_BCQIDR(0)); | ||
| 848 | } | ||
| 849 | |||
| 850 | for (j = 0; j < fsl_qdma->block_number; j++) { | ||
| 851 | block = fsl_qdma->block_base + | ||
| 852 | FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j); | ||
| 853 | for (i = 0; i < fsl_qdma->n_queues; i++) { | ||
| 854 | temp = fsl_queue + i + (j * fsl_qdma->n_queues); | ||
| 855 | /* | ||
| 856 | * Initialize Command Queue registers to | ||
| 857 | * point to the first | ||
| 858 | * command descriptor in memory. | ||
| 859 | * Dequeue Pointer Address Registers | ||
| 860 | * Enqueue Pointer Address Registers | ||
| 861 | */ | ||
| 862 | |||
| 863 | qdma_writel(fsl_qdma, temp->bus_addr, | ||
| 864 | block + FSL_QDMA_BCQDPA_SADDR(i)); | ||
| 865 | qdma_writel(fsl_qdma, temp->bus_addr, | ||
| 866 | block + FSL_QDMA_BCQEPA_SADDR(i)); | ||
| 867 | |||
| 868 | /* Initialize the queue mode. */ | ||
| 869 | reg = FSL_QDMA_BCQMR_EN; | ||
| 870 | reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4); | ||
| 871 | reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6); | ||
| 872 | qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i)); | ||
| 873 | } | ||
| 874 | |||
| 875 | /* | ||
| 876 | * Workaround for erratum: ERR010812. | ||
| 877 | * We must enable XOFF to avoid enqueue rejection. | ||
| 878 | * Setting SQCCMR ENTER_WM to 0x20. | ||
| 879 | */ | ||
| 880 | |||
| 881 | qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM, | ||
| 882 | block + FSL_QDMA_SQCCMR); | ||
| 883 | |||
| 884 | /* | ||
| 885 | * Initialize status queue registers to point to the first | ||
| 886 | * command descriptor in memory. | ||
| 887 | * Dequeue Pointer Address Registers | ||
| 888 | * Enqueue Pointer Address Registers | ||
| 889 | */ | ||
| 890 | |||
| 891 | qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr, | ||
| 892 | block + FSL_QDMA_SQEPAR); | ||
| 893 | qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr, | ||
| 894 | block + FSL_QDMA_SQDPAR); | ||
| 895 | /* Initialize status queue interrupt. */ | ||
| 896 | qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE, | ||
| 897 | block + FSL_QDMA_BCQIER(0)); | ||
| 898 | qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | | ||
| 899 | FSL_QDMA_BSQICR_ICST(5) | 0x8000, | ||
| 900 | block + FSL_QDMA_BSQICR); | ||
| 901 | qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | | ||
| 902 | FSL_QDMA_CQIER_TEIE, | ||
| 903 | block + FSL_QDMA_CQIER); | ||
| 904 | |||
| 905 | /* Initialize the status queue mode. */ | ||
| 906 | reg = FSL_QDMA_BSQMR_EN; | ||
| 907 | reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2 | ||
| 908 | (fsl_qdma->status[j]->n_cq) - 6); | ||
| 909 | |||
| 910 | qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR); | ||
| 911 | reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR); | ||
| 912 | } | ||
| 913 | |||
| 914 | /* Initialize controller interrupt register. */ | ||
| 915 | qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR); | ||
| 916 | qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER); | ||
| 917 | |||
| 918 | reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR); | ||
| 919 | reg &= ~FSL_QDMA_DMR_DQD; | ||
| 920 | qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR); | ||
| 921 | |||
| 922 | return 0; | ||
| 923 | } | ||
| 924 | |||
| 925 | static struct dma_async_tx_descriptor * | ||
| 926 | fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, | ||
| 927 | dma_addr_t src, size_t len, unsigned long flags) | ||
| 928 | { | ||
| 929 | struct fsl_qdma_comp *fsl_comp; | ||
| 930 | struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); | ||
| 931 | |||
| 932 | fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan); | ||
| 933 | |||
| 934 | if (!fsl_comp) | ||
| 935 | return NULL; | ||
| 936 | |||
| 937 | fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len); | ||
| 938 | |||
| 939 | return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags); | ||
| 940 | } | ||
| 941 | |||
| 942 | static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan) | ||
| 943 | { | ||
| 944 | u32 reg; | ||
| 945 | struct virt_dma_desc *vdesc; | ||
| 946 | struct fsl_qdma_comp *fsl_comp; | ||
| 947 | struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; | ||
| 948 | void __iomem *block = fsl_queue->block_base; | ||
| 949 | |||
| 950 | reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id)); | ||
| 951 | if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF)) | ||
| 952 | return; | ||
| 953 | vdesc = vchan_next_desc(&fsl_chan->vchan); | ||
| 954 | if (!vdesc) | ||
| 955 | return; | ||
| 956 | list_del(&vdesc->node); | ||
| 957 | fsl_comp = to_fsl_qdma_comp(vdesc); | ||
| 958 | |||
| 959 | memcpy(fsl_queue->virt_head++, | ||
| 960 | fsl_comp->virt_addr, sizeof(struct fsl_qdma_format)); | ||
| 961 | if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq) | ||
| 962 | fsl_queue->virt_head = fsl_queue->cq; | ||
| 963 | |||
| 964 | list_add_tail(&fsl_comp->list, &fsl_queue->comp_used); | ||
| 965 | barrier(); | ||
| 966 | reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id)); | ||
| 967 | reg |= FSL_QDMA_BCQMR_EI; | ||
| 968 | qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id)); | ||
| 969 | fsl_chan->status = DMA_IN_PROGRESS; | ||
| 970 | } | ||
| 971 | |||
| 972 | static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc) | ||
| 973 | { | ||
| 974 | unsigned long flags; | ||
| 975 | struct fsl_qdma_comp *fsl_comp; | ||
| 976 | struct fsl_qdma_queue *fsl_queue; | ||
| 977 | |||
| 978 | fsl_comp = to_fsl_qdma_comp(vdesc); | ||
| 979 | fsl_queue = fsl_comp->qchan->queue; | ||
| 980 | |||
| 981 | spin_lock_irqsave(&fsl_queue->queue_lock, flags); | ||
| 982 | list_add_tail(&fsl_comp->list, &fsl_queue->comp_free); | ||
| 983 | spin_unlock_irqrestore(&fsl_queue->queue_lock, flags); | ||
| 984 | } | ||
| 985 | |||
| 986 | static void fsl_qdma_issue_pending(struct dma_chan *chan) | ||
| 987 | { | ||
| 988 | unsigned long flags; | ||
| 989 | struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); | ||
| 990 | struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; | ||
| 991 | |||
| 992 | spin_lock_irqsave(&fsl_queue->queue_lock, flags); | ||
| 993 | spin_lock(&fsl_chan->vchan.lock); | ||
| 994 | if (vchan_issue_pending(&fsl_chan->vchan)) | ||
| 995 | fsl_qdma_enqueue_desc(fsl_chan); | ||
| 996 | spin_unlock(&fsl_chan->vchan.lock); | ||
| 997 | spin_unlock_irqrestore(&fsl_queue->queue_lock, flags); | ||
| 998 | } | ||
| 999 | |||
| 1000 | static void fsl_qdma_synchronize(struct dma_chan *chan) | ||
| 1001 | { | ||
| 1002 | struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); | ||
| 1003 | |||
| 1004 | vchan_synchronize(&fsl_chan->vchan); | ||
| 1005 | } | ||
| 1006 | |||
| 1007 | static int fsl_qdma_terminate_all(struct dma_chan *chan) | ||
| 1008 | { | ||
| 1009 | LIST_HEAD(head); | ||
| 1010 | unsigned long flags; | ||
| 1011 | struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); | ||
| 1012 | |||
| 1013 | spin_lock_irqsave(&fsl_chan->vchan.lock, flags); | ||
| 1014 | vchan_get_all_descriptors(&fsl_chan->vchan, &head); | ||
| 1015 | spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); | ||
| 1016 | vchan_dma_desc_free_list(&fsl_chan->vchan, &head); | ||
| 1017 | return 0; | ||
| 1018 | } | ||
| 1019 | |||
| 1020 | static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan) | ||
| 1021 | { | ||
| 1022 | int ret; | ||
| 1023 | struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan); | ||
| 1024 | struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma; | ||
| 1025 | struct fsl_qdma_queue *fsl_queue = fsl_chan->queue; | ||
| 1026 | |||
| 1027 | if (fsl_queue->comp_pool && fsl_queue->desc_pool) | ||
| 1028 | return fsl_qdma->desc_allocated; | ||
| 1029 | |||
| 1030 | INIT_LIST_HEAD(&fsl_queue->comp_free); | ||
| 1031 | |||
| 1032 | /* | ||
| 1033 | * The DMA pool for the queue command buffers | ||
| 1034 | */ | ||
| 1035 | fsl_queue->comp_pool = | ||
| 1036 | dma_pool_create("comp_pool", | ||
| 1037 | chan->device->dev, | ||
| 1038 | FSL_QDMA_COMMAND_BUFFER_SIZE, | ||
| 1039 | 64, 0); | ||
| 1040 | if (!fsl_queue->comp_pool) | ||
| 1041 | return -ENOMEM; | ||
| 1042 | |||
| 1043 | /* | ||
| 1044 | * The DMA pool for the descriptor (SD/DD) buffers | ||
| 1045 | */ | ||
| 1046 | fsl_queue->desc_pool = | ||
| 1047 | dma_pool_create("desc_pool", | ||
| 1048 | chan->device->dev, | ||
| 1049 | FSL_QDMA_DESCRIPTOR_BUFFER_SIZE, | ||
| 1050 | 32, 0); | ||
| 1051 | if (!fsl_queue->desc_pool) | ||
| 1052 | goto err_desc_pool; | ||
| 1053 | |||
| 1054 | ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue); | ||
| 1055 | if (ret) { | ||
| 1056 | dev_err(chan->device->dev, | ||
| 1057 | "failed to alloc dma buffer for S/G descriptor\n"); | ||
| 1058 | goto err_mem; | ||
| 1059 | } | ||
| 1060 | |||
| 1061 | fsl_qdma->desc_allocated++; | ||
| 1062 | return fsl_qdma->desc_allocated; | ||
| 1063 | |||
| 1064 | err_mem: | ||
| 1065 | dma_pool_destroy(fsl_queue->desc_pool); | ||
| 1066 | err_desc_pool: | ||
| 1067 | dma_pool_destroy(fsl_queue->comp_pool); | ||
| 1068 | return -ENOMEM; | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | static int fsl_qdma_probe(struct platform_device *pdev) | ||
| 1072 | { | ||
| 1073 | int ret, i; | ||
| 1074 | int blk_num, blk_off; | ||
| 1075 | u32 len, chans, queues; | ||
| 1076 | struct resource *res; | ||
| 1077 | struct fsl_qdma_chan *fsl_chan; | ||
| 1078 | struct fsl_qdma_engine *fsl_qdma; | ||
| 1079 | struct device_node *np = pdev->dev.of_node; | ||
| 1080 | |||
| 1081 | ret = of_property_read_u32(np, "dma-channels", &chans); | ||
| 1082 | if (ret) { | ||
| 1083 | dev_err(&pdev->dev, "Can't get dma-channels.\n"); | ||
| 1084 | return ret; | ||
| 1085 | } | ||
| 1086 | |||
| 1087 | ret = of_property_read_u32(np, "block-offset", &blk_off); | ||
| 1088 | if (ret) { | ||
| 1089 | dev_err(&pdev->dev, "Can't get block-offset.\n"); | ||
| 1090 | return ret; | ||
| 1091 | } | ||
| 1092 | |||
| 1093 | ret = of_property_read_u32(np, "block-number", &blk_num); | ||
| 1094 | if (ret) { | ||
| 1095 | dev_err(&pdev->dev, "Can't get block-number.\n"); | ||
| 1096 | return ret; | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | blk_num = min_t(int, blk_num, num_online_cpus()); | ||
| 1100 | |||
| 1101 | len = sizeof(*fsl_qdma); | ||
| 1102 | fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); | ||
| 1103 | if (!fsl_qdma) | ||
| 1104 | return -ENOMEM; | ||
| 1105 | |||
| 1106 | len = sizeof(*fsl_chan) * chans; | ||
| 1107 | fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); | ||
| 1108 | if (!fsl_qdma->chans) | ||
| 1109 | return -ENOMEM; | ||
| 1110 | |||
| 1111 | len = sizeof(struct fsl_qdma_queue *) * blk_num; | ||
| 1112 | fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); | ||
| 1113 | if (!fsl_qdma->status) | ||
| 1114 | return -ENOMEM; | ||
| 1115 | |||
| 1116 | len = sizeof(int) * blk_num; | ||
| 1117 | fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); | ||
| 1118 | if (!fsl_qdma->queue_irq) | ||
| 1119 | return -ENOMEM; | ||
| 1120 | |||
| 1121 | ret = of_property_read_u32(np, "fsl,dma-queues", &queues); | ||
| 1122 | if (ret) { | ||
| 1123 | dev_err(&pdev->dev, "Can't get queues.\n"); | ||
| 1124 | return ret; | ||
| 1125 | } | ||
| 1126 | |||
| 1127 | fsl_qdma->desc_allocated = 0; | ||
| 1128 | fsl_qdma->n_chans = chans; | ||
| 1129 | fsl_qdma->n_queues = queues; | ||
| 1130 | fsl_qdma->block_number = blk_num; | ||
| 1131 | fsl_qdma->block_offset = blk_off; | ||
| 1132 | |||
| 1133 | mutex_init(&fsl_qdma->fsl_qdma_mutex); | ||
| 1134 | |||
| 1135 | for (i = 0; i < fsl_qdma->block_number; i++) { | ||
| 1136 | fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev); | ||
| 1137 | if (!fsl_qdma->status[i]) | ||
| 1138 | return -ENOMEM; | ||
| 1139 | } | ||
| 1140 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1141 | fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res); | ||
| 1142 | if (IS_ERR(fsl_qdma->ctrl_base)) | ||
| 1143 | return PTR_ERR(fsl_qdma->ctrl_base); | ||
| 1144 | |||
| 1145 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 1146 | fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res); | ||
| 1147 | if (IS_ERR(fsl_qdma->status_base)) | ||
| 1148 | return PTR_ERR(fsl_qdma->status_base); | ||
| 1149 | |||
| 1150 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
| 1151 | fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res); | ||
| 1152 | if (IS_ERR(fsl_qdma->block_base)) | ||
| 1153 | return PTR_ERR(fsl_qdma->block_base); | ||
| 1154 | fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma); | ||
| 1155 | if (!fsl_qdma->queue) | ||
| 1156 | return -ENOMEM; | ||
| 1157 | |||
| 1158 | ret = fsl_qdma_irq_init(pdev, fsl_qdma); | ||
| 1159 | if (ret) | ||
| 1160 | return ret; | ||
| 1161 | |||
| 1162 | fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0"); | ||
| 1163 | fsl_qdma->feature = of_property_read_bool(np, "big-endian"); | ||
| 1164 | INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels); | ||
| 1165 | |||
| 1166 | for (i = 0; i < fsl_qdma->n_chans; i++) { | ||
| 1167 | struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i]; | ||
| 1168 | |||
| 1169 | fsl_chan->qdma = fsl_qdma; | ||
| 1170 | fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues * | ||
| 1171 | fsl_qdma->block_number); | ||
| 1172 | fsl_chan->vchan.desc_free = fsl_qdma_free_desc; | ||
| 1173 | vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev); | ||
| 1174 | } | ||
| 1175 | |||
| 1176 | dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask); | ||
| 1177 | |||
| 1178 | fsl_qdma->dma_dev.dev = &pdev->dev; | ||
| 1179 | fsl_qdma->dma_dev.device_free_chan_resources = | ||
| 1180 | fsl_qdma_free_chan_resources; | ||
| 1181 | fsl_qdma->dma_dev.device_alloc_chan_resources = | ||
| 1182 | fsl_qdma_alloc_chan_resources; | ||
| 1183 | fsl_qdma->dma_dev.device_tx_status = dma_cookie_status; | ||
| 1184 | fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy; | ||
| 1185 | fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending; | ||
| 1186 | fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize; | ||
| 1187 | fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all; | ||
| 1188 | |||
| 1189 | dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); | ||
| 1190 | |||
| 1191 | platform_set_drvdata(pdev, fsl_qdma); | ||
| 1192 | |||
| 1193 | ret = dma_async_device_register(&fsl_qdma->dma_dev); | ||
| 1194 | if (ret) { | ||
| 1195 | dev_err(&pdev->dev, | ||
| 1196 | "Can't register NXP Layerscape qDMA engine.\n"); | ||
| 1197 | return ret; | ||
| 1198 | } | ||
| 1199 | |||
| 1200 | ret = fsl_qdma_reg_init(fsl_qdma); | ||
| 1201 | if (ret) { | ||
| 1202 | dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n"); | ||
| 1203 | return ret; | ||
| 1204 | } | ||
| 1205 | |||
| 1206 | return 0; | ||
| 1207 | } | ||
| 1208 | |||
| 1209 | static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev) | ||
| 1210 | { | ||
| 1211 | struct fsl_qdma_chan *chan, *_chan; | ||
| 1212 | |||
| 1213 | list_for_each_entry_safe(chan, _chan, | ||
| 1214 | &dmadev->channels, vchan.chan.device_node) { | ||
| 1215 | list_del(&chan->vchan.chan.device_node); | ||
| 1216 | tasklet_kill(&chan->vchan.task); | ||
| 1217 | } | ||
| 1218 | } | ||
| 1219 | |||
| 1220 | static int fsl_qdma_remove(struct platform_device *pdev) | ||
| 1221 | { | ||
| 1222 | int i; | ||
| 1223 | struct fsl_qdma_queue *status; | ||
| 1224 | struct device_node *np = pdev->dev.of_node; | ||
| 1225 | struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev); | ||
| 1226 | |||
| 1227 | fsl_qdma_irq_exit(pdev, fsl_qdma); | ||
| 1228 | fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev); | ||
| 1229 | of_dma_controller_free(np); | ||
| 1230 | dma_async_device_unregister(&fsl_qdma->dma_dev); | ||
| 1231 | |||
| 1232 | for (i = 0; i < fsl_qdma->block_number; i++) { | ||
| 1233 | status = fsl_qdma->status[i]; | ||
| 1234 | dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) * | ||
| 1235 | status->n_cq, status->cq, status->bus_addr); | ||
| 1236 | } | ||
| 1237 | return 0; | ||
| 1238 | } | ||
| 1239 | |||
| 1240 | static const struct of_device_id fsl_qdma_dt_ids[] = { | ||
| 1241 | { .compatible = "fsl,ls1021a-qdma", }, | ||
| 1242 | { /* sentinel */ } | ||
| 1243 | }; | ||
| 1244 | MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids); | ||
| 1245 | |||
| 1246 | static struct platform_driver fsl_qdma_driver = { | ||
| 1247 | .driver = { | ||
| 1248 | .name = "fsl-qdma", | ||
| 1249 | .of_match_table = fsl_qdma_dt_ids, | ||
| 1250 | }, | ||
| 1251 | .probe = fsl_qdma_probe, | ||
| 1252 | .remove = fsl_qdma_remove, | ||
| 1253 | }; | ||
| 1254 | |||
| 1255 | module_platform_driver(fsl_qdma_driver); | ||
| 1256 | |||
| 1257 | MODULE_ALIAS("platform:fsl-qdma"); | ||
| 1258 | MODULE_LICENSE("GPL v2"); | ||
| 1259 | MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver"); | ||
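The new fsl-qdma driver registers only the DMA_MEMCPY capability, so clients reach it through the generic dmaengine API rather than a driver-specific interface. Below is a minimal consumer sketch, assuming a caller that already holds DMA-mapped source and destination handles; the helper name and the polling-based completion are illustrative assumptions, not part of this patch.

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical helper: copy len bytes between two pre-mapped DMA
 * addresses using any channel that advertises DMA_MEMCPY (fsl-qdma
 * included).  A real consumer would normally install a completion
 * callback instead of polling. */
static int example_memcpy_via_dmaengine(dma_addr_t dst, dma_addr_t src,
					size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		ret = -EIO;
		goto out;
	}

	dma_async_issue_pending(chan);

	/* dma_sync_wait() polls until the cookie completes or times out. */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -ETIMEDOUT;

out:
	dma_release_channel(chan);
	return ret;
}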
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 9d360a3fbae3..1e38e6b94006 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
| @@ -53,42 +53,42 @@ static const char msg_ld_oom[] = "No free memory for link descriptor"; | |||
| 53 | 53 | ||
| 54 | static void set_sr(struct fsldma_chan *chan, u32 val) | 54 | static void set_sr(struct fsldma_chan *chan, u32 val) |
| 55 | { | 55 | { |
| 56 | DMA_OUT(chan, &chan->regs->sr, val, 32); | 56 | FSL_DMA_OUT(chan, &chan->regs->sr, val, 32); |
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | static u32 get_sr(struct fsldma_chan *chan) | 59 | static u32 get_sr(struct fsldma_chan *chan) |
| 60 | { | 60 | { |
| 61 | return DMA_IN(chan, &chan->regs->sr, 32); | 61 | return FSL_DMA_IN(chan, &chan->regs->sr, 32); |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | static void set_mr(struct fsldma_chan *chan, u32 val) | 64 | static void set_mr(struct fsldma_chan *chan, u32 val) |
| 65 | { | 65 | { |
| 66 | DMA_OUT(chan, &chan->regs->mr, val, 32); | 66 | FSL_DMA_OUT(chan, &chan->regs->mr, val, 32); |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | static u32 get_mr(struct fsldma_chan *chan) | 69 | static u32 get_mr(struct fsldma_chan *chan) |
| 70 | { | 70 | { |
| 71 | return DMA_IN(chan, &chan->regs->mr, 32); | 71 | return FSL_DMA_IN(chan, &chan->regs->mr, 32); |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) | 74 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) |
| 75 | { | 75 | { |
| 76 | DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); | 76 | FSL_DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | static dma_addr_t get_cdar(struct fsldma_chan *chan) | 79 | static dma_addr_t get_cdar(struct fsldma_chan *chan) |
| 80 | { | 80 | { |
| 81 | return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; | 81 | return FSL_DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | static void set_bcr(struct fsldma_chan *chan, u32 val) | 84 | static void set_bcr(struct fsldma_chan *chan, u32 val) |
| 85 | { | 85 | { |
| 86 | DMA_OUT(chan, &chan->regs->bcr, val, 32); | 86 | FSL_DMA_OUT(chan, &chan->regs->bcr, val, 32); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static u32 get_bcr(struct fsldma_chan *chan) | 89 | static u32 get_bcr(struct fsldma_chan *chan) |
| 90 | { | 90 | { |
| 91 | return DMA_IN(chan, &chan->regs->bcr, 32); | 91 | return FSL_DMA_IN(chan, &chan->regs->bcr, 32); |
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | /* | 94 | /* |
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 4787d485dd76..a9b12f82b5c3 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
| @@ -196,39 +196,67 @@ struct fsldma_chan { | |||
| 196 | #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) | 196 | #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) |
| 197 | #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) | 197 | #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) |
| 198 | 198 | ||
| 199 | #ifndef __powerpc64__ | 199 | #ifdef CONFIG_PPC |
| 200 | static u64 in_be64(const u64 __iomem *addr) | 200 | #define fsl_ioread32(p) in_le32(p) |
| 201 | #define fsl_ioread32be(p) in_be32(p) | ||
| 202 | #define fsl_iowrite32(v, p) out_le32(p, v) | ||
| 203 | #define fsl_iowrite32be(v, p) out_be32(p, v) | ||
| 204 | |||
| 205 | #ifdef __powerpc64__ | ||
| 206 | #define fsl_ioread64(p) in_le64(p) | ||
| 207 | #define fsl_ioread64be(p) in_be64(p) | ||
| 208 | #define fsl_iowrite64(v, p) out_le64(p, v) | ||
| 209 | #define fsl_iowrite64be(v, p) out_be64(p, v) | ||
| 210 | #else | ||
| 211 | static u64 fsl_ioread64(const u64 __iomem *addr) | ||
| 201 | { | 212 | { |
| 202 | return ((u64)in_be32((u32 __iomem *)addr) << 32) | | 213 | u32 fsl_addr = lower_32_bits(addr); |
| 203 | (in_be32((u32 __iomem *)addr + 1)); | 214 | u64 fsl_addr_hi = (u64)in_le32((u32 *)(fsl_addr + 1)) << 32; |
| 215 | |||
| 216 | return fsl_addr_hi | in_le32((u32 *)fsl_addr); | ||
| 204 | } | 217 | } |
| 205 | 218 | ||
| 206 | static void out_be64(u64 __iomem *addr, u64 val) | 219 | static void fsl_iowrite64(u64 val, u64 __iomem *addr) |
| 207 | { | 220 | { |
| 208 | out_be32((u32 __iomem *)addr, val >> 32); | 221 | out_le32((u32 __iomem *)addr + 1, val >> 32); |
| 209 | out_be32((u32 __iomem *)addr + 1, (u32)val); | 222 | out_le32((u32 __iomem *)addr, (u32)val); |
| 210 | } | 223 | } |
| 211 | 224 | ||
| 212 | /* There is no asm instructions for 64 bits reverse loads and stores */ | 225 | static u64 fsl_ioread64be(const u64 __iomem *addr) |
| 213 | static u64 in_le64(const u64 __iomem *addr) | ||
| 214 | { | 226 | { |
| 215 | return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) | | 227 | u32 fsl_addr = lower_32_bits(addr); |
| 216 | (in_le32((u32 __iomem *)addr)); | 228 | u64 fsl_addr_hi = (u64)in_be32((u32 *)fsl_addr) << 32; |
| 229 | |||
| 230 | return fsl_addr_hi | in_be32((u32 *)(fsl_addr + 1)); | ||
| 217 | } | 231 | } |
| 218 | 232 | ||
| 219 | static void out_le64(u64 __iomem *addr, u64 val) | 233 | static void fsl_iowrite64be(u64 val, u64 __iomem *addr) |
| 220 | { | 234 | { |
| 221 | out_le32((u32 __iomem *)addr + 1, val >> 32); | 235 | out_be32((u32 __iomem *)addr, val >> 32); |
| 222 | out_le32((u32 __iomem *)addr, (u32)val); | 236 | out_be32((u32 __iomem *)addr + 1, (u32)val); |
| 223 | } | 237 | } |
| 224 | #endif | 238 | #endif |
| 239 | #endif | ||
| 225 | 240 | ||
| 226 | #define DMA_IN(fsl_chan, addr, width) \ | 241 | #if defined(CONFIG_ARM64) || defined(CONFIG_ARM) |
| 227 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | 242 | #define fsl_ioread32(p) ioread32(p) |
| 228 | in_be##width(addr) : in_le##width(addr)) | 243 | #define fsl_ioread32be(p) ioread32be(p) |
| 229 | #define DMA_OUT(fsl_chan, addr, val, width) \ | 244 | #define fsl_iowrite32(v, p) iowrite32(v, p) |
| 230 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | 245 | #define fsl_iowrite32be(v, p) iowrite32be(v, p) |
| 231 | out_be##width(addr, val) : out_le##width(addr, val)) | 246 | #define fsl_ioread64(p) ioread64(p) |
| 247 | #define fsl_ioread64be(p) ioread64be(p) | ||
| 248 | #define fsl_iowrite64(v, p) iowrite64(v, p) | ||
| 249 | #define fsl_iowrite64be(v, p) iowrite64be(v, p) | ||
| 250 | #endif | ||
| 251 | |||
| 252 | #define FSL_DMA_IN(fsl_dma, addr, width) \ | ||
| 253 | (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
| 254 | fsl_ioread##width##be(addr) : fsl_ioread##width(addr)) | ||
| 255 | |||
| 256 | #define FSL_DMA_OUT(fsl_dma, addr, val, width) \ | ||
| 257 | (((fsl_dma)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
| 258 | fsl_iowrite##width##be(val, addr) : fsl_iowrite \ | ||
| 259 | ##width(val, addr)) | ||
| 232 | 260 | ||
| 233 | #define DMA_TO_CPU(fsl_chan, d, width) \ | 261 | #define DMA_TO_CPU(fsl_chan, d, width) \ |
| 234 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | 262 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ |
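The FSL_DMA_IN/FSL_DMA_OUT rework above dispatches on the channel's FSL_DMA_BIG_ENDIAN feature bit and token-pastes the access width into the accessor name. The stand-alone C sketch below only illustrates that preprocessor pattern with stub accessors; it is a demo under stated assumptions, not the kernel code itself.

#include <stdint.h>
#include <stdio.h>

#define FEAT_BIG_ENDIAN 0x1

/* Stub accessors standing in for fsl_ioread32()/fsl_ioread32be(). */
static uint32_t demo_ioread32(const volatile void *p)
{
	(void)p;
	puts("little-endian 32-bit read");
	return 0;
}

static uint32_t demo_ioread32be(const volatile void *p)
{
	(void)p;
	puts("big-endian 32-bit read");
	return 0;
}

/* Same shape as FSL_DMA_IN(): "##width##" builds the accessor name. */
#define DEMO_DMA_IN(feature, addr, width)				\
	(((feature) & FEAT_BIG_ENDIAN) ?				\
	 demo_ioread##width##be(addr) : demo_ioread##width(addr))

int main(void)
{
	uint32_t reg = 0;

	DEMO_DMA_IN(FEAT_BIG_ENDIAN, &reg, 32);	/* expands to demo_ioread32be(&reg) */
	DEMO_DMA_IN(0, &reg, 32);		/* expands to demo_ioread32(&reg)   */
	return 0;
}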
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 4a09af3cd546..00a089e24150 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
| @@ -278,14 +278,14 @@ static int imxdma_hw_chain(struct imxdma_channel *imxdmac) | |||
| 278 | /* | 278 | /* |
| 279 | * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation | 279 | * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation |
| 280 | */ | 280 | */ |
| 281 | static inline int imxdma_sg_next(struct imxdma_desc *d) | 281 | static inline void imxdma_sg_next(struct imxdma_desc *d) |
| 282 | { | 282 | { |
| 283 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); | 283 | struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); |
| 284 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 284 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
| 285 | struct scatterlist *sg = d->sg; | 285 | struct scatterlist *sg = d->sg; |
| 286 | unsigned long now; | 286 | size_t now; |
| 287 | 287 | ||
| 288 | now = min(d->len, sg_dma_len(sg)); | 288 | now = min_t(size_t, d->len, sg_dma_len(sg)); |
| 289 | if (d->len != IMX_DMA_LENGTH_LOOP) | 289 | if (d->len != IMX_DMA_LENGTH_LOOP) |
| 290 | d->len -= now; | 290 | d->len -= now; |
| 291 | 291 | ||
| @@ -303,8 +303,6 @@ static inline int imxdma_sg_next(struct imxdma_desc *d) | |||
| 303 | imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)), | 303 | imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)), |
| 304 | imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)), | 304 | imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)), |
| 305 | imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel))); | 305 | imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel))); |
| 306 | |||
| 307 | return now; | ||
| 308 | } | 306 | } |
| 309 | 307 | ||
| 310 | static void imxdma_enable_hw(struct imxdma_desc *d) | 308 | static void imxdma_enable_hw(struct imxdma_desc *d) |
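In the imxdma_sg_next() change above, d->len is a size_t while sg_dma_len() yields an unsigned int, so the plain min() would normally trip the kernel's type-mismatch check on 64-bit builds; min_t(size_t, ...) casts both operands first. A one-line sketch of the pattern, with hypothetical names:

#include <linux/kernel.h>	/* min_t() */
#include <linux/types.h>

/* remaining is size_t, seg_len is unsigned int: comparing them with
 * plain min() mixes types, so cast both to size_t explicitly. */
static size_t demo_next_chunk(size_t remaining, unsigned int seg_len)
{
	return min_t(size_t, remaining, seg_len);
}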
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 86708fb9bda1..5f3c1378b90e 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
| @@ -377,6 +377,7 @@ struct sdma_channel { | |||
| 377 | unsigned long watermark_level; | 377 | unsigned long watermark_level; |
| 378 | u32 shp_addr, per_addr; | 378 | u32 shp_addr, per_addr; |
| 379 | enum dma_status status; | 379 | enum dma_status status; |
| 380 | bool context_loaded; | ||
| 380 | struct imx_dma_data data; | 381 | struct imx_dma_data data; |
| 381 | struct work_struct terminate_worker; | 382 | struct work_struct terminate_worker; |
| 382 | }; | 383 | }; |
| @@ -440,6 +441,8 @@ struct sdma_engine { | |||
| 440 | unsigned int irq; | 441 | unsigned int irq; |
| 441 | dma_addr_t bd0_phys; | 442 | dma_addr_t bd0_phys; |
| 442 | struct sdma_buffer_descriptor *bd0; | 443 | struct sdma_buffer_descriptor *bd0; |
| 444 | /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/ | ||
| 445 | bool clk_ratio; | ||
| 443 | }; | 446 | }; |
| 444 | 447 | ||
| 445 | static int sdma_config_write(struct dma_chan *chan, | 448 | static int sdma_config_write(struct dma_chan *chan, |
| @@ -662,8 +665,11 @@ static int sdma_run_channel0(struct sdma_engine *sdma) | |||
| 662 | dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); | 665 | dev_err(sdma->dev, "Timeout waiting for CH0 ready\n"); |
| 663 | 666 | ||
| 664 | /* Set bits of CONFIG register with dynamic context switching */ | 667 | /* Set bits of CONFIG register with dynamic context switching */ |
| 665 | if (readl(sdma->regs + SDMA_H_CONFIG) == 0) | 668 | reg = readl(sdma->regs + SDMA_H_CONFIG); |
| 666 | writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); | 669 | if ((reg & SDMA_H_CONFIG_CSM) == 0) { |
| 670 | reg |= SDMA_H_CONFIG_CSM; | ||
| 671 | writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG); | ||
| 672 | } | ||
| 667 | 673 | ||
| 668 | return ret; | 674 | return ret; |
| 669 | } | 675 | } |
| @@ -677,7 +683,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, | |||
| 677 | int ret; | 683 | int ret; |
| 678 | unsigned long flags; | 684 | unsigned long flags; |
| 679 | 685 | ||
| 680 | buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL); | 686 | buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL); |
| 681 | if (!buf_virt) { | 687 | if (!buf_virt) { |
| 682 | return -ENOMEM; | 688 | return -ENOMEM; |
| 683 | } | 689 | } |
| @@ -696,7 +702,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, | |||
| 696 | 702 | ||
| 697 | spin_unlock_irqrestore(&sdma->channel_0_lock, flags); | 703 | spin_unlock_irqrestore(&sdma->channel_0_lock, flags); |
| 698 | 704 | ||
| 699 | dma_free_coherent(NULL, size, buf_virt, buf_phys); | 705 | dma_free_coherent(sdma->dev, size, buf_virt, buf_phys); |
| 700 | 706 | ||
| 701 | return ret; | 707 | return ret; |
| 702 | } | 708 | } |
| @@ -970,6 +976,9 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
| 970 | int ret; | 976 | int ret; |
| 971 | unsigned long flags; | 977 | unsigned long flags; |
| 972 | 978 | ||
| 979 | if (sdmac->context_loaded) | ||
| 980 | return 0; | ||
| 981 | |||
| 973 | if (sdmac->direction == DMA_DEV_TO_MEM) | 982 | if (sdmac->direction == DMA_DEV_TO_MEM) |
| 974 | load_address = sdmac->pc_from_device; | 983 | load_address = sdmac->pc_from_device; |
| 975 | else if (sdmac->direction == DMA_DEV_TO_DEV) | 984 | else if (sdmac->direction == DMA_DEV_TO_DEV) |
| @@ -1012,6 +1021,8 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
| 1012 | 1021 | ||
| 1013 | spin_unlock_irqrestore(&sdma->channel_0_lock, flags); | 1022 | spin_unlock_irqrestore(&sdma->channel_0_lock, flags); |
| 1014 | 1023 | ||
| 1024 | sdmac->context_loaded = true; | ||
| 1025 | |||
| 1015 | return ret; | 1026 | return ret; |
| 1016 | } | 1027 | } |
| 1017 | 1028 | ||
| @@ -1051,6 +1062,7 @@ static void sdma_channel_terminate_work(struct work_struct *work) | |||
| 1051 | sdmac->desc = NULL; | 1062 | sdmac->desc = NULL; |
| 1052 | spin_unlock_irqrestore(&sdmac->vc.lock, flags); | 1063 | spin_unlock_irqrestore(&sdmac->vc.lock, flags); |
| 1053 | vchan_dma_desc_free_list(&sdmac->vc, &head); | 1064 | vchan_dma_desc_free_list(&sdmac->vc, &head); |
| 1065 | sdmac->context_loaded = false; | ||
| 1054 | } | 1066 | } |
| 1055 | 1067 | ||
| 1056 | static int sdma_disable_channel_async(struct dma_chan *chan) | 1068 | static int sdma_disable_channel_async(struct dma_chan *chan) |
| @@ -1182,8 +1194,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma) | |||
| 1182 | { | 1194 | { |
| 1183 | int ret = -EBUSY; | 1195 | int ret = -EBUSY; |
| 1184 | 1196 | ||
| 1185 | sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, | 1197 | sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys, |
| 1186 | GFP_NOWAIT); | 1198 | GFP_NOWAIT); |
| 1187 | if (!sdma->bd0) { | 1199 | if (!sdma->bd0) { |
| 1188 | ret = -ENOMEM; | 1200 | ret = -ENOMEM; |
| 1189 | goto out; | 1201 | goto out; |
| @@ -1205,8 +1217,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc) | |||
| 1205 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); | 1217 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); |
| 1206 | int ret = 0; | 1218 | int ret = 0; |
| 1207 | 1219 | ||
| 1208 | desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys, | 1220 | desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size, |
| 1209 | GFP_NOWAIT); | 1221 | &desc->bd_phys, GFP_NOWAIT); |
| 1210 | if (!desc->bd) { | 1222 | if (!desc->bd) { |
| 1211 | ret = -ENOMEM; | 1223 | ret = -ENOMEM; |
| 1212 | goto out; | 1224 | goto out; |
| @@ -1219,7 +1231,8 @@ static void sdma_free_bd(struct sdma_desc *desc) | |||
| 1219 | { | 1231 | { |
| 1220 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); | 1232 | u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); |
| 1221 | 1233 | ||
| 1222 | dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys); | 1234 | dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, |
| 1235 | desc->bd_phys); | ||
| 1223 | } | 1236 | } |
| 1224 | 1237 | ||
| 1225 | static void sdma_desc_free(struct virt_dma_desc *vd) | 1238 | static void sdma_desc_free(struct virt_dma_desc *vd) |
| @@ -1839,10 +1852,13 @@ static int sdma_init(struct sdma_engine *sdma) | |||
| 1839 | if (ret) | 1852 | if (ret) |
| 1840 | goto disable_clk_ipg; | 1853 | goto disable_clk_ipg; |
| 1841 | 1854 | ||
| 1855 | if (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)) | ||
| 1856 | sdma->clk_ratio = 1; | ||
| 1857 | |||
| 1842 | /* Be sure SDMA has not started yet */ | 1858 | /* Be sure SDMA has not started yet */ |
| 1843 | writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); | 1859 | writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); |
| 1844 | 1860 | ||
| 1845 | sdma->channel_control = dma_alloc_coherent(NULL, | 1861 | sdma->channel_control = dma_alloc_coherent(sdma->dev, |
| 1846 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + | 1862 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + |
| 1847 | sizeof(struct sdma_context_data), | 1863 | sizeof(struct sdma_context_data), |
| 1848 | &ccb_phys, GFP_KERNEL); | 1864 | &ccb_phys, GFP_KERNEL); |
| @@ -1879,8 +1895,10 @@ static int sdma_init(struct sdma_engine *sdma) | |||
| 1879 | writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); | 1895 | writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR); |
| 1880 | 1896 | ||
| 1881 | /* Set bits of CONFIG register but with static context switching */ | 1897 | /* Set bits of CONFIG register but with static context switching */ |
| 1882 | /* FIXME: Check whether to set ACR bit depending on clock ratios */ | 1898 | if (sdma->clk_ratio) |
| 1883 | writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); | 1899 | writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG); |
| 1900 | else | ||
| 1901 | writel_relaxed(0, sdma->regs + SDMA_H_CONFIG); | ||
| 1884 | 1902 | ||
| 1885 | writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); | 1903 | writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR); |
| 1886 | 1904 | ||
| @@ -1903,11 +1921,16 @@ disable_clk_ipg: | |||
| 1903 | static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) | 1921 | static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) |
| 1904 | { | 1922 | { |
| 1905 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1923 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
| 1924 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 1906 | struct imx_dma_data *data = fn_param; | 1925 | struct imx_dma_data *data = fn_param; |
| 1907 | 1926 | ||
| 1908 | if (!imx_dma_is_general_purpose(chan)) | 1927 | if (!imx_dma_is_general_purpose(chan)) |
| 1909 | return false; | 1928 | return false; |
| 1910 | 1929 | ||
| 1930 | /* return false if it's not the right device */ | ||
| 1931 | if (sdma->dev->of_node != data->of_node) | ||
| 1932 | return false; | ||
| 1933 | |||
| 1911 | sdmac->data = *data; | 1934 | sdmac->data = *data; |
| 1912 | chan->private = &sdmac->data; | 1935 | chan->private = &sdmac->data; |
| 1913 | 1936 | ||
| @@ -1935,6 +1958,7 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, | |||
| 1935 | * be set to sdmac->event_id1. | 1958 | * be set to sdmac->event_id1. |
| 1936 | */ | 1959 | */ |
| 1937 | data.dma_request2 = 0; | 1960 | data.dma_request2 = 0; |
| 1961 | data.of_node = ofdma->of_node; | ||
| 1938 | 1962 | ||
| 1939 | return dma_request_channel(mask, sdma_filter_fn, &data); | 1963 | return dma_request_channel(mask, sdma_filter_fn, &data); |
| 1940 | } | 1964 | } |
| @@ -2097,6 +2121,7 @@ static int sdma_probe(struct platform_device *pdev) | |||
| 2097 | sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy; | 2121 | sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy; |
| 2098 | sdma->dma_device.device_issue_pending = sdma_issue_pending; | 2122 | sdma->dma_device.device_issue_pending = sdma_issue_pending; |
| 2099 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; | 2123 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; |
| 2124 | sdma->dma_device.copy_align = 2; | ||
| 2100 | dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT); | 2125 | dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT); |
| 2101 | 2126 | ||
| 2102 | platform_set_drvdata(pdev, sdma); | 2127 | platform_set_drvdata(pdev, sdma); |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 23fb2fa04000..f373a139e0c3 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
| @@ -372,6 +372,7 @@ struct ioat_ring_ent ** | |||
| 372 | ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) | 372 | ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) |
| 373 | { | 373 | { |
| 374 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); | 374 | struct ioatdma_chan *ioat_chan = to_ioat_chan(c); |
| 375 | struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; | ||
| 375 | struct ioat_ring_ent **ring; | 376 | struct ioat_ring_ent **ring; |
| 376 | int total_descs = 1 << order; | 377 | int total_descs = 1 << order; |
| 377 | int i, chunks; | 378 | int i, chunks; |
| @@ -437,6 +438,17 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) | |||
| 437 | } | 438 | } |
| 438 | ring[i]->hw->next = ring[0]->txd.phys; | 439 | ring[i]->hw->next = ring[0]->txd.phys; |
| 439 | 440 | ||
| 441 | /* setup descriptor pre-fetching for v3.4 */ | ||
| 442 | if (ioat_dma->cap & IOAT_CAP_DPS) { | ||
| 443 | u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN; | ||
| 444 | |||
| 445 | if (chunks == 1) | ||
| 446 | drsctl |= IOAT_CHAN_DRS_AUTOWRAP; | ||
| 447 | |||
| 448 | writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET); | ||
| 449 | |||
| 450 | } | ||
| 451 | |||
| 440 | return ring; | 452 | return ring; |
| 441 | } | 453 | } |
| 442 | 454 | ||
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 1ab42ec2b7ff..aaafd0e882b5 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | #include "registers.h" | 27 | #include "registers.h" |
| 28 | #include "hw.h" | 28 | #include "hw.h" |
| 29 | 29 | ||
| 30 | #define IOAT_DMA_VERSION "4.00" | 30 | #define IOAT_DMA_VERSION "5.00" |
| 31 | 31 | ||
| 32 | #define IOAT_DMA_DCA_ANY_CPU ~0 | 32 | #define IOAT_DMA_DCA_ANY_CPU ~0 |
| 33 | 33 | ||
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index abcc51b343ce..781c94de8e81 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h | |||
| @@ -66,11 +66,14 @@ | |||
| 66 | 66 | ||
| 67 | #define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021 | 67 | #define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021 |
| 68 | 68 | ||
| 69 | #define PCI_DEVICE_ID_INTEL_IOAT_ICX 0x0b00 | ||
| 70 | |||
| 69 | #define IOAT_VER_1_2 0x12 /* Version 1.2 */ | 71 | #define IOAT_VER_1_2 0x12 /* Version 1.2 */ |
| 70 | #define IOAT_VER_2_0 0x20 /* Version 2.0 */ | 72 | #define IOAT_VER_2_0 0x20 /* Version 2.0 */ |
| 71 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ | 73 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ |
| 72 | #define IOAT_VER_3_2 0x32 /* Version 3.2 */ | 74 | #define IOAT_VER_3_2 0x32 /* Version 3.2 */ |
| 73 | #define IOAT_VER_3_3 0x33 /* Version 3.3 */ | 75 | #define IOAT_VER_3_3 0x33 /* Version 3.3 */ |
| 76 | #define IOAT_VER_3_4 0x34 /* Version 3.4 */ | ||
| 74 | 77 | ||
| 75 | 78 | ||
| 76 | int system_has_dca_enabled(struct pci_dev *pdev); | 79 | int system_has_dca_enabled(struct pci_dev *pdev); |
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 2d810dfcdc48..d41dc9a9ff68 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c | |||
| @@ -119,6 +119,9 @@ static const struct pci_device_id ioat_pci_tbl[] = { | |||
| 119 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, | 119 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, |
| 120 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, | 120 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, |
| 121 | 121 | ||
| 122 | /* I/OAT v3.4 platforms */ | ||
| 123 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_ICX) }, | ||
| 124 | |||
| 122 | { 0, } | 125 | { 0, } |
| 123 | }; | 126 | }; |
| 124 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); | 127 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); |
| @@ -135,10 +138,10 @@ static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma); | |||
| 135 | static int ioat_dca_enabled = 1; | 138 | static int ioat_dca_enabled = 1; |
| 136 | module_param(ioat_dca_enabled, int, 0644); | 139 | module_param(ioat_dca_enabled, int, 0644); |
| 137 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); | 140 | MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); |
| 138 | int ioat_pending_level = 4; | 141 | int ioat_pending_level = 7; |
| 139 | module_param(ioat_pending_level, int, 0644); | 142 | module_param(ioat_pending_level, int, 0644); |
| 140 | MODULE_PARM_DESC(ioat_pending_level, | 143 | MODULE_PARM_DESC(ioat_pending_level, |
| 141 | "high-water mark for pushing ioat descriptors (default: 4)"); | 144 | "high-water mark for pushing ioat descriptors (default: 7)"); |
| 142 | static char ioat_interrupt_style[32] = "msix"; | 145 | static char ioat_interrupt_style[32] = "msix"; |
| 143 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, | 146 | module_param_string(ioat_interrupt_style, ioat_interrupt_style, |
| 144 | sizeof(ioat_interrupt_style), 0644); | 147 | sizeof(ioat_interrupt_style), 0644); |
| @@ -635,6 +638,11 @@ static void ioat_free_chan_resources(struct dma_chan *c) | |||
| 635 | ioat_stop(ioat_chan); | 638 | ioat_stop(ioat_chan); |
| 636 | ioat_reset_hw(ioat_chan); | 639 | ioat_reset_hw(ioat_chan); |
| 637 | 640 | ||
| 641 | /* Put LTR to idle */ | ||
| 642 | if (ioat_dma->version >= IOAT_VER_3_4) | ||
| 643 | writeb(IOAT_CHAN_LTR_SWSEL_IDLE, | ||
| 644 | ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET); | ||
| 645 | |||
| 638 | spin_lock_bh(&ioat_chan->cleanup_lock); | 646 | spin_lock_bh(&ioat_chan->cleanup_lock); |
| 639 | spin_lock_bh(&ioat_chan->prep_lock); | 647 | spin_lock_bh(&ioat_chan->prep_lock); |
| 640 | descs = ioat_ring_space(ioat_chan); | 648 | descs = ioat_ring_space(ioat_chan); |
| @@ -724,6 +732,28 @@ static int ioat_alloc_chan_resources(struct dma_chan *c) | |||
| 724 | spin_unlock_bh(&ioat_chan->prep_lock); | 732 | spin_unlock_bh(&ioat_chan->prep_lock); |
| 725 | spin_unlock_bh(&ioat_chan->cleanup_lock); | 733 | spin_unlock_bh(&ioat_chan->cleanup_lock); |
| 726 | 734 | ||
| 735 | /* Setting up LTR values for 3.4 or later */ | ||
| 736 | if (ioat_chan->ioat_dma->version >= IOAT_VER_3_4) { | ||
| 737 | u32 lat_val; | ||
| 738 | |||
| 739 | lat_val = IOAT_CHAN_LTR_ACTIVE_SNVAL | | ||
| 740 | IOAT_CHAN_LTR_ACTIVE_SNLATSCALE | | ||
| 741 | IOAT_CHAN_LTR_ACTIVE_SNREQMNT; | ||
| 742 | writel(lat_val, ioat_chan->reg_base + | ||
| 743 | IOAT_CHAN_LTR_ACTIVE_OFFSET); | ||
| 744 | |||
| 745 | lat_val = IOAT_CHAN_LTR_IDLE_SNVAL | | ||
| 746 | IOAT_CHAN_LTR_IDLE_SNLATSCALE | | ||
| 747 | IOAT_CHAN_LTR_IDLE_SNREQMNT; | ||
| 748 | writel(lat_val, ioat_chan->reg_base + | ||
| 749 | IOAT_CHAN_LTR_IDLE_OFFSET); | ||
| 750 | |||
| 751 | /* Select to active */ | ||
| 752 | writeb(IOAT_CHAN_LTR_SWSEL_ACTIVE, | ||
| 753 | ioat_chan->reg_base + | ||
| 754 | IOAT_CHAN_LTR_SWSEL_OFFSET); | ||
| 755 | } | ||
| 756 | |||
| 727 | ioat_start_null_desc(ioat_chan); | 757 | ioat_start_null_desc(ioat_chan); |
| 728 | 758 | ||
| 729 | /* check that we got off the ground */ | 759 | /* check that we got off the ground */ |
| @@ -1185,6 +1215,10 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) | |||
| 1185 | if (err) | 1215 | if (err) |
| 1186 | return err; | 1216 | return err; |
| 1187 | 1217 | ||
| 1218 | if (ioat_dma->cap & IOAT_CAP_DPS) | ||
| 1219 | writeb(ioat_pending_level + 1, | ||
| 1220 | ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET); | ||
| 1221 | |||
| 1188 | return 0; | 1222 | return 0; |
| 1189 | } | 1223 | } |
| 1190 | 1224 | ||
| @@ -1350,6 +1384,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1350 | pci_set_drvdata(pdev, device); | 1384 | pci_set_drvdata(pdev, device); |
| 1351 | 1385 | ||
| 1352 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); | 1386 | device->version = readb(device->reg_base + IOAT_VER_OFFSET); |
| 1387 | if (device->version >= IOAT_VER_3_4) | ||
| 1388 | ioat_dca_enabled = 0; | ||
| 1353 | if (device->version >= IOAT_VER_3_0) { | 1389 | if (device->version >= IOAT_VER_3_0) { |
| 1354 | if (is_skx_ioat(pdev)) | 1390 | if (is_skx_ioat(pdev)) |
| 1355 | device->version = IOAT_VER_3_2; | 1391 | device->version = IOAT_VER_3_2; |
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 2f3bbc88ff2a..99c1c24d465d 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h | |||
| @@ -84,6 +84,9 @@ | |||
| 84 | #define IOAT_CAP_PQ 0x00000200 | 84 | #define IOAT_CAP_PQ 0x00000200 |
| 85 | #define IOAT_CAP_DWBES 0x00002000 | 85 | #define IOAT_CAP_DWBES 0x00002000 |
| 86 | #define IOAT_CAP_RAID16SS 0x00020000 | 86 | #define IOAT_CAP_RAID16SS 0x00020000 |
| 87 | #define IOAT_CAP_DPS 0x00800000 | ||
| 88 | |||
| 89 | #define IOAT_PREFETCH_LIMIT_OFFSET 0x4C /* CHWPREFLMT */ | ||
| 87 | 90 | ||
| 88 | #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ | 91 | #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ |
| 89 | 92 | ||
| @@ -243,4 +246,25 @@ | |||
| 243 | 246 | ||
| 244 | #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ | 247 | #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ |
| 245 | 248 | ||
| 249 | #define IOAT_CHAN_DRSCTL_OFFSET 0xB6 | ||
| 250 | #define IOAT_CHAN_DRSZ_4KB 0x0000 | ||
| 251 | #define IOAT_CHAN_DRSZ_8KB 0x0001 | ||
| 252 | #define IOAT_CHAN_DRSZ_2MB 0x0009 | ||
| 253 | #define IOAT_CHAN_DRS_EN 0x0100 | ||
| 254 | #define IOAT_CHAN_DRS_AUTOWRAP 0x0200 | ||
| 255 | |||
| 256 | #define IOAT_CHAN_LTR_SWSEL_OFFSET 0xBC | ||
| 257 | #define IOAT_CHAN_LTR_SWSEL_ACTIVE 0x0 | ||
| 258 | #define IOAT_CHAN_LTR_SWSEL_IDLE 0x1 | ||
| 259 | |||
| 260 | #define IOAT_CHAN_LTR_ACTIVE_OFFSET 0xC0 | ||
| 261 | #define IOAT_CHAN_LTR_ACTIVE_SNVAL 0x0000 /* 0 us */ | ||
| 262 | #define IOAT_CHAN_LTR_ACTIVE_SNLATSCALE 0x0800 /* 1us scale */ | ||
| 263 | #define IOAT_CHAN_LTR_ACTIVE_SNREQMNT 0x8000 /* snoop req enable */ | ||
| 264 | |||
| 265 | #define IOAT_CHAN_LTR_IDLE_OFFSET 0xC4 | ||
| 266 | #define IOAT_CHAN_LTR_IDLE_SNVAL 0x0258 /* 600 us */ | ||
| 267 | #define IOAT_CHAN_LTR_IDLE_SNLATSCALE 0x0800 /* 1us scale */ | ||
| 268 | #define IOAT_CHAN_LTR_IDLE_SNREQMNT 0x8000 /* snoop req enable */ | ||
| 269 | |||
| 246 | #endif /* _IOAT_REGISTERS_H_ */ | 270 | #endif /* _IOAT_REGISTERS_H_ */ |
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index fdec2b6cfbb0..5737d92eaeeb 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c | |||
| @@ -52,8 +52,6 @@ | |||
| 52 | #define CX_SRC 0x814 | 52 | #define CX_SRC 0x814 |
| 53 | #define CX_DST 0x818 | 53 | #define CX_DST 0x818 |
| 54 | #define CX_CFG 0x81c | 54 | #define CX_CFG 0x81c |
| 55 | #define AXI_CFG 0x820 | ||
| 56 | #define AXI_CFG_DEFAULT 0x201201 | ||
| 57 | 55 | ||
| 58 | #define CX_LLI_CHAIN_EN 0x2 | 56 | #define CX_LLI_CHAIN_EN 0x2 |
| 59 | #define CX_CFG_EN 0x1 | 57 | #define CX_CFG_EN 0x1 |
| @@ -113,9 +111,18 @@ struct k3_dma_dev { | |||
| 113 | struct dma_pool *pool; | 111 | struct dma_pool *pool; |
| 114 | u32 dma_channels; | 112 | u32 dma_channels; |
| 115 | u32 dma_requests; | 113 | u32 dma_requests; |
| 114 | u32 dma_channel_mask; | ||
| 116 | unsigned int irq; | 115 | unsigned int irq; |
| 117 | }; | 116 | }; |
| 118 | 117 | ||
| 118 | |||
| 119 | #define K3_FLAG_NOCLK BIT(1) | ||
| 120 | |||
| 121 | struct k3dma_soc_data { | ||
| 122 | unsigned long flags; | ||
| 123 | }; | ||
| 124 | |||
| 125 | |||
| 119 | #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave) | 126 | #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave) |
| 120 | 127 | ||
| 121 | static int k3_dma_config_write(struct dma_chan *chan, | 128 | static int k3_dma_config_write(struct dma_chan *chan, |
| @@ -161,7 +168,6 @@ static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw) | |||
| 161 | writel_relaxed(hw->count, phy->base + CX_CNT0); | 168 | writel_relaxed(hw->count, phy->base + CX_CNT0); |
| 162 | writel_relaxed(hw->saddr, phy->base + CX_SRC); | 169 | writel_relaxed(hw->saddr, phy->base + CX_SRC); |
| 163 | writel_relaxed(hw->daddr, phy->base + CX_DST); | 170 | writel_relaxed(hw->daddr, phy->base + CX_DST); |
| 164 | writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG); | ||
| 165 | writel_relaxed(hw->config, phy->base + CX_CFG); | 171 | writel_relaxed(hw->config, phy->base + CX_CFG); |
| 166 | } | 172 | } |
| 167 | 173 | ||
| @@ -314,6 +320,9 @@ static void k3_dma_tasklet(unsigned long arg) | |||
| 314 | /* check new channel request in d->chan_pending */ | 320 | /* check new channel request in d->chan_pending */ |
| 315 | spin_lock_irq(&d->lock); | 321 | spin_lock_irq(&d->lock); |
| 316 | for (pch = 0; pch < d->dma_channels; pch++) { | 322 | for (pch = 0; pch < d->dma_channels; pch++) { |
| 323 | if (!(d->dma_channel_mask & (1 << pch))) | ||
| 324 | continue; | ||
| 325 | |||
| 317 | p = &d->phy[pch]; | 326 | p = &d->phy[pch]; |
| 318 | 327 | ||
| 319 | if (p->vchan == NULL && !list_empty(&d->chan_pending)) { | 328 | if (p->vchan == NULL && !list_empty(&d->chan_pending)) { |
| @@ -331,6 +340,9 @@ static void k3_dma_tasklet(unsigned long arg) | |||
| 331 | spin_unlock_irq(&d->lock); | 340 | spin_unlock_irq(&d->lock); |
| 332 | 341 | ||
| 333 | for (pch = 0; pch < d->dma_channels; pch++) { | 342 | for (pch = 0; pch < d->dma_channels; pch++) { |
| 343 | if (!(d->dma_channel_mask & (1 << pch))) | ||
| 344 | continue; | ||
| 345 | |||
| 334 | if (pch_alloc & (1 << pch)) { | 346 | if (pch_alloc & (1 << pch)) { |
| 335 | p = &d->phy[pch]; | 347 | p = &d->phy[pch]; |
| 336 | c = p->vchan; | 348 | c = p->vchan; |
| @@ -790,8 +802,21 @@ static int k3_dma_transfer_resume(struct dma_chan *chan) | |||
| 790 | return 0; | 802 | return 0; |
| 791 | } | 803 | } |
| 792 | 804 | ||
| 805 | static const struct k3dma_soc_data k3_v1_dma_data = { | ||
| 806 | .flags = 0, | ||
| 807 | }; | ||
| 808 | |||
| 809 | static const struct k3dma_soc_data asp_v1_dma_data = { | ||
| 810 | .flags = K3_FLAG_NOCLK, | ||
| 811 | }; | ||
| 812 | |||
| 793 | static const struct of_device_id k3_pdma_dt_ids[] = { | 813 | static const struct of_device_id k3_pdma_dt_ids[] = { |
| 794 | { .compatible = "hisilicon,k3-dma-1.0", }, | 814 | { .compatible = "hisilicon,k3-dma-1.0", |
| 815 | .data = &k3_v1_dma_data | ||
| 816 | }, | ||
| 817 | { .compatible = "hisilicon,hisi-pcm-asp-dma-1.0", | ||
| 818 | .data = &asp_v1_dma_data | ||
| 819 | }, | ||
| 795 | {} | 820 | {} |
| 796 | }; | 821 | }; |
| 797 | MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids); | 822 | MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids); |
| @@ -810,6 +835,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec, | |||
| 810 | 835 | ||
| 811 | static int k3_dma_probe(struct platform_device *op) | 836 | static int k3_dma_probe(struct platform_device *op) |
| 812 | { | 837 | { |
| 838 | const struct k3dma_soc_data *soc_data; | ||
| 813 | struct k3_dma_dev *d; | 839 | struct k3_dma_dev *d; |
| 814 | const struct of_device_id *of_id; | 840 | const struct of_device_id *of_id; |
| 815 | struct resource *iores; | 841 | struct resource *iores; |
| @@ -823,6 +849,10 @@ static int k3_dma_probe(struct platform_device *op) | |||
| 823 | if (!d) | 849 | if (!d) |
| 824 | return -ENOMEM; | 850 | return -ENOMEM; |
| 825 | 851 | ||
| 852 | soc_data = device_get_match_data(&op->dev); | ||
| 853 | if (!soc_data) | ||
| 854 | return -EINVAL; | ||
| 855 | |||
| 826 | d->base = devm_ioremap_resource(&op->dev, iores); | 856 | d->base = devm_ioremap_resource(&op->dev, iores); |
| 827 | if (IS_ERR(d->base)) | 857 | if (IS_ERR(d->base)) |
| 828 | return PTR_ERR(d->base); | 858 | return PTR_ERR(d->base); |
| @@ -833,12 +863,21 @@ static int k3_dma_probe(struct platform_device *op) | |||
| 833 | "dma-channels", &d->dma_channels); | 863 | "dma-channels", &d->dma_channels); |
| 834 | of_property_read_u32((&op->dev)->of_node, | 864 | of_property_read_u32((&op->dev)->of_node, |
| 835 | "dma-requests", &d->dma_requests); | 865 | "dma-requests", &d->dma_requests); |
| 866 | ret = of_property_read_u32((&op->dev)->of_node, | ||
| 867 | "dma-channel-mask", &d->dma_channel_mask); | ||
| 868 | if (ret) { | ||
| 869 | dev_warn(&op->dev, | ||
| 870 | "dma-channel-mask doesn't exist, considering all as available.\n"); | ||
| 871 | d->dma_channel_mask = (u32)~0UL; | ||
| 872 | } | ||
| 836 | } | 873 | } |
| 837 | 874 | ||
| 838 | d->clk = devm_clk_get(&op->dev, NULL); | 875 | if (!(soc_data->flags & K3_FLAG_NOCLK)) { |
| 839 | if (IS_ERR(d->clk)) { | 876 | d->clk = devm_clk_get(&op->dev, NULL); |
| 840 | dev_err(&op->dev, "no dma clk\n"); | 877 | if (IS_ERR(d->clk)) { |
| 841 | return PTR_ERR(d->clk); | 878 | dev_err(&op->dev, "no dma clk\n"); |
| 879 | return PTR_ERR(d->clk); | ||
| 880 | } | ||
| 842 | } | 881 | } |
| 843 | 882 | ||
| 844 | irq = platform_get_irq(op, 0); | 883 | irq = platform_get_irq(op, 0); |
| @@ -862,8 +901,12 @@ static int k3_dma_probe(struct platform_device *op) | |||
| 862 | return -ENOMEM; | 901 | return -ENOMEM; |
| 863 | 902 | ||
| 864 | for (i = 0; i < d->dma_channels; i++) { | 903 | for (i = 0; i < d->dma_channels; i++) { |
| 865 | struct k3_dma_phy *p = &d->phy[i]; | 904 | struct k3_dma_phy *p; |
| 905 | |||
| 906 | if (!(d->dma_channel_mask & BIT(i))) | ||
| 907 | continue; | ||
| 866 | 908 | ||
| 909 | p = &d->phy[i]; | ||
| 867 | p->idx = i; | 910 | p->idx = i; |
| 868 | p->base = d->base + i * 0x40; | 911 | p->base = d->base + i * 0x40; |
| 869 | } | 912 | } |
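The k3dma change above moves per-compatible quirks into a soc_data structure attached to the of_device_id table and looked up with device_get_match_data() at probe time. A minimal sketch of that pattern follows; the compatibles, flag, and function names are hypothetical.

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

/* Hypothetical per-SoC data, mirroring struct k3dma_soc_data. */
struct demo_soc_data {
	unsigned long flags;
};

#define DEMO_FLAG_NOCLK	BIT(1)

static const struct demo_soc_data demo_v1_data = { .flags = 0 };
static const struct demo_soc_data demo_asp_data = { .flags = DEMO_FLAG_NOCLK };

static const struct of_device_id demo_dt_ids[] = {
	{ .compatible = "vendor,demo-dma-1.0", .data = &demo_v1_data },
	{ .compatible = "vendor,demo-asp-dma-1.0", .data = &demo_asp_data },
	{ /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_soc_data *soc_data;

	soc_data = device_get_match_data(&pdev->dev);
	if (!soc_data)
		return -EINVAL;

	if (!(soc_data->flags & DEMO_FLAG_NOCLK)) {
		/* Only variants with a controllable clock request one here. */
	}

	return 0;
}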
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index 5de1b07eddff..7de54b2fafdb 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c | |||
| @@ -214,6 +214,7 @@ static int mcf_edma_probe(struct platform_device *pdev) | |||
| 214 | mcf_chan->edma = mcf_edma; | 214 | mcf_chan->edma = mcf_edma; |
| 215 | mcf_chan->slave_id = i; | 215 | mcf_chan->slave_id = i; |
| 216 | mcf_chan->idle = true; | 216 | mcf_chan->idle = true; |
| 217 | mcf_chan->dma_dir = DMA_NONE; | ||
| 217 | mcf_chan->vchan.desc_free = fsl_edma_free_desc; | 218 | mcf_chan->vchan.desc_free = fsl_edma_free_desc; |
| 218 | vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev); | 219 | vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev); |
| 219 | iowrite32(0x0, ®s->tcd[i].csr); | 220 | iowrite32(0x0, ®s->tcd[i].csr); |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 7f595355fb79..65af2e7fcb2c 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
| @@ -1059,6 +1059,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
| 1059 | mv_chan->op_in_desc = XOR_MODE_IN_DESC; | 1059 | mv_chan->op_in_desc = XOR_MODE_IN_DESC; |
| 1060 | 1060 | ||
| 1061 | dma_dev = &mv_chan->dmadev; | 1061 | dma_dev = &mv_chan->dmadev; |
| 1062 | dma_dev->dev = &pdev->dev; | ||
| 1062 | mv_chan->xordev = xordev; | 1063 | mv_chan->xordev = xordev; |
| 1063 | 1064 | ||
| 1064 | /* | 1065 | /* |
| @@ -1091,7 +1092,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
| 1091 | dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; | 1092 | dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; |
| 1092 | dma_dev->device_tx_status = mv_xor_status; | 1093 | dma_dev->device_tx_status = mv_xor_status; |
| 1093 | dma_dev->device_issue_pending = mv_xor_issue_pending; | 1094 | dma_dev->device_issue_pending = mv_xor_issue_pending; |
| 1094 | dma_dev->dev = &pdev->dev; | ||
| 1095 | 1095 | ||
| 1096 | /* set prep routines based on capability */ | 1096 | /* set prep routines based on capability */ |
| 1097 | if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) | 1097 | if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) |
| @@ -1153,7 +1153,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev, | |||
| 1153 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", | 1153 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", |
| 1154 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); | 1154 | dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); |
| 1155 | 1155 | ||
| 1156 | dma_async_device_register(dma_dev); | 1156 | ret = dma_async_device_register(dma_dev); |
| 1157 | if (ret) | ||
| 1158 | goto err_free_irq; | ||
| 1159 | |||
| 1157 | return mv_chan; | 1160 | return mv_chan; |
| 1158 | 1161 | ||
| 1159 | err_free_irq: | 1162 | err_free_irq: |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index cff1b143fff5..eec79fdf27a5 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
| @@ -2267,7 +2267,6 @@ static int pl330_terminate_all(struct dma_chan *chan) | |||
| 2267 | struct dma_pl330_desc *desc; | 2267 | struct dma_pl330_desc *desc; |
| 2268 | unsigned long flags; | 2268 | unsigned long flags; |
| 2269 | struct pl330_dmac *pl330 = pch->dmac; | 2269 | struct pl330_dmac *pl330 = pch->dmac; |
| 2270 | LIST_HEAD(list); | ||
| 2271 | bool power_down = false; | 2270 | bool power_down = false; |
| 2272 | 2271 | ||
| 2273 | pm_runtime_get_sync(pl330->ddma.dev); | 2272 | pm_runtime_get_sync(pl330->ddma.dev); |
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 1617715aa6e0..cb860cb53c27 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c | |||
| @@ -636,8 +636,8 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, | |||
| 636 | num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE); | 636 | num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE); |
| 637 | 637 | ||
| 638 | /* allocate enough room to accomodate the number of entries */ | 638 | /* allocate enough room to accomodate the number of entries */ |
| 639 | async_desc = kzalloc(sizeof(*async_desc) + | 639 | async_desc = kzalloc(struct_size(async_desc, desc, num_alloc), |
| 640 | (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT); | 640 | GFP_NOWAIT); |
| 641 | 641 | ||
| 642 | if (!async_desc) | 642 | if (!async_desc) |
| 643 | goto err_out; | 643 | goto err_out; |
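The bam_dma hunk above replaces an open-coded sizeof() plus multiply with struct_size(), which sizes a structure ending in a flexible array and saturates on multiplication overflow so the allocation fails cleanly instead of being undersized. A minimal sketch with a hypothetical descriptor type:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical descriptor with a flexible-array tail, mirroring how
 * bam_async_desc carries its bam_desc_hw entries. */
struct demo_async_desc {
	unsigned int num_desc;
	u32 desc[];
};

static struct demo_async_desc *demo_alloc_desc(unsigned int num, gfp_t gfp)
{
	struct demo_async_desc *d;

	/*
	 * struct_size(d, desc, num) == sizeof(*d) + num * sizeof(d->desc[0]),
	 * except that an overflowing multiply saturates to SIZE_MAX, so
	 * kzalloc() returns NULL rather than a too-small buffer.
	 */
	d = kzalloc(struct_size(d, desc, num), gfp);
	if (!d)
		return NULL;

	d->num_desc = num;
	return d;
}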
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 43d4b00b8138..411f91fde734 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c | |||
| @@ -138,24 +138,25 @@ static void hidma_process_completed(struct hidma_chan *mchan) | |||
| 138 | desc = &mdesc->desc; | 138 | desc = &mdesc->desc; |
| 139 | last_cookie = desc->cookie; | 139 | last_cookie = desc->cookie; |
| 140 | 140 | ||
| 141 | llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); | ||
| 142 | |||
| 141 | spin_lock_irqsave(&mchan->lock, irqflags); | 143 | spin_lock_irqsave(&mchan->lock, irqflags); |
| 144 | if (llstat == DMA_COMPLETE) { | ||
| 145 | mchan->last_success = last_cookie; | ||
| 146 | result.result = DMA_TRANS_NOERROR; | ||
| 147 | } else { | ||
| 148 | result.result = DMA_TRANS_ABORTED; | ||
| 149 | } | ||
| 150 | |||
| 142 | dma_cookie_complete(desc); | 151 | dma_cookie_complete(desc); |
| 143 | spin_unlock_irqrestore(&mchan->lock, irqflags); | 152 | spin_unlock_irqrestore(&mchan->lock, irqflags); |
| 144 | 153 | ||
| 145 | llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); | ||
| 146 | dmaengine_desc_get_callback(desc, &cb); | 154 | dmaengine_desc_get_callback(desc, &cb); |
| 147 | 155 | ||
| 148 | dma_run_dependencies(desc); | 156 | dma_run_dependencies(desc); |
| 149 | 157 | ||
| 150 | spin_lock_irqsave(&mchan->lock, irqflags); | 158 | spin_lock_irqsave(&mchan->lock, irqflags); |
| 151 | list_move(&mdesc->node, &mchan->free); | 159 | list_move(&mdesc->node, &mchan->free); |
| 152 | |||
| 153 | if (llstat == DMA_COMPLETE) { | ||
| 154 | mchan->last_success = last_cookie; | ||
| 155 | result.result = DMA_TRANS_NOERROR; | ||
| 156 | } else | ||
| 157 | result.result = DMA_TRANS_ABORTED; | ||
| 158 | |||
| 159 | spin_unlock_irqrestore(&mchan->lock, irqflags); | 160 | spin_unlock_irqrestore(&mchan->lock, irqflags); |
| 160 | 161 | ||
| 161 | dmaengine_desc_callback_invoke(&cb, &result); | 162 | dmaengine_desc_callback_invoke(&cb, &result); |
| @@ -415,6 +416,7 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src, | |||
| 415 | if (!mdesc) | 416 | if (!mdesc) |
| 416 | return NULL; | 417 | return NULL; |
| 417 | 418 | ||
| 419 | mdesc->desc.flags = flags; | ||
| 418 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, | 420 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, |
| 419 | src, dest, len, flags, | 421 | src, dest, len, flags, |
| 420 | HIDMA_TRE_MEMCPY); | 422 | HIDMA_TRE_MEMCPY); |
| @@ -447,6 +449,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, | |||
| 447 | if (!mdesc) | 449 | if (!mdesc) |
| 448 | return NULL; | 450 | return NULL; |
| 449 | 451 | ||
| 452 | mdesc->desc.flags = flags; | ||
| 450 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, | 453 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, |
| 451 | value, dest, len, flags, | 454 | value, dest, len, flags, |
| 452 | HIDMA_TRE_MEMSET); | 455 | HIDMA_TRE_MEMSET); |
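Two things change in hidma above: the hardware status is read before the channel lock is taken and the result/last_success bookkeeping moves into the same critical section that completes the cookie, and the prep routines now save the caller's flags on the descriptor. Those flags matter to dmaengine clients; a hedged client-side sketch (function and callback names are placeholders, not hidma API) of a memcpy that relies on them:

	#include <linux/dmaengine.h>

	static void my_done_cb(void *param);	/* placeholder completion callback */

	static int demo_issue_memcpy(struct dma_chan *chan, dma_addr_t dst,
				     dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			return -ENOMEM;

		/* invoked on completion only if the driver preserved
		 * DMA_PREP_INTERRUPT on the descriptor */
		tx->callback = my_done_cb;

		cookie = dmaengine_submit(tx);
		if (dma_submit_error(cookie))
			return -EIO;

		dma_async_issue_pending(chan);
		return 0;
	}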
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c index d64edeb6771a..681de12f4c67 100644 --- a/drivers/dma/qcom/hidma_mgmt.c +++ b/drivers/dma/qcom/hidma_mgmt.c | |||
| @@ -423,9 +423,8 @@ static int __init hidma_mgmt_init(void) | |||
| 423 | hidma_mgmt_of_populate_channels(child); | 423 | hidma_mgmt_of_populate_channels(child); |
| 424 | } | 424 | } |
| 425 | #endif | 425 | #endif |
| 426 | platform_driver_register(&hidma_mgmt_driver); | 426 | return platform_driver_register(&hidma_mgmt_driver); |
| 427 | 427 | ||
| 428 | return 0; | ||
| 429 | } | 428 | } |
| 430 | module_init(hidma_mgmt_init); | 429 | module_init(hidma_mgmt_init); |
| 431 | MODULE_LICENSE("GPL v2"); | 430 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 784d5f1a473b..3fae23768b47 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c | |||
| @@ -705,7 +705,6 @@ static int sa11x0_dma_device_pause(struct dma_chan *chan) | |||
| 705 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | 705 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
| 706 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | 706 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); |
| 707 | struct sa11x0_dma_phy *p; | 707 | struct sa11x0_dma_phy *p; |
| 708 | LIST_HEAD(head); | ||
| 709 | unsigned long flags; | 708 | unsigned long flags; |
| 710 | 709 | ||
| 711 | dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); | 710 | dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); |
| @@ -732,7 +731,6 @@ static int sa11x0_dma_device_resume(struct dma_chan *chan) | |||
| 732 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | 731 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
| 733 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); | 732 | struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); |
| 734 | struct sa11x0_dma_phy *p; | 733 | struct sa11x0_dma_phy *p; |
| 735 | LIST_HEAD(head); | ||
| 736 | unsigned long flags; | 734 | unsigned long flags; |
| 737 | 735 | ||
| 738 | dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); | 736 | dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); |
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 7f7184c3cf95..59403f6d008a 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c | |||
| @@ -694,6 +694,8 @@ static int usb_dmac_runtime_resume(struct device *dev) | |||
| 694 | #endif /* CONFIG_PM */ | 694 | #endif /* CONFIG_PM */ |
| 695 | 695 | ||
| 696 | static const struct dev_pm_ops usb_dmac_pm = { | 696 | static const struct dev_pm_ops usb_dmac_pm = { |
| 697 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, | ||
| 698 | pm_runtime_force_resume) | ||
| 697 | SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, | 699 | SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, |
| 698 | NULL) | 700 | NULL) |
| 699 | }; | 701 | }; |
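The usb-dmac hunk wires system sleep to the runtime-PM path: SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) forces the device through its runtime-suspend/resume callbacks across system suspend, alongside the existing runtime ops. The general shape of such a dev_pm_ops, sketched with placeholder callbacks:

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static int demo_runtime_suspend(struct device *dev)
	{
		/* placeholder: gate clocks, quiesce the device */
		return 0;
	}

	static int demo_runtime_resume(struct device *dev)
	{
		/* placeholder: ungate clocks, restore state */
		return 0;
	}

	static const struct dev_pm_ops demo_pm_ops = {
		/* reuse the runtime-PM callbacks for (noirq) system sleep */
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					      pm_runtime_force_resume)
		SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
	};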
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index e2f016700fcc..48431e2da987 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c | |||
| @@ -580,15 +580,7 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id) | |||
| 580 | 580 | ||
| 581 | static int sprd_dma_alloc_chan_resources(struct dma_chan *chan) | 581 | static int sprd_dma_alloc_chan_resources(struct dma_chan *chan) |
| 582 | { | 582 | { |
| 583 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | 583 | return pm_runtime_get_sync(chan->device->dev); |
| 584 | int ret; | ||
| 585 | |||
| 586 | ret = pm_runtime_get_sync(chan->device->dev); | ||
| 587 | if (ret < 0) | ||
| 588 | return ret; | ||
| 589 | |||
| 590 | schan->dev_id = SPRD_DMA_SOFTWARE_UID; | ||
| 591 | return 0; | ||
| 592 | } | 584 | } |
| 593 | 585 | ||
| 594 | static void sprd_dma_free_chan_resources(struct dma_chan *chan) | 586 | static void sprd_dma_free_chan_resources(struct dma_chan *chan) |
| @@ -1021,13 +1013,10 @@ static void sprd_dma_free_desc(struct virt_dma_desc *vd) | |||
| 1021 | static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param) | 1013 | static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param) |
| 1022 | { | 1014 | { |
| 1023 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | 1015 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); |
| 1024 | struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); | 1016 | u32 slave_id = *(u32 *)param; |
| 1025 | u32 req = *(u32 *)param; | ||
| 1026 | 1017 | ||
| 1027 | if (req < sdev->total_chns) | 1018 | schan->dev_id = slave_id; |
| 1028 | return req == schan->chn_num + 1; | 1019 | return true; |
| 1029 | else | ||
| 1030 | return false; | ||
| 1031 | } | 1020 | } |
| 1032 | 1021 | ||
| 1033 | static int sprd_dma_probe(struct platform_device *pdev) | 1022 | static int sprd_dma_probe(struct platform_device *pdev) |
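sprd-dma's filter function above now accepts any channel and records the requested slave id on it, instead of matching against the channel number. For reference, this is how a filter function is typically driven from the client side; the names here are generic placeholders, not the sprd API:

	#include <linux/dmaengine.h>

	/* dma_request_channel() calls the filter for every candidate channel
	 * with the opaque param; returning true claims that channel. */
	static bool demo_filter(struct dma_chan *chan, void *param)
	{
		u32 wanted = *(u32 *)param;

		/* driver-specific matching or per-channel bookkeeping goes here */
		return chan->chan_id == wanted;
	}

	static struct dma_chan *demo_get_chan(u32 wanted)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, demo_filter, &wanted);
	}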
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c index 07c20aa2e955..bc7a1de3f29b 100644 --- a/drivers/dma/st_fdma.c +++ b/drivers/dma/st_fdma.c | |||
| @@ -243,8 +243,7 @@ static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan, | |||
| 243 | struct st_fdma_desc *fdesc; | 243 | struct st_fdma_desc *fdesc; |
| 244 | int i; | 244 | int i; |
| 245 | 245 | ||
| 246 | fdesc = kzalloc(sizeof(*fdesc) + | 246 | fdesc = kzalloc(struct_size(fdesc, node, sg_len), GFP_NOWAIT); |
| 247 | sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT); | ||
| 248 | if (!fdesc) | 247 | if (!fdesc) |
| 249 | return NULL; | 248 | return NULL; |
| 250 | 249 | ||
| @@ -294,8 +293,6 @@ static void st_fdma_free_chan_res(struct dma_chan *chan) | |||
| 294 | struct rproc *rproc = fchan->fdev->slim_rproc->rproc; | 293 | struct rproc *rproc = fchan->fdev->slim_rproc->rproc; |
| 295 | unsigned long flags; | 294 | unsigned long flags; |
| 296 | 295 | ||
| 297 | LIST_HEAD(head); | ||
| 298 | |||
| 299 | dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n", | 296 | dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n", |
| 300 | __func__, fchan->vchan.chan.chan_id); | 297 | __func__, fchan->vchan.chan.chan_id); |
| 301 | 298 | ||
| @@ -626,7 +623,6 @@ static void st_fdma_issue_pending(struct dma_chan *chan) | |||
| 626 | static int st_fdma_pause(struct dma_chan *chan) | 623 | static int st_fdma_pause(struct dma_chan *chan) |
| 627 | { | 624 | { |
| 628 | unsigned long flags; | 625 | unsigned long flags; |
| 629 | LIST_HEAD(head); | ||
| 630 | struct st_fdma_chan *fchan = to_st_fdma_chan(chan); | 626 | struct st_fdma_chan *fchan = to_st_fdma_chan(chan); |
| 631 | int ch_id = fchan->vchan.chan.chan_id; | 627 | int ch_id = fchan->vchan.chan.chan_id; |
| 632 | unsigned long cmd = FDMA_CMD_PAUSE(ch_id); | 628 | unsigned long cmd = FDMA_CMD_PAUSE(ch_id); |
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 4903a408fc14..ba239b529fa9 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/of_device.h> | 23 | #include <linux/of_device.h> |
| 24 | #include <linux/of_dma.h> | 24 | #include <linux/of_dma.h> |
| 25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
| 26 | #include <linux/pm_runtime.h> | ||
| 26 | #include <linux/reset.h> | 27 | #include <linux/reset.h> |
| 27 | #include <linux/sched.h> | 28 | #include <linux/sched.h> |
| 28 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
| @@ -641,12 +642,13 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) | |||
| 641 | { | 642 | { |
| 642 | struct stm32_dma_chan *chan = devid; | 643 | struct stm32_dma_chan *chan = devid; |
| 643 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | 644 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); |
| 644 | u32 status, scr; | 645 | u32 status, scr, sfcr; |
| 645 | 646 | ||
| 646 | spin_lock(&chan->vchan.lock); | 647 | spin_lock(&chan->vchan.lock); |
| 647 | 648 | ||
| 648 | status = stm32_dma_irq_status(chan); | 649 | status = stm32_dma_irq_status(chan); |
| 649 | scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); | 650 | scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); |
| 651 | sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id)); | ||
| 650 | 652 | ||
| 651 | if (status & STM32_DMA_TCI) { | 653 | if (status & STM32_DMA_TCI) { |
| 652 | stm32_dma_irq_clear(chan, STM32_DMA_TCI); | 654 | stm32_dma_irq_clear(chan, STM32_DMA_TCI); |
| @@ -661,10 +663,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) | |||
| 661 | if (status & STM32_DMA_FEI) { | 663 | if (status & STM32_DMA_FEI) { |
| 662 | stm32_dma_irq_clear(chan, STM32_DMA_FEI); | 664 | stm32_dma_irq_clear(chan, STM32_DMA_FEI); |
| 663 | status &= ~STM32_DMA_FEI; | 665 | status &= ~STM32_DMA_FEI; |
| 664 | if (!(scr & STM32_DMA_SCR_EN)) | 666 | if (sfcr & STM32_DMA_SFCR_FEIE) { |
| 665 | dev_err(chan2dev(chan), "FIFO Error\n"); | 667 | if (!(scr & STM32_DMA_SCR_EN)) |
| 666 | else | 668 | dev_err(chan2dev(chan), "FIFO Error\n"); |
| 667 | dev_dbg(chan2dev(chan), "FIFO over/underrun\n"); | 669 | else |
| 670 | dev_dbg(chan2dev(chan), "FIFO over/underrun\n"); | ||
| 671 | } | ||
| 668 | } | 672 | } |
| 669 | if (status) { | 673 | if (status) { |
| 670 | stm32_dma_irq_clear(chan, status); | 674 | stm32_dma_irq_clear(chan, status); |
| @@ -1112,15 +1116,14 @@ static int stm32_dma_alloc_chan_resources(struct dma_chan *c) | |||
| 1112 | int ret; | 1116 | int ret; |
| 1113 | 1117 | ||
| 1114 | chan->config_init = false; | 1118 | chan->config_init = false; |
| 1115 | ret = clk_prepare_enable(dmadev->clk); | 1119 | |
| 1116 | if (ret < 0) { | 1120 | ret = pm_runtime_get_sync(dmadev->ddev.dev); |
| 1117 | dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret); | 1121 | if (ret < 0) |
| 1118 | return ret; | 1122 | return ret; |
| 1119 | } | ||
| 1120 | 1123 | ||
| 1121 | ret = stm32_dma_disable_chan(chan); | 1124 | ret = stm32_dma_disable_chan(chan); |
| 1122 | if (ret < 0) | 1125 | if (ret < 0) |
| 1123 | clk_disable_unprepare(dmadev->clk); | 1126 | pm_runtime_put(dmadev->ddev.dev); |
| 1124 | 1127 | ||
| 1125 | return ret; | 1128 | return ret; |
| 1126 | } | 1129 | } |
| @@ -1140,7 +1143,7 @@ static void stm32_dma_free_chan_resources(struct dma_chan *c) | |||
| 1140 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | 1143 | spin_unlock_irqrestore(&chan->vchan.lock, flags); |
| 1141 | } | 1144 | } |
| 1142 | 1145 | ||
| 1143 | clk_disable_unprepare(dmadev->clk); | 1146 | pm_runtime_put(dmadev->ddev.dev); |
| 1144 | 1147 | ||
| 1145 | vchan_free_chan_resources(to_virt_chan(c)); | 1148 | vchan_free_chan_resources(to_virt_chan(c)); |
| 1146 | } | 1149 | } |
| @@ -1240,6 +1243,12 @@ static int stm32_dma_probe(struct platform_device *pdev) | |||
| 1240 | return PTR_ERR(dmadev->clk); | 1243 | return PTR_ERR(dmadev->clk); |
| 1241 | } | 1244 | } |
| 1242 | 1245 | ||
| 1246 | ret = clk_prepare_enable(dmadev->clk); | ||
| 1247 | if (ret < 0) { | ||
| 1248 | dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret); | ||
| 1249 | return ret; | ||
| 1250 | } | ||
| 1251 | |||
| 1243 | dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node, | 1252 | dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node, |
| 1244 | "st,mem2mem"); | 1253 | "st,mem2mem"); |
| 1245 | 1254 | ||
| @@ -1289,7 +1298,7 @@ static int stm32_dma_probe(struct platform_device *pdev) | |||
| 1289 | 1298 | ||
| 1290 | ret = dma_async_device_register(dd); | 1299 | ret = dma_async_device_register(dd); |
| 1291 | if (ret) | 1300 | if (ret) |
| 1292 | return ret; | 1301 | goto clk_free; |
| 1293 | 1302 | ||
| 1294 | for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { | 1303 | for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { |
| 1295 | chan = &dmadev->chan[i]; | 1304 | chan = &dmadev->chan[i]; |
| @@ -1321,20 +1330,58 @@ static int stm32_dma_probe(struct platform_device *pdev) | |||
| 1321 | 1330 | ||
| 1322 | platform_set_drvdata(pdev, dmadev); | 1331 | platform_set_drvdata(pdev, dmadev); |
| 1323 | 1332 | ||
| 1333 | pm_runtime_set_active(&pdev->dev); | ||
| 1334 | pm_runtime_enable(&pdev->dev); | ||
| 1335 | pm_runtime_get_noresume(&pdev->dev); | ||
| 1336 | pm_runtime_put(&pdev->dev); | ||
| 1337 | |||
| 1324 | dev_info(&pdev->dev, "STM32 DMA driver registered\n"); | 1338 | dev_info(&pdev->dev, "STM32 DMA driver registered\n"); |
| 1325 | 1339 | ||
| 1326 | return 0; | 1340 | return 0; |
| 1327 | 1341 | ||
| 1328 | err_unregister: | 1342 | err_unregister: |
| 1329 | dma_async_device_unregister(dd); | 1343 | dma_async_device_unregister(dd); |
| 1344 | clk_free: | ||
| 1345 | clk_disable_unprepare(dmadev->clk); | ||
| 1330 | 1346 | ||
| 1331 | return ret; | 1347 | return ret; |
| 1332 | } | 1348 | } |
| 1333 | 1349 | ||
| 1350 | #ifdef CONFIG_PM | ||
| 1351 | static int stm32_dma_runtime_suspend(struct device *dev) | ||
| 1352 | { | ||
| 1353 | struct stm32_dma_device *dmadev = dev_get_drvdata(dev); | ||
| 1354 | |||
| 1355 | clk_disable_unprepare(dmadev->clk); | ||
| 1356 | |||
| 1357 | return 0; | ||
| 1358 | } | ||
| 1359 | |||
| 1360 | static int stm32_dma_runtime_resume(struct device *dev) | ||
| 1361 | { | ||
| 1362 | struct stm32_dma_device *dmadev = dev_get_drvdata(dev); | ||
| 1363 | int ret; | ||
| 1364 | |||
| 1365 | ret = clk_prepare_enable(dmadev->clk); | ||
| 1366 | if (ret) { | ||
| 1367 | dev_err(dev, "failed to prepare_enable clock\n"); | ||
| 1368 | return ret; | ||
| 1369 | } | ||
| 1370 | |||
| 1371 | return 0; | ||
| 1372 | } | ||
| 1373 | #endif | ||
| 1374 | |||
| 1375 | static const struct dev_pm_ops stm32_dma_pm_ops = { | ||
| 1376 | SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend, | ||
| 1377 | stm32_dma_runtime_resume, NULL) | ||
| 1378 | }; | ||
| 1379 | |||
| 1334 | static struct platform_driver stm32_dma_driver = { | 1380 | static struct platform_driver stm32_dma_driver = { |
| 1335 | .driver = { | 1381 | .driver = { |
| 1336 | .name = "stm32-dma", | 1382 | .name = "stm32-dma", |
| 1337 | .of_match_table = stm32_dma_of_match, | 1383 | .of_match_table = stm32_dma_of_match, |
| 1384 | .pm = &stm32_dma_pm_ops, | ||
| 1338 | }, | 1385 | }, |
| 1339 | }; | 1386 | }; |
| 1340 | 1387 | ||
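stm32-dma (and stm32-dmamux/stm32-mdma just below) follow the same runtime-PM conversion: the clock is prepared once in probe, channel allocation takes a runtime-PM reference instead of enabling the clock directly, and the clock gating itself lives in the runtime callbacks. A condensed sketch of the pattern, with error handling and unrelated setup trimmed and generic names in place of the driver's:

	#include <linux/clk.h>
	#include <linux/dmaengine.h>
	#include <linux/pm_runtime.h>

	struct demo_dmadev {
		struct clk *clk;
	};

	static int demo_runtime_suspend(struct device *dev)
	{
		struct demo_dmadev *dmadev = dev_get_drvdata(dev);

		clk_disable_unprepare(dmadev->clk);
		return 0;
	}

	static int demo_runtime_resume(struct device *dev)
	{
		struct demo_dmadev *dmadev = dev_get_drvdata(dev);

		return clk_prepare_enable(dmadev->clk);
	}

	static int demo_alloc_chan_resources(struct dma_chan *c)
	{
		int ret;

		/* the resume callback ungates the clock if the device idled */
		ret = pm_runtime_get_sync(c->device->dev);
		if (ret < 0)
			return ret;

		/* channel-specific init would follow here */
		return 0;
	}

	/* probe() ends with pm_runtime_set_active(), pm_runtime_enable(),
	 * pm_runtime_get_noresume() and pm_runtime_put(), so the device is
	 * registered as active and may then idle when unused. */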
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index b922db90939a..a67119199c45 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
| 29 | #include <linux/of_device.h> | 29 | #include <linux/of_device.h> |
| 30 | #include <linux/of_dma.h> | 30 | #include <linux/of_dma.h> |
| 31 | #include <linux/pm_runtime.h> | ||
| 31 | #include <linux/reset.h> | 32 | #include <linux/reset.h> |
| 32 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
| 33 | #include <linux/spinlock.h> | 34 | #include <linux/spinlock.h> |
| @@ -79,8 +80,7 @@ static void stm32_dmamux_free(struct device *dev, void *route_data) | |||
| 79 | stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0); | 80 | stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0); |
| 80 | clear_bit(mux->chan_id, dmamux->dma_inuse); | 81 | clear_bit(mux->chan_id, dmamux->dma_inuse); |
| 81 | 82 | ||
| 82 | if (!IS_ERR(dmamux->clk)) | 83 | pm_runtime_put_sync(dev); |
| 83 | clk_disable(dmamux->clk); | ||
| 84 | 84 | ||
| 85 | spin_unlock_irqrestore(&dmamux->lock, flags); | 85 | spin_unlock_irqrestore(&dmamux->lock, flags); |
| 86 | 86 | ||
| @@ -146,13 +146,10 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec, | |||
| 146 | 146 | ||
| 147 | /* Set dma request */ | 147 | /* Set dma request */ |
| 148 | spin_lock_irqsave(&dmamux->lock, flags); | 148 | spin_lock_irqsave(&dmamux->lock, flags); |
| 149 | if (!IS_ERR(dmamux->clk)) { | 149 | ret = pm_runtime_get_sync(&pdev->dev); |
| 150 | ret = clk_enable(dmamux->clk); | 150 | if (ret < 0) { |
| 151 | if (ret < 0) { | 151 | spin_unlock_irqrestore(&dmamux->lock, flags); |
| 152 | spin_unlock_irqrestore(&dmamux->lock, flags); | 152 | goto error; |
| 153 | dev_err(&pdev->dev, "clk_prep_enable issue: %d\n", ret); | ||
| 154 | goto error; | ||
| 155 | } | ||
| 156 | } | 153 | } |
| 157 | spin_unlock_irqrestore(&dmamux->lock, flags); | 154 | spin_unlock_irqrestore(&dmamux->lock, flags); |
| 158 | 155 | ||
| @@ -254,6 +251,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev) | |||
| 254 | dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n", | 251 | dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n", |
| 255 | stm32_dmamux->dmamux_requests); | 252 | stm32_dmamux->dmamux_requests); |
| 256 | } | 253 | } |
| 254 | pm_runtime_get_noresume(&pdev->dev); | ||
| 257 | 255 | ||
| 258 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 256 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 259 | iomem = devm_ioremap_resource(&pdev->dev, res); | 257 | iomem = devm_ioremap_resource(&pdev->dev, res); |
| @@ -282,6 +280,8 @@ static int stm32_dmamux_probe(struct platform_device *pdev) | |||
| 282 | stm32_dmamux->dmarouter.route_free = stm32_dmamux_free; | 280 | stm32_dmamux->dmarouter.route_free = stm32_dmamux_free; |
| 283 | 281 | ||
| 284 | platform_set_drvdata(pdev, stm32_dmamux); | 282 | platform_set_drvdata(pdev, stm32_dmamux); |
| 283 | pm_runtime_set_active(&pdev->dev); | ||
| 284 | pm_runtime_enable(&pdev->dev); | ||
| 285 | 285 | ||
| 286 | if (!IS_ERR(stm32_dmamux->clk)) { | 286 | if (!IS_ERR(stm32_dmamux->clk)) { |
| 287 | ret = clk_prepare_enable(stm32_dmamux->clk); | 287 | ret = clk_prepare_enable(stm32_dmamux->clk); |
| @@ -291,17 +291,52 @@ static int stm32_dmamux_probe(struct platform_device *pdev) | |||
| 291 | } | 291 | } |
| 292 | } | 292 | } |
| 293 | 293 | ||
| 294 | pm_runtime_get_noresume(&pdev->dev); | ||
| 295 | |||
| 294 | /* Reset the dmamux */ | 296 | /* Reset the dmamux */ |
| 295 | for (i = 0; i < stm32_dmamux->dma_requests; i++) | 297 | for (i = 0; i < stm32_dmamux->dma_requests; i++) |
| 296 | stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0); | 298 | stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0); |
| 297 | 299 | ||
| 298 | if (!IS_ERR(stm32_dmamux->clk)) | 300 | pm_runtime_put(&pdev->dev); |
| 299 | clk_disable(stm32_dmamux->clk); | ||
| 300 | 301 | ||
| 301 | return of_dma_router_register(node, stm32_dmamux_route_allocate, | 302 | return of_dma_router_register(node, stm32_dmamux_route_allocate, |
| 302 | &stm32_dmamux->dmarouter); | 303 | &stm32_dmamux->dmarouter); |
| 303 | } | 304 | } |
| 304 | 305 | ||
| 306 | #ifdef CONFIG_PM | ||
| 307 | static int stm32_dmamux_runtime_suspend(struct device *dev) | ||
| 308 | { | ||
| 309 | struct platform_device *pdev = | ||
| 310 | container_of(dev, struct platform_device, dev); | ||
| 311 | struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); | ||
| 312 | |||
| 313 | clk_disable_unprepare(stm32_dmamux->clk); | ||
| 314 | |||
| 315 | return 0; | ||
| 316 | } | ||
| 317 | |||
| 318 | static int stm32_dmamux_runtime_resume(struct device *dev) | ||
| 319 | { | ||
| 320 | struct platform_device *pdev = | ||
| 321 | container_of(dev, struct platform_device, dev); | ||
| 322 | struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); | ||
| 323 | int ret; | ||
| 324 | |||
| 325 | ret = clk_prepare_enable(stm32_dmamux->clk); | ||
| 326 | if (ret) { | ||
| 327 | dev_err(&pdev->dev, "failed to prepare_enable clock\n"); | ||
| 328 | return ret; | ||
| 329 | } | ||
| 330 | |||
| 331 | return 0; | ||
| 332 | } | ||
| 333 | #endif | ||
| 334 | |||
| 335 | static const struct dev_pm_ops stm32_dmamux_pm_ops = { | ||
| 336 | SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend, | ||
| 337 | stm32_dmamux_runtime_resume, NULL) | ||
| 338 | }; | ||
| 339 | |||
| 305 | static const struct of_device_id stm32_dmamux_match[] = { | 340 | static const struct of_device_id stm32_dmamux_match[] = { |
| 306 | { .compatible = "st,stm32h7-dmamux" }, | 341 | { .compatible = "st,stm32h7-dmamux" }, |
| 307 | {}, | 342 | {}, |
| @@ -312,6 +347,7 @@ static struct platform_driver stm32_dmamux_driver = { | |||
| 312 | .driver = { | 347 | .driver = { |
| 313 | .name = "stm32-dmamux", | 348 | .name = "stm32-dmamux", |
| 314 | .of_match_table = stm32_dmamux_match, | 349 | .of_match_table = stm32_dmamux_match, |
| 350 | .pm = &stm32_dmamux_pm_ops, | ||
| 315 | }, | 351 | }, |
| 316 | }; | 352 | }; |
| 317 | 353 | ||
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 390e4cae0e1a..4e0eede599a8 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | #include <linux/of_device.h> | 37 | #include <linux/of_device.h> |
| 38 | #include <linux/of_dma.h> | 38 | #include <linux/of_dma.h> |
| 39 | #include <linux/platform_device.h> | 39 | #include <linux/platform_device.h> |
| 40 | #include <linux/pm_runtime.h> | ||
| 40 | #include <linux/reset.h> | 41 | #include <linux/reset.h> |
| 41 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
| 42 | 43 | ||
| @@ -1456,15 +1457,13 @@ static int stm32_mdma_alloc_chan_resources(struct dma_chan *c) | |||
| 1456 | return -ENOMEM; | 1457 | return -ENOMEM; |
| 1457 | } | 1458 | } |
| 1458 | 1459 | ||
| 1459 | ret = clk_prepare_enable(dmadev->clk); | 1460 | ret = pm_runtime_get_sync(dmadev->ddev.dev); |
| 1460 | if (ret < 0) { | 1461 | if (ret < 0) |
| 1461 | dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret); | ||
| 1462 | return ret; | 1462 | return ret; |
| 1463 | } | ||
| 1464 | 1463 | ||
| 1465 | ret = stm32_mdma_disable_chan(chan); | 1464 | ret = stm32_mdma_disable_chan(chan); |
| 1466 | if (ret < 0) | 1465 | if (ret < 0) |
| 1467 | clk_disable_unprepare(dmadev->clk); | 1466 | pm_runtime_put(dmadev->ddev.dev); |
| 1468 | 1467 | ||
| 1469 | return ret; | 1468 | return ret; |
| 1470 | } | 1469 | } |
| @@ -1484,7 +1483,7 @@ static void stm32_mdma_free_chan_resources(struct dma_chan *c) | |||
| 1484 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | 1483 | spin_unlock_irqrestore(&chan->vchan.lock, flags); |
| 1485 | } | 1484 | } |
| 1486 | 1485 | ||
| 1487 | clk_disable_unprepare(dmadev->clk); | 1486 | pm_runtime_put(dmadev->ddev.dev); |
| 1488 | vchan_free_chan_resources(to_virt_chan(c)); | 1487 | vchan_free_chan_resources(to_virt_chan(c)); |
| 1489 | dmam_pool_destroy(chan->desc_pool); | 1488 | dmam_pool_destroy(chan->desc_pool); |
| 1490 | chan->desc_pool = NULL; | 1489 | chan->desc_pool = NULL; |
| @@ -1579,9 +1578,11 @@ static int stm32_mdma_probe(struct platform_device *pdev) | |||
| 1579 | 1578 | ||
| 1580 | dmadev->nr_channels = nr_channels; | 1579 | dmadev->nr_channels = nr_channels; |
| 1581 | dmadev->nr_requests = nr_requests; | 1580 | dmadev->nr_requests = nr_requests; |
| 1582 | device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", | 1581 | ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", |
| 1583 | dmadev->ahb_addr_masks, | 1582 | dmadev->ahb_addr_masks, |
| 1584 | count); | 1583 | count); |
| 1584 | if (ret) | ||
| 1585 | return ret; | ||
| 1585 | dmadev->nr_ahb_addr_masks = count; | 1586 | dmadev->nr_ahb_addr_masks = count; |
| 1586 | 1587 | ||
| 1587 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1588 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| @@ -1597,6 +1598,12 @@ static int stm32_mdma_probe(struct platform_device *pdev) | |||
| 1597 | return ret; | 1598 | return ret; |
| 1598 | } | 1599 | } |
| 1599 | 1600 | ||
| 1601 | ret = clk_prepare_enable(dmadev->clk); | ||
| 1602 | if (ret < 0) { | ||
| 1603 | dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret); | ||
| 1604 | return ret; | ||
| 1605 | } | ||
| 1606 | |||
| 1600 | dmadev->rst = devm_reset_control_get(&pdev->dev, NULL); | 1607 | dmadev->rst = devm_reset_control_get(&pdev->dev, NULL); |
| 1601 | if (!IS_ERR(dmadev->rst)) { | 1608 | if (!IS_ERR(dmadev->rst)) { |
| 1602 | reset_control_assert(dmadev->rst); | 1609 | reset_control_assert(dmadev->rst); |
| @@ -1668,6 +1675,10 @@ static int stm32_mdma_probe(struct platform_device *pdev) | |||
| 1668 | } | 1675 | } |
| 1669 | 1676 | ||
| 1670 | platform_set_drvdata(pdev, dmadev); | 1677 | platform_set_drvdata(pdev, dmadev); |
| 1678 | pm_runtime_set_active(&pdev->dev); | ||
| 1679 | pm_runtime_enable(&pdev->dev); | ||
| 1680 | pm_runtime_get_noresume(&pdev->dev); | ||
| 1681 | pm_runtime_put(&pdev->dev); | ||
| 1671 | 1682 | ||
| 1672 | dev_info(&pdev->dev, "STM32 MDMA driver registered\n"); | 1683 | dev_info(&pdev->dev, "STM32 MDMA driver registered\n"); |
| 1673 | 1684 | ||
| @@ -1677,11 +1688,42 @@ err_unregister: | |||
| 1677 | return ret; | 1688 | return ret; |
| 1678 | } | 1689 | } |
| 1679 | 1690 | ||
| 1691 | #ifdef CONFIG_PM | ||
| 1692 | static int stm32_mdma_runtime_suspend(struct device *dev) | ||
| 1693 | { | ||
| 1694 | struct stm32_mdma_device *dmadev = dev_get_drvdata(dev); | ||
| 1695 | |||
| 1696 | clk_disable_unprepare(dmadev->clk); | ||
| 1697 | |||
| 1698 | return 0; | ||
| 1699 | } | ||
| 1700 | |||
| 1701 | static int stm32_mdma_runtime_resume(struct device *dev) | ||
| 1702 | { | ||
| 1703 | struct stm32_mdma_device *dmadev = dev_get_drvdata(dev); | ||
| 1704 | int ret; | ||
| 1705 | |||
| 1706 | ret = clk_prepare_enable(dmadev->clk); | ||
| 1707 | if (ret) { | ||
| 1708 | dev_err(dev, "failed to prepare_enable clock\n"); | ||
| 1709 | return ret; | ||
| 1710 | } | ||
| 1711 | |||
| 1712 | return 0; | ||
| 1713 | } | ||
| 1714 | #endif | ||
| 1715 | |||
| 1716 | static const struct dev_pm_ops stm32_mdma_pm_ops = { | ||
| 1717 | SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend, | ||
| 1718 | stm32_mdma_runtime_resume, NULL) | ||
| 1719 | }; | ||
| 1720 | |||
| 1680 | static struct platform_driver stm32_mdma_driver = { | 1721 | static struct platform_driver stm32_mdma_driver = { |
| 1681 | .probe = stm32_mdma_probe, | 1722 | .probe = stm32_mdma_probe, |
| 1682 | .driver = { | 1723 | .driver = { |
| 1683 | .name = "stm32-mdma", | 1724 | .name = "stm32-mdma", |
| 1684 | .of_match_table = stm32_mdma_of_match, | 1725 | .of_match_table = stm32_mdma_of_match, |
| 1726 | .pm = &stm32_mdma_pm_ops, | ||
| 1685 | }, | 1727 | }, |
| 1686 | }; | 1728 | }; |
| 1687 | 1729 | ||
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 9a558e30c461..cf462b1abc0b 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
| @@ -38,6 +38,9 @@ | |||
| 38 | 38 | ||
| 39 | #include "dmaengine.h" | 39 | #include "dmaengine.h" |
| 40 | 40 | ||
| 41 | #define CREATE_TRACE_POINTS | ||
| 42 | #include <trace/events/tegra_apb_dma.h> | ||
| 43 | |||
| 41 | #define TEGRA_APBDMA_GENERAL 0x0 | 44 | #define TEGRA_APBDMA_GENERAL 0x0 |
| 42 | #define TEGRA_APBDMA_GENERAL_ENABLE BIT(31) | 45 | #define TEGRA_APBDMA_GENERAL_ENABLE BIT(31) |
| 43 | 46 | ||
| @@ -146,7 +149,7 @@ struct tegra_dma_channel_regs { | |||
| 146 | }; | 149 | }; |
| 147 | 150 | ||
| 148 | /* | 151 | /* |
| 149 | * tegra_dma_sg_req: Dma request details to configure hardware. This | 152 | * tegra_dma_sg_req: DMA request details to configure hardware. This |
| 150 | * contains the details for one transfer to configure DMA hw. | 153 | * contains the details for one transfer to configure DMA hw. |
| 151 | * The client's request for data transfer can be broken into multiple | 154 | * The client's request for data transfer can be broken into multiple |
| 152 | * sub-transfer as per requester details and hw support. | 155 | * sub-transfer as per requester details and hw support. |
| @@ -155,7 +158,7 @@ struct tegra_dma_channel_regs { | |||
| 155 | */ | 158 | */ |
| 156 | struct tegra_dma_sg_req { | 159 | struct tegra_dma_sg_req { |
| 157 | struct tegra_dma_channel_regs ch_regs; | 160 | struct tegra_dma_channel_regs ch_regs; |
| 158 | int req_len; | 161 | unsigned int req_len; |
| 159 | bool configured; | 162 | bool configured; |
| 160 | bool last_sg; | 163 | bool last_sg; |
| 161 | struct list_head node; | 164 | struct list_head node; |
| @@ -169,8 +172,8 @@ struct tegra_dma_sg_req { | |||
| 169 | */ | 172 | */ |
| 170 | struct tegra_dma_desc { | 173 | struct tegra_dma_desc { |
| 171 | struct dma_async_tx_descriptor txd; | 174 | struct dma_async_tx_descriptor txd; |
| 172 | int bytes_requested; | 175 | unsigned int bytes_requested; |
| 173 | int bytes_transferred; | 176 | unsigned int bytes_transferred; |
| 174 | enum dma_status dma_status; | 177 | enum dma_status dma_status; |
| 175 | struct list_head node; | 178 | struct list_head node; |
| 176 | struct list_head tx_list; | 179 | struct list_head tx_list; |
| @@ -186,7 +189,7 @@ typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc, | |||
| 186 | /* tegra_dma_channel: Channel specific information */ | 189 | /* tegra_dma_channel: Channel specific information */ |
| 187 | struct tegra_dma_channel { | 190 | struct tegra_dma_channel { |
| 188 | struct dma_chan dma_chan; | 191 | struct dma_chan dma_chan; |
| 189 | char name[30]; | 192 | char name[12]; |
| 190 | bool config_init; | 193 | bool config_init; |
| 191 | int id; | 194 | int id; |
| 192 | int irq; | 195 | int irq; |
| @@ -574,7 +577,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, | |||
| 574 | struct tegra_dma_sg_req *hsgreq = NULL; | 577 | struct tegra_dma_sg_req *hsgreq = NULL; |
| 575 | 578 | ||
| 576 | if (list_empty(&tdc->pending_sg_req)) { | 579 | if (list_empty(&tdc->pending_sg_req)) { |
| 577 | dev_err(tdc2dev(tdc), "Dma is running without req\n"); | 580 | dev_err(tdc2dev(tdc), "DMA is running without req\n"); |
| 578 | tegra_dma_stop(tdc); | 581 | tegra_dma_stop(tdc); |
| 579 | return false; | 582 | return false; |
| 580 | } | 583 | } |
| @@ -587,7 +590,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, | |||
| 587 | hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); | 590 | hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); |
| 588 | if (!hsgreq->configured) { | 591 | if (!hsgreq->configured) { |
| 589 | tegra_dma_stop(tdc); | 592 | tegra_dma_stop(tdc); |
| 590 | dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n"); | 593 | dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n"); |
| 591 | tegra_dma_abort_all(tdc); | 594 | tegra_dma_abort_all(tdc); |
| 592 | return false; | 595 | return false; |
| 593 | } | 596 | } |
| @@ -636,7 +639,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, | |||
| 636 | 639 | ||
| 637 | sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); | 640 | sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); |
| 638 | dma_desc = sgreq->dma_desc; | 641 | dma_desc = sgreq->dma_desc; |
| 639 | dma_desc->bytes_transferred += sgreq->req_len; | 642 | /* if we dma for long enough the transfer count will wrap */ |
| 643 | dma_desc->bytes_transferred = | ||
| 644 | (dma_desc->bytes_transferred + sgreq->req_len) % | ||
| 645 | dma_desc->bytes_requested; | ||
| 640 | 646 | ||
| 641 | /* Callback need to be call */ | 647 | /* Callback need to be call */ |
| 642 | if (!dma_desc->cb_count) | 648 | if (!dma_desc->cb_count) |
| @@ -669,6 +675,8 @@ static void tegra_dma_tasklet(unsigned long data) | |||
| 669 | dmaengine_desc_get_callback(&dma_desc->txd, &cb); | 675 | dmaengine_desc_get_callback(&dma_desc->txd, &cb); |
| 670 | cb_count = dma_desc->cb_count; | 676 | cb_count = dma_desc->cb_count; |
| 671 | dma_desc->cb_count = 0; | 677 | dma_desc->cb_count = 0; |
| 678 | trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count, | ||
| 679 | cb.callback); | ||
| 672 | spin_unlock_irqrestore(&tdc->lock, flags); | 680 | spin_unlock_irqrestore(&tdc->lock, flags); |
| 673 | while (cb_count--) | 681 | while (cb_count--) |
| 674 | dmaengine_desc_callback_invoke(&cb, NULL); | 682 | dmaengine_desc_callback_invoke(&cb, NULL); |
| @@ -685,6 +693,7 @@ static irqreturn_t tegra_dma_isr(int irq, void *dev_id) | |||
| 685 | 693 | ||
| 686 | spin_lock_irqsave(&tdc->lock, flags); | 694 | spin_lock_irqsave(&tdc->lock, flags); |
| 687 | 695 | ||
| 696 | trace_tegra_dma_isr(&tdc->dma_chan, irq); | ||
| 688 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); | 697 | status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); |
| 689 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { | 698 | if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { |
| 690 | tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); | 699 | tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); |
| @@ -843,6 +852,7 @@ found: | |||
| 843 | dma_set_residue(txstate, residual); | 852 | dma_set_residue(txstate, residual); |
| 844 | } | 853 | } |
| 845 | 854 | ||
| 855 | trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate); | ||
| 846 | spin_unlock_irqrestore(&tdc->lock, flags); | 856 | spin_unlock_irqrestore(&tdc->lock, flags); |
| 847 | return ret; | 857 | return ret; |
| 848 | } | 858 | } |
| @@ -919,7 +929,7 @@ static int get_transfer_param(struct tegra_dma_channel *tdc, | |||
| 919 | return 0; | 929 | return 0; |
| 920 | 930 | ||
| 921 | default: | 931 | default: |
| 922 | dev_err(tdc2dev(tdc), "Dma direction is not supported\n"); | 932 | dev_err(tdc2dev(tdc), "DMA direction is not supported\n"); |
| 923 | return -EINVAL; | 933 | return -EINVAL; |
| 924 | } | 934 | } |
| 925 | return -EINVAL; | 935 | return -EINVAL; |
| @@ -952,7 +962,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | |||
| 952 | enum dma_slave_buswidth slave_bw; | 962 | enum dma_slave_buswidth slave_bw; |
| 953 | 963 | ||
| 954 | if (!tdc->config_init) { | 964 | if (!tdc->config_init) { |
| 955 | dev_err(tdc2dev(tdc), "dma channel is not configured\n"); | 965 | dev_err(tdc2dev(tdc), "DMA channel is not configured\n"); |
| 956 | return NULL; | 966 | return NULL; |
| 957 | } | 967 | } |
| 958 | if (sg_len < 1) { | 968 | if (sg_len < 1) { |
| @@ -985,7 +995,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | |||
| 985 | 995 | ||
| 986 | dma_desc = tegra_dma_desc_get(tdc); | 996 | dma_desc = tegra_dma_desc_get(tdc); |
| 987 | if (!dma_desc) { | 997 | if (!dma_desc) { |
| 988 | dev_err(tdc2dev(tdc), "Dma descriptors not available\n"); | 998 | dev_err(tdc2dev(tdc), "DMA descriptors not available\n"); |
| 989 | return NULL; | 999 | return NULL; |
| 990 | } | 1000 | } |
| 991 | INIT_LIST_HEAD(&dma_desc->tx_list); | 1001 | INIT_LIST_HEAD(&dma_desc->tx_list); |
| @@ -1005,14 +1015,14 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | |||
| 1005 | if ((len & 3) || (mem & 3) || | 1015 | if ((len & 3) || (mem & 3) || |
| 1006 | (len > tdc->tdma->chip_data->max_dma_count)) { | 1016 | (len > tdc->tdma->chip_data->max_dma_count)) { |
| 1007 | dev_err(tdc2dev(tdc), | 1017 | dev_err(tdc2dev(tdc), |
| 1008 | "Dma length/memory address is not supported\n"); | 1018 | "DMA length/memory address is not supported\n"); |
| 1009 | tegra_dma_desc_put(tdc, dma_desc); | 1019 | tegra_dma_desc_put(tdc, dma_desc); |
| 1010 | return NULL; | 1020 | return NULL; |
| 1011 | } | 1021 | } |
| 1012 | 1022 | ||
| 1013 | sg_req = tegra_dma_sg_req_get(tdc); | 1023 | sg_req = tegra_dma_sg_req_get(tdc); |
| 1014 | if (!sg_req) { | 1024 | if (!sg_req) { |
| 1015 | dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); | 1025 | dev_err(tdc2dev(tdc), "DMA sg-req not available\n"); |
| 1016 | tegra_dma_desc_put(tdc, dma_desc); | 1026 | tegra_dma_desc_put(tdc, dma_desc); |
| 1017 | return NULL; | 1027 | return NULL; |
| 1018 | } | 1028 | } |
| @@ -1087,7 +1097,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( | |||
| 1087 | * terminating the DMA. | 1097 | * terminating the DMA. |
| 1088 | */ | 1098 | */ |
| 1089 | if (tdc->busy) { | 1099 | if (tdc->busy) { |
| 1090 | dev_err(tdc2dev(tdc), "Request not allowed when dma running\n"); | 1100 | dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n"); |
| 1091 | return NULL; | 1101 | return NULL; |
| 1092 | } | 1102 | } |
| 1093 | 1103 | ||
| @@ -1144,7 +1154,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( | |||
| 1144 | while (remain_len) { | 1154 | while (remain_len) { |
| 1145 | sg_req = tegra_dma_sg_req_get(tdc); | 1155 | sg_req = tegra_dma_sg_req_get(tdc); |
| 1146 | if (!sg_req) { | 1156 | if (!sg_req) { |
| 1147 | dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); | 1157 | dev_err(tdc2dev(tdc), "DMA sg-req not available\n"); |
| 1148 | tegra_dma_desc_put(tdc, dma_desc); | 1158 | tegra_dma_desc_put(tdc, dma_desc); |
| 1149 | return NULL; | 1159 | return NULL; |
| 1150 | } | 1160 | } |
| @@ -1319,8 +1329,9 @@ static int tegra_dma_probe(struct platform_device *pdev) | |||
| 1319 | return -ENODEV; | 1329 | return -ENODEV; |
| 1320 | } | 1330 | } |
| 1321 | 1331 | ||
| 1322 | tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * | 1332 | tdma = devm_kzalloc(&pdev->dev, |
| 1323 | sizeof(struct tegra_dma_channel), GFP_KERNEL); | 1333 | struct_size(tdma, channels, cdata->nr_channels), |
| 1334 | GFP_KERNEL); | ||
| 1324 | if (!tdma) | 1335 | if (!tdma) |
| 1325 | return -ENOMEM; | 1336 | return -ENOMEM; |
| 1326 | 1337 | ||
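Beyond the message wording, trace points and the switch to unsigned counters, the functional fix in the tegra20-apb-dma hunk is the modulo on bytes_transferred: for cyclic transfers that run indefinitely, the running total is folded back into [0, bytes_requested) so it cannot wrap the counter, and residue reporting stays within one buffer. A small illustrative calculation (values made up, not driver code):

	static void demo_wrap_accounting(void)
	{
		unsigned int bytes_requested   = 4096;	/* cyclic buffer size */
		unsigned int bytes_transferred = 3072;	/* progress so far */
		unsigned int req_len = 2048;		/* sub-transfer just done */
		unsigned int residue;

		bytes_transferred = (bytes_transferred + req_len) % bytes_requested;
		/* 5120 % 4096 = 1024: the count wraps with the buffer instead
		 * of growing without bound */

		residue = bytes_requested - bytes_transferred;	/* 3072 */
		(void)residue;
	}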
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index b26256f23d67..5ec0dd97b397 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c | |||
| @@ -678,8 +678,9 @@ static int tegra_adma_probe(struct platform_device *pdev) | |||
| 678 | return -ENODEV; | 678 | return -ENODEV; |
| 679 | } | 679 | } |
| 680 | 680 | ||
| 681 | tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * | 681 | tdma = devm_kzalloc(&pdev->dev, |
| 682 | sizeof(struct tegra_adma_chan), GFP_KERNEL); | 682 | struct_size(tdma, channels, cdata->nr_channels), |
| 683 | GFP_KERNEL); | ||
| 683 | if (!tdma) | 684 | if (!tdma) |
| 684 | return -ENOMEM; | 685 | return -ENOMEM; |
| 685 | 686 | ||
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index fc0f9c8766a8..afbb1c95b721 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
| @@ -643,8 +643,8 @@ static int td_probe(struct platform_device *pdev) | |||
| 643 | DRIVER_NAME)) | 643 | DRIVER_NAME)) |
| 644 | return -EBUSY; | 644 | return -EBUSY; |
| 645 | 645 | ||
| 646 | td = kzalloc(sizeof(struct timb_dma) + | 646 | td = kzalloc(struct_size(td, channels, pdata->nr_channels), |
| 647 | sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL); | 647 | GFP_KERNEL); |
| 648 | if (!td) { | 648 | if (!td) { |
| 649 | err = -ENOMEM; | 649 | err = -ENOMEM; |
| 650 | goto err_release_region; | 650 | goto err_release_region; |
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index cb20b411493e..c43c1a154604 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c | |||
| @@ -86,6 +86,7 @@ | |||
| 86 | #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) | 86 | #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) |
| 87 | #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) | 87 | #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) |
| 88 | #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) | 88 | #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) |
| 89 | #define XILINX_DMA_DMASR_SG_MASK BIT(3) | ||
| 89 | #define XILINX_DMA_DMASR_IDLE BIT(1) | 90 | #define XILINX_DMA_DMASR_IDLE BIT(1) |
| 90 | #define XILINX_DMA_DMASR_HALTED BIT(0) | 91 | #define XILINX_DMA_DMASR_HALTED BIT(0) |
| 91 | #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) | 92 | #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) |
| @@ -161,7 +162,9 @@ | |||
| 161 | #define XILINX_DMA_REG_BTT 0x28 | 162 | #define XILINX_DMA_REG_BTT 0x28 |
| 162 | 163 | ||
| 163 | /* AXI DMA Specific Masks/Bit fields */ | 164 | /* AXI DMA Specific Masks/Bit fields */ |
| 164 | #define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) | 165 | #define XILINX_DMA_MAX_TRANS_LEN_MIN 8 |
| 166 | #define XILINX_DMA_MAX_TRANS_LEN_MAX 23 | ||
| 167 | #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26 | ||
| 165 | #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) | 168 | #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) |
| 166 | #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) | 169 | #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4) |
| 167 | #define XILINX_DMA_CR_COALESCE_SHIFT 16 | 170 | #define XILINX_DMA_CR_COALESCE_SHIFT 16 |
| @@ -412,7 +415,6 @@ struct xilinx_dma_config { | |||
| 412 | * @dev: Device Structure | 415 | * @dev: Device Structure |
| 413 | * @common: DMA device structure | 416 | * @common: DMA device structure |
| 414 | * @chan: Driver specific DMA channel | 417 | * @chan: Driver specific DMA channel |
| 415 | * @has_sg: Specifies whether Scatter-Gather is present or not | ||
| 416 | * @mcdma: Specifies whether Multi-Channel is present or not | 418 | * @mcdma: Specifies whether Multi-Channel is present or not |
| 417 | * @flush_on_fsync: Flush on frame sync | 419 | * @flush_on_fsync: Flush on frame sync |
| 418 | * @ext_addr: Indicates 64 bit addressing is supported by dma device | 420 | * @ext_addr: Indicates 64 bit addressing is supported by dma device |
| @@ -425,13 +427,13 @@ struct xilinx_dma_config { | |||
| 425 | * @rxs_clk: DMA s2mm stream clock | 427 | * @rxs_clk: DMA s2mm stream clock |
| 426 | * @nr_channels: Number of channels DMA device supports | 428 | * @nr_channels: Number of channels DMA device supports |
| 427 | * @chan_id: DMA channel identifier | 429 | * @chan_id: DMA channel identifier |
| 430 | * @max_buffer_len: Max buffer length | ||
| 428 | */ | 431 | */ |
| 429 | struct xilinx_dma_device { | 432 | struct xilinx_dma_device { |
| 430 | void __iomem *regs; | 433 | void __iomem *regs; |
| 431 | struct device *dev; | 434 | struct device *dev; |
| 432 | struct dma_device common; | 435 | struct dma_device common; |
| 433 | struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; | 436 | struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; |
| 434 | bool has_sg; | ||
| 435 | bool mcdma; | 437 | bool mcdma; |
| 436 | u32 flush_on_fsync; | 438 | u32 flush_on_fsync; |
| 437 | bool ext_addr; | 439 | bool ext_addr; |
| @@ -444,6 +446,7 @@ struct xilinx_dma_device { | |||
| 444 | struct clk *rxs_clk; | 446 | struct clk *rxs_clk; |
| 445 | u32 nr_channels; | 447 | u32 nr_channels; |
| 446 | u32 chan_id; | 448 | u32 chan_id; |
| 449 | u32 max_buffer_len; | ||
| 447 | }; | 450 | }; |
| 448 | 451 | ||
| 449 | /* Macros */ | 452 | /* Macros */ |
| @@ -960,6 +963,34 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) | |||
| 960 | } | 963 | } |
| 961 | 964 | ||
| 962 | /** | 965 | /** |
| 966 | * xilinx_dma_calc_copysize - Calculate the amount of data to copy | ||
| 967 | * @chan: Driver specific DMA channel | ||
| 968 | * @size: Total data that needs to be copied | ||
| 969 | * @done: Amount of data that has been already copied | ||
| 970 | * | ||
| 971 | * Return: Amount of data that has to be copied | ||
| 972 | */ | ||
| 973 | static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan, | ||
| 974 | int size, int done) | ||
| 975 | { | ||
| 976 | size_t copy; | ||
| 977 | |||
| 978 | copy = min_t(size_t, size - done, | ||
| 979 | chan->xdev->max_buffer_len); | ||
| 980 | |||
| 981 | if ((copy + done < size) && | ||
| 982 | chan->xdev->common.copy_align) { | ||
| 983 | /* | ||
| 984 | * If this is not the last descriptor, make sure | ||
| 985 | * the next one will be properly aligned | ||
| 986 | */ | ||
| 987 | copy = rounddown(copy, | ||
| 988 | (1 << chan->xdev->common.copy_align)); | ||
| 989 | } | ||
| 990 | return copy; | ||
| 991 | } | ||
| 992 | |||
| 993 | /** | ||
| 963 | * xilinx_dma_tx_status - Get DMA transaction status | 994 | * xilinx_dma_tx_status - Get DMA transaction status |
| 964 | * @dchan: DMA channel | 995 | * @dchan: DMA channel |
| 965 | * @cookie: Transaction identifier | 996 | * @cookie: Transaction identifier |
| @@ -992,7 +1023,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, | |||
| 992 | list_for_each_entry(segment, &desc->segments, node) { | 1023 | list_for_each_entry(segment, &desc->segments, node) { |
| 993 | hw = &segment->hw; | 1024 | hw = &segment->hw; |
| 994 | residue += (hw->control - hw->status) & | 1025 | residue += (hw->control - hw->status) & |
| 995 | XILINX_DMA_MAX_TRANS_LEN; | 1026 | chan->xdev->max_buffer_len; |
| 996 | } | 1027 | } |
| 997 | } | 1028 | } |
| 998 | spin_unlock_irqrestore(&chan->lock, flags); | 1029 | spin_unlock_irqrestore(&chan->lock, flags); |
| @@ -1070,7 +1101,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) | |||
| 1070 | struct xilinx_vdma_config *config = &chan->config; | 1101 | struct xilinx_vdma_config *config = &chan->config; |
| 1071 | struct xilinx_dma_tx_descriptor *desc, *tail_desc; | 1102 | struct xilinx_dma_tx_descriptor *desc, *tail_desc; |
| 1072 | u32 reg, j; | 1103 | u32 reg, j; |
| 1073 | struct xilinx_vdma_tx_segment *tail_segment; | 1104 | struct xilinx_vdma_tx_segment *segment, *last = NULL; |
| 1105 | int i = 0; | ||
| 1074 | 1106 | ||
| 1075 | /* This function was invoked with lock held */ | 1107 | /* This function was invoked with lock held */ |
| 1076 | if (chan->err) | 1108 | if (chan->err) |
| @@ -1087,17 +1119,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) | |||
| 1087 | tail_desc = list_last_entry(&chan->pending_list, | 1119 | tail_desc = list_last_entry(&chan->pending_list, |
| 1088 | struct xilinx_dma_tx_descriptor, node); | 1120 | struct xilinx_dma_tx_descriptor, node); |
| 1089 | 1121 | ||
| 1090 | tail_segment = list_last_entry(&tail_desc->segments, | ||
| 1091 | struct xilinx_vdma_tx_segment, node); | ||
| 1092 | |||
| 1093 | /* | ||
| 1094 | * If hardware is idle, then all descriptors on the running lists are | ||
| 1095 | * done, start new transfers | ||
| 1096 | */ | ||
| 1097 | if (chan->has_sg) | ||
| 1098 | dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, | ||
| 1099 | desc->async_tx.phys); | ||
| 1100 | |||
| 1101 | /* Configure the hardware using info in the config structure */ | 1122 | /* Configure the hardware using info in the config structure */ |
| 1102 | if (chan->has_vflip) { | 1123 | if (chan->has_vflip) { |
| 1103 | reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP); | 1124 | reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP); |
| @@ -1114,15 +1135,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) | |||
| 1114 | else | 1135 | else |
| 1115 | reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; | 1136 | reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; |
| 1116 | 1137 | ||
| 1117 | /* | 1138 | /* If not parking, enable circular mode */ |
| 1118 | * With SG, start with circular mode, so that BDs can be fetched. | ||
| 1119 | * In direct register mode, if not parking, enable circular mode | ||
| 1120 | */ | ||
| 1121 | if (chan->has_sg || !config->park) | ||
| 1122 | reg |= XILINX_DMA_DMACR_CIRC_EN; | ||
| 1123 | |||
| 1124 | if (config->park) | 1139 | if (config->park) |
| 1125 | reg &= ~XILINX_DMA_DMACR_CIRC_EN; | 1140 | reg &= ~XILINX_DMA_DMACR_CIRC_EN; |
| 1141 | else | ||
| 1142 | reg |= XILINX_DMA_DMACR_CIRC_EN; | ||
| 1126 | 1143 | ||
| 1127 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); | 1144 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); |
| 1128 | 1145 | ||
| @@ -1144,48 +1161,38 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) | |||
| 1144 | return; | 1161 | return; |
| 1145 | 1162 | ||
| 1146 | /* Start the transfer */ | 1163 | /* Start the transfer */ |
| 1147 | if (chan->has_sg) { | 1164 | if (chan->desc_submitcount < chan->num_frms) |
| 1148 | dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, | 1165 | i = chan->desc_submitcount; |
| 1149 | tail_segment->phys); | 1166 | |
| 1150 | list_splice_tail_init(&chan->pending_list, &chan->active_list); | 1167 | list_for_each_entry(segment, &desc->segments, node) { |
| 1151 | chan->desc_pendingcount = 0; | 1168 | if (chan->ext_addr) |
| 1152 | } else { | 1169 | vdma_desc_write_64(chan, |
| 1153 | struct xilinx_vdma_tx_segment *segment, *last = NULL; | 1170 | XILINX_VDMA_REG_START_ADDRESS_64(i++), |
| 1154 | int i = 0; | 1171 | segment->hw.buf_addr, |
| 1155 | 1172 | segment->hw.buf_addr_msb); | |
| 1156 | if (chan->desc_submitcount < chan->num_frms) | 1173 | else |
| 1157 | i = chan->desc_submitcount; | 1174 | vdma_desc_write(chan, |
| 1158 | |||
| 1159 | list_for_each_entry(segment, &desc->segments, node) { | ||
| 1160 | if (chan->ext_addr) | ||
| 1161 | vdma_desc_write_64(chan, | ||
| 1162 | XILINX_VDMA_REG_START_ADDRESS_64(i++), | ||
| 1163 | segment->hw.buf_addr, | ||
| 1164 | segment->hw.buf_addr_msb); | ||
| 1165 | else | ||
| 1166 | vdma_desc_write(chan, | ||
| 1167 | XILINX_VDMA_REG_START_ADDRESS(i++), | 1175 | XILINX_VDMA_REG_START_ADDRESS(i++), |
| 1168 | segment->hw.buf_addr); | 1176 | segment->hw.buf_addr); |
| 1169 | 1177 | ||
| 1170 | last = segment; | 1178 | last = segment; |
| 1171 | } | 1179 | } |
| 1172 | 1180 | ||
| 1173 | if (!last) | 1181 | if (!last) |
| 1174 | return; | 1182 | return; |
| 1175 | 1183 | ||
| 1176 | /* HW expects these parameters to be same for one transaction */ | 1184 | /* HW expects these parameters to be same for one transaction */ |
| 1177 | vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); | 1185 | vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); |
| 1178 | vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, | 1186 | vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, |
| 1179 | last->hw.stride); | 1187 | last->hw.stride); |
| 1180 | vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); | 1188 | vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); |
| 1181 | 1189 | ||
| 1182 | chan->desc_submitcount++; | 1190 | chan->desc_submitcount++; |
| 1183 | chan->desc_pendingcount--; | 1191 | chan->desc_pendingcount--; |
| 1184 | list_del(&desc->node); | 1192 | list_del(&desc->node); |
| 1185 | list_add_tail(&desc->node, &chan->active_list); | 1193 | list_add_tail(&desc->node, &chan->active_list); |
| 1186 | if (chan->desc_submitcount == chan->num_frms) | 1194 | if (chan->desc_submitcount == chan->num_frms) |
| 1187 | chan->desc_submitcount = 0; | 1195 | chan->desc_submitcount = 0; |
| 1188 | } | ||
| 1189 | 1196 | ||
| 1190 | chan->idle = false; | 1197 | chan->idle = false; |
| 1191 | } | 1198 | } |
| @@ -1254,7 +1261,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) | |||
| 1254 | 1261 | ||
| 1255 | /* Start the transfer */ | 1262 | /* Start the transfer */ |
| 1256 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, | 1263 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, |
| 1257 | hw->control & XILINX_DMA_MAX_TRANS_LEN); | 1264 | hw->control & chan->xdev->max_buffer_len); |
| 1258 | } | 1265 | } |
| 1259 | 1266 | ||
| 1260 | list_splice_tail_init(&chan->pending_list, &chan->active_list); | 1267 | list_splice_tail_init(&chan->pending_list, &chan->active_list); |
| @@ -1357,7 +1364,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) | |||
| 1357 | 1364 | ||
| 1358 | /* Start the transfer */ | 1365 | /* Start the transfer */ |
| 1359 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, | 1366 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, |
| 1360 | hw->control & XILINX_DMA_MAX_TRANS_LEN); | 1367 | hw->control & chan->xdev->max_buffer_len); |
| 1361 | } | 1368 | } |
| 1362 | 1369 | ||
| 1363 | list_splice_tail_init(&chan->pending_list, &chan->active_list); | 1370 | list_splice_tail_init(&chan->pending_list, &chan->active_list); |
| @@ -1718,7 +1725,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, | |||
| 1718 | struct xilinx_cdma_tx_segment *segment; | 1725 | struct xilinx_cdma_tx_segment *segment; |
| 1719 | struct xilinx_cdma_desc_hw *hw; | 1726 | struct xilinx_cdma_desc_hw *hw; |
| 1720 | 1727 | ||
| 1721 | if (!len || len > XILINX_DMA_MAX_TRANS_LEN) | 1728 | if (!len || len > chan->xdev->max_buffer_len) |
| 1722 | return NULL; | 1729 | return NULL; |
| 1723 | 1730 | ||
| 1724 | desc = xilinx_dma_alloc_tx_descriptor(chan); | 1731 | desc = xilinx_dma_alloc_tx_descriptor(chan); |
| @@ -1808,8 +1815,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( | |||
| 1808 | * Calculate the maximum number of bytes to transfer, | 1815 | * Calculate the maximum number of bytes to transfer, |
| 1809 | * making sure it is less than the hw limit | 1816 | * making sure it is less than the hw limit |
| 1810 | */ | 1817 | */ |
| 1811 | copy = min_t(size_t, sg_dma_len(sg) - sg_used, | 1818 | copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg), |
| 1812 | XILINX_DMA_MAX_TRANS_LEN); | 1819 | sg_used); |
| 1813 | hw = &segment->hw; | 1820 | hw = &segment->hw; |
| 1814 | 1821 | ||
| 1815 | /* Fill in the descriptor */ | 1822 | /* Fill in the descriptor */ |
| @@ -1913,8 +1920,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( | |||
| 1913 | * Calculate the maximum number of bytes to transfer, | 1920 | * Calculate the maximum number of bytes to transfer, |
| 1914 | * making sure it is less than the hw limit | 1921 | * making sure it is less than the hw limit |
| 1915 | */ | 1922 | */ |
| 1916 | copy = min_t(size_t, period_len - sg_used, | 1923 | copy = xilinx_dma_calc_copysize(chan, period_len, |
| 1917 | XILINX_DMA_MAX_TRANS_LEN); | 1924 | sg_used); |
| 1918 | hw = &segment->hw; | 1925 | hw = &segment->hw; |
| 1919 | xilinx_axidma_buf(chan, hw, buf_addr, sg_used, | 1926 | xilinx_axidma_buf(chan, hw, buf_addr, sg_used, |
| 1920 | period_len * i); | 1927 | period_len * i); |
| @@ -2389,7 +2396,6 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, | |||
| 2389 | 2396 | ||
| 2390 | chan->dev = xdev->dev; | 2397 | chan->dev = xdev->dev; |
| 2391 | chan->xdev = xdev; | 2398 | chan->xdev = xdev; |
| 2392 | chan->has_sg = xdev->has_sg; | ||
| 2393 | chan->desc_pendingcount = 0x0; | 2399 | chan->desc_pendingcount = 0x0; |
| 2394 | chan->ext_addr = xdev->ext_addr; | 2400 | chan->ext_addr = xdev->ext_addr; |
| 2395 | /* This variable ensures that descriptors are not | 2401 | /* This variable ensures that descriptors are not |
| @@ -2489,6 +2495,15 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, | |||
| 2489 | chan->stop_transfer = xilinx_dma_stop_transfer; | 2495 | chan->stop_transfer = xilinx_dma_stop_transfer; |
| 2490 | } | 2496 | } |
| 2491 | 2497 | ||
| 2498 | /* check if SG is enabled (only for AXIDMA and CDMA) */ | ||
| 2499 | if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) { | ||
| 2500 | if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & | ||
| 2501 | XILINX_DMA_DMASR_SG_MASK) | ||
| 2502 | chan->has_sg = true; | ||
| 2503 | dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id, | ||
| 2504 | chan->has_sg ? "enabled" : "disabled"); | ||
| 2505 | } | ||
| 2506 | |||
| 2492 | /* Initialize the tasklet */ | 2507 | /* Initialize the tasklet */ |
| 2493 | tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, | 2508 | tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, |
| 2494 | (unsigned long)chan); | 2509 | (unsigned long)chan); |
| @@ -2596,7 +2611,7 @@ static int xilinx_dma_probe(struct platform_device *pdev) | |||
| 2596 | struct xilinx_dma_device *xdev; | 2611 | struct xilinx_dma_device *xdev; |
| 2597 | struct device_node *child, *np = pdev->dev.of_node; | 2612 | struct device_node *child, *np = pdev->dev.of_node; |
| 2598 | struct resource *io; | 2613 | struct resource *io; |
| 2599 | u32 num_frames, addr_width; | 2614 | u32 num_frames, addr_width, len_width; |
| 2600 | int i, err; | 2615 | int i, err; |
| 2601 | 2616 | ||
| 2602 | /* Allocate and initialize the DMA engine structure */ | 2617 | /* Allocate and initialize the DMA engine structure */ |
| @@ -2627,9 +2642,24 @@ static int xilinx_dma_probe(struct platform_device *pdev) | |||
| 2627 | return PTR_ERR(xdev->regs); | 2642 | return PTR_ERR(xdev->regs); |
| 2628 | 2643 | ||
| 2629 | /* Retrieve the DMA engine properties from the device tree */ | 2644 | /* Retrieve the DMA engine properties from the device tree */ |
| 2630 | xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); | 2645 | xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0); |
| 2631 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) | 2646 | |
| 2647 | if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { | ||
| 2632 | xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); | 2648 | xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); |
| 2649 | if (!of_property_read_u32(node, "xlnx,sg-length-width", | ||
| 2650 | &len_width)) { | ||
| 2651 | if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN || | ||
| 2652 | len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) { | ||
| 2653 | dev_warn(xdev->dev, | ||
| 2654 | "invalid xlnx,sg-length-width property value. Using default width\n"); | ||
| 2655 | } else { | ||
| 2656 | if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX) | ||
| 2657 | dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n"); | ||
| 2658 | xdev->max_buffer_len = | ||
| 2659 | GENMASK(len_width - 1, 0); | ||
| 2660 | } | ||
| 2661 | } | ||
| 2662 | } | ||
| 2633 | 2663 | ||
| 2634 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { | 2664 | if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
| 2635 | err = of_property_read_u32(node, "xlnx,num-fstores", | 2665 | err = of_property_read_u32(node, "xlnx,num-fstores", |
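These xilinx_dma hunks replace the fixed XILINX_DMA_MAX_TRANS_LEN clamp with a per-device max_buffer_len taken from the optional "xlnx,sg-length-width" property (a 26-bit length field, for instance, gives GENMASK(25, 0) = 0x3ffffff), and route segment sizing through a new xilinx_dma_calc_copysize() helper. The helper body is not part of this excerpt; the sketch below only assumes its core clamp and the chan->xdev->max_buffer_len field used elsewhere in the patch, and the real helper may add engine-specific alignment on top of it.

/* Hypothetical sketch of the new copy-size helper: limit one segment to
 * the runtime buffer-length mask instead of a compile-time constant. */
static size_t xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
				       size_t size, size_t done)
{
	return min_t(size_t, size - done, chan->xdev->max_buffer_len);
}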
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index ebb3fa2e1d00..362aa5450a5e 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c | |||
| @@ -2032,10 +2032,19 @@ setup_hw(struct hfc_pci *hc) | |||
| 2032 | hc->hw.fifos = buffer; | 2032 | hc->hw.fifos = buffer; |
| 2033 | pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle); | 2033 | pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle); |
| 2034 | hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256); | 2034 | hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256); |
| 2035 | if (unlikely(!hc->hw.pci_io)) { | ||
| 2036 | printk(KERN_WARNING | ||
| 2037 | "HFC-PCI: Error in ioremap for PCI!\n"); | ||
| 2038 | pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, | ||
| 2039 | hc->hw.dmahandle); | ||
| 2040 | return 1; | ||
| 2041 | } | ||
| 2042 | |||
| 2035 | printk(KERN_INFO | 2043 | printk(KERN_INFO |
| 2036 | "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n", | 2044 | "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n", |
| 2037 | (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos, | 2045 | (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos, |
| 2038 | (u_long) hc->hw.dmahandle, hc->irq, HZ); | 2046 | (u_long) hc->hw.dmahandle, hc->irq, HZ); |
| 2047 | |||
| 2039 | /* enable memory mapped ports, disable busmaster */ | 2048 | /* enable memory mapped ports, disable busmaster */ |
| 2040 | pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO); | 2049 | pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO); |
| 2041 | hc->hw.int_m2 = 0; | 2050 | hc->hw.int_m2 = 0; |
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 3eeb12e93e98..d86e7a4ac04d 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig | |||
| @@ -205,4 +205,15 @@ config MTK_CMDQ_MBOX | |||
| 205 | mailbox driver. The CMDQ is used to help read/write registers with | 205 | mailbox driver. The CMDQ is used to help read/write registers with |
| 206 | critical time limitation, such as updating display configuration | 206 | critical time limitation, such as updating display configuration |
| 207 | during the vblank. | 207 | during the vblank. |
| 208 | |||
| 209 | config ZYNQMP_IPI_MBOX | ||
| 210 | bool "Xilinx ZynqMP IPI Mailbox" | ||
| 211 | depends on ARCH_ZYNQMP && OF | ||
| 212 | help | ||
| 213 | Say yes here to add support for the Xilinx IPI mailbox driver. | ||
| 214 | This mailbox driver is used to send notifications or short messages | ||
| 215 | between processors with Xilinx ZynqMP IPI. It will place the | ||
| 216 | message in the IPI buffer and will access the IPI control | ||
| 217 | registers to kick the other processor or enquire about its status. | ||
| 218 | |||
| 208 | endif | 219 | endif |
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index c818b5d011ae..8be3bcbcf882 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile | |||
| @@ -44,3 +44,5 @@ obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o | |||
| 44 | obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o | 44 | obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o |
| 45 | 45 | ||
| 46 | obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o | 46 | obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o |
| 47 | |||
| 48 | obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o | ||
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 774362a05159..85fc5b56f99b 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c | |||
| @@ -187,8 +187,8 @@ static int imx_mu_startup(struct mbox_chan *chan) | |||
| 187 | return 0; | 187 | return 0; |
| 188 | } | 188 | } |
| 189 | 189 | ||
| 190 | ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED, cp->irq_desc, | 190 | ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED | |
| 191 | chan); | 191 | IRQF_NO_SUSPEND, cp->irq_desc, chan); |
| 192 | if (ret) { | 192 | if (ret) { |
| 193 | dev_err(priv->dev, | 193 | dev_err(priv->dev, |
| 194 | "Unable to acquire IRQ %d\n", priv->irq); | 194 | "Unable to acquire IRQ %d\n", priv->irq); |
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 58bfafc34bc4..4e4ac4be6423 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c | |||
| @@ -31,7 +31,6 @@ | |||
| 31 | (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE)) | 31 | (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE)) |
| 32 | 32 | ||
| 33 | static bool mbox_data_ready; | 33 | static bool mbox_data_ready; |
| 34 | static struct dentry *root_debugfs_dir; | ||
| 35 | 34 | ||
| 36 | struct mbox_test_device { | 35 | struct mbox_test_device { |
| 37 | struct device *dev; | 36 | struct device *dev; |
| @@ -45,6 +44,7 @@ struct mbox_test_device { | |||
| 45 | spinlock_t lock; | 44 | spinlock_t lock; |
| 46 | wait_queue_head_t waitq; | 45 | wait_queue_head_t waitq; |
| 47 | struct fasync_struct *async_queue; | 46 | struct fasync_struct *async_queue; |
| 47 | struct dentry *root_debugfs_dir; | ||
| 48 | }; | 48 | }; |
| 49 | 49 | ||
| 50 | static ssize_t mbox_test_signal_write(struct file *filp, | 50 | static ssize_t mbox_test_signal_write(struct file *filp, |
| @@ -262,16 +262,16 @@ static int mbox_test_add_debugfs(struct platform_device *pdev, | |||
| 262 | if (!debugfs_initialized()) | 262 | if (!debugfs_initialized()) |
| 263 | return 0; | 263 | return 0; |
| 264 | 264 | ||
| 265 | root_debugfs_dir = debugfs_create_dir("mailbox", NULL); | 265 | tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL); |
| 266 | if (!root_debugfs_dir) { | 266 | if (!tdev->root_debugfs_dir) { |
| 267 | dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n"); | 267 | dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n"); |
| 268 | return -EINVAL; | 268 | return -EINVAL; |
| 269 | } | 269 | } |
| 270 | 270 | ||
| 271 | debugfs_create_file("message", 0600, root_debugfs_dir, | 271 | debugfs_create_file("message", 0600, tdev->root_debugfs_dir, |
| 272 | tdev, &mbox_test_message_ops); | 272 | tdev, &mbox_test_message_ops); |
| 273 | 273 | ||
| 274 | debugfs_create_file("signal", 0200, root_debugfs_dir, | 274 | debugfs_create_file("signal", 0200, tdev->root_debugfs_dir, |
| 275 | tdev, &mbox_test_signal_ops); | 275 | tdev, &mbox_test_signal_ops); |
| 276 | 276 | ||
| 277 | return 0; | 277 | return 0; |
| @@ -363,22 +363,24 @@ static int mbox_test_probe(struct platform_device *pdev) | |||
| 363 | 363 | ||
| 364 | /* It's okay for MMIO to be NULL */ | 364 | /* It's okay for MMIO to be NULL */ |
| 365 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 365 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 366 | size = resource_size(res); | ||
| 367 | tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res); | 366 | tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res); |
| 368 | if (PTR_ERR(tdev->tx_mmio) == -EBUSY) | 367 | if (PTR_ERR(tdev->tx_mmio) == -EBUSY) { |
| 369 | /* if reserved area in SRAM, try just ioremap */ | 368 | /* if reserved area in SRAM, try just ioremap */ |
| 369 | size = resource_size(res); | ||
| 370 | tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size); | 370 | tdev->tx_mmio = devm_ioremap(&pdev->dev, res->start, size); |
| 371 | else if (IS_ERR(tdev->tx_mmio)) | 371 | } else if (IS_ERR(tdev->tx_mmio)) { |
| 372 | tdev->tx_mmio = NULL; | 372 | tdev->tx_mmio = NULL; |
| 373 | } | ||
| 373 | 374 | ||
| 374 | /* If specified, second reg entry is Rx MMIO */ | 375 | /* If specified, second reg entry is Rx MMIO */ |
| 375 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 376 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 376 | size = resource_size(res); | ||
| 377 | tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res); | 377 | tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res); |
| 378 | if (PTR_ERR(tdev->rx_mmio) == -EBUSY) | 378 | if (PTR_ERR(tdev->rx_mmio) == -EBUSY) { |
| 379 | size = resource_size(res); | ||
| 379 | tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size); | 380 | tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size); |
| 380 | else if (IS_ERR(tdev->rx_mmio)) | 381 | } else if (IS_ERR(tdev->rx_mmio)) { |
| 381 | tdev->rx_mmio = tdev->tx_mmio; | 382 | tdev->rx_mmio = tdev->tx_mmio; |
| 383 | } | ||
| 382 | 384 | ||
| 383 | tdev->tx_channel = mbox_test_request_channel(pdev, "tx"); | 385 | tdev->tx_channel = mbox_test_request_channel(pdev, "tx"); |
| 384 | tdev->rx_channel = mbox_test_request_channel(pdev, "rx"); | 386 | tdev->rx_channel = mbox_test_request_channel(pdev, "rx"); |
| @@ -416,7 +418,7 @@ static int mbox_test_remove(struct platform_device *pdev) | |||
| 416 | { | 418 | { |
| 417 | struct mbox_test_device *tdev = platform_get_drvdata(pdev); | 419 | struct mbox_test_device *tdev = platform_get_drvdata(pdev); |
| 418 | 420 | ||
| 419 | debugfs_remove_recursive(root_debugfs_dir); | 421 | debugfs_remove_recursive(tdev->root_debugfs_dir); |
| 420 | 422 | ||
| 421 | if (tdev->tx_channel) | 423 | if (tdev->tx_channel) |
| 422 | mbox_free_channel(tdev->tx_channel); | 424 | mbox_free_channel(tdev->tx_channel); |
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c index a338bd4cd7db..210fe504f5ae 100644 --- a/drivers/mailbox/stm32-ipcc.c +++ b/drivers/mailbox/stm32-ipcc.c | |||
| @@ -270,14 +270,12 @@ static int stm32_ipcc_probe(struct platform_device *pdev) | |||
| 270 | goto err_clk; | 270 | goto err_clk; |
| 271 | } | 271 | } |
| 272 | 272 | ||
| 273 | device_init_wakeup(dev, true); | 273 | device_set_wakeup_capable(dev, true); |
| 274 | ret = dev_pm_set_dedicated_wake_irq(dev, ipcc->wkp); | 274 | ret = dev_pm_set_dedicated_wake_irq(dev, ipcc->wkp); |
| 275 | if (ret) { | 275 | if (ret) { |
| 276 | dev_err(dev, "Failed to set wake up irq\n"); | 276 | dev_err(dev, "Failed to set wake up irq\n"); |
| 277 | goto err_init_wkp; | 277 | goto err_init_wkp; |
| 278 | } | 278 | } |
| 279 | } else { | ||
| 280 | device_init_wakeup(dev, false); | ||
| 281 | } | 279 | } |
| 282 | 280 | ||
| 283 | /* mailbox controller */ | 281 | /* mailbox controller */ |
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index e443f6a2ec4b..11fc9fd6a94a 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c | |||
| @@ -779,7 +779,7 @@ static int tegra_hsp_probe(struct platform_device *pdev) | |||
| 779 | return 0; | 779 | return 0; |
| 780 | } | 780 | } |
| 781 | 781 | ||
| 782 | static int tegra_hsp_resume(struct device *dev) | 782 | static int __maybe_unused tegra_hsp_resume(struct device *dev) |
| 783 | { | 783 | { |
| 784 | struct tegra_hsp *hsp = dev_get_drvdata(dev); | 784 | struct tegra_hsp *hsp = dev_get_drvdata(dev); |
| 785 | unsigned int i; | 785 | unsigned int i; |
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c new file mode 100644 index 000000000000..86887c9a349a --- /dev/null +++ b/drivers/mailbox/zynqmp-ipi-mailbox.c | |||
| @@ -0,0 +1,725 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Xilinx Inter Processor Interrupt(IPI) Mailbox Driver | ||
| 4 | * | ||
| 5 | * Copyright (C) 2018 Xilinx, Inc. | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/arm-smccc.h> | ||
| 9 | #include <linux/delay.h> | ||
| 10 | #include <linux/device.h> | ||
| 11 | #include <linux/interrupt.h> | ||
| 12 | #include <linux/io.h> | ||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/mailbox_controller.h> | ||
| 15 | #include <linux/mailbox/zynqmp-ipi-message.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/of.h> | ||
| 18 | #include <linux/of_address.h> | ||
| 19 | #include <linux/of_device.h> | ||
| 20 | #include <linux/of_irq.h> | ||
| 21 | #include <linux/platform_device.h> | ||
| 22 | |||
| 23 | /* IPI agent ID any */ | ||
| 24 | #define IPI_ID_ANY 0xFFUL | ||
| 25 | |||
| 26 | /* indicate if ZynqMP IPI mailbox driver uses SMC calls or HVC calls */ | ||
| 27 | #define USE_SMC 0 | ||
| 28 | #define USE_HVC 1 | ||
| 29 | |||
| 30 | /* Default IPI SMC function IDs */ | ||
| 31 | #define SMC_IPI_MAILBOX_OPEN 0x82001000U | ||
| 32 | #define SMC_IPI_MAILBOX_RELEASE 0x82001001U | ||
| 33 | #define SMC_IPI_MAILBOX_STATUS_ENQUIRY 0x82001002U | ||
| 34 | #define SMC_IPI_MAILBOX_NOTIFY 0x82001003U | ||
| 35 | #define SMC_IPI_MAILBOX_ACK 0x82001004U | ||
| 36 | #define SMC_IPI_MAILBOX_ENABLE_IRQ 0x82001005U | ||
| 37 | #define SMC_IPI_MAILBOX_DISABLE_IRQ 0x82001006U | ||
| 38 | |||
| 39 | /* IPI SMC Macros */ | ||
| 40 | #define IPI_SMC_ENQUIRY_DIRQ_MASK 0x00000001UL /* Flag to indicate if the | ||
| 41 | * notification interrupt | ||
| 42 | * is to be disabled. | ||
| 43 | */ | ||
| 44 | #define IPI_SMC_ACK_EIRQ_MASK 0x00000001UL /* Flag to indicate if the | ||
| 45 | * notification interrupt | ||
| 46 | * is to be enabled. | ||
| 47 | */ | ||
| 48 | |||
| 49 | /* IPI mailbox status */ | ||
| 50 | #define IPI_MB_STATUS_IDLE 0 | ||
| 51 | #define IPI_MB_STATUS_SEND_PENDING 1 | ||
| 52 | #define IPI_MB_STATUS_RECV_PENDING 2 | ||
| 53 | |||
| 54 | #define IPI_MB_CHNL_TX 0 /* IPI mailbox TX channel */ | ||
| 55 | #define IPI_MB_CHNL_RX 1 /* IPI mailbox RX channel */ | ||
| 56 | |||
| 57 | /** | ||
| 58 | * struct zynqmp_ipi_mchan - Description of a Xilinx ZynqMP IPI mailbox channel | ||
| 59 | * @is_opened: indicate if the IPI channel is opened | ||
| 60 | * @req_buf: local to remote request buffer start address | ||
| 61 | * @resp_buf: local to remote response buffer start address | ||
| 62 | * @req_buf_size: request buffer size | ||
| 63 | * @resp_buf_size: response buffer size | ||
| 64 | * @rx_buf: receive buffer to pass received message to client | ||
| 65 | * @chan_type: channel type | ||
| 66 | */ | ||
| 67 | struct zynqmp_ipi_mchan { | ||
| 68 | int is_opened; | ||
| 69 | void __iomem *req_buf; | ||
| 70 | void __iomem *resp_buf; | ||
| 71 | void *rx_buf; | ||
| 72 | size_t req_buf_size; | ||
| 73 | size_t resp_buf_size; | ||
| 74 | unsigned int chan_type; | ||
| 75 | }; | ||
| 76 | |||
| 77 | /** | ||
| 78 | * struct zynqmp_ipi_mbox - Description of a ZynqMP IPI mailbox | ||
| 79 | * platform data. | ||
| 80 | * @pdata: pointer to the IPI private data | ||
| 81 | * @dev: device pointer corresponding to the Xilinx ZynqMP | ||
| 82 | * IPI mailbox | ||
| 83 | * @remote_id: remote IPI agent ID | ||
| 84 | * @mbox: mailbox Controller | ||
| 85 | * @mchans: array for channels, tx channel and rx channel. | ||
| 86 | * @irq: IPI agent interrupt ID | ||
| 87 | */ | ||
| 88 | struct zynqmp_ipi_mbox { | ||
| 89 | struct zynqmp_ipi_pdata *pdata; | ||
| 90 | struct device dev; | ||
| 91 | u32 remote_id; | ||
| 92 | struct mbox_controller mbox; | ||
| 93 | struct zynqmp_ipi_mchan mchans[2]; | ||
| 94 | }; | ||
| 95 | |||
| 96 | /** | ||
| 97 | * struct zynqmp_ipi_pdata - Description of the ZynqMP IPI agent platform data. | ||
| 98 | * | ||
| 99 | * @dev: device pointer corresponding to the Xilinx ZynqMP | ||
| 100 | * IPI agent | ||
| 101 | * @irq: IPI agent interrupt ID | ||
| 102 | * @method: IPI SMC or HVC is going to be used | ||
| 103 | * @local_id: local IPI agent ID | ||
| 104 | * @num_mboxes: number of mailboxes of this IPI agent | ||
| 105 | * @ipi_mboxes: IPI mailboxes of this IPI agent | ||
| 106 | */ | ||
| 107 | struct zynqmp_ipi_pdata { | ||
| 108 | struct device *dev; | ||
| 109 | int irq; | ||
| 110 | unsigned int method; | ||
| 111 | u32 local_id; | ||
| 112 | int num_mboxes; | ||
| 113 | struct zynqmp_ipi_mbox *ipi_mboxes; | ||
| 114 | }; | ||
| 115 | |||
| 116 | static struct device_driver zynqmp_ipi_mbox_driver = { | ||
| 117 | .owner = THIS_MODULE, | ||
| 118 | .name = "zynqmp-ipi-mbox", | ||
| 119 | }; | ||
| 120 | |||
| 121 | static void zynqmp_ipi_fw_call(struct zynqmp_ipi_mbox *ipi_mbox, | ||
| 122 | unsigned long a0, unsigned long a3, | ||
| 123 | struct arm_smccc_res *res) | ||
| 124 | { | ||
| 125 | struct zynqmp_ipi_pdata *pdata = ipi_mbox->pdata; | ||
| 126 | unsigned long a1, a2; | ||
| 127 | |||
| 128 | a1 = pdata->local_id; | ||
| 129 | a2 = ipi_mbox->remote_id; | ||
| 130 | if (pdata->method == USE_SMC) | ||
| 131 | arm_smccc_smc(a0, a1, a2, a3, 0, 0, 0, 0, res); | ||
| 132 | else | ||
| 133 | arm_smccc_hvc(a0, a1, a2, a3, 0, 0, 0, 0, res); | ||
| 134 | } | ||
| 135 | |||
| 136 | /** | ||
| 137 | * zynqmp_ipi_interrupt - Interrupt handler for IPI notification | ||
| 138 | * | ||
| 139 | * @irq: Interrupt number | ||
| 140 | * @data: ZynqMP IPI mailbox platform data. | ||
| 141 | * | ||
| 142 | * Return: IRQ_NONE if the interrupt is not ours or no opened channel | ||
| 143 | * was ready to receive the message. | ||
| 144 | * IRQ_HANDLED if the rx interrupt was successfully handled. | ||
| 145 | */ | ||
| 146 | static irqreturn_t zynqmp_ipi_interrupt(int irq, void *data) | ||
| 147 | { | ||
| 148 | struct zynqmp_ipi_pdata *pdata = data; | ||
| 149 | struct mbox_chan *chan; | ||
| 150 | struct zynqmp_ipi_mbox *ipi_mbox; | ||
| 151 | struct zynqmp_ipi_mchan *mchan; | ||
| 152 | struct zynqmp_ipi_message *msg; | ||
| 153 | u64 arg0, arg3; | ||
| 154 | struct arm_smccc_res res; | ||
| 155 | int ret, i; | ||
| 156 | |||
| 157 | (void)irq; | ||
| 158 | arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY; | ||
| 159 | arg3 = IPI_SMC_ENQUIRY_DIRQ_MASK; | ||
| 160 | for (i = 0; i < pdata->num_mboxes; i++) { | ||
| 161 | ipi_mbox = &pdata->ipi_mboxes[i]; | ||
| 162 | mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX]; | ||
| 163 | chan = &ipi_mbox->mbox.chans[IPI_MB_CHNL_RX]; | ||
| 164 | zynqmp_ipi_fw_call(ipi_mbox, arg0, arg3, &res); | ||
| 165 | ret = (int)(res.a0 & 0xFFFFFFFF); | ||
| 166 | if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) { | ||
| 167 | if (mchan->is_opened) { | ||
| 168 | msg = mchan->rx_buf; | ||
| 169 | msg->len = mchan->req_buf_size; | ||
| 170 | memcpy_fromio(msg->data, mchan->req_buf, | ||
| 171 | msg->len); | ||
| 172 | mbox_chan_received_data(chan, (void *)msg); | ||
| 173 | return IRQ_HANDLED; | ||
| 174 | } | ||
| 175 | } | ||
| 176 | } | ||
| 177 | return IRQ_NONE; | ||
| 178 | } | ||
| 179 | |||
| 180 | /** | ||
| 181 | * zynqmp_ipi_peek_data - Peek to see if there are any rx messages. | ||
| 182 | * | ||
| 183 | * @chan: Channel Pointer | ||
| 184 | * | ||
| 185 | * Return: 'true' if there is pending rx data, 'false' if there is none. | ||
| 186 | */ | ||
| 187 | static bool zynqmp_ipi_peek_data(struct mbox_chan *chan) | ||
| 188 | { | ||
| 189 | struct device *dev = chan->mbox->dev; | ||
| 190 | struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev); | ||
| 191 | struct zynqmp_ipi_mchan *mchan = chan->con_priv; | ||
| 192 | int ret; | ||
| 193 | u64 arg0; | ||
| 194 | struct arm_smccc_res res; | ||
| 195 | |||
| 196 | if (WARN_ON(!ipi_mbox)) { | ||
| 197 | dev_err(dev, "no platform drv data??\n"); | ||
| 198 | return false; | ||
| 199 | } | ||
| 200 | |||
| 201 | arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY; | ||
| 202 | zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res); | ||
| 203 | ret = (int)(res.a0 & 0xFFFFFFFF); | ||
| 204 | |||
| 205 | if (mchan->chan_type == IPI_MB_CHNL_TX) { | ||
| 206 | /* TX channel, check if the message has been acked | ||
| 207 | * by the remote; if yes, the response is available. | ||
| 208 | */ | ||
| 209 | if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING) | ||
| 210 | return false; | ||
| 211 | else | ||
| 212 | return true; | ||
| 213 | } else if (ret > 0 && ret & IPI_MB_STATUS_RECV_PENDING) { | ||
| 214 | /* RX channel, check if a message has arrived. */ | ||
| 215 | return true; | ||
| 216 | } | ||
| 217 | return false; | ||
| 218 | } | ||
| 219 | |||
| 220 | /** | ||
| 221 | * zynqmp_ipi_last_tx_done - See if the last tx message is sent | ||
| 222 | * | ||
| 223 | * @chan: Channel pointer | ||
| 224 | * | ||
| 225 | * Return: 'true' if there is no pending tx data, 'false' if there is any. | ||
| 226 | */ | ||
| 227 | static bool zynqmp_ipi_last_tx_done(struct mbox_chan *chan) | ||
| 228 | { | ||
| 229 | struct device *dev = chan->mbox->dev; | ||
| 230 | struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev); | ||
| 231 | struct zynqmp_ipi_mchan *mchan = chan->con_priv; | ||
| 232 | int ret; | ||
| 233 | u64 arg0; | ||
| 234 | struct arm_smccc_res res; | ||
| 235 | |||
| 236 | if (WARN_ON(!ipi_mbox)) { | ||
| 237 | dev_err(dev, "no platform drv data??\n"); | ||
| 238 | return false; | ||
| 239 | } | ||
| 240 | |||
| 241 | if (mchan->chan_type == IPI_MB_CHNL_TX) { | ||
| 242 | /* We only need to check if the message has been taken | ||
| 243 | * by the remote in the TX channel. | ||
| 244 | */ | ||
| 245 | arg0 = SMC_IPI_MAILBOX_STATUS_ENQUIRY; | ||
| 246 | zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res); | ||
| 247 | /* Check the SMC call status, a0 of the result */ | ||
| 248 | ret = (int)(res.a0 & 0xFFFFFFFF); | ||
| 249 | if (ret < 0 || ret & IPI_MB_STATUS_SEND_PENDING) | ||
| 250 | return false; | ||
| 251 | return true; | ||
| 252 | } | ||
| 253 | /* Always true for the response message in RX channel */ | ||
| 254 | return true; | ||
| 255 | } | ||
| 256 | |||
| 257 | /** | ||
| 258 | * zynqmp_ipi_send_data - Send data | ||
| 259 | * | ||
| 260 | * @chan: Channel Pointer | ||
| 261 | * @data: Message Pointer | ||
| 262 | * | ||
| 263 | * Return: 0 on success, else an appropriate error code. | ||
| 264 | */ | ||
| 265 | static int zynqmp_ipi_send_data(struct mbox_chan *chan, void *data) | ||
| 266 | { | ||
| 267 | struct device *dev = chan->mbox->dev; | ||
| 268 | struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev); | ||
| 269 | struct zynqmp_ipi_mchan *mchan = chan->con_priv; | ||
| 270 | struct zynqmp_ipi_message *msg = data; | ||
| 271 | u64 arg0; | ||
| 272 | struct arm_smccc_res res; | ||
| 273 | |||
| 274 | if (WARN_ON(!ipi_mbox)) { | ||
| 275 | dev_err(dev, "no platform drv data??\n"); | ||
| 276 | return -EINVAL; | ||
| 277 | } | ||
| 278 | |||
| 279 | if (mchan->chan_type == IPI_MB_CHNL_TX) { | ||
| 280 | /* Send request message */ | ||
| 281 | if (msg && msg->len > mchan->req_buf_size) { | ||
| 282 | dev_err(dev, "channel %d message length %u > max %lu\n", | ||
| 283 | mchan->chan_type, (unsigned int)msg->len, | ||
| 284 | mchan->req_buf_size); | ||
| 285 | return -EINVAL; | ||
| 286 | } | ||
| 287 | if (msg && msg->len) | ||
| 288 | memcpy_toio(mchan->req_buf, msg->data, msg->len); | ||
| 289 | /* Kick IPI mailbox to send message */ | ||
| 290 | arg0 = SMC_IPI_MAILBOX_NOTIFY; | ||
| 291 | zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res); | ||
| 292 | } else { | ||
| 293 | /* Send response message */ | ||
| 294 | if (msg && msg->len > mchan->resp_buf_size) { | ||
| 295 | dev_err(dev, "channel %d message length %u > max %lu\n", | ||
| 296 | mchan->chan_type, (unsigned int)msg->len, | ||
| 297 | mchan->resp_buf_size); | ||
| 298 | return -EINVAL; | ||
| 299 | } | ||
| 300 | if (msg && msg->len) | ||
| 301 | memcpy_toio(mchan->resp_buf, msg->data, msg->len); | ||
| 302 | arg0 = SMC_IPI_MAILBOX_ACK; | ||
| 303 | zynqmp_ipi_fw_call(ipi_mbox, arg0, IPI_SMC_ACK_EIRQ_MASK, | ||
| 304 | &res); | ||
| 305 | } | ||
| 306 | return 0; | ||
| 307 | } | ||
| 308 | |||
| 309 | /** | ||
| 310 | * zynqmp_ipi_startup - Startup the IPI channel | ||
| 311 | * | ||
| 312 | * @chan: Channel pointer | ||
| 313 | * | ||
| 314 | * Return: 0 on success, else the corresponding error code | ||
| 315 | */ | ||
| 316 | static int zynqmp_ipi_startup(struct mbox_chan *chan) | ||
| 317 | { | ||
| 318 | struct device *dev = chan->mbox->dev; | ||
| 319 | struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev); | ||
| 320 | struct zynqmp_ipi_mchan *mchan = chan->con_priv; | ||
| 321 | u64 arg0; | ||
| 322 | struct arm_smccc_res res; | ||
| 323 | int ret = 0; | ||
| 324 | unsigned int nchan_type; | ||
| 325 | |||
| 326 | if (mchan->is_opened) | ||
| 327 | return 0; | ||
| 328 | |||
| 329 | /* If no channel has been opened, open the IPI mailbox */ | ||
| 330 | nchan_type = (mchan->chan_type + 1) % 2; | ||
| 331 | if (!ipi_mbox->mchans[nchan_type].is_opened) { | ||
| 332 | arg0 = SMC_IPI_MAILBOX_OPEN; | ||
| 333 | zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res); | ||
| 334 | /* Check the SMC call status, a0 of the result */ | ||
| 335 | ret = (int)(res.a0 & 0xFFFFFFFF); | ||
| 336 | if (ret < 0) { | ||
| 337 | dev_err(dev, "SMC to open the IPI channel failed.\n"); | ||
| 338 | return ret; | ||
| 339 | } | ||
| 340 | ret = 0; | ||
| 341 | } | ||
| 342 | |||
| 343 | /* If it is RX channel, enable the IPI notification interrupt */ | ||
| 344 | if (mchan->chan_type == IPI_MB_CHNL_RX) { | ||
| 345 | arg0 = SMC_IPI_MAILBOX_ENABLE_IRQ; | ||
| 346 | zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res); | ||
| 347 | } | ||
| 348 | mchan->is_opened = 1; | ||
| 349 | |||
| 350 | return ret; | ||
| 351 | } | ||
| 352 | |||
| 353 | /** | ||
| 354 | * zynqmp_ipi_shutdown - Shutdown the IPI channel | ||
| 355 | * | ||
| 356 | * @chan: Channel pointer | ||
| 357 | */ | ||
| 358 | static void zynqmp_ipi_shutdown(struct mbox_chan *chan) | ||
| 359 | { | ||
| 360 | struct device *dev = chan->mbox->dev; | ||
| 361 | struct zynqmp_ipi_mbox *ipi_mbox = dev_get_drvdata(dev); | ||
| 362 | struct zynqmp_ipi_mchan *mchan = chan->con_priv; | ||
| 363 | u64 arg0; | ||
| 364 | struct arm_smccc_res res; | ||
| 365 | unsigned int chan_type; | ||
| 366 | |||
| 367 | if (!mchan->is_opened) | ||
| 368 | return; | ||
| 369 | |||
| 370 | /* If it is RX channel, disable notification interrupt */ | ||
| 371 | chan_type = mchan->chan_type; | ||
| 372 | if (chan_type == IPI_MB_CHNL_RX) { | ||
| 373 | arg0 = SMC_IPI_MAILBOX_DISABLE_IRQ; | ||
| 374 | zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res); | ||
| 375 | } | ||
| 376 | /* Release IPI mailbox if no other channel is opened */ | ||
| 377 | chan_type = (chan_type + 1) % 2; | ||
| 378 | if (!ipi_mbox->mchans[chan_type].is_opened) { | ||
| 379 | arg0 = SMC_IPI_MAILBOX_RELEASE; | ||
| 380 | zynqmp_ipi_fw_call(ipi_mbox, arg0, 0, &res); | ||
| 381 | } | ||
| 382 | |||
| 383 | mchan->is_opened = 0; | ||
| 384 | } | ||
| 385 | |||
| 386 | /* ZynqMP IPI mailbox operations */ | ||
| 387 | static const struct mbox_chan_ops zynqmp_ipi_chan_ops = { | ||
| 388 | .startup = zynqmp_ipi_startup, | ||
| 389 | .shutdown = zynqmp_ipi_shutdown, | ||
| 390 | .peek_data = zynqmp_ipi_peek_data, | ||
| 391 | .last_tx_done = zynqmp_ipi_last_tx_done, | ||
| 392 | .send_data = zynqmp_ipi_send_data, | ||
| 393 | }; | ||
| 394 | |||
| 395 | /** | ||
| 396 | * zynqmp_ipi_of_xlate - Translate of phandle to IPI mailbox channel | ||
| 397 | * | ||
| 398 | * @mbox: mailbox controller pointer | ||
| 399 | * @p: phandle pointer | ||
| 400 | * | ||
| 401 | * Return: Mailbox channel, else return error pointer. | ||
| 402 | */ | ||
| 403 | static struct mbox_chan *zynqmp_ipi_of_xlate(struct mbox_controller *mbox, | ||
| 404 | const struct of_phandle_args *p) | ||
| 405 | { | ||
| 406 | struct mbox_chan *chan; | ||
| 407 | struct device *dev = mbox->dev; | ||
| 408 | unsigned int chan_type; | ||
| 409 | |||
| 410 | /* Only supports TX and RX channels */ | ||
| 411 | chan_type = p->args[0]; | ||
| 412 | if (chan_type != IPI_MB_CHNL_TX && chan_type != IPI_MB_CHNL_RX) { | ||
| 413 | dev_err(dev, "req chnl failure: invalid chnl type %u.\n", | ||
| 414 | chan_type); | ||
| 415 | return ERR_PTR(-EINVAL); | ||
| 416 | } | ||
| 417 | chan = &mbox->chans[chan_type]; | ||
| 418 | return chan; | ||
| 419 | } | ||
| 420 | |||
| 421 | static const struct of_device_id zynqmp_ipi_of_match[] = { | ||
| 422 | { .compatible = "xlnx,zynqmp-ipi-mailbox" }, | ||
| 423 | {}, | ||
| 424 | }; | ||
| 425 | MODULE_DEVICE_TABLE(of, zynqmp_ipi_of_match); | ||
| 426 | |||
| 427 | /** | ||
| 428 | * zynqmp_ipi_mbox_get_buf_res - Get buffer resource from the IPI dev node | ||
| 429 | * | ||
| 430 | * @node: IPI mbox device child node | ||
| 431 | * @name: name of the IPI buffer | ||
| 432 | * @res: pointer to where the resource information will be stored. | ||
| 433 | * | ||
| 434 | * Return: 0 for success, negative value for failure | ||
| 435 | */ | ||
| 436 | static int zynqmp_ipi_mbox_get_buf_res(struct device_node *node, | ||
| 437 | const char *name, | ||
| 438 | struct resource *res) | ||
| 439 | { | ||
| 440 | int ret, index; | ||
| 441 | |||
| 442 | index = of_property_match_string(node, "reg-names", name); | ||
| 443 | if (index >= 0) { | ||
| 444 | ret = of_address_to_resource(node, index, res); | ||
| 445 | if (ret < 0) | ||
| 446 | return -EINVAL; | ||
| 447 | return 0; | ||
| 448 | } | ||
| 449 | return -ENODEV; | ||
| 450 | } | ||
| 451 | |||
| 452 | /** | ||
| 453 | * zynqmp_ipi_mbox_dev_release() - release callback for an ipi mbox dev | ||
| 454 | * | ||
| 455 | * @dev: the ipi mailbox device | ||
| 456 | * | ||
| 457 | * This empty callback avoids the "device has no release() function" kernel warning. | ||
| 458 | * | ||
| 459 | */ | ||
| 460 | static void zynqmp_ipi_mbox_dev_release(struct device *dev) | ||
| 461 | { | ||
| 462 | (void)dev; | ||
| 463 | } | ||
| 464 | |||
| 465 | /** | ||
| 466 | * zynqmp_ipi_mbox_probe - probe IPI mailbox resource from device node | ||
| 467 | * | ||
| 468 | * @ipi_mbox: pointer to IPI mailbox private data structure | ||
| 469 | * @node: IPI mailbox device node | ||
| 470 | * | ||
| 471 | * Return: 0 for success, negative value for failure | ||
| 472 | */ | ||
| 473 | static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox, | ||
| 474 | struct device_node *node) | ||
| 475 | { | ||
| 476 | struct zynqmp_ipi_mchan *mchan; | ||
| 477 | struct mbox_chan *chans; | ||
| 478 | struct mbox_controller *mbox; | ||
| 479 | struct resource res; | ||
| 480 | struct device *dev, *mdev; | ||
| 481 | const char *name; | ||
| 482 | int ret; | ||
| 483 | |||
| 484 | dev = ipi_mbox->pdata->dev; | ||
| 485 | /* Initialize dev for IPI mailbox */ | ||
| 486 | ipi_mbox->dev.parent = dev; | ||
| 487 | ipi_mbox->dev.release = NULL; | ||
| 488 | ipi_mbox->dev.of_node = node; | ||
| 489 | dev_set_name(&ipi_mbox->dev, "%s", of_node_full_name(node)); | ||
| 490 | dev_set_drvdata(&ipi_mbox->dev, ipi_mbox); | ||
| 491 | ipi_mbox->dev.release = zynqmp_ipi_mbox_dev_release; | ||
| 492 | ipi_mbox->dev.driver = &zynqmp_ipi_mbox_driver; | ||
| 493 | ret = device_register(&ipi_mbox->dev); | ||
| 494 | if (ret) { | ||
| 495 | dev_err(dev, "Failed to register ipi mbox dev.\n"); | ||
| 496 | return ret; | ||
| 497 | } | ||
| 498 | mdev = &ipi_mbox->dev; | ||
| 499 | |||
| 500 | mchan = &ipi_mbox->mchans[IPI_MB_CHNL_TX]; | ||
| 501 | name = "local_request_region"; | ||
| 502 | ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res); | ||
| 503 | if (!ret) { | ||
| 504 | mchan->req_buf_size = resource_size(&res); | ||
| 505 | mchan->req_buf = devm_ioremap(mdev, res.start, | ||
| 506 | mchan->req_buf_size); | ||
| 507 | if (IS_ERR(mchan->req_buf)) { | ||
| 508 | dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); | ||
| 509 | ret = PTR_ERR(mchan->req_buf); | ||
| 510 | return ret; | ||
| 511 | } | ||
| 512 | } else if (ret != -ENODEV) { | ||
| 513 | dev_err(mdev, "Unmatched resource %s, %d.\n", name, ret); | ||
| 514 | return ret; | ||
| 515 | } | ||
| 516 | |||
| 517 | name = "remote_response_region"; | ||
| 518 | ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res); | ||
| 519 | if (!ret) { | ||
| 520 | mchan->resp_buf_size = resource_size(&res); | ||
| 521 | mchan->resp_buf = devm_ioremap(mdev, res.start, | ||
| 522 | mchan->resp_buf_size); | ||
| 523 | if (IS_ERR(mchan->resp_buf)) { | ||
| 524 | dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); | ||
| 525 | ret = PTR_ERR(mchan->resp_buf); | ||
| 526 | return ret; | ||
| 527 | } | ||
| 528 | } else if (ret != -ENODEV) { | ||
| 529 | dev_err(mdev, "Unmatched resource %s.\n", name); | ||
| 530 | return ret; | ||
| 531 | } | ||
| 532 | mchan->rx_buf = devm_kzalloc(mdev, | ||
| 533 | mchan->resp_buf_size + | ||
| 534 | sizeof(struct zynqmp_ipi_message), | ||
| 535 | GFP_KERNEL); | ||
| 536 | if (!mchan->rx_buf) | ||
| 537 | return -ENOMEM; | ||
| 538 | |||
| 539 | mchan = &ipi_mbox->mchans[IPI_MB_CHNL_RX]; | ||
| 540 | name = "remote_request_region"; | ||
| 541 | ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res); | ||
| 542 | if (!ret) { | ||
| 543 | mchan->req_buf_size = resource_size(&res); | ||
| 544 | mchan->req_buf = devm_ioremap(mdev, res.start, | ||
| 545 | mchan->req_buf_size); | ||
| 546 | if (IS_ERR(mchan->req_buf)) { | ||
| 547 | dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); | ||
| 548 | ret = PTR_ERR(mchan->req_buf); | ||
| 549 | return ret; | ||
| 550 | } | ||
| 551 | } else if (ret != -ENODEV) { | ||
| 552 | dev_err(mdev, "Unmatched resource %s.\n", name); | ||
| 553 | return ret; | ||
| 554 | } | ||
| 555 | |||
| 556 | name = "local_response_region"; | ||
| 557 | ret = zynqmp_ipi_mbox_get_buf_res(node, name, &res); | ||
| 558 | if (!ret) { | ||
| 559 | mchan->resp_buf_size = resource_size(&res); | ||
| 560 | mchan->resp_buf = devm_ioremap(mdev, res.start, | ||
| 561 | mchan->resp_buf_size); | ||
| 562 | if (IS_ERR(mchan->resp_buf)) { | ||
| 563 | dev_err(mdev, "Unable to map IPI buffer I/O memory\n"); | ||
| 564 | ret = PTR_ERR(mchan->resp_buf); | ||
| 565 | return ret; | ||
| 566 | } | ||
| 567 | } else if (ret != -ENODEV) { | ||
| 568 | dev_err(mdev, "Unmatched resource %s.\n", name); | ||
| 569 | return ret; | ||
| 570 | } | ||
| 571 | mchan->rx_buf = devm_kzalloc(mdev, | ||
| 572 | mchan->resp_buf_size + | ||
| 573 | sizeof(struct zynqmp_ipi_message), | ||
| 574 | GFP_KERNEL); | ||
| 575 | if (!mchan->rx_buf) | ||
| 576 | return -ENOMEM; | ||
| 577 | |||
| 578 | /* Get the IPI remote agent ID */ | ||
| 579 | ret = of_property_read_u32(node, "xlnx,ipi-id", &ipi_mbox->remote_id); | ||
| 580 | if (ret < 0) { | ||
| 581 | dev_err(dev, "No IPI remote ID is specified.\n"); | ||
| 582 | return ret; | ||
| 583 | } | ||
| 584 | |||
| 585 | mbox = &ipi_mbox->mbox; | ||
| 586 | mbox->dev = mdev; | ||
| 587 | mbox->ops = &zynqmp_ipi_chan_ops; | ||
| 588 | mbox->num_chans = 2; | ||
| 589 | mbox->txdone_irq = false; | ||
| 590 | mbox->txdone_poll = true; | ||
| 591 | mbox->txpoll_period = 5; | ||
| 592 | mbox->of_xlate = zynqmp_ipi_of_xlate; | ||
| 593 | chans = devm_kzalloc(mdev, 2 * sizeof(*chans), GFP_KERNEL); | ||
| 594 | if (!chans) | ||
| 595 | return -ENOMEM; | ||
| 596 | mbox->chans = chans; | ||
| 597 | chans[IPI_MB_CHNL_TX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_TX]; | ||
| 598 | chans[IPI_MB_CHNL_RX].con_priv = &ipi_mbox->mchans[IPI_MB_CHNL_RX]; | ||
| 599 | ipi_mbox->mchans[IPI_MB_CHNL_TX].chan_type = IPI_MB_CHNL_TX; | ||
| 600 | ipi_mbox->mchans[IPI_MB_CHNL_RX].chan_type = IPI_MB_CHNL_RX; | ||
| 601 | ret = devm_mbox_controller_register(mdev, mbox); | ||
| 602 | if (ret) | ||
| 603 | dev_err(mdev, | ||
| 604 | "Failed to register mbox_controller(%d)\n", ret); | ||
| 605 | else | ||
| 606 | dev_info(mdev, | ||
| 607 | "Registered ZynqMP IPI mbox with TX/RX channels.\n"); | ||
| 608 | return ret; | ||
| 609 | } | ||
| 610 | |||
| 611 | /** | ||
| 612 | * zynqmp_ipi_free_mboxes - Free IPI mailboxes devices | ||
| 613 | * | ||
| 614 | * @pdata: IPI private data | ||
| 615 | */ | ||
| 616 | static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata) | ||
| 617 | { | ||
| 618 | struct zynqmp_ipi_mbox *ipi_mbox; | ||
| 619 | int i; | ||
| 620 | |||
| 621 | i = pdata->num_mboxes - 1; | ||
| 622 | for (; i >= 0; i--) { | ||
| 623 | ipi_mbox = &pdata->ipi_mboxes[i]; | ||
| 624 | if (ipi_mbox->dev.parent) { | ||
| 625 | mbox_controller_unregister(&ipi_mbox->mbox); | ||
| 626 | device_unregister(&ipi_mbox->dev); | ||
| 627 | } | ||
| 628 | } | ||
| 629 | } | ||
| 630 | |||
| 631 | static int zynqmp_ipi_probe(struct platform_device *pdev) | ||
| 632 | { | ||
| 633 | struct device *dev = &pdev->dev; | ||
| 634 | struct device_node *nc, *np = pdev->dev.of_node; | ||
| 635 | struct zynqmp_ipi_pdata *pdata; | ||
| 636 | struct zynqmp_ipi_mbox *mbox; | ||
| 637 | int num_mboxes, ret = -EINVAL; | ||
| 638 | |||
| 639 | num_mboxes = of_get_child_count(np); | ||
| 640 | pdata = devm_kzalloc(dev, sizeof(*pdata) + (num_mboxes * sizeof(*mbox)), | ||
| 641 | GFP_KERNEL); | ||
| 642 | if (!pdata) | ||
| 643 | return -ENOMEM; | ||
| 644 | pdata->dev = dev; | ||
| 645 | |||
| 646 | /* Get the IPI local agents ID */ | ||
| 647 | ret = of_property_read_u32(np, "xlnx,ipi-id", &pdata->local_id); | ||
| 648 | if (ret < 0) { | ||
| 649 | dev_err(dev, "No IPI local ID is specified.\n"); | ||
| 650 | return ret; | ||
| 651 | } | ||
| 652 | |||
| 653 | pdata->num_mboxes = num_mboxes; | ||
| 654 | pdata->ipi_mboxes = (struct zynqmp_ipi_mbox *) | ||
| 655 | ((char *)pdata + sizeof(*pdata)); | ||
| 656 | |||
| 657 | mbox = pdata->ipi_mboxes; | ||
| 658 | for_each_available_child_of_node(np, nc) { | ||
| 659 | mbox->pdata = pdata; | ||
| 660 | ret = zynqmp_ipi_mbox_probe(mbox, nc); | ||
| 661 | if (ret) { | ||
| 662 | dev_err(dev, "failed to probe subdev.\n"); | ||
| 663 | ret = -EINVAL; | ||
| 664 | goto free_mbox_dev; | ||
| 665 | } | ||
| 666 | mbox++; | ||
| 667 | } | ||
| 668 | |||
| 669 | /* IPI IRQ */ | ||
| 670 | ret = platform_get_irq(pdev, 0); | ||
| 671 | if (ret < 0) { | ||
| 672 | dev_err(dev, "unable to find IPI IRQ.\n"); | ||
| 673 | goto free_mbox_dev; | ||
| 674 | } | ||
| 675 | pdata->irq = ret; | ||
| 676 | ret = devm_request_irq(dev, pdata->irq, zynqmp_ipi_interrupt, | ||
| 677 | IRQF_SHARED, dev_name(dev), pdata); | ||
| 678 | if (ret) { | ||
| 679 | dev_err(dev, "IRQ %d is not requested successfully.\n", | ||
| 680 | pdata->irq); | ||
| 681 | goto free_mbox_dev; | ||
| 682 | } | ||
| 683 | |||
| 684 | platform_set_drvdata(pdev, pdata); | ||
| 685 | return ret; | ||
| 686 | |||
| 687 | free_mbox_dev: | ||
| 688 | zynqmp_ipi_free_mboxes(pdata); | ||
| 689 | return ret; | ||
| 690 | } | ||
| 691 | |||
| 692 | static int zynqmp_ipi_remove(struct platform_device *pdev) | ||
| 693 | { | ||
| 694 | struct zynqmp_ipi_pdata *pdata; | ||
| 695 | |||
| 696 | pdata = platform_get_drvdata(pdev); | ||
| 697 | zynqmp_ipi_free_mboxes(pdata); | ||
| 698 | |||
| 699 | return 0; | ||
| 700 | } | ||
| 701 | |||
| 702 | static struct platform_driver zynqmp_ipi_driver = { | ||
| 703 | .probe = zynqmp_ipi_probe, | ||
| 704 | .remove = zynqmp_ipi_remove, | ||
| 705 | .driver = { | ||
| 706 | .name = "zynqmp-ipi", | ||
| 707 | .of_match_table = of_match_ptr(zynqmp_ipi_of_match), | ||
| 708 | }, | ||
| 709 | }; | ||
| 710 | |||
| 711 | static int __init zynqmp_ipi_init(void) | ||
| 712 | { | ||
| 713 | return platform_driver_register(&zynqmp_ipi_driver); | ||
| 714 | } | ||
| 715 | subsys_initcall(zynqmp_ipi_init); | ||
| 716 | |||
| 717 | static void __exit zynqmp_ipi_exit(void) | ||
| 718 | { | ||
| 719 | platform_driver_unregister(&zynqmp_ipi_driver); | ||
| 720 | } | ||
| 721 | module_exit(zynqmp_ipi_exit); | ||
| 722 | |||
| 723 | MODULE_LICENSE("GPL v2"); | ||
| 724 | MODULE_DESCRIPTION("Xilinx ZynqMP IPI Mailbox driver"); | ||
| 725 | MODULE_AUTHOR("Xilinx Inc."); | ||
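The new controller registers one TX and one RX channel per IPI node and moves struct zynqmp_ipi_message payloads between the client and the shared IPI buffers via memcpy_toio()/memcpy_fromio(), kicking the remote through the SMC/HVC calls above. A minimal, hypothetical client sketch using the generic mailbox client API follows; the channel name "tx", the payload size and demo_send() itself are illustrative assumptions, not part of this patch.

/* Hypothetical ZynqMP IPI mailbox client sketch (assumes a DT node with
 * mboxes/mbox-names properties pointing at the controller's TX channel). */
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/zynqmp-ipi-message.h>

static int demo_send(struct mbox_client *demo_cl)
{
	u8 buf[sizeof(struct zynqmp_ipi_message) + 8] = { 0 };
	struct zynqmp_ipi_message *msg = (struct zynqmp_ipi_message *)buf;
	struct mbox_chan *tx;
	int ret;

	msg->len = 8;			/* assumed 8-byte payload */
	tx = mbox_request_channel_byname(demo_cl, "tx");
	if (IS_ERR(tx))
		return PTR_ERR(tx);
	/* The driver copies msg->data into the request buffer and notifies
	 * the remote with SMC_IPI_MAILBOX_NOTIFY. */
	ret = mbox_send_message(tx, msg);
	mbox_free_channel(tx);
	return ret < 0 ? ret : 0;
}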
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 22547d7a84ea..947a8adbc799 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c | |||
| @@ -974,6 +974,36 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, | |||
| 974 | break; | 974 | break; |
| 975 | } | 975 | } |
| 976 | 976 | ||
| 977 | /* Check a specific PEB for bitflips and scrub it if needed */ | ||
| 978 | case UBI_IOCRPEB: | ||
| 979 | { | ||
| 980 | int pnum; | ||
| 981 | |||
| 982 | err = get_user(pnum, (__user int32_t *)argp); | ||
| 983 | if (err) { | ||
| 984 | err = -EFAULT; | ||
| 985 | break; | ||
| 986 | } | ||
| 987 | |||
| 988 | err = ubi_bitflip_check(ubi, pnum, 0); | ||
| 989 | break; | ||
| 990 | } | ||
| 991 | |||
| 992 | /* Force scrubbing for a specific PEB */ | ||
| 993 | case UBI_IOCSPEB: | ||
| 994 | { | ||
| 995 | int pnum; | ||
| 996 | |||
| 997 | err = get_user(pnum, (__user int32_t *)argp); | ||
| 998 | if (err) { | ||
| 999 | err = -EFAULT; | ||
| 1000 | break; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | err = ubi_bitflip_check(ubi, pnum, 1); | ||
| 1004 | break; | ||
| 1005 | } | ||
| 1006 | |||
| 977 | default: | 1007 | default: |
| 978 | err = -ENOTTY; | 1008 | err = -ENOTTY; |
| 979 | break; | 1009 | break; |
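Both new ioctls copy a PEB number from userspace and pass it to ubi_bitflip_check(): UBI_IOCRPEB does a read-back check, UBI_IOCSPEB forces scrubbing. A rough userspace sketch of the read-back variant is below; the device path and PEB number are placeholders, and the ioctl definitions are assumed to come from the matching ubi-user.h update, which is outside this excerpt.

/* Hypothetical userspace sketch: check PEB 42 of UBI device 0 for bitflips. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>	/* assumed to define UBI_IOCRPEB/UBI_IOCSPEB */

int main(void)
{
	int pnum = 42;
	int fd = open("/dev/ubi0", O_RDWR);

	if (fd < 0)
		return 1;
	/* errno is EUCLEAN when bitflips were found and scrubbing was queued;
	 * a return of 0 means the PEB read back clean. */
	if (ioctl(fd, UBI_IOCRPEB, &pnum) != 0)
		perror("UBI_IOCRPEB");
	return 0;
}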
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index d47b9e436e67..a1b9e764d489 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h | |||
| @@ -929,6 +929,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e, | |||
| 929 | int ubi_is_erase_work(struct ubi_work *wrk); | 929 | int ubi_is_erase_work(struct ubi_work *wrk); |
| 930 | void ubi_refill_pools(struct ubi_device *ubi); | 930 | void ubi_refill_pools(struct ubi_device *ubi); |
| 931 | int ubi_ensure_anchor_pebs(struct ubi_device *ubi); | 931 | int ubi_ensure_anchor_pebs(struct ubi_device *ubi); |
| 932 | int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub); | ||
| 932 | 933 | ||
| 933 | /* io.c */ | 934 | /* io.c */ |
| 934 | int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, | 935 | int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, |
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 6f2ac865ff05..2709dc02fc24 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
| @@ -278,6 +278,27 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) | |||
| 278 | } | 278 | } |
| 279 | 279 | ||
| 280 | /** | 280 | /** |
| 281 | * in_pq - check if a wear-leveling entry is present in the protection queue. | ||
| 282 | * @ubi: UBI device description object | ||
| 283 | * @e: the wear-leveling entry to check | ||
| 284 | * | ||
| 285 | * This function returns non-zero if @e is in the protection queue and zero | ||
| 286 | * if it is not. | ||
| 287 | */ | ||
| 288 | static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e) | ||
| 289 | { | ||
| 290 | struct ubi_wl_entry *p; | ||
| 291 | int i; | ||
| 292 | |||
| 293 | for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) | ||
| 294 | list_for_each_entry(p, &ubi->pq[i], u.list) | ||
| 295 | if (p == e) | ||
| 296 | return 1; | ||
| 297 | |||
| 298 | return 0; | ||
| 299 | } | ||
| 300 | |||
| 301 | /** | ||
| 281 | * prot_queue_add - add physical eraseblock to the protection queue. | 302 | * prot_queue_add - add physical eraseblock to the protection queue. |
| 282 | * @ubi: UBI device description object | 303 | * @ubi: UBI device description object |
| 283 | * @e: the physical eraseblock to add | 304 | * @e: the physical eraseblock to add |
| @@ -1419,6 +1440,150 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) | |||
| 1419 | return err; | 1440 | return err; |
| 1420 | } | 1441 | } |
| 1421 | 1442 | ||
| 1443 | static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e) | ||
| 1444 | { | ||
| 1445 | if (in_wl_tree(e, &ubi->scrub)) | ||
| 1446 | return false; | ||
| 1447 | else if (in_wl_tree(e, &ubi->erroneous)) | ||
| 1448 | return false; | ||
| 1449 | else if (ubi->move_from == e) | ||
| 1450 | return false; | ||
| 1451 | else if (ubi->move_to == e) | ||
| 1452 | return false; | ||
| 1453 | |||
| 1454 | return true; | ||
| 1455 | } | ||
| 1456 | |||
| 1457 | /** | ||
| 1458 | * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed. | ||
| 1459 | * @ubi: UBI device description object | ||
| 1460 | * @pnum: the physical eraseblock to schedule | ||
| 1461 | * @force: don't read the block, assume bitflips happened and take action. | ||
| 1462 | * | ||
| 1463 | * This function reads the given eraseblock and checks if bitflips occurred. | ||
| 1464 | * In case of bitflips, the eraseblock is scheduled for scrubbing. | ||
| 1465 | * If scrubbing is forced with @force, the eraseblock is not read, | ||
| 1466 | * but scheduled for scrubbing right away. | ||
| 1467 | * | ||
| 1468 | * Returns: | ||
| 1469 | * %EINVAL, PEB is out of range | ||
| 1470 | * %ENOENT, PEB is no longer used by UBI | ||
| 1471 | * %EBUSY, PEB cannot be checked now or a check is currently running on it | ||
| 1472 | * %EAGAIN, bit flips happened but scrubbing is currently not possible | ||
| 1473 | * %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing | ||
| 1474 | * %0, no bit flips detected | ||
| 1475 | */ | ||
| 1476 | int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force) | ||
| 1477 | { | ||
| 1478 | int err; | ||
| 1479 | struct ubi_wl_entry *e; | ||
| 1480 | |||
| 1481 | if (pnum < 0 || pnum >= ubi->peb_count) { | ||
| 1482 | err = -EINVAL; | ||
| 1483 | goto out; | ||
| 1484 | } | ||
| 1485 | |||
| 1486 | /* | ||
| 1487 | * Pause all parallel work, otherwise it can happen that the | ||
| 1488 | * erase worker frees a wl entry under us. | ||
| 1489 | */ | ||
| 1490 | down_write(&ubi->work_sem); | ||
| 1491 | |||
| 1492 | /* | ||
| 1493 | * Make sure that the wl entry does not change state while | ||
| 1494 | * inspecting it. | ||
| 1495 | */ | ||
| 1496 | spin_lock(&ubi->wl_lock); | ||
| 1497 | e = ubi->lookuptbl[pnum]; | ||
| 1498 | if (!e) { | ||
| 1499 | spin_unlock(&ubi->wl_lock); | ||
| 1500 | err = -ENOENT; | ||
| 1501 | goto out_resume; | ||
| 1502 | } | ||
| 1503 | |||
| 1504 | /* | ||
| 1505 | * Does it make sense to check this PEB? | ||
| 1506 | */ | ||
| 1507 | if (!scrub_possible(ubi, e)) { | ||
| 1508 | spin_unlock(&ubi->wl_lock); | ||
| 1509 | err = -EBUSY; | ||
| 1510 | goto out_resume; | ||
| 1511 | } | ||
| 1512 | spin_unlock(&ubi->wl_lock); | ||
| 1513 | |||
| 1514 | if (!force) { | ||
| 1515 | mutex_lock(&ubi->buf_mutex); | ||
| 1516 | err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size); | ||
| 1517 | mutex_unlock(&ubi->buf_mutex); | ||
| 1518 | } | ||
| 1519 | |||
| 1520 | if (force || err == UBI_IO_BITFLIPS) { | ||
| 1521 | /* | ||
| 1522 | * Okay, bit flip happened, let's figure out what we can do. | ||
| 1523 | */ | ||
| 1524 | spin_lock(&ubi->wl_lock); | ||
| 1525 | |||
| 1526 | /* | ||
| 1527 | * Recheck. We released wl_lock, UBI might have killed the | ||
| 1528 | * wl entry under us. | ||
| 1529 | */ | ||
| 1530 | e = ubi->lookuptbl[pnum]; | ||
| 1531 | if (!e) { | ||
| 1532 | spin_unlock(&ubi->wl_lock); | ||
| 1533 | err = -ENOENT; | ||
| 1534 | goto out_resume; | ||
| 1535 | } | ||
| 1536 | |||
| 1537 | /* | ||
| 1538 | * Need to re-check state | ||
| 1539 | */ | ||
| 1540 | if (!scrub_possible(ubi, e)) { | ||
| 1541 | spin_unlock(&ubi->wl_lock); | ||
| 1542 | err = -EBUSY; | ||
| 1543 | goto out_resume; | ||
| 1544 | } | ||
| 1545 | |||
| 1546 | if (in_pq(ubi, e)) { | ||
| 1547 | prot_queue_del(ubi, e->pnum); | ||
| 1548 | wl_tree_add(e, &ubi->scrub); | ||
| 1549 | spin_unlock(&ubi->wl_lock); | ||
| 1550 | |||
| 1551 | err = ensure_wear_leveling(ubi, 1); | ||
| 1552 | } else if (in_wl_tree(e, &ubi->used)) { | ||
| 1553 | rb_erase(&e->u.rb, &ubi->used); | ||
| 1554 | wl_tree_add(e, &ubi->scrub); | ||
| 1555 | spin_unlock(&ubi->wl_lock); | ||
| 1556 | |||
| 1557 | err = ensure_wear_leveling(ubi, 1); | ||
| 1558 | } else if (in_wl_tree(e, &ubi->free)) { | ||
| 1559 | rb_erase(&e->u.rb, &ubi->free); | ||
| 1560 | ubi->free_count--; | ||
| 1561 | spin_unlock(&ubi->wl_lock); | ||
| 1562 | |||
| 1563 | /* | ||
| 1564 | * This PEB is empty, we can schedule it for | ||
| 1565 | * erasure right away. No wear leveling needed. | ||
| 1566 | */ | ||
| 1567 | err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN, | ||
| 1568 | force ? 0 : 1, true); | ||
| 1569 | } else { | ||
| 1570 | spin_unlock(&ubi->wl_lock); | ||
| 1571 | err = -EAGAIN; | ||
| 1572 | } | ||
| 1573 | |||
| 1574 | if (!err && !force) | ||
| 1575 | err = -EUCLEAN; | ||
| 1576 | } else { | ||
| 1577 | err = 0; | ||
| 1578 | } | ||
| 1579 | |||
| 1580 | out_resume: | ||
| 1581 | up_write(&ubi->work_sem); | ||
| 1582 | out: | ||
| 1583 | |||
| 1584 | return err; | ||
| 1585 | } | ||
| 1586 | |||
| 1422 | /** | 1587 | /** |
| 1423 | * tree_destroy - destroy an RB-tree. | 1588 | * tree_destroy - destroy an RB-tree. |
| 1424 | * @ubi: UBI device description object | 1589 | * @ubi: UBI device description object |
| @@ -1848,16 +2013,11 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi, | |||
| 1848 | static int self_check_in_pq(const struct ubi_device *ubi, | 2013 | static int self_check_in_pq(const struct ubi_device *ubi, |
| 1849 | struct ubi_wl_entry *e) | 2014 | struct ubi_wl_entry *e) |
| 1850 | { | 2015 | { |
| 1851 | struct ubi_wl_entry *p; | ||
| 1852 | int i; | ||
| 1853 | |||
| 1854 | if (!ubi_dbg_chk_gen(ubi)) | 2016 | if (!ubi_dbg_chk_gen(ubi)) |
| 1855 | return 0; | 2017 | return 0; |
| 1856 | 2018 | ||
| 1857 | for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) | 2019 | if (in_pq(ubi, e)) |
| 1858 | list_for_each_entry(p, &ubi->pq[i], u.list) | 2020 | return 0; |
| 1859 | if (p == e) | ||
| 1860 | return 0; | ||
| 1861 | 2021 | ||
| 1862 | ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue", | 2022 | ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue", |
| 1863 | e->pnum, e->ec); | 2023 | e->pnum, e->ec); |
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 61e43802b9a5..645efac6310d 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c | |||
| @@ -289,6 +289,11 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link) | |||
| 289 | 289 | ||
| 290 | virt = ioremap(link->resource[2]->start, | 290 | virt = ioremap(link->resource[2]->start, |
| 291 | resource_size(link->resource[2])); | 291 | resource_size(link->resource[2])); |
| 292 | if (unlikely(!virt)) { | ||
| 293 | pcmcia_release_window(link, link->resource[2]); | ||
| 294 | return NULL; | ||
| 295 | } | ||
| 296 | |||
| 292 | for (i = 0; i < NR_INFO; i++) { | 297 | for (i = 0; i < NR_INFO; i++) { |
| 293 | pcmcia_map_mem_page(link, link->resource[2], | 298 | pcmcia_map_mem_page(link, link->resource[2], |
| 294 | hw_info[i].offset & ~(resource_size(link->resource[2])-1)); | 299 | hw_info[i].offset & ~(resource_size(link->resource[2])-1)); |
| @@ -1423,6 +1428,11 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg, | |||
| 1423 | /* Try scribbling on the buffer */ | 1428 | /* Try scribbling on the buffer */ |
| 1424 | info->base = ioremap(link->resource[3]->start, | 1429 | info->base = ioremap(link->resource[3]->start, |
| 1425 | resource_size(link->resource[3])); | 1430 | resource_size(link->resource[3])); |
| 1431 | if (unlikely(!info->base)) { | ||
| 1432 | ret = -ENOMEM; | ||
| 1433 | goto failed; | ||
| 1434 | } | ||
| 1435 | |||
| 1426 | for (i = 0; i < (TX_PAGES<<8); i += 2) | 1436 | for (i = 0; i < (TX_PAGES<<8); i += 2) |
| 1427 | __raw_writew((i>>1), info->base+offset+i); | 1437 | __raw_writew((i>>1), info->base+offset+i); |
| 1428 | udelay(100); | 1438 | udelay(100); |
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index e21bf3724611..1c50c10b5a16 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c | |||
| @@ -1211,6 +1211,11 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu) | |||
| 1211 | 1211 | ||
| 1212 | sc = (struct octeon_soft_command *) | 1212 | sc = (struct octeon_soft_command *) |
| 1213 | octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0); | 1213 | octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0); |
| 1214 | if (!sc) { | ||
| 1215 | netif_info(lio, rx_err, lio->netdev, | ||
| 1216 | "Failed to allocate soft command\n"); | ||
| 1217 | return -ENOMEM; | ||
| 1218 | } | ||
| 1214 | 1219 | ||
| 1215 | ncmd = (union octnet_cmd *)sc->virtdptr; | 1220 | ncmd = (union octnet_cmd *)sc->virtdptr; |
| 1216 | 1221 | ||
| @@ -1684,6 +1689,11 @@ int liquidio_set_fec(struct lio *lio, int on_off) | |||
| 1684 | 1689 | ||
| 1685 | sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, | 1690 | sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, |
| 1686 | sizeof(struct oct_nic_seapi_resp), 0); | 1691 | sizeof(struct oct_nic_seapi_resp), 0); |
| 1692 | if (!sc) { | ||
| 1693 | dev_err(&oct->pci_dev->dev, | ||
| 1694 | "Failed to allocate soft command\n"); | ||
| 1695 | return -ENOMEM; | ||
| 1696 | } | ||
| 1687 | 1697 | ||
| 1688 | ncmd = sc->virtdptr; | 1698 | ncmd = sc->virtdptr; |
| 1689 | resp = sc->virtrptr; | 1699 | resp = sc->virtrptr; |
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 9b7819fdc9de..fb6f813cff65 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c | |||
| @@ -1192,6 +1192,11 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) | |||
| 1192 | sc = (struct octeon_soft_command *) | 1192 | sc = (struct octeon_soft_command *) |
| 1193 | octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, | 1193 | octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, |
| 1194 | 16, 0); | 1194 | 16, 0); |
| 1195 | if (!sc) { | ||
| 1196 | netif_info(lio, rx_err, lio->netdev, | ||
| 1197 | "Failed to allocate octeon_soft_command\n"); | ||
| 1198 | return; | ||
| 1199 | } | ||
| 1195 | 1200 | ||
| 1196 | ncmd = (union octnet_cmd *)sc->virtdptr; | 1201 | ncmd = (union octnet_cmd *)sc->virtdptr; |
| 1197 | 1202 | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 503cfadff4ac..aa2be4807191 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
| @@ -2234,6 +2234,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2234 | nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d", | 2234 | nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d", |
| 2235 | WQ_MEM_RECLAIM, | 2235 | WQ_MEM_RECLAIM, |
| 2236 | nic->vf_id); | 2236 | nic->vf_id); |
| 2237 | if (!nic->nicvf_rx_mode_wq) { | ||
| 2238 | err = -ENOMEM; | ||
| 2239 | dev_err(dev, "Failed to allocate work queue\n"); | ||
| 2240 | goto err_unregister_interrupts; | ||
| 2241 | } | ||
| 2242 | |||
| 2237 | INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); | 2243 | INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); |
| 2238 | spin_lock_init(&nic->rx_mode_wq_lock); | 2244 | spin_lock_init(&nic->rx_mode_wq_lock); |
| 2239 | mutex_init(&nic->rx_mode_mtx); | 2245 | mutex_init(&nic->rx_mode_mtx); |
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index a69cd19a55ae..1eca0fdb9933 100644 --- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c | |||
| @@ -547,6 +547,11 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) | |||
| 547 | return -1; | 547 | return -1; |
| 548 | 548 | ||
| 549 | base = ioremap(link->resource[2]->start, resource_size(link->resource[2])); | 549 | base = ioremap(link->resource[2]->start, resource_size(link->resource[2])); |
| 550 | if (!base) { | ||
| 551 | pcmcia_release_window(link, link->resource[2]); | ||
| 552 | return -ENOMEM; | ||
| 553 | } | ||
| 554 | |||
| 550 | pcmcia_map_mem_page(link, link->resource[2], 0); | 555 | pcmcia_map_mem_page(link, link->resource[2], 0); |
| 551 | 556 | ||
| 552 | /* | 557 | /* |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index c19e74e6ac94..a5d5d6fc1da0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
| @@ -2645,6 +2645,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev) | |||
| 2645 | if (!priv->cmd.context) | 2645 | if (!priv->cmd.context) |
| 2646 | return -ENOMEM; | 2646 | return -ENOMEM; |
| 2647 | 2647 | ||
| 2648 | if (mlx4_is_mfunc(dev)) | ||
| 2649 | mutex_lock(&priv->cmd.slave_cmd_mutex); | ||
| 2648 | down_write(&priv->cmd.switch_sem); | 2650 | down_write(&priv->cmd.switch_sem); |
| 2649 | for (i = 0; i < priv->cmd.max_cmds; ++i) { | 2651 | for (i = 0; i < priv->cmd.max_cmds; ++i) { |
| 2650 | priv->cmd.context[i].token = i; | 2652 | priv->cmd.context[i].token = i; |
| @@ -2670,6 +2672,8 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev) | |||
| 2670 | down(&priv->cmd.poll_sem); | 2672 | down(&priv->cmd.poll_sem); |
| 2671 | priv->cmd.use_events = 1; | 2673 | priv->cmd.use_events = 1; |
| 2672 | up_write(&priv->cmd.switch_sem); | 2674 | up_write(&priv->cmd.switch_sem); |
| 2675 | if (mlx4_is_mfunc(dev)) | ||
| 2676 | mutex_unlock(&priv->cmd.slave_cmd_mutex); | ||
| 2673 | 2677 | ||
| 2674 | return err; | 2678 | return err; |
| 2675 | } | 2679 | } |
| @@ -2682,6 +2686,8 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev) | |||
| 2682 | struct mlx4_priv *priv = mlx4_priv(dev); | 2686 | struct mlx4_priv *priv = mlx4_priv(dev); |
| 2683 | int i; | 2687 | int i; |
| 2684 | 2688 | ||
| 2689 | if (mlx4_is_mfunc(dev)) | ||
| 2690 | mutex_lock(&priv->cmd.slave_cmd_mutex); | ||
| 2685 | down_write(&priv->cmd.switch_sem); | 2691 | down_write(&priv->cmd.switch_sem); |
| 2686 | priv->cmd.use_events = 0; | 2692 | priv->cmd.use_events = 0; |
| 2687 | 2693 | ||
| @@ -2689,9 +2695,12 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev) | |||
| 2689 | down(&priv->cmd.event_sem); | 2695 | down(&priv->cmd.event_sem); |
| 2690 | 2696 | ||
| 2691 | kfree(priv->cmd.context); | 2697 | kfree(priv->cmd.context); |
| 2698 | priv->cmd.context = NULL; | ||
| 2692 | 2699 | ||
| 2693 | up(&priv->cmd.poll_sem); | 2700 | up(&priv->cmd.poll_sem); |
| 2694 | up_write(&priv->cmd.switch_sem); | 2701 | up_write(&priv->cmd.switch_sem); |
| 2702 | if (mlx4_is_mfunc(dev)) | ||
| 2703 | mutex_unlock(&priv->cmd.slave_cmd_mutex); | ||
| 2695 | } | 2704 | } |
| 2696 | 2705 | ||
| 2697 | struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) | 2706 | struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) |
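The cmd.c hunks wrap the event/polling mode switch in the slave command mutex on multi-function devices and clear priv->cmd.context after freeing it. The essential shape is an optional outer lock taken before, and released after, an existing write-side critical section, plus NULLing a freed pointer so a later switch cannot touch stale memory. A user-space sketch under those assumptions (names are invented; pthread locks stand in for the kernel mutex and rw-semaphore):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct cmd_state {
    bool multi_function;        /* outer lock only needed in this mode */
    pthread_mutex_t outer;      /* stands in for slave_cmd_mutex */
    pthread_rwlock_t switch_l;  /* stands in for switch_sem */
    void *context;
};

static void switch_to_polling(struct cmd_state *s)
{
    if (s->multi_function)
        pthread_mutex_lock(&s->outer);
    pthread_rwlock_wrlock(&s->switch_l);

    free(s->context);
    s->context = NULL;          /* never leave a dangling pointer behind */

    pthread_rwlock_unlock(&s->switch_l);
    if (s->multi_function)      /* release in reverse order of acquisition */
        pthread_mutex_unlock(&s->outer);
}

int main(void)
{
    struct cmd_state s = { .multi_function = true };

    pthread_mutex_init(&s.outer, NULL);
    pthread_rwlock_init(&s.switch_l, NULL);
    s.context = malloc(64);
    switch_to_polling(&s);
    return 0;
}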
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index eb13d3618162..4356f3a58002 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
| @@ -2719,13 +2719,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc) | |||
| 2719 | int total_pages; | 2719 | int total_pages; |
| 2720 | int total_mem; | 2720 | int total_mem; |
| 2721 | int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; | 2721 | int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f; |
| 2722 | int tot; | ||
| 2722 | 2723 | ||
| 2723 | sq_size = 1 << (log_sq_size + log_sq_sride + 4); | 2724 | sq_size = 1 << (log_sq_size + log_sq_sride + 4); |
| 2724 | rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4)); | 2725 | rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4)); |
| 2725 | total_mem = sq_size + rq_size; | 2726 | total_mem = sq_size + rq_size; |
| 2726 | total_pages = | 2727 | tot = (total_mem + (page_offset << 6)) >> page_shift; |
| 2727 | roundup_pow_of_two((total_mem + (page_offset << 6)) >> | 2728 | total_pages = !tot ? 1 : roundup_pow_of_two(tot); |
| 2728 | page_shift); | ||
| 2729 | 2729 | ||
| 2730 | return total_pages; | 2730 | return total_pages; |
| 2731 | } | 2731 | } |
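The qp_get_mtt_size() change avoids passing 0 to roundup_pow_of_two(), whose result is undefined for a zero argument, by reporting at least one page. The arithmetic can be checked stand-alone; the helper below is a local substitute for the kernel macro, not the kernel's implementation:

#include <stdio.h>

/* Local stand-in for roundup_pow_of_two(); valid for n >= 1 only. */
static unsigned int roundup_pow2(unsigned int n)
{
    unsigned int p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

/* Mirrors the fixed computation: a zero raw page count becomes 1 page. */
static int mtt_pages(int total_mem, int page_offset, int page_shift)
{
    int tot = (total_mem + (page_offset << 6)) >> page_shift;

    return !tot ? 1 : roundup_pow2(tot);
}

int main(void)
{
    printf("%d\n", mtt_pages(64, 0, 12));     /* tiny queue: 1, not undefined */
    printf("%d\n", mtt_pages(13000, 0, 12));  /* 3 raw pages round up to 4    */
    return 0;
}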
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 0804b478ad19..a0987cc5fe4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
| @@ -424,6 +424,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, | |||
| 424 | 424 | ||
| 425 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { | 425 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { |
| 426 | priv->channels.params = new_channels.params; | 426 | priv->channels.params = new_channels.params; |
| 427 | if (!netif_is_rxfh_configured(priv->netdev)) | ||
| 428 | mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, | ||
| 429 | MLX5E_INDIR_RQT_SIZE, count); | ||
| 427 | goto out; | 430 | goto out; |
| 428 | } | 431 | } |
| 429 | 432 | ||
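The set_channels hunk refreshes the default RSS indirection table for the new channel count when the interface is down, unless the user has pinned RSS through ethtool (netif_is_rxfh_configured()). The default table is a round-robin spread of entries over channels; a stand-alone sketch of that fill, where the table size is an arbitrary stand-in rather than the driver's MLX5E_INDIR_RQT_SIZE:

#include <stdio.h>

#define INDIR_TABLE_SIZE 128   /* illustrative size only */

/* Entry i steers to RX channel i % count, spreading flows evenly. */
static void build_default_indir(unsigned int *tbl, int len, int count)
{
    int i;

    for (i = 0; i < len; i++)
        tbl[i] = i % count;
}

int main(void)
{
    unsigned int tbl[INDIR_TABLE_SIZE];
    int i;

    build_default_indir(tbl, INDIR_TABLE_SIZE, 6);
    for (i = 0; i < 8; i++)
        printf("%u ", tbl[i]);   /* 0 1 2 3 4 5 0 1 */
    printf("\n");
    return 0;
}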
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index a1a3e2774989..a66b6ed80b30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
| @@ -1129,16 +1129,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, | |||
| 1129 | struct mlx5e_priv *priv = netdev_priv(dev); | 1129 | struct mlx5e_priv *priv = netdev_priv(dev); |
| 1130 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | 1130 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
| 1131 | struct mlx5_eswitch_rep *rep = rpriv->rep; | 1131 | struct mlx5_eswitch_rep *rep = rpriv->rep; |
| 1132 | int ret, pf_num; | 1132 | unsigned int fn; |
| 1133 | int ret; | ||
| 1133 | 1134 | ||
| 1134 | ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num); | 1135 | fn = PCI_FUNC(priv->mdev->pdev->devfn); |
| 1135 | if (ret) | 1136 | if (fn >= MLX5_MAX_PORTS) |
| 1136 | return ret; | 1137 | return -EOPNOTSUPP; |
| 1137 | 1138 | ||
| 1138 | if (rep->vport == MLX5_VPORT_UPLINK) | 1139 | if (rep->vport == MLX5_VPORT_UPLINK) |
| 1139 | ret = snprintf(buf, len, "p%d", pf_num); | 1140 | ret = snprintf(buf, len, "p%d", fn); |
| 1140 | else | 1141 | else |
| 1141 | ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1); | 1142 | ret = snprintf(buf, len, "pf%dvf%d", fn, rep->vport - 1); |
| 1142 | 1143 | ||
| 1143 | if (ret >= len) | 1144 | if (ret >= len) |
| 1144 | return -EOPNOTSUPP; | 1145 | return -EOPNOTSUPP; |
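The representor hunk reads the PF index straight from the PCI function number instead of querying the LAG state, then keeps the existing "p%d" / "pf%dvf%d" naming. PCI_FUNC() is simply the low three bits of devfn, so the naming logic can be exercised stand-alone; the port limit below is a placeholder for MLX5_MAX_PORTS:

#include <stdio.h>

#define PCI_FUNC(devfn)  ((devfn) & 0x07)  /* same definition as the kernel's */
#define MAX_PORTS        2                 /* placeholder limit */

/* Uplink representors are named "p<fn>", VF representors "pf<fn>vf<vf>". */
static int phys_port_name(char *buf, size_t len, unsigned int devfn,
                          int vport, int uplink_vport)
{
    unsigned int fn = PCI_FUNC(devfn);

    if (fn >= MAX_PORTS)
        return -1;
    if (vport == uplink_vport)
        return snprintf(buf, len, "p%u", fn);
    return snprintf(buf, len, "pf%uvf%d", fn, vport - 1);
}

int main(void)
{
    char name[32];

    if (phys_port_name(name, sizeof(name), 0x01, 3, 0) > 0)
        printf("%s\n", name);   /* pf1vf2 */
    return 0;
}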
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index be396e5e4e39..3dde5c7e0739 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
| @@ -1295,8 +1295,14 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, | |||
| 1295 | 1295 | ||
| 1296 | skb->protocol = *((__be16 *)(skb->data)); | 1296 | skb->protocol = *((__be16 *)(skb->data)); |
| 1297 | 1297 | ||
| 1298 | skb->ip_summed = CHECKSUM_COMPLETE; | 1298 | if (netdev->features & NETIF_F_RXCSUM) { |
| 1299 | skb->csum = csum_unfold((__force __sum16)cqe->check_sum); | 1299 | skb->ip_summed = CHECKSUM_COMPLETE; |
| 1300 | skb->csum = csum_unfold((__force __sum16)cqe->check_sum); | ||
| 1301 | stats->csum_complete++; | ||
| 1302 | } else { | ||
| 1303 | skb->ip_summed = CHECKSUM_NONE; | ||
| 1304 | stats->csum_none++; | ||
| 1305 | } | ||
| 1300 | 1306 | ||
| 1301 | if (unlikely(mlx5e_rx_hw_stamp(tstamp))) | 1307 | if (unlikely(mlx5e_rx_hw_stamp(tstamp))) |
| 1302 | skb_hwtstamps(skb)->hwtstamp = | 1308 | skb_hwtstamps(skb)->hwtstamp = |
| @@ -1315,7 +1321,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, | |||
| 1315 | 1321 | ||
| 1316 | skb->dev = netdev; | 1322 | skb->dev = netdev; |
| 1317 | 1323 | ||
| 1318 | stats->csum_complete++; | ||
| 1319 | stats->packets++; | 1324 | stats->packets++; |
| 1320 | stats->bytes += cqe_bcnt; | 1325 | stats->bytes += cqe_bcnt; |
| 1321 | } | 1326 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d0b28251abf2..ecd2c747f726 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
| @@ -1931,7 +1931,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | |||
| 1931 | u64 node_guid; | 1931 | u64 node_guid; |
| 1932 | int err = 0; | 1932 | int err = 0; |
| 1933 | 1933 | ||
| 1934 | if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) | 1934 | if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) |
| 1935 | return -EPERM; | 1935 | return -EPERM; |
| 1936 | if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) | 1936 | if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) |
| 1937 | return -EINVAL; | 1937 | return -EINVAL; |
| @@ -2005,7 +2005,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, | |||
| 2005 | { | 2005 | { |
| 2006 | struct mlx5_vport *evport; | 2006 | struct mlx5_vport *evport; |
| 2007 | 2007 | ||
| 2008 | if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) | 2008 | if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) |
| 2009 | return -EPERM; | 2009 | return -EPERM; |
| 2010 | if (!LEGAL_VPORT(esw, vport)) | 2010 | if (!LEGAL_VPORT(esw, vport)) |
| 2011 | return -EINVAL; | 2011 | return -EINVAL; |
| @@ -2297,19 +2297,24 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) | |||
| 2297 | int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, | 2297 | int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, |
| 2298 | u32 max_rate, u32 min_rate) | 2298 | u32 max_rate, u32 min_rate) |
| 2299 | { | 2299 | { |
| 2300 | u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); | ||
| 2301 | bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && | ||
| 2302 | fw_max_bw_share >= MLX5_MIN_BW_SHARE; | ||
| 2303 | bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); | ||
| 2304 | struct mlx5_vport *evport; | 2300 | struct mlx5_vport *evport; |
| 2301 | u32 fw_max_bw_share; | ||
| 2305 | u32 previous_min_rate; | 2302 | u32 previous_min_rate; |
| 2306 | u32 divider; | 2303 | u32 divider; |
| 2304 | bool min_rate_supported; | ||
| 2305 | bool max_rate_supported; | ||
| 2307 | int err = 0; | 2306 | int err = 0; |
| 2308 | 2307 | ||
| 2309 | if (!ESW_ALLOWED(esw)) | 2308 | if (!ESW_ALLOWED(esw)) |
| 2310 | return -EPERM; | 2309 | return -EPERM; |
| 2311 | if (!LEGAL_VPORT(esw, vport)) | 2310 | if (!LEGAL_VPORT(esw, vport)) |
| 2312 | return -EINVAL; | 2311 | return -EINVAL; |
| 2312 | |||
| 2313 | fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); | ||
| 2314 | min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && | ||
| 2315 | fw_max_bw_share >= MLX5_MIN_BW_SHARE; | ||
| 2316 | max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); | ||
| 2317 | |||
| 2313 | if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported)) | 2318 | if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported)) |
| 2314 | return -EOPNOTSUPP; | 2319 | return -EOPNOTSUPP; |
| 2315 | 2320 | ||
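The eswitch hunks add explicit NULL checks on esw and defer the QoS capability reads until after the ESW_ALLOWED and LEGAL_VPORT validation, so a NULL esw can no longer be dereferenced from the declaration block. A tiny illustration of that ordering with invented names:

/* Invented types and fields; only the ordering matters: validate the
 * pointer and the arguments first, read derived values afterwards.
 */
struct sw { int bw_share_cap; int rate_limit_cap; };

static int set_vport_rate(struct sw *sw, int vport, int min_rate, int max_rate)
{
    int min_ok, max_ok;

    if (!sw)                        /* previously dereferenced before this check */
        return -1;
    if (vport < 0)
        return -1;

    min_ok = sw->bw_share_cap;      /* safe: sw is known to be valid */
    max_ok = sw->rate_limit_cap;

    if ((min_rate && !min_ok) || (max_rate && !max_ok))
        return -1;
    return 0;
}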
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f2cfa012315e..0be3eb86dd84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
| @@ -263,10 +263,11 @@ static void nested_down_write_ref_node(struct fs_node *node, | |||
| 263 | } | 263 | } |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | static void down_write_ref_node(struct fs_node *node) | 266 | static void down_write_ref_node(struct fs_node *node, bool locked) |
| 267 | { | 267 | { |
| 268 | if (node) { | 268 | if (node) { |
| 269 | down_write(&node->lock); | 269 | if (!locked) |
| 270 | down_write(&node->lock); | ||
| 270 | refcount_inc(&node->refcount); | 271 | refcount_inc(&node->refcount); |
| 271 | } | 272 | } |
| 272 | } | 273 | } |
| @@ -277,13 +278,14 @@ static void up_read_ref_node(struct fs_node *node) | |||
| 277 | up_read(&node->lock); | 278 | up_read(&node->lock); |
| 278 | } | 279 | } |
| 279 | 280 | ||
| 280 | static void up_write_ref_node(struct fs_node *node) | 281 | static void up_write_ref_node(struct fs_node *node, bool locked) |
| 281 | { | 282 | { |
| 282 | refcount_dec(&node->refcount); | 283 | refcount_dec(&node->refcount); |
| 283 | up_write(&node->lock); | 284 | if (!locked) |
| 285 | up_write(&node->lock); | ||
| 284 | } | 286 | } |
| 285 | 287 | ||
| 286 | static void tree_put_node(struct fs_node *node) | 288 | static void tree_put_node(struct fs_node *node, bool locked) |
| 287 | { | 289 | { |
| 288 | struct fs_node *parent_node = node->parent; | 290 | struct fs_node *parent_node = node->parent; |
| 289 | 291 | ||
| @@ -294,27 +296,27 @@ static void tree_put_node(struct fs_node *node) | |||
| 294 | /* Only root namespace doesn't have parent and we just | 296 | /* Only root namespace doesn't have parent and we just |
| 295 | * need to free its node. | 297 | * need to free its node. |
| 296 | */ | 298 | */ |
| 297 | down_write_ref_node(parent_node); | 299 | down_write_ref_node(parent_node, locked); |
| 298 | list_del_init(&node->list); | 300 | list_del_init(&node->list); |
| 299 | if (node->del_sw_func) | 301 | if (node->del_sw_func) |
| 300 | node->del_sw_func(node); | 302 | node->del_sw_func(node); |
| 301 | up_write_ref_node(parent_node); | 303 | up_write_ref_node(parent_node, locked); |
| 302 | } else { | 304 | } else { |
| 303 | kfree(node); | 305 | kfree(node); |
| 304 | } | 306 | } |
| 305 | node = NULL; | 307 | node = NULL; |
| 306 | } | 308 | } |
| 307 | if (!node && parent_node) | 309 | if (!node && parent_node) |
| 308 | tree_put_node(parent_node); | 310 | tree_put_node(parent_node, locked); |
| 309 | } | 311 | } |
| 310 | 312 | ||
| 311 | static int tree_remove_node(struct fs_node *node) | 313 | static int tree_remove_node(struct fs_node *node, bool locked) |
| 312 | { | 314 | { |
| 313 | if (refcount_read(&node->refcount) > 1) { | 315 | if (refcount_read(&node->refcount) > 1) { |
| 314 | refcount_dec(&node->refcount); | 316 | refcount_dec(&node->refcount); |
| 315 | return -EEXIST; | 317 | return -EEXIST; |
| 316 | } | 318 | } |
| 317 | tree_put_node(node); | 319 | tree_put_node(node, locked); |
| 318 | return 0; | 320 | return 0; |
| 319 | } | 321 | } |
| 320 | 322 | ||
| @@ -420,22 +422,34 @@ static void del_sw_flow_table(struct fs_node *node) | |||
| 420 | kfree(ft); | 422 | kfree(ft); |
| 421 | } | 423 | } |
| 422 | 424 | ||
| 423 | static void del_sw_hw_rule(struct fs_node *node) | 425 | static void modify_fte(struct fs_fte *fte) |
| 424 | { | 426 | { |
| 425 | struct mlx5_flow_root_namespace *root; | 427 | struct mlx5_flow_root_namespace *root; |
| 426 | struct mlx5_flow_rule *rule; | ||
| 427 | struct mlx5_flow_table *ft; | 428 | struct mlx5_flow_table *ft; |
| 428 | struct mlx5_flow_group *fg; | 429 | struct mlx5_flow_group *fg; |
| 429 | struct fs_fte *fte; | 430 | struct mlx5_core_dev *dev; |
| 430 | int modify_mask; | ||
| 431 | struct mlx5_core_dev *dev = get_dev(node); | ||
| 432 | int err; | 431 | int err; |
| 433 | bool update_fte = false; | ||
| 434 | 432 | ||
| 435 | fs_get_obj(rule, node); | ||
| 436 | fs_get_obj(fte, rule->node.parent); | ||
| 437 | fs_get_obj(fg, fte->node.parent); | 433 | fs_get_obj(fg, fte->node.parent); |
| 438 | fs_get_obj(ft, fg->node.parent); | 434 | fs_get_obj(ft, fg->node.parent); |
| 435 | dev = get_dev(&fte->node); | ||
| 436 | |||
| 437 | root = find_root(&ft->node); | ||
| 438 | err = root->cmds->update_fte(dev, ft, fg->id, fte->modify_mask, fte); | ||
| 439 | if (err) | ||
| 440 | mlx5_core_warn(dev, | ||
| 441 | "%s can't del rule fg id=%d fte_index=%d\n", | ||
| 442 | __func__, fg->id, fte->index); | ||
| 443 | fte->modify_mask = 0; | ||
| 444 | } | ||
| 445 | |||
| 446 | static void del_sw_hw_rule(struct fs_node *node) | ||
| 447 | { | ||
| 448 | struct mlx5_flow_rule *rule; | ||
| 449 | struct fs_fte *fte; | ||
| 450 | |||
| 451 | fs_get_obj(rule, node); | ||
| 452 | fs_get_obj(fte, rule->node.parent); | ||
| 439 | trace_mlx5_fs_del_rule(rule); | 453 | trace_mlx5_fs_del_rule(rule); |
| 440 | if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { | 454 | if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { |
| 441 | mutex_lock(&rule->dest_attr.ft->lock); | 455 | mutex_lock(&rule->dest_attr.ft->lock); |
| @@ -445,27 +459,19 @@ static void del_sw_hw_rule(struct fs_node *node) | |||
| 445 | 459 | ||
| 446 | if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && | 460 | if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && |
| 447 | --fte->dests_size) { | 461 | --fte->dests_size) { |
| 448 | modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | | 462 | fte->modify_mask |= |
| 449 | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); | 463 | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | |
| 464 | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); | ||
| 450 | fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; | 465 | fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; |
| 451 | update_fte = true; | ||
| 452 | goto out; | 466 | goto out; |
| 453 | } | 467 | } |
| 454 | 468 | ||
| 455 | if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && | 469 | if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && |
| 456 | --fte->dests_size) { | 470 | --fte->dests_size) { |
| 457 | modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); | 471 | fte->modify_mask |= |
| 458 | update_fte = true; | 472 | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); |
| 459 | } | 473 | } |
| 460 | out: | 474 | out: |
| 461 | root = find_root(&ft->node); | ||
| 462 | if (update_fte && fte->dests_size) { | ||
| 463 | err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte); | ||
| 464 | if (err) | ||
| 465 | mlx5_core_warn(dev, | ||
| 466 | "%s can't del rule fg id=%d fte_index=%d\n", | ||
| 467 | __func__, fg->id, fte->index); | ||
| 468 | } | ||
| 469 | kfree(rule); | 475 | kfree(rule); |
| 470 | } | 476 | } |
| 471 | 477 | ||
| @@ -491,6 +497,7 @@ static void del_hw_fte(struct fs_node *node) | |||
| 491 | mlx5_core_warn(dev, | 497 | mlx5_core_warn(dev, |
| 492 | "flow steering can't delete fte in index %d of flow group id %d\n", | 498 | "flow steering can't delete fte in index %d of flow group id %d\n", |
| 493 | fte->index, fg->id); | 499 | fte->index, fg->id); |
| 500 | node->active = 0; | ||
| 494 | } | 501 | } |
| 495 | } | 502 | } |
| 496 | 503 | ||
| @@ -591,7 +598,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft, | |||
| 591 | fte->node.type = FS_TYPE_FLOW_ENTRY; | 598 | fte->node.type = FS_TYPE_FLOW_ENTRY; |
| 592 | fte->action = *flow_act; | 599 | fte->action = *flow_act; |
| 593 | 600 | ||
| 594 | tree_init_node(&fte->node, del_hw_fte, del_sw_fte); | 601 | tree_init_node(&fte->node, NULL, del_sw_fte); |
| 595 | 602 | ||
| 596 | return fte; | 603 | return fte; |
| 597 | } | 604 | } |
| @@ -858,7 +865,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, | |||
| 858 | fs_get_obj(fte, rule->node.parent); | 865 | fs_get_obj(fte, rule->node.parent); |
| 859 | if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) | 866 | if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) |
| 860 | return -EINVAL; | 867 | return -EINVAL; |
| 861 | down_write_ref_node(&fte->node); | 868 | down_write_ref_node(&fte->node, false); |
| 862 | fs_get_obj(fg, fte->node.parent); | 869 | fs_get_obj(fg, fte->node.parent); |
| 863 | fs_get_obj(ft, fg->node.parent); | 870 | fs_get_obj(ft, fg->node.parent); |
| 864 | 871 | ||
| @@ -866,7 +873,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, | |||
| 866 | root = find_root(&ft->node); | 873 | root = find_root(&ft->node); |
| 867 | err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id, | 874 | err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id, |
| 868 | modify_mask, fte); | 875 | modify_mask, fte); |
| 869 | up_write_ref_node(&fte->node); | 876 | up_write_ref_node(&fte->node, false); |
| 870 | 877 | ||
| 871 | return err; | 878 | return err; |
| 872 | } | 879 | } |
| @@ -1016,11 +1023,11 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa | |||
| 1016 | if (err) | 1023 | if (err) |
| 1017 | goto destroy_ft; | 1024 | goto destroy_ft; |
| 1018 | ft->node.active = true; | 1025 | ft->node.active = true; |
| 1019 | down_write_ref_node(&fs_prio->node); | 1026 | down_write_ref_node(&fs_prio->node, false); |
| 1020 | tree_add_node(&ft->node, &fs_prio->node); | 1027 | tree_add_node(&ft->node, &fs_prio->node); |
| 1021 | list_add_flow_table(ft, fs_prio); | 1028 | list_add_flow_table(ft, fs_prio); |
| 1022 | fs_prio->num_ft++; | 1029 | fs_prio->num_ft++; |
| 1023 | up_write_ref_node(&fs_prio->node); | 1030 | up_write_ref_node(&fs_prio->node, false); |
| 1024 | mutex_unlock(&root->chain_lock); | 1031 | mutex_unlock(&root->chain_lock); |
| 1025 | trace_mlx5_fs_add_ft(ft); | 1032 | trace_mlx5_fs_add_ft(ft); |
| 1026 | return ft; | 1033 | return ft; |
| @@ -1114,17 +1121,17 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, | |||
| 1114 | if (ft->autogroup.active) | 1121 | if (ft->autogroup.active) |
| 1115 | return ERR_PTR(-EPERM); | 1122 | return ERR_PTR(-EPERM); |
| 1116 | 1123 | ||
| 1117 | down_write_ref_node(&ft->node); | 1124 | down_write_ref_node(&ft->node, false); |
| 1118 | fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria, | 1125 | fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria, |
| 1119 | start_index, end_index, | 1126 | start_index, end_index, |
| 1120 | ft->node.children.prev); | 1127 | ft->node.children.prev); |
| 1121 | up_write_ref_node(&ft->node); | 1128 | up_write_ref_node(&ft->node, false); |
| 1122 | if (IS_ERR(fg)) | 1129 | if (IS_ERR(fg)) |
| 1123 | return fg; | 1130 | return fg; |
| 1124 | 1131 | ||
| 1125 | err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id); | 1132 | err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id); |
| 1126 | if (err) { | 1133 | if (err) { |
| 1127 | tree_put_node(&fg->node); | 1134 | tree_put_node(&fg->node, false); |
| 1128 | return ERR_PTR(err); | 1135 | return ERR_PTR(err); |
| 1129 | } | 1136 | } |
| 1130 | trace_mlx5_fs_add_fg(fg); | 1137 | trace_mlx5_fs_add_fg(fg); |
| @@ -1521,10 +1528,10 @@ static void free_match_list(struct match_list_head *head) | |||
| 1521 | struct match_list *iter, *match_tmp; | 1528 | struct match_list *iter, *match_tmp; |
| 1522 | 1529 | ||
| 1523 | list_del(&head->first.list); | 1530 | list_del(&head->first.list); |
| 1524 | tree_put_node(&head->first.g->node); | 1531 | tree_put_node(&head->first.g->node, false); |
| 1525 | list_for_each_entry_safe(iter, match_tmp, &head->list, | 1532 | list_for_each_entry_safe(iter, match_tmp, &head->list, |
| 1526 | list) { | 1533 | list) { |
| 1527 | tree_put_node(&iter->g->node); | 1534 | tree_put_node(&iter->g->node, false); |
| 1528 | list_del(&iter->list); | 1535 | list_del(&iter->list); |
| 1529 | kfree(iter); | 1536 | kfree(iter); |
| 1530 | } | 1537 | } |
| @@ -1601,11 +1608,16 @@ lookup_fte_locked(struct mlx5_flow_group *g, | |||
| 1601 | fte_tmp = NULL; | 1608 | fte_tmp = NULL; |
| 1602 | goto out; | 1609 | goto out; |
| 1603 | } | 1610 | } |
| 1611 | if (!fte_tmp->node.active) { | ||
| 1612 | tree_put_node(&fte_tmp->node, false); | ||
| 1613 | fte_tmp = NULL; | ||
| 1614 | goto out; | ||
| 1615 | } | ||
| 1604 | 1616 | ||
| 1605 | nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); | 1617 | nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); |
| 1606 | out: | 1618 | out: |
| 1607 | if (take_write) | 1619 | if (take_write) |
| 1608 | up_write_ref_node(&g->node); | 1620 | up_write_ref_node(&g->node, false); |
| 1609 | else | 1621 | else |
| 1610 | up_read_ref_node(&g->node); | 1622 | up_read_ref_node(&g->node); |
| 1611 | return fte_tmp; | 1623 | return fte_tmp; |
| @@ -1647,8 +1659,8 @@ search_again_locked: | |||
| 1647 | continue; | 1659 | continue; |
| 1648 | rule = add_rule_fg(g, spec->match_value, | 1660 | rule = add_rule_fg(g, spec->match_value, |
| 1649 | flow_act, dest, dest_num, fte_tmp); | 1661 | flow_act, dest, dest_num, fte_tmp); |
| 1650 | up_write_ref_node(&fte_tmp->node); | 1662 | up_write_ref_node(&fte_tmp->node, false); |
| 1651 | tree_put_node(&fte_tmp->node); | 1663 | tree_put_node(&fte_tmp->node, false); |
| 1652 | kmem_cache_free(steering->ftes_cache, fte); | 1664 | kmem_cache_free(steering->ftes_cache, fte); |
| 1653 | return rule; | 1665 | return rule; |
| 1654 | } | 1666 | } |
| @@ -1684,7 +1696,7 @@ skip_search: | |||
| 1684 | 1696 | ||
| 1685 | err = insert_fte(g, fte); | 1697 | err = insert_fte(g, fte); |
| 1686 | if (err) { | 1698 | if (err) { |
| 1687 | up_write_ref_node(&g->node); | 1699 | up_write_ref_node(&g->node, false); |
| 1688 | if (err == -ENOSPC) | 1700 | if (err == -ENOSPC) |
| 1689 | continue; | 1701 | continue; |
| 1690 | kmem_cache_free(steering->ftes_cache, fte); | 1702 | kmem_cache_free(steering->ftes_cache, fte); |
| @@ -1692,11 +1704,11 @@ skip_search: | |||
| 1692 | } | 1704 | } |
| 1693 | 1705 | ||
| 1694 | nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); | 1706 | nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); |
| 1695 | up_write_ref_node(&g->node); | 1707 | up_write_ref_node(&g->node, false); |
| 1696 | rule = add_rule_fg(g, spec->match_value, | 1708 | rule = add_rule_fg(g, spec->match_value, |
| 1697 | flow_act, dest, dest_num, fte); | 1709 | flow_act, dest, dest_num, fte); |
| 1698 | up_write_ref_node(&fte->node); | 1710 | up_write_ref_node(&fte->node, false); |
| 1699 | tree_put_node(&fte->node); | 1711 | tree_put_node(&fte->node, false); |
| 1700 | return rule; | 1712 | return rule; |
| 1701 | } | 1713 | } |
| 1702 | rule = ERR_PTR(-ENOENT); | 1714 | rule = ERR_PTR(-ENOENT); |
| @@ -1738,7 +1750,7 @@ search_again_locked: | |||
| 1738 | err = build_match_list(&match_head, ft, spec); | 1750 | err = build_match_list(&match_head, ft, spec); |
| 1739 | if (err) { | 1751 | if (err) { |
| 1740 | if (take_write) | 1752 | if (take_write) |
| 1741 | up_write_ref_node(&ft->node); | 1753 | up_write_ref_node(&ft->node, false); |
| 1742 | else | 1754 | else |
| 1743 | up_read_ref_node(&ft->node); | 1755 | up_read_ref_node(&ft->node); |
| 1744 | return ERR_PTR(err); | 1756 | return ERR_PTR(err); |
| @@ -1753,7 +1765,7 @@ search_again_locked: | |||
| 1753 | if (!IS_ERR(rule) || | 1765 | if (!IS_ERR(rule) || |
| 1754 | (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) { | 1766 | (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) { |
| 1755 | if (take_write) | 1767 | if (take_write) |
| 1756 | up_write_ref_node(&ft->node); | 1768 | up_write_ref_node(&ft->node, false); |
| 1757 | return rule; | 1769 | return rule; |
| 1758 | } | 1770 | } |
| 1759 | 1771 | ||
| @@ -1769,12 +1781,12 @@ search_again_locked: | |||
| 1769 | g = alloc_auto_flow_group(ft, spec); | 1781 | g = alloc_auto_flow_group(ft, spec); |
| 1770 | if (IS_ERR(g)) { | 1782 | if (IS_ERR(g)) { |
| 1771 | rule = ERR_CAST(g); | 1783 | rule = ERR_CAST(g); |
| 1772 | up_write_ref_node(&ft->node); | 1784 | up_write_ref_node(&ft->node, false); |
| 1773 | return rule; | 1785 | return rule; |
| 1774 | } | 1786 | } |
| 1775 | 1787 | ||
| 1776 | nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); | 1788 | nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); |
| 1777 | up_write_ref_node(&ft->node); | 1789 | up_write_ref_node(&ft->node, false); |
| 1778 | 1790 | ||
| 1779 | err = create_auto_flow_group(ft, g); | 1791 | err = create_auto_flow_group(ft, g); |
| 1780 | if (err) | 1792 | if (err) |
| @@ -1793,17 +1805,17 @@ search_again_locked: | |||
| 1793 | } | 1805 | } |
| 1794 | 1806 | ||
| 1795 | nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); | 1807 | nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); |
| 1796 | up_write_ref_node(&g->node); | 1808 | up_write_ref_node(&g->node, false); |
| 1797 | rule = add_rule_fg(g, spec->match_value, flow_act, dest, | 1809 | rule = add_rule_fg(g, spec->match_value, flow_act, dest, |
| 1798 | dest_num, fte); | 1810 | dest_num, fte); |
| 1799 | up_write_ref_node(&fte->node); | 1811 | up_write_ref_node(&fte->node, false); |
| 1800 | tree_put_node(&fte->node); | 1812 | tree_put_node(&fte->node, false); |
| 1801 | tree_put_node(&g->node); | 1813 | tree_put_node(&g->node, false); |
| 1802 | return rule; | 1814 | return rule; |
| 1803 | 1815 | ||
| 1804 | err_release_fg: | 1816 | err_release_fg: |
| 1805 | up_write_ref_node(&g->node); | 1817 | up_write_ref_node(&g->node, false); |
| 1806 | tree_put_node(&g->node); | 1818 | tree_put_node(&g->node, false); |
| 1807 | return ERR_PTR(err); | 1819 | return ERR_PTR(err); |
| 1808 | } | 1820 | } |
| 1809 | 1821 | ||
| @@ -1866,10 +1878,33 @@ EXPORT_SYMBOL(mlx5_add_flow_rules); | |||
| 1866 | 1878 | ||
| 1867 | void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) | 1879 | void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) |
| 1868 | { | 1880 | { |
| 1881 | struct fs_fte *fte; | ||
| 1869 | int i; | 1882 | int i; |
| 1870 | 1883 | ||
| 1884 | /* In order to consolidate the HW changes we lock the FTE for other | ||
| 1885 | * changes, and increase its refcount, in order not to perform the | ||
| 1886 | * "del" functions of the FTE. Will handle them here. | ||
| 1887 | * The removal of the rules is done under locked FTE. | ||
| 1888 | * After removing all the handle's rules, if there are remaining | ||
| 1889 | * rules, it means we just need to modify the FTE in FW, and | ||
| 1890 | * unlock/decrease the refcount we increased before. | ||
| 1891 | * Otherwise, it means the FTE should be deleted. First delete the | ||
| 1892 | * FTE in FW. Then, unlock the FTE, and proceed the tree_put_node of | ||
| 1893 | * the FTE, which will handle the last decrease of the refcount, as | ||
| 1894 | * well as required handling of its parent. | ||
| 1895 | */ | ||
| 1896 | fs_get_obj(fte, handle->rule[0]->node.parent); | ||
| 1897 | down_write_ref_node(&fte->node, false); | ||
| 1871 | for (i = handle->num_rules - 1; i >= 0; i--) | 1898 | for (i = handle->num_rules - 1; i >= 0; i--) |
| 1872 | tree_remove_node(&handle->rule[i]->node); | 1899 | tree_remove_node(&handle->rule[i]->node, true); |
| 1900 | if (fte->modify_mask && fte->dests_size) { | ||
| 1901 | modify_fte(fte); | ||
| 1902 | up_write_ref_node(&fte->node, false); | ||
| 1903 | } else { | ||
| 1904 | del_hw_fte(&fte->node); | ||
| 1905 | up_write(&fte->node.lock); | ||
| 1906 | tree_put_node(&fte->node, false); | ||
| 1907 | } | ||
| 1873 | kfree(handle); | 1908 | kfree(handle); |
| 1874 | } | 1909 | } |
| 1875 | EXPORT_SYMBOL(mlx5_del_flow_rules); | 1910 | EXPORT_SYMBOL(mlx5_del_flow_rules); |
| @@ -1972,7 +2007,7 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft) | |||
| 1972 | mutex_unlock(&root->chain_lock); | 2007 | mutex_unlock(&root->chain_lock); |
| 1973 | return err; | 2008 | return err; |
| 1974 | } | 2009 | } |
| 1975 | if (tree_remove_node(&ft->node)) | 2010 | if (tree_remove_node(&ft->node, false)) |
| 1976 | mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n", | 2011 | mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n", |
| 1977 | ft->id); | 2012 | ft->id); |
| 1978 | mutex_unlock(&root->chain_lock); | 2013 | mutex_unlock(&root->chain_lock); |
| @@ -1983,7 +2018,7 @@ EXPORT_SYMBOL(mlx5_destroy_flow_table); | |||
| 1983 | 2018 | ||
| 1984 | void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) | 2019 | void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) |
| 1985 | { | 2020 | { |
| 1986 | if (tree_remove_node(&fg->node)) | 2021 | if (tree_remove_node(&fg->node, false)) |
| 1987 | mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n", | 2022 | mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n", |
| 1988 | fg->id); | 2023 | fg->id); |
| 1989 | } | 2024 | } |
| @@ -2367,8 +2402,8 @@ static void clean_tree(struct fs_node *node) | |||
| 2367 | tree_get_node(node); | 2402 | tree_get_node(node); |
| 2368 | list_for_each_entry_safe(iter, temp, &node->children, list) | 2403 | list_for_each_entry_safe(iter, temp, &node->children, list) |
| 2369 | clean_tree(iter); | 2404 | clean_tree(iter); |
| 2370 | tree_put_node(node); | 2405 | tree_put_node(node, false); |
| 2371 | tree_remove_node(node); | 2406 | tree_remove_node(node, false); |
| 2372 | } | 2407 | } |
| 2373 | } | 2408 | } |
| 2374 | 2409 | ||
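The fs_core.c rework replaces per-rule firmware updates during mlx5_del_flow_rules() with one consolidated operation: while the FTE is held write-locked, removing each rule only accumulates bits in the FTE's new modify_mask, and a single update_fte is issued at the end, or a single delete if no destinations remain. A compressed sketch of that control flow with simplified stand-in types; this is only the shape of the consolidation, not the driver's code:

/* Simplified stand-in types; the real code works on struct fs_fte and
 * struct mlx5_flow_rule under rw-semaphore locking.
 */
struct fte {
    int dests_size;
    unsigned int modify_mask;   /* accumulated while rules are removed */
};

static void remove_rule(struct fte *fte, unsigned int mask_bits)
{
    fte->dests_size--;
    fte->modify_mask |= mask_bits;   /* no firmware call here any more */
}

static void del_flow_rules(struct fte *fte, int num_rules,
                           void (*modify_fw)(struct fte *),
                           void (*delete_fw)(struct fte *))
{
    int i;

    /* lock the FTE for write ... */
    for (i = num_rules - 1; i >= 0; i--)
        remove_rule(fte, 1u << i);

    if (fte->modify_mask && fte->dests_size)
        modify_fw(fte);     /* one consolidated firmware update */
    else
        delete_fw(fte);     /* nothing left: delete the FTE in firmware */
    /* ... unlock and drop the reference */
}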
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 2dc86347af58..87de0e4d9124 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | |||
| @@ -172,6 +172,7 @@ struct fs_fte { | |||
| 172 | enum fs_fte_status status; | 172 | enum fs_fte_status status; |
| 173 | struct mlx5_fc *counter; | 173 | struct mlx5_fc *counter; |
| 174 | struct rhash_head hash; | 174 | struct rhash_head hash; |
| 175 | int modify_mask; | ||
| 175 | }; | 176 | }; |
| 176 | 177 | ||
| 177 | /* Type of children is mlx5_flow_table/namespace */ | 178 | /* Type of children is mlx5_flow_table/namespace */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 48aa6e030bcf..959605559858 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c | |||
| @@ -595,27 +595,6 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) | |||
| 595 | err); | 595 | err); |
| 596 | } | 596 | } |
| 597 | 597 | ||
| 598 | int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num) | ||
| 599 | { | ||
| 600 | struct mlx5_lag *ldev; | ||
| 601 | int n; | ||
| 602 | |||
| 603 | ldev = mlx5_lag_dev_get(dev); | ||
| 604 | if (!ldev) { | ||
| 605 | mlx5_core_warn(dev, "no lag device, can't get pf num\n"); | ||
| 606 | return -EINVAL; | ||
| 607 | } | ||
| 608 | |||
| 609 | for (n = 0; n < MLX5_MAX_PORTS; n++) | ||
| 610 | if (ldev->pf[n].dev == dev) { | ||
| 611 | *pf_num = n; | ||
| 612 | return 0; | ||
| 613 | } | ||
| 614 | |||
| 615 | mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n"); | ||
| 616 | return -EINVAL; | ||
| 617 | } | ||
| 618 | |||
| 619 | /* Must be called with intf_mutex held */ | 598 | /* Must be called with intf_mutex held */ |
| 620 | void mlx5_lag_remove(struct mlx5_core_dev *dev) | 599 | void mlx5_lag_remove(struct mlx5_core_dev *dev) |
| 621 | { | 600 | { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 9529cf9623e3..7b331674622c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | |||
| @@ -188,8 +188,6 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) | |||
| 188 | MLX5_CAP_GEN(dev, lag_master); | 188 | MLX5_CAP_GEN(dev, lag_master); |
| 189 | } | 189 | } |
| 190 | 190 | ||
| 191 | int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num); | ||
| 192 | |||
| 193 | void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); | 191 | void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); |
| 194 | void mlx5_lag_update(struct mlx5_core_dev *dev); | 192 | void mlx5_lag_update(struct mlx5_core_dev *dev); |
| 195 | 193 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 0b85c7252f9e..472f63f9fac5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | |||
| @@ -111,7 +111,6 @@ struct mlxsw_thermal { | |||
| 111 | struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; | 111 | struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; |
| 112 | enum thermal_device_mode mode; | 112 | enum thermal_device_mode mode; |
| 113 | struct mlxsw_thermal_module *tz_module_arr; | 113 | struct mlxsw_thermal_module *tz_module_arr; |
| 114 | unsigned int tz_module_num; | ||
| 115 | }; | 114 | }; |
| 116 | 115 | ||
| 117 | static inline u8 mlxsw_state_to_duty(int state) | 116 | static inline u8 mlxsw_state_to_duty(int state) |
| @@ -711,6 +710,9 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core, | |||
| 711 | 710 | ||
| 712 | module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); | 711 | module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); |
| 713 | module_tz = &thermal->tz_module_arr[module]; | 712 | module_tz = &thermal->tz_module_arr[module]; |
| 713 | /* Skip if parent is already set (case of port split). */ | ||
| 714 | if (module_tz->parent) | ||
| 715 | return 0; | ||
| 714 | module_tz->module = module; | 716 | module_tz->module = module; |
| 715 | module_tz->parent = thermal; | 717 | module_tz->parent = thermal; |
| 716 | memcpy(module_tz->trips, default_thermal_trips, | 718 | memcpy(module_tz->trips, default_thermal_trips, |
| @@ -718,13 +720,7 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core, | |||
| 718 | /* Initialize all trip point. */ | 720 | /* Initialize all trip point. */ |
| 719 | mlxsw_thermal_module_trips_reset(module_tz); | 721 | mlxsw_thermal_module_trips_reset(module_tz); |
| 720 | /* Update trip point according to the module data. */ | 722 | /* Update trip point according to the module data. */ |
| 721 | err = mlxsw_thermal_module_trips_update(dev, core, module_tz); | 723 | return mlxsw_thermal_module_trips_update(dev, core, module_tz); |
| 722 | if (err) | ||
| 723 | return err; | ||
| 724 | |||
| 725 | thermal->tz_module_num++; | ||
| 726 | |||
| 727 | return 0; | ||
| 728 | } | 724 | } |
| 729 | 725 | ||
| 730 | static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz) | 726 | static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz) |
| @@ -732,6 +728,7 @@ static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz) | |||
| 732 | if (module_tz && module_tz->tzdev) { | 728 | if (module_tz && module_tz->tzdev) { |
| 733 | mlxsw_thermal_module_tz_fini(module_tz->tzdev); | 729 | mlxsw_thermal_module_tz_fini(module_tz->tzdev); |
| 734 | module_tz->tzdev = NULL; | 730 | module_tz->tzdev = NULL; |
| 731 | module_tz->parent = NULL; | ||
| 735 | } | 732 | } |
| 736 | } | 733 | } |
| 737 | 734 | ||
| @@ -740,6 +737,7 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, | |||
| 740 | struct mlxsw_thermal *thermal) | 737 | struct mlxsw_thermal *thermal) |
| 741 | { | 738 | { |
| 742 | unsigned int module_count = mlxsw_core_max_ports(core); | 739 | unsigned int module_count = mlxsw_core_max_ports(core); |
| 740 | struct mlxsw_thermal_module *module_tz; | ||
| 743 | int i, err; | 741 | int i, err; |
| 744 | 742 | ||
| 745 | thermal->tz_module_arr = kcalloc(module_count, | 743 | thermal->tz_module_arr = kcalloc(module_count, |
| @@ -754,8 +752,11 @@ mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, | |||
| 754 | goto err_unreg_tz_module_arr; | 752 | goto err_unreg_tz_module_arr; |
| 755 | } | 753 | } |
| 756 | 754 | ||
| 757 | for (i = 0; i < thermal->tz_module_num; i++) { | 755 | for (i = 0; i < module_count - 1; i++) { |
| 758 | err = mlxsw_thermal_module_tz_init(&thermal->tz_module_arr[i]); | 756 | module_tz = &thermal->tz_module_arr[i]; |
| 757 | if (!module_tz->parent) | ||
| 758 | continue; | ||
| 759 | err = mlxsw_thermal_module_tz_init(module_tz); | ||
| 759 | if (err) | 760 | if (err) |
| 760 | goto err_unreg_tz_module_arr; | 761 | goto err_unreg_tz_module_arr; |
| 761 | } | 762 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index 68bee9572a1b..00c390024350 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c | |||
| @@ -34,6 +34,18 @@ struct mlxsw_m_port { | |||
| 34 | u8 module; | 34 | u8 module; |
| 35 | }; | 35 | }; |
| 36 | 36 | ||
| 37 | static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m) | ||
| 38 | { | ||
| 39 | char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; | ||
| 40 | int err; | ||
| 41 | |||
| 42 | err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(spad), spad_pl); | ||
| 43 | if (err) | ||
| 44 | return err; | ||
| 45 | mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_m->base_mac); | ||
| 46 | return 0; | ||
| 47 | } | ||
| 48 | |||
| 37 | static int mlxsw_m_port_dummy_open_stop(struct net_device *dev) | 49 | static int mlxsw_m_port_dummy_open_stop(struct net_device *dev) |
| 38 | { | 50 | { |
| 39 | return 0; | 51 | return 0; |
| @@ -314,6 +326,12 @@ static int mlxsw_m_init(struct mlxsw_core *mlxsw_core, | |||
| 314 | mlxsw_m->core = mlxsw_core; | 326 | mlxsw_m->core = mlxsw_core; |
| 315 | mlxsw_m->bus_info = mlxsw_bus_info; | 327 | mlxsw_m->bus_info = mlxsw_bus_info; |
| 316 | 328 | ||
| 329 | err = mlxsw_m_base_mac_get(mlxsw_m); | ||
| 330 | if (err) { | ||
| 331 | dev_err(mlxsw_m->bus_info->dev, "Failed to get base mac\n"); | ||
| 332 | return err; | ||
| 333 | } | ||
| 334 | |||
| 317 | err = mlxsw_m_ports_create(mlxsw_m); | 335 | err = mlxsw_m_ports_create(mlxsw_m); |
| 318 | if (err) { | 336 | if (err) { |
| 319 | dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n"); | 337 | dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n"); |
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 4d1b4a24907f..13e6bf13ac4d 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c | |||
| @@ -585,8 +585,7 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter) | |||
| 585 | 585 | ||
| 586 | if (adapter->csr.flags & | 586 | if (adapter->csr.flags & |
| 587 | LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { | 587 | LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { |
| 588 | flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR | | 588 | flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | |
| 589 | LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | | ||
| 590 | LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET | | 589 | LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET | |
| 591 | LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR | | 590 | LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR | |
| 592 | LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; | 591 | LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; |
| @@ -599,12 +598,6 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter) | |||
| 599 | /* map TX interrupt to vector */ | 598 | /* map TX interrupt to vector */ |
| 600 | int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector); | 599 | int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector); |
| 601 | lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1); | 600 | lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1); |
| 602 | if (flags & | ||
| 603 | LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) { | ||
| 604 | int_vec_en_auto_clr |= INT_VEC_EN_(vector); | ||
| 605 | lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR, | ||
| 606 | int_vec_en_auto_clr); | ||
| 607 | } | ||
| 608 | 601 | ||
| 609 | /* Remove TX interrupt from shared mask */ | 602 | /* Remove TX interrupt from shared mask */ |
| 610 | intr->vector_list[0].int_mask &= ~int_bit; | 603 | intr->vector_list[0].int_mask &= ~int_bit; |
| @@ -1902,7 +1895,17 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index) | |||
| 1902 | return ((++index) % rx->ring_size); | 1895 | return ((++index) % rx->ring_size); |
| 1903 | } | 1896 | } |
| 1904 | 1897 | ||
| 1905 | static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index) | 1898 | static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx) |
| 1899 | { | ||
| 1900 | int length = 0; | ||
| 1901 | |||
| 1902 | length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING); | ||
| 1903 | return __netdev_alloc_skb(rx->adapter->netdev, | ||
| 1904 | length, GFP_ATOMIC | GFP_DMA); | ||
| 1905 | } | ||
| 1906 | |||
| 1907 | static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index, | ||
| 1908 | struct sk_buff *skb) | ||
| 1906 | { | 1909 | { |
| 1907 | struct lan743x_rx_buffer_info *buffer_info; | 1910 | struct lan743x_rx_buffer_info *buffer_info; |
| 1908 | struct lan743x_rx_descriptor *descriptor; | 1911 | struct lan743x_rx_descriptor *descriptor; |
| @@ -1911,9 +1914,7 @@ static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index) | |||
| 1911 | length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING); | 1914 | length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING); |
| 1912 | descriptor = &rx->ring_cpu_ptr[index]; | 1915 | descriptor = &rx->ring_cpu_ptr[index]; |
| 1913 | buffer_info = &rx->buffer_info[index]; | 1916 | buffer_info = &rx->buffer_info[index]; |
| 1914 | buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev, | 1917 | buffer_info->skb = skb; |
| 1915 | length, | ||
| 1916 | GFP_ATOMIC | GFP_DMA); | ||
| 1917 | if (!(buffer_info->skb)) | 1918 | if (!(buffer_info->skb)) |
| 1918 | return -ENOMEM; | 1919 | return -ENOMEM; |
| 1919 | buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev, | 1920 | buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev, |
| @@ -2060,8 +2061,19 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx) | |||
| 2060 | /* packet is available */ | 2061 | /* packet is available */ |
| 2061 | if (first_index == last_index) { | 2062 | if (first_index == last_index) { |
| 2062 | /* single buffer packet */ | 2063 | /* single buffer packet */ |
| 2064 | struct sk_buff *new_skb = NULL; | ||
| 2063 | int packet_length; | 2065 | int packet_length; |
| 2064 | 2066 | ||
| 2067 | new_skb = lan743x_rx_allocate_skb(rx); | ||
| 2068 | if (!new_skb) { | ||
| 2069 | /* failed to allocate next skb. | ||
| 2070 | * Memory is very low. | ||
| 2071 | * Drop this packet and reuse buffer. | ||
| 2072 | */ | ||
| 2073 | lan743x_rx_reuse_ring_element(rx, first_index); | ||
| 2074 | goto process_extension; | ||
| 2075 | } | ||
| 2076 | |||
| 2065 | buffer_info = &rx->buffer_info[first_index]; | 2077 | buffer_info = &rx->buffer_info[first_index]; |
| 2066 | skb = buffer_info->skb; | 2078 | skb = buffer_info->skb; |
| 2067 | descriptor = &rx->ring_cpu_ptr[first_index]; | 2079 | descriptor = &rx->ring_cpu_ptr[first_index]; |
| @@ -2081,7 +2093,7 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx) | |||
| 2081 | skb_put(skb, packet_length - 4); | 2093 | skb_put(skb, packet_length - 4); |
| 2082 | skb->protocol = eth_type_trans(skb, | 2094 | skb->protocol = eth_type_trans(skb, |
| 2083 | rx->adapter->netdev); | 2095 | rx->adapter->netdev); |
| 2084 | lan743x_rx_allocate_ring_element(rx, first_index); | 2096 | lan743x_rx_init_ring_element(rx, first_index, new_skb); |
| 2085 | } else { | 2097 | } else { |
| 2086 | int index = first_index; | 2098 | int index = first_index; |
| 2087 | 2099 | ||
| @@ -2094,26 +2106,23 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx) | |||
| 2094 | if (first_index <= last_index) { | 2106 | if (first_index <= last_index) { |
| 2095 | while ((index >= first_index) && | 2107 | while ((index >= first_index) && |
| 2096 | (index <= last_index)) { | 2108 | (index <= last_index)) { |
| 2097 | lan743x_rx_release_ring_element(rx, | 2109 | lan743x_rx_reuse_ring_element(rx, |
| 2098 | index); | 2110 | index); |
| 2099 | lan743x_rx_allocate_ring_element(rx, | ||
| 2100 | index); | ||
| 2101 | index = lan743x_rx_next_index(rx, | 2111 | index = lan743x_rx_next_index(rx, |
| 2102 | index); | 2112 | index); |
| 2103 | } | 2113 | } |
| 2104 | } else { | 2114 | } else { |
| 2105 | while ((index >= first_index) || | 2115 | while ((index >= first_index) || |
| 2106 | (index <= last_index)) { | 2116 | (index <= last_index)) { |
| 2107 | lan743x_rx_release_ring_element(rx, | 2117 | lan743x_rx_reuse_ring_element(rx, |
| 2108 | index); | 2118 | index); |
| 2109 | lan743x_rx_allocate_ring_element(rx, | ||
| 2110 | index); | ||
| 2111 | index = lan743x_rx_next_index(rx, | 2119 | index = lan743x_rx_next_index(rx, |
| 2112 | index); | 2120 | index); |
| 2113 | } | 2121 | } |
| 2114 | } | 2122 | } |
| 2115 | } | 2123 | } |
| 2116 | 2124 | ||
| 2125 | process_extension: | ||
| 2117 | if (extension_index >= 0) { | 2126 | if (extension_index >= 0) { |
| 2118 | descriptor = &rx->ring_cpu_ptr[extension_index]; | 2127 | descriptor = &rx->ring_cpu_ptr[extension_index]; |
| 2119 | buffer_info = &rx->buffer_info[extension_index]; | 2128 | buffer_info = &rx->buffer_info[extension_index]; |
| @@ -2290,7 +2299,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx) | |||
| 2290 | 2299 | ||
| 2291 | rx->last_head = 0; | 2300 | rx->last_head = 0; |
| 2292 | for (index = 0; index < rx->ring_size; index++) { | 2301 | for (index = 0; index < rx->ring_size; index++) { |
| 2293 | ret = lan743x_rx_allocate_ring_element(rx, index); | 2302 | struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx); |
| 2303 | |||
| 2304 | ret = lan743x_rx_init_ring_element(rx, index, new_skb); | ||
| 2294 | if (ret) | 2305 | if (ret) |
| 2295 | goto cleanup; | 2306 | goto cleanup; |
| 2296 | } | 2307 | } |
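The lan743x RX path now allocates the replacement skb before handing the received one to the stack; if that allocation fails, the old buffer is reused and the packet is dropped rather than leaving a hole in the ring. The refill discipline in a few lines, with invented helper names standing in for the driver's functions:

/* Invented names; sketch of "allocate the replacement first" refill.
 * Only when a fresh buffer exists does the ring give up the filled one.
 */
struct ring_slot { void *buf; };

static void process_slot(struct ring_slot *slot,
                         void *(*alloc_buf)(void),
                         void (*deliver)(void *buf),
                         void (*rearm)(struct ring_slot *slot, void *buf))
{
    void *fresh = alloc_buf();

    if (!fresh) {
        rearm(slot, slot->buf);   /* reuse the buffer: drop this packet */
        return;
    }
    deliver(slot->buf);           /* safe: the ring already has a spare */
    rearm(slot, fresh);
}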
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 10b075bc5959..b61b88cbc0c7 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c | |||
| @@ -3886,6 +3886,12 @@ static int ql3xxx_probe(struct pci_dev *pdev, | |||
| 3886 | netif_stop_queue(ndev); | 3886 | netif_stop_queue(ndev); |
| 3887 | 3887 | ||
| 3888 | qdev->workqueue = create_singlethread_workqueue(ndev->name); | 3888 | qdev->workqueue = create_singlethread_workqueue(ndev->name); |
| 3889 | if (!qdev->workqueue) { | ||
| 3890 | unregister_netdev(ndev); | ||
| 3891 | err = -ENOMEM; | ||
| 3892 | goto err_out_iounmap; | ||
| 3893 | } | ||
| 3894 | |||
| 3889 | INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); | 3895 | INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); |
| 3890 | INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); | 3896 | INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); |
| 3891 | INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); | 3897 | INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 096515c27263..07e1c623048e 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
| @@ -4681,6 +4681,11 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, | |||
| 4681 | */ | 4681 | */ |
| 4682 | qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, | 4682 | qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, |
| 4683 | ndev->name); | 4683 | ndev->name); |
| 4684 | if (!qdev->workqueue) { | ||
| 4685 | err = -ENOMEM; | ||
| 4686 | goto err_out2; | ||
| 4687 | } | ||
| 4688 | |||
| 4684 | INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); | 4689 | INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); |
| 4685 | INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); | 4690 | INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); |
| 4686 | INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); | 4691 | INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 339b2eae2100..e33af371b169 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
| @@ -3181,12 +3181,16 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) | |||
| 3181 | struct device_node *np = dev->of_node; | 3181 | struct device_node *np = dev->of_node; |
| 3182 | struct sh_eth_plat_data *pdata; | 3182 | struct sh_eth_plat_data *pdata; |
| 3183 | const char *mac_addr; | 3183 | const char *mac_addr; |
| 3184 | int ret; | ||
| 3184 | 3185 | ||
| 3185 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); | 3186 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); |
| 3186 | if (!pdata) | 3187 | if (!pdata) |
| 3187 | return NULL; | 3188 | return NULL; |
| 3188 | 3189 | ||
| 3189 | pdata->phy_interface = of_get_phy_mode(np); | 3190 | ret = of_get_phy_mode(np); |
| 3191 | if (ret < 0) | ||
| 3192 | return NULL; | ||
| 3193 | pdata->phy_interface = ret; | ||
| 3190 | 3194 | ||
| 3191 | mac_addr = of_get_mac_address(np); | 3195 | mac_addr = of_get_mac_address(np); |
| 3192 | if (mac_addr) | 3196 | if (mac_addr) |
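Both this sh_eth hunk and the later dwmac-sun8i hunk stop assigning the raw return of of_get_phy_mode() to the PHY interface field: the call returns a negative errno when the 'phy-mode' / 'phy-connection-type' property is missing or unrecognised, so it is checked first. A minimal sketch of the pattern, with error handling simplified:

#include <linux/of_net.h>

/* Sketch: check of_get_phy_mode() before trusting the value.  In this
 * kernel generation the call returns the phy_interface_t on success or
 * a negative errno on failure.
 */
static int example_parse_phy_mode(struct device_node *np, int *iface)
{
    int ret = of_get_phy_mode(np);

    if (ret < 0)
        return ret;     /* property missing or unrecognised */
    *iface = ret;
    return 0;
}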
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index c883aa89b7ca..a71c900ca04f 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c | |||
| @@ -2805,6 +2805,11 @@ static int rocker_switchdev_event(struct notifier_block *unused, | |||
| 2805 | memcpy(&switchdev_work->fdb_info, ptr, | 2805 | memcpy(&switchdev_work->fdb_info, ptr, |
| 2806 | sizeof(switchdev_work->fdb_info)); | 2806 | sizeof(switchdev_work->fdb_info)); |
| 2807 | switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); | 2807 | switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); |
| 2808 | if (unlikely(!switchdev_work->fdb_info.addr)) { | ||
| 2809 | kfree(switchdev_work); | ||
| 2810 | return NOTIFY_BAD; | ||
| 2811 | } | ||
| 2812 | |||
| 2808 | ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, | 2813 | ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, |
| 2809 | fdb_info->addr); | 2814 | fdb_info->addr); |
| 2810 | /* Take a reference on the rocker device */ | 2815 | /* Take a reference on the rocker device */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 0f660af01a4b..195669f550f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | |||
| @@ -1147,7 +1147,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) | |||
| 1147 | return ret; | 1147 | return ret; |
| 1148 | } | 1148 | } |
| 1149 | 1149 | ||
| 1150 | plat_dat->interface = of_get_phy_mode(dev->of_node); | 1150 | ret = of_get_phy_mode(dev->of_node); |
| 1151 | if (ret < 0) | ||
| 1152 | return -EINVAL; | ||
| 1153 | plat_dat->interface = ret; | ||
| 1151 | 1154 | ||
| 1152 | /* platform data specifying hardware features and callbacks. | 1155 | /* platform data specifying hardware features and callbacks. |
| 1153 | * hardware features were copied from Allwinner drivers. | 1156 | * hardware features were copied from Allwinner drivers. |
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 8f09edd811e9..50c60550f295 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c | |||
| @@ -532,6 +532,7 @@ static void pptp_sock_destruct(struct sock *sk) | |||
| 532 | pppox_unbind_sock(sk); | 532 | pppox_unbind_sock(sk); |
| 533 | } | 533 | } |
| 534 | skb_queue_purge(&sk->sk_receive_queue); | 534 | skb_queue_purge(&sk->sk_receive_queue); |
| 535 | dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1)); | ||
| 535 | } | 536 | } |
| 536 | 537 | ||
| 537 | static int pptp_create(struct net *net, struct socket *sock, int kern) | 538 | static int pptp_create(struct net *net, struct socket *sock, int kern) |
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index b123b0dcf274..4671776f5623 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c | |||
| @@ -541,9 +541,9 @@ static int arena_clear_freelist_error(struct arena_info *arena, u32 lane) | |||
| 541 | 541 | ||
| 542 | static int btt_freelist_init(struct arena_info *arena) | 542 | static int btt_freelist_init(struct arena_info *arena) |
| 543 | { | 543 | { |
| 544 | int old, new, ret; | 544 | int new, ret; |
| 545 | u32 i, map_entry; | 545 | struct log_entry log_new; |
| 546 | struct log_entry log_new, log_old; | 546 | u32 i, map_entry, log_oldmap, log_newmap; |
| 547 | 547 | ||
| 548 | arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry), | 548 | arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry), |
| 549 | GFP_KERNEL); | 549 | GFP_KERNEL); |
| @@ -551,24 +551,26 @@ static int btt_freelist_init(struct arena_info *arena) | |||
| 551 | return -ENOMEM; | 551 | return -ENOMEM; |
| 552 | 552 | ||
| 553 | for (i = 0; i < arena->nfree; i++) { | 553 | for (i = 0; i < arena->nfree; i++) { |
| 554 | old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT); | ||
| 555 | if (old < 0) | ||
| 556 | return old; | ||
| 557 | |||
| 558 | new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT); | 554 | new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT); |
| 559 | if (new < 0) | 555 | if (new < 0) |
| 560 | return new; | 556 | return new; |
| 561 | 557 | ||
| 558 | /* old and new map entries with any flags stripped out */ | ||
| 559 | log_oldmap = ent_lba(le32_to_cpu(log_new.old_map)); | ||
| 560 | log_newmap = ent_lba(le32_to_cpu(log_new.new_map)); | ||
| 561 | |||
| 562 | /* sub points to the next one to be overwritten */ | 562 | /* sub points to the next one to be overwritten */ |
| 563 | arena->freelist[i].sub = 1 - new; | 563 | arena->freelist[i].sub = 1 - new; |
| 564 | arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq)); | 564 | arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq)); |
| 565 | arena->freelist[i].block = le32_to_cpu(log_new.old_map); | 565 | arena->freelist[i].block = log_oldmap; |
| 566 | 566 | ||
| 567 | /* | 567 | /* |
| 568 | * FIXME: if error clearing fails during init, we want to make | 568 | * FIXME: if error clearing fails during init, we want to make |
| 569 | * the BTT read-only | 569 | * the BTT read-only |
| 570 | */ | 570 | */ |
| 571 | if (ent_e_flag(log_new.old_map)) { | 571 | if (ent_e_flag(log_new.old_map) && |
| 572 | !ent_normal(log_new.old_map)) { | ||
| 573 | arena->freelist[i].has_err = 1; | ||
| 572 | ret = arena_clear_freelist_error(arena, i); | 574 | ret = arena_clear_freelist_error(arena, i); |
| 573 | if (ret) | 575 | if (ret) |
| 574 | dev_err_ratelimited(to_dev(arena), | 576 | dev_err_ratelimited(to_dev(arena), |
| @@ -576,7 +578,7 @@ static int btt_freelist_init(struct arena_info *arena) | |||
| 576 | } | 578 | } |
| 577 | 579 | ||
| 578 | /* This implies a newly created or untouched flog entry */ | 580 | /* This implies a newly created or untouched flog entry */ |
| 579 | if (log_new.old_map == log_new.new_map) | 581 | if (log_oldmap == log_newmap) |
| 580 | continue; | 582 | continue; |
| 581 | 583 | ||
| 582 | /* Check if map recovery is needed */ | 584 | /* Check if map recovery is needed */ |
| @@ -584,8 +586,15 @@ static int btt_freelist_init(struct arena_info *arena) | |||
| 584 | NULL, NULL, 0); | 586 | NULL, NULL, 0); |
| 585 | if (ret) | 587 | if (ret) |
| 586 | return ret; | 588 | return ret; |
| 587 | if ((le32_to_cpu(log_new.new_map) != map_entry) && | 589 | |
| 588 | (le32_to_cpu(log_new.old_map) == map_entry)) { | 590 | /* |
| 591 | * The map_entry from btt_read_map is stripped of any flag bits, | ||
| 592 | * so use the stripped out versions from the log as well for | ||
| 593 | * testing whether recovery is needed. For restoration, use the | ||
| 594 | * 'raw' version of the log entries as that captured what we | ||
| 595 | * were going to write originally. | ||
| 596 | */ | ||
| 597 | if ((log_newmap != map_entry) && (log_oldmap == map_entry)) { | ||
| 589 | /* | 598 | /* |
| 590 | * Last transaction wrote the flog, but wasn't able | 599 | * Last transaction wrote the flog, but wasn't able |
| 591 | * to complete the map write. So fix up the map. | 600 | * to complete the map write. So fix up the map. |
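btt_freelist_init() now reads only the "new" log entry and compares map values with the flag bits masked off via ent_lba(), while ent_normal() (added in the btt.h hunk that follows) treats an entry with both error and zero flags set as ordinary. A standalone sketch of that masking; the exact bit positions of the error and trim flags are an assumption here, since the masks are not defined in this diff:

    #include <stdint.h>
    #include <stdio.h>

    #define MAP_ERR_MASK    (1u << 30)  /* assumed position of the error flag */
    #define MAP_TRIM_MASK   (1u << 31)  /* assumed position of the trim/zero flag */
    #define MAP_LBA_MASK    (~(MAP_ERR_MASK | MAP_TRIM_MASK))

    #define ent_lba(ent)    ((ent) & MAP_LBA_MASK)
    #define ent_e_flag(ent) (!!((ent) & MAP_ERR_MASK))
    #define ent_z_flag(ent) (!!((ent) & MAP_TRIM_MASK))
    #define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent))

    int main(void)
    {
        uint32_t old_map = 0x12345u | MAP_ERR_MASK | MAP_TRIM_MASK;
        uint32_t new_map = 0x12345u;

        /* stripped values compare equal even though the raw words differ */
        printf("stripped equal: %d\n", ent_lba(old_map) == ent_lba(new_map));
        /* both flags set means a 'normal' entry, so no error clearing is triggered */
        printf("normal: %d\n", ent_normal(old_map));
        return 0;
    }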
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h index db3cb6d4d0d4..ddff49c707b0 100644 --- a/drivers/nvdimm/btt.h +++ b/drivers/nvdimm/btt.h | |||
| @@ -44,6 +44,8 @@ | |||
| 44 | #define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK)) | 44 | #define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK)) |
| 45 | #define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK)) | 45 | #define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK)) |
| 46 | #define set_e_flag(ent) (ent |= MAP_ERR_MASK) | 46 | #define set_e_flag(ent) (ent |= MAP_ERR_MASK) |
| 47 | /* 'normal' is both e and z flags set */ | ||
| 48 | #define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent)) | ||
| 47 | 49 | ||
| 48 | enum btt_init_state { | 50 | enum btt_init_state { |
| 49 | INIT_UNCHECKED = 0, | 51 | INIT_UNCHECKED = 0, |
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 795ad4ff35ca..b72a303176c7 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c | |||
| @@ -159,11 +159,19 @@ static ssize_t size_show(struct device *dev, | |||
| 159 | } | 159 | } |
| 160 | static DEVICE_ATTR_RO(size); | 160 | static DEVICE_ATTR_RO(size); |
| 161 | 161 | ||
| 162 | static ssize_t log_zero_flags_show(struct device *dev, | ||
| 163 | struct device_attribute *attr, char *buf) | ||
| 164 | { | ||
| 165 | return sprintf(buf, "Y\n"); | ||
| 166 | } | ||
| 167 | static DEVICE_ATTR_RO(log_zero_flags); | ||
| 168 | |||
| 162 | static struct attribute *nd_btt_attributes[] = { | 169 | static struct attribute *nd_btt_attributes[] = { |
| 163 | &dev_attr_sector_size.attr, | 170 | &dev_attr_sector_size.attr, |
| 164 | &dev_attr_namespace.attr, | 171 | &dev_attr_namespace.attr, |
| 165 | &dev_attr_uuid.attr, | 172 | &dev_attr_uuid.attr, |
| 166 | &dev_attr_size.attr, | 173 | &dev_attr_size.attr, |
| 174 | &dev_attr_log_zero_flags.attr, | ||
| 167 | NULL, | 175 | NULL, |
| 168 | }; | 176 | }; |
| 169 | 177 | ||
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index efe412a6b5b9..91b9abbf689c 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | * General Public License for more details. | 11 | * General Public License for more details. |
| 12 | */ | 12 | */ |
| 13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 14 | #include <linux/moduleparam.h> | ||
| 14 | #include <linux/vmalloc.h> | 15 | #include <linux/vmalloc.h> |
| 15 | #include <linux/device.h> | 16 | #include <linux/device.h> |
| 16 | #include <linux/ndctl.h> | 17 | #include <linux/ndctl.h> |
| @@ -25,6 +26,10 @@ | |||
| 25 | 26 | ||
| 26 | static DEFINE_IDA(dimm_ida); | 27 | static DEFINE_IDA(dimm_ida); |
| 27 | 28 | ||
| 29 | static bool noblk; | ||
| 30 | module_param(noblk, bool, 0444); | ||
| 31 | MODULE_PARM_DESC(noblk, "force disable BLK / local alias support"); | ||
| 32 | |||
| 28 | /* | 33 | /* |
| 29 | * Retrieve bus and dimm handle and return if this bus supports | 34 | * Retrieve bus and dimm handle and return if this bus supports |
| 30 | * get_config_data commands | 35 | * get_config_data commands |
| @@ -551,6 +556,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, | |||
| 551 | 556 | ||
| 552 | nvdimm->dimm_id = dimm_id; | 557 | nvdimm->dimm_id = dimm_id; |
| 553 | nvdimm->provider_data = provider_data; | 558 | nvdimm->provider_data = provider_data; |
| 559 | if (noblk) | ||
| 560 | flags |= 1 << NDD_NOBLK; | ||
| 554 | nvdimm->flags = flags; | 561 | nvdimm->flags = flags; |
| 555 | nvdimm->cmd_mask = cmd_mask; | 562 | nvdimm->cmd_mask = cmd_mask; |
| 556 | nvdimm->num_flush = num_flush; | 563 | nvdimm->num_flush = num_flush; |
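The new "noblk" parameter forces the NDD_NOBLK flag onto every DIMM created through __nvdimm_create(); later hunks use that flag to strip the LOCAL bit from labels and to refuse BLK region creation. A small sketch of the flag idiom; the numeric value of NDD_NOBLK is not shown in this diff and is assumed below purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    #define NDD_NOBLK 4                 /* assumed bit number, illustration only */

    static bool noblk = true;           /* stands in for the 0444 module parameter */

    int main(void)
    {
        unsigned long flags = 0;

        if (noblk)
            flags |= 1UL << NDD_NOBLK;  /* same idiom as the hunk above */

        printf("NOBLK set: %d\n", !!(flags & (1UL << NDD_NOBLK)));
        return 0;
    }

On a running system the option would likely be passed as something like libnvdimm.noblk=1 on the kernel command line, assuming dimm_devs.c is built into the libnvdimm module; the module name is an assumption, not stated in this diff.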
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index a11bf4e6b451..f3d753d3169c 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c | |||
| @@ -392,6 +392,7 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) | |||
| 392 | return 0; /* no label, nothing to reserve */ | 392 | return 0; /* no label, nothing to reserve */ |
| 393 | 393 | ||
| 394 | for_each_clear_bit_le(slot, free, nslot) { | 394 | for_each_clear_bit_le(slot, free, nslot) { |
| 395 | struct nvdimm *nvdimm = to_nvdimm(ndd->dev); | ||
| 395 | struct nd_namespace_label *nd_label; | 396 | struct nd_namespace_label *nd_label; |
| 396 | struct nd_region *nd_region = NULL; | 397 | struct nd_region *nd_region = NULL; |
| 397 | u8 label_uuid[NSLABEL_UUID_LEN]; | 398 | u8 label_uuid[NSLABEL_UUID_LEN]; |
| @@ -406,6 +407,8 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) | |||
| 406 | 407 | ||
| 407 | memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN); | 408 | memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN); |
| 408 | flags = __le32_to_cpu(nd_label->flags); | 409 | flags = __le32_to_cpu(nd_label->flags); |
| 410 | if (test_bit(NDD_NOBLK, &nvdimm->flags)) | ||
| 411 | flags &= ~NSLABEL_FLAG_LOCAL; | ||
| 409 | nd_label_gen_id(&label_id, label_uuid, flags); | 412 | nd_label_gen_id(&label_id, label_uuid, flags); |
| 410 | res = nvdimm_allocate_dpa(ndd, &label_id, | 413 | res = nvdimm_allocate_dpa(ndd, &label_id, |
| 411 | __le64_to_cpu(nd_label->dpa), | 414 | __le64_to_cpu(nd_label->dpa), |
| @@ -755,7 +758,7 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class, | |||
| 755 | 758 | ||
| 756 | static int __pmem_label_update(struct nd_region *nd_region, | 759 | static int __pmem_label_update(struct nd_region *nd_region, |
| 757 | struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, | 760 | struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, |
| 758 | int pos) | 761 | int pos, unsigned long flags) |
| 759 | { | 762 | { |
| 760 | struct nd_namespace_common *ndns = &nspm->nsio.common; | 763 | struct nd_namespace_common *ndns = &nspm->nsio.common; |
| 761 | struct nd_interleave_set *nd_set = nd_region->nd_set; | 764 | struct nd_interleave_set *nd_set = nd_region->nd_set; |
| @@ -796,7 +799,7 @@ static int __pmem_label_update(struct nd_region *nd_region, | |||
| 796 | memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN); | 799 | memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN); |
| 797 | if (nspm->alt_name) | 800 | if (nspm->alt_name) |
| 798 | memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN); | 801 | memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN); |
| 799 | nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING); | 802 | nd_label->flags = __cpu_to_le32(flags); |
| 800 | nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings); | 803 | nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings); |
| 801 | nd_label->position = __cpu_to_le16(pos); | 804 | nd_label->position = __cpu_to_le16(pos); |
| 802 | nd_label->isetcookie = __cpu_to_le64(cookie); | 805 | nd_label->isetcookie = __cpu_to_le64(cookie); |
| @@ -1249,13 +1252,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid) | |||
| 1249 | int nd_pmem_namespace_label_update(struct nd_region *nd_region, | 1252 | int nd_pmem_namespace_label_update(struct nd_region *nd_region, |
| 1250 | struct nd_namespace_pmem *nspm, resource_size_t size) | 1253 | struct nd_namespace_pmem *nspm, resource_size_t size) |
| 1251 | { | 1254 | { |
| 1252 | int i; | 1255 | int i, rc; |
| 1253 | 1256 | ||
| 1254 | for (i = 0; i < nd_region->ndr_mappings; i++) { | 1257 | for (i = 0; i < nd_region->ndr_mappings; i++) { |
| 1255 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; | 1258 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; |
| 1256 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); | 1259 | struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); |
| 1257 | struct resource *res; | 1260 | struct resource *res; |
| 1258 | int rc, count = 0; | 1261 | int count = 0; |
| 1259 | 1262 | ||
| 1260 | if (size == 0) { | 1263 | if (size == 0) { |
| 1261 | rc = del_labels(nd_mapping, nspm->uuid); | 1264 | rc = del_labels(nd_mapping, nspm->uuid); |
| @@ -1273,7 +1276,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region, | |||
| 1273 | if (rc < 0) | 1276 | if (rc < 0) |
| 1274 | return rc; | 1277 | return rc; |
| 1275 | 1278 | ||
| 1276 | rc = __pmem_label_update(nd_region, nd_mapping, nspm, i); | 1279 | rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, |
| 1280 | NSLABEL_FLAG_UPDATING); | ||
| 1281 | if (rc) | ||
| 1282 | return rc; | ||
| 1283 | } | ||
| 1284 | |||
| 1285 | if (size == 0) | ||
| 1286 | return 0; | ||
| 1287 | |||
| 1288 | /* Clear the UPDATING flag per UEFI 2.7 expectations */ | ||
| 1289 | for (i = 0; i < nd_region->ndr_mappings; i++) { | ||
| 1290 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; | ||
| 1291 | |||
| 1292 | rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0); | ||
| 1277 | if (rc) | 1293 | if (rc) |
| 1278 | return rc; | 1294 | return rc; |
| 1279 | } | 1295 | } |
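nd_pmem_namespace_label_update() now writes labels in two passes: first with NSLABEL_FLAG_UPDATING set on every mapping, then, once all writes succeeded and the size is non-zero, again with the flag cleared, matching the UEFI 2.7 label update sequence noted in the hunk. A standalone simulation of that two-pass protocol; write_label() and the flag value are stand-ins, not the kernel functions:

    #include <stdio.h>

    #define FLAG_UPDATING 0x1

    /* stand-in for __pmem_label_update(): pretend to write one label slot */
    static int write_label(int mapping, unsigned int flags)
    {
        printf("mapping %d: write label, flags=0x%x\n", mapping, flags);
        return 0;
    }

    int main(void)
    {
        int i, rc, mappings = 2;

        /* pass 1: publish every label with the UPDATING flag set */
        for (i = 0; i < mappings; i++) {
            rc = write_label(i, FLAG_UPDATING);
            if (rc)
                return rc;
        }

        /* pass 2: only after every mapping succeeded, clear UPDATING */
        for (i = 0; i < mappings; i++) {
            rc = write_label(i, 0);
            if (rc)
                return rc;
        }
        return 0;
    }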
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 4b077555ac70..7849bf1812c4 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c | |||
| @@ -138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid) | |||
| 138 | bool pmem_should_map_pages(struct device *dev) | 138 | bool pmem_should_map_pages(struct device *dev) |
| 139 | { | 139 | { |
| 140 | struct nd_region *nd_region = to_nd_region(dev->parent); | 140 | struct nd_region *nd_region = to_nd_region(dev->parent); |
| 141 | struct nd_namespace_common *ndns = to_ndns(dev); | ||
| 141 | struct nd_namespace_io *nsio; | 142 | struct nd_namespace_io *nsio; |
| 142 | 143 | ||
| 143 | if (!IS_ENABLED(CONFIG_ZONE_DEVICE)) | 144 | if (!IS_ENABLED(CONFIG_ZONE_DEVICE)) |
| @@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev) | |||
| 149 | if (is_nd_pfn(dev) || is_nd_btt(dev)) | 150 | if (is_nd_pfn(dev) || is_nd_btt(dev)) |
| 150 | return false; | 151 | return false; |
| 151 | 152 | ||
| 153 | if (ndns->force_raw) | ||
| 154 | return false; | ||
| 155 | |||
| 152 | nsio = to_nd_namespace_io(dev); | 156 | nsio = to_nd_namespace_io(dev); |
| 153 | if (region_intersects(nsio->res.start, resource_size(&nsio->res), | 157 | if (region_intersects(nsio->res.start, resource_size(&nsio->res), |
| 154 | IORESOURCE_SYSTEM_RAM, | 158 | IORESOURCE_SYSTEM_RAM, |
| @@ -1506,13 +1510,13 @@ static ssize_t __holder_class_store(struct device *dev, const char *buf) | |||
| 1506 | if (dev->driver || ndns->claim) | 1510 | if (dev->driver || ndns->claim) |
| 1507 | return -EBUSY; | 1511 | return -EBUSY; |
| 1508 | 1512 | ||
| 1509 | if (strcmp(buf, "btt") == 0 || strcmp(buf, "btt\n") == 0) | 1513 | if (sysfs_streq(buf, "btt")) |
| 1510 | ndns->claim_class = btt_claim_class(dev); | 1514 | ndns->claim_class = btt_claim_class(dev); |
| 1511 | else if (strcmp(buf, "pfn") == 0 || strcmp(buf, "pfn\n") == 0) | 1515 | else if (sysfs_streq(buf, "pfn")) |
| 1512 | ndns->claim_class = NVDIMM_CCLASS_PFN; | 1516 | ndns->claim_class = NVDIMM_CCLASS_PFN; |
| 1513 | else if (strcmp(buf, "dax") == 0 || strcmp(buf, "dax\n") == 0) | 1517 | else if (sysfs_streq(buf, "dax")) |
| 1514 | ndns->claim_class = NVDIMM_CCLASS_DAX; | 1518 | ndns->claim_class = NVDIMM_CCLASS_DAX; |
| 1515 | else if (strcmp(buf, "") == 0 || strcmp(buf, "\n") == 0) | 1519 | else if (sysfs_streq(buf, "")) |
| 1516 | ndns->claim_class = NVDIMM_CCLASS_NONE; | 1520 | ndns->claim_class = NVDIMM_CCLASS_NONE; |
| 1517 | else | 1521 | else |
| 1518 | return -EINVAL; | 1522 | return -EINVAL; |
| @@ -2492,6 +2496,12 @@ static int init_active_labels(struct nd_region *nd_region) | |||
| 2492 | if (!label_ent) | 2496 | if (!label_ent) |
| 2493 | break; | 2497 | break; |
| 2494 | label = nd_label_active(ndd, j); | 2498 | label = nd_label_active(ndd, j); |
| 2499 | if (test_bit(NDD_NOBLK, &nvdimm->flags)) { | ||
| 2500 | u32 flags = __le32_to_cpu(label->flags); | ||
| 2501 | |||
| 2502 | flags &= ~NSLABEL_FLAG_LOCAL; | ||
| 2503 | label->flags = __cpu_to_le32(flags); | ||
| 2504 | } | ||
| 2495 | label_ent->label = label; | 2505 | label_ent->label = label; |
| 2496 | 2506 | ||
| 2497 | mutex_lock(&nd_mapping->lock); | 2507 | mutex_lock(&nd_mapping->lock); |
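__holder_class_store() now uses sysfs_streq(), which treats the trailing newline that "echo" appends as insignificant, instead of pairs of open-coded strcmp() calls. A userspace re-implementation of that comparison for illustration; sysfs_streq itself is a kernel helper, the function below merely mimics its documented behaviour:

    #include <stdbool.h>
    #include <stdio.h>

    /* mimic sysfs_streq(): equal if strings match, ignoring one trailing '\n' */
    static bool streq_sysfs(const char *s1, const char *s2)
    {
        while (*s1 && *s1 == *s2) {
            s1++;
            s2++;
        }
        if (*s1 == *s2)
            return true;
        if (*s1 == '\n' && s1[1] == '\0')
            return true;
        if (*s2 == '\n' && s2[1] == '\0')
            return true;
        return false;
    }

    int main(void)
    {
        printf("%d\n", streq_sysfs("btt\n", "btt"));  /* 1: echo's newline ignored */
        printf("%d\n", streq_sysfs("bttX", "btt"));   /* 0: genuine mismatch */
        return 0;
    }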
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 0a701837dfc0..11b9821eba85 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c | |||
| @@ -108,7 +108,6 @@ static struct platform_driver of_pmem_region_driver = { | |||
| 108 | .remove = of_pmem_region_remove, | 108 | .remove = of_pmem_region_remove, |
| 109 | .driver = { | 109 | .driver = { |
| 110 | .name = "of_pmem", | 110 | .name = "of_pmem", |
| 111 | .owner = THIS_MODULE, | ||
| 112 | .of_match_table = of_pmem_region_match, | 111 | .of_match_table = of_pmem_region_match, |
| 113 | }, | 112 | }, |
| 114 | }; | 113 | }; |
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 6f22272e8d80..d271bd731af7 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c | |||
| @@ -580,6 +580,11 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns) | |||
| 580 | } | 580 | } |
| 581 | EXPORT_SYMBOL(nd_pfn_probe); | 581 | EXPORT_SYMBOL(nd_pfn_probe); |
| 582 | 582 | ||
| 583 | static u32 info_block_reserve(void) | ||
| 584 | { | ||
| 585 | return ALIGN(SZ_8K, PAGE_SIZE); | ||
| 586 | } | ||
| 587 | |||
| 583 | /* | 588 | /* |
| 584 | * We hotplug memory at section granularity, pad the reserved area from | 589 | * We hotplug memory at section granularity, pad the reserved area from |
| 585 | * the previous section base to the namespace base address. | 590 | * the previous section base to the namespace base address. |
| @@ -593,7 +598,7 @@ static unsigned long init_altmap_base(resource_size_t base) | |||
| 593 | 598 | ||
| 594 | static unsigned long init_altmap_reserve(resource_size_t base) | 599 | static unsigned long init_altmap_reserve(resource_size_t base) |
| 595 | { | 600 | { |
| 596 | unsigned long reserve = PHYS_PFN(SZ_8K); | 601 | unsigned long reserve = info_block_reserve() >> PAGE_SHIFT; |
| 597 | unsigned long base_pfn = PHYS_PFN(base); | 602 | unsigned long base_pfn = PHYS_PFN(base); |
| 598 | 603 | ||
| 599 | reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn); | 604 | reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn); |
| @@ -608,6 +613,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap) | |||
| 608 | u64 offset = le64_to_cpu(pfn_sb->dataoff); | 613 | u64 offset = le64_to_cpu(pfn_sb->dataoff); |
| 609 | u32 start_pad = __le32_to_cpu(pfn_sb->start_pad); | 614 | u32 start_pad = __le32_to_cpu(pfn_sb->start_pad); |
| 610 | u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc); | 615 | u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc); |
| 616 | u32 reserve = info_block_reserve(); | ||
| 611 | struct nd_namespace_common *ndns = nd_pfn->ndns; | 617 | struct nd_namespace_common *ndns = nd_pfn->ndns; |
| 612 | struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); | 618 | struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); |
| 613 | resource_size_t base = nsio->res.start + start_pad; | 619 | resource_size_t base = nsio->res.start + start_pad; |
| @@ -621,7 +627,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap) | |||
| 621 | res->end -= end_trunc; | 627 | res->end -= end_trunc; |
| 622 | 628 | ||
| 623 | if (nd_pfn->mode == PFN_MODE_RAM) { | 629 | if (nd_pfn->mode == PFN_MODE_RAM) { |
| 624 | if (offset < SZ_8K) | 630 | if (offset < reserve) |
| 625 | return -EINVAL; | 631 | return -EINVAL; |
| 626 | nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns); | 632 | nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns); |
| 627 | pgmap->altmap_valid = false; | 633 | pgmap->altmap_valid = false; |
| @@ -634,7 +640,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap) | |||
| 634 | le64_to_cpu(nd_pfn->pfn_sb->npfns), | 640 | le64_to_cpu(nd_pfn->pfn_sb->npfns), |
| 635 | nd_pfn->npfns); | 641 | nd_pfn->npfns); |
| 636 | memcpy(altmap, &__altmap, sizeof(*altmap)); | 642 | memcpy(altmap, &__altmap, sizeof(*altmap)); |
| 637 | altmap->free = PHYS_PFN(offset - SZ_8K); | 643 | altmap->free = PHYS_PFN(offset - reserve); |
| 638 | altmap->alloc = 0; | 644 | altmap->alloc = 0; |
| 639 | pgmap->altmap_valid = true; | 645 | pgmap->altmap_valid = true; |
| 640 | } else | 646 | } else |
| @@ -678,18 +684,17 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trun | |||
| 678 | if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, | 684 | if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, |
| 679 | IORES_DESC_NONE) == REGION_MIXED | 685 | IORES_DESC_NONE) == REGION_MIXED |
| 680 | || !IS_ALIGNED(end, nd_pfn->align) | 686 | || !IS_ALIGNED(end, nd_pfn->align) |
| 681 | || nd_region_conflict(nd_region, start, size + adjust)) | 687 | || nd_region_conflict(nd_region, start, size)) |
| 682 | *end_trunc = end - phys_pmem_align_down(nd_pfn, end); | 688 | *end_trunc = end - phys_pmem_align_down(nd_pfn, end); |
| 683 | } | 689 | } |
| 684 | 690 | ||
| 685 | static int nd_pfn_init(struct nd_pfn *nd_pfn) | 691 | static int nd_pfn_init(struct nd_pfn *nd_pfn) |
| 686 | { | 692 | { |
| 687 | u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0; | ||
| 688 | struct nd_namespace_common *ndns = nd_pfn->ndns; | 693 | struct nd_namespace_common *ndns = nd_pfn->ndns; |
| 689 | struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); | 694 | struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); |
| 695 | u32 start_pad, end_trunc, reserve = info_block_reserve(); | ||
| 690 | resource_size_t start, size; | 696 | resource_size_t start, size; |
| 691 | struct nd_region *nd_region; | 697 | struct nd_region *nd_region; |
| 692 | u32 start_pad, end_trunc; | ||
| 693 | struct nd_pfn_sb *pfn_sb; | 698 | struct nd_pfn_sb *pfn_sb; |
| 694 | unsigned long npfns; | 699 | unsigned long npfns; |
| 695 | phys_addr_t offset; | 700 | phys_addr_t offset; |
| @@ -734,7 +739,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) | |||
| 734 | */ | 739 | */ |
| 735 | start = nsio->res.start + start_pad; | 740 | start = nsio->res.start + start_pad; |
| 736 | size = resource_size(&nsio->res); | 741 | size = resource_size(&nsio->res); |
| 737 | npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K) | 742 | npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve) |
| 738 | / PAGE_SIZE); | 743 | / PAGE_SIZE); |
| 739 | if (nd_pfn->mode == PFN_MODE_PMEM) { | 744 | if (nd_pfn->mode == PFN_MODE_PMEM) { |
| 740 | /* | 745 | /* |
| @@ -742,11 +747,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) | |||
| 742 | * when populating the vmemmap. This *should* be equal to | 747 | * when populating the vmemmap. This *should* be equal to |
| 743 | * PMD_SIZE for most architectures. | 748 | * PMD_SIZE for most architectures. |
| 744 | */ | 749 | */ |
| 745 | offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve, | 750 | offset = ALIGN(start + reserve + 64 * npfns, |
| 746 | max(nd_pfn->align, PMD_SIZE)) - start; | 751 | max(nd_pfn->align, PMD_SIZE)) - start; |
| 747 | } else if (nd_pfn->mode == PFN_MODE_RAM) | 752 | } else if (nd_pfn->mode == PFN_MODE_RAM) |
| 748 | offset = ALIGN(start + SZ_8K + dax_label_reserve, | 753 | offset = ALIGN(start + reserve, nd_pfn->align) - start; |
| 749 | nd_pfn->align) - start; | ||
| 750 | else | 754 | else |
| 751 | return -ENXIO; | 755 | return -ENXIO; |
| 752 | 756 | ||
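The new info_block_reserve() helper replaces the scattered SZ_8K constants (and the dax_label_reserve) with a single page-aligned reservation, so the altmap reserve, the npfns computation and the data offset all agree on page sizes larger than 4K. A quick arithmetic check of ALIGN(SZ_8K, PAGE_SIZE) for a few sample page sizes; ALIGN below is the standard power-of-two rounding and the page sizes are just examples:

    #include <stdio.h>

    #define SZ_8K       8192UL
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))   /* a must be a power of two */

    int main(void)
    {
        unsigned long page_sizes[] = { 4096, 16384, 65536 };
        unsigned long i;

        for (i = 0; i < sizeof(page_sizes) / sizeof(page_sizes[0]); i++) {
            unsigned long ps = page_sizes[i];
            unsigned long reserve = ALIGN(SZ_8K, ps);

            /* bytes and pages; init_altmap_reserve() shifts this by PAGE_SHIFT */
            printf("PAGE_SIZE=%6lu -> reserve=%6lu bytes (%lu pages)\n",
                   ps, reserve, reserve / ps);
        }
        return 0;
    }

With 64K pages the reserve grows from 8K to one full page, which is exactly the case the open-coded SZ_8K constants mishandled.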
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index e2818f94f292..3b58baa44b5c 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c | |||
| @@ -1003,6 +1003,13 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, | |||
| 1003 | 1003 | ||
| 1004 | if (test_bit(NDD_UNARMED, &nvdimm->flags)) | 1004 | if (test_bit(NDD_UNARMED, &nvdimm->flags)) |
| 1005 | ro = 1; | 1005 | ro = 1; |
| 1006 | |||
| 1007 | if (test_bit(NDD_NOBLK, &nvdimm->flags) | ||
| 1008 | && dev_type == &nd_blk_device_type) { | ||
| 1009 | dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n", | ||
| 1010 | caller, dev_name(&nvdimm->dev), i); | ||
| 1011 | return NULL; | ||
| 1012 | } | ||
| 1006 | } | 1013 | } |
| 1007 | 1014 | ||
| 1008 | if (dev_type == &nd_blk_device_type) { | 1015 | if (dev_type == &nd_blk_device_type) { |
diff --git a/drivers/opp/core.c b/drivers/opp/core.c index d7f97167cac3..0420f7e8ad5b 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c | |||
| @@ -760,7 +760,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) | |||
| 760 | old_freq, freq); | 760 | old_freq, freq); |
| 761 | 761 | ||
| 762 | /* Scaling up? Configure required OPPs before frequency */ | 762 | /* Scaling up? Configure required OPPs before frequency */ |
| 763 | if (freq > old_freq) { | 763 | if (freq >= old_freq) { |
| 764 | ret = _set_required_opps(dev, opp_table, opp); | 764 | ret = _set_required_opps(dev, opp_table, opp); |
| 765 | if (ret) | 765 | if (ret) |
| 766 | goto put_opp; | 766 | goto put_opp; |
diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 62504b18f198..c10c782d15aa 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c | |||
| @@ -173,7 +173,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, | |||
| 173 | struct opp_table **required_opp_tables; | 173 | struct opp_table **required_opp_tables; |
| 174 | struct device **genpd_virt_devs = NULL; | 174 | struct device **genpd_virt_devs = NULL; |
| 175 | struct device_node *required_np, *np; | 175 | struct device_node *required_np, *np; |
| 176 | int count, i; | 176 | int count, count_pd, i; |
| 177 | 177 | ||
| 178 | /* Traversing the first OPP node is all we need */ | 178 | /* Traversing the first OPP node is all we need */ |
| 179 | np = of_get_next_available_child(opp_np, NULL); | 179 | np = of_get_next_available_child(opp_np, NULL); |
| @@ -186,7 +186,19 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table, | |||
| 186 | if (!count) | 186 | if (!count) |
| 187 | goto put_np; | 187 | goto put_np; |
| 188 | 188 | ||
| 189 | if (count > 1) { | 189 | /* |
| 190 | * Check the number of power-domains to know if we need to deal | ||
| 191 | * with virtual devices. In some cases we have devices with multiple | ||
| 192 | * power domains but with only one of them being scalable, hence | ||
| 193 | * 'count' could be 1, but we still have to deal with multiple genpds | ||
| 194 | * and virtual devices. | ||
| 195 | */ | ||
| 196 | count_pd = of_count_phandle_with_args(dev->of_node, "power-domains", | ||
| 197 | "#power-domain-cells"); | ||
| 198 | if (!count_pd) | ||
| 199 | goto put_np; | ||
| 200 | |||
| 201 | if (count_pd > 1) { | ||
| 190 | genpd_virt_devs = kcalloc(count, sizeof(*genpd_virt_devs), | 202 | genpd_virt_devs = kcalloc(count, sizeof(*genpd_virt_devs), |
| 191 | GFP_KERNEL); | 203 | GFP_KERNEL); |
| 192 | if (!genpd_virt_devs) | 204 | if (!genpd_virt_devs) |
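The comment added in this hunk carries the reasoning: the required-opps count can be 1 even when the consumer sits in several power domains, so the decision to allocate virtual genpd devices is now keyed off the device's power-domains phandle count. A compact standalone illustration of the post-patch decision with made-up counts; the commented-out helper names are the kernel OF calls shown in the diff:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        /* example: one scalable required-opps entry, but two power domains */
        int count = 1;      /* of_count_phandle_with_args(np, "required-opps", NULL) */
        int count_pd = 2;   /* of_count_phandle_with_args(dev->of_node,
                               "power-domains", "#power-domain-cells") */
        bool need_virt_devs;

        if (!count || !count_pd)
            return 0;                   /* nothing to set up */

        need_virt_devs = count_pd > 1;  /* the old test on 'count' missed this case */
        printf("virtual genpd devices needed: %d\n", need_virt_devs);
        return 0;
    }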
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index a8f47df0655a..54f8238aac0d 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig | |||
| @@ -192,14 +192,23 @@ config PWM_IMG | |||
| 192 | To compile this driver as a module, choose M here: the module | 192 | To compile this driver as a module, choose M here: the module |
| 193 | will be called pwm-img | 193 | will be called pwm-img |
| 194 | 194 | ||
| 195 | config PWM_IMX | 195 | config PWM_IMX1 |
| 196 | tristate "i.MX PWM support" | 196 | tristate "i.MX1 PWM support" |
| 197 | depends on ARCH_MXC | 197 | depends on ARCH_MXC |
| 198 | help | 198 | help |
| 199 | Generic PWM framework driver for i.MX. | 199 | Generic PWM framework driver for i.MX1 and i.MX21 |
| 200 | 200 | ||
| 201 | To compile this driver as a module, choose M here: the module | 201 | To compile this driver as a module, choose M here: the module |
| 202 | will be called pwm-imx. | 202 | will be called pwm-imx1. |
| 203 | |||
| 204 | config PWM_IMX27 | ||
| 205 | tristate "i.MX27 PWM support" | ||
| 206 | depends on ARCH_MXC | ||
| 207 | help | ||
| 208 | Generic PWM framework driver for i.MX27 and later i.MX SoCs. | ||
| 209 | |||
| 210 | To compile this driver as a module, choose M here: the module | ||
| 211 | will be called pwm-imx27. | ||
| 203 | 212 | ||
| 204 | config PWM_JZ4740 | 213 | config PWM_JZ4740 |
| 205 | tristate "Ingenic JZ47xx PWM support" | 214 | tristate "Ingenic JZ47xx PWM support" |
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 9c676a0dadf5..448825e892bc 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile | |||
| @@ -17,7 +17,8 @@ obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o | |||
| 17 | obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o | 17 | obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o |
| 18 | obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o | 18 | obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o |
| 19 | obj-$(CONFIG_PWM_IMG) += pwm-img.o | 19 | obj-$(CONFIG_PWM_IMG) += pwm-img.o |
| 20 | obj-$(CONFIG_PWM_IMX) += pwm-imx.o | 20 | obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o |
| 21 | obj-$(CONFIG_PWM_IMX27) += pwm-imx27.o | ||
| 21 | obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o | 22 | obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o |
| 22 | obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o | 23 | obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o |
| 23 | obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o | 24 | obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o |
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 1581f6ab1b1f..3149204567f3 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c | |||
| @@ -472,7 +472,10 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state) | |||
| 472 | state->duty_cycle > state->period) | 472 | state->duty_cycle > state->period) |
| 473 | return -EINVAL; | 473 | return -EINVAL; |
| 474 | 474 | ||
| 475 | if (!memcmp(state, &pwm->state, sizeof(*state))) | 475 | if (state->period == pwm->state.period && |
| 476 | state->duty_cycle == pwm->state.duty_cycle && | ||
| 477 | state->polarity == pwm->state.polarity && | ||
| 478 | state->enabled == pwm->state.enabled) | ||
| 476 | return 0; | 479 | return 0; |
| 477 | 480 | ||
| 478 | if (pwm->chip->ops->apply) { | 481 | if (pwm->chip->ops->apply) { |
| @@ -1033,10 +1036,7 @@ static int pwm_seq_show(struct seq_file *s, void *v) | |||
| 1033 | dev_name(chip->dev), chip->npwm, | 1036 | dev_name(chip->dev), chip->npwm, |
| 1034 | (chip->npwm != 1) ? "s" : ""); | 1037 | (chip->npwm != 1) ? "s" : ""); |
| 1035 | 1038 | ||
| 1036 | if (chip->ops->dbg_show) | 1039 | pwm_dbg_show(chip, s); |
| 1037 | chip->ops->dbg_show(chip, s); | ||
| 1038 | else | ||
| 1039 | pwm_dbg_show(chip, s); | ||
| 1040 | 1040 | ||
| 1041 | return 0; | 1041 | return 0; |
| 1042 | } | 1042 | } |
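pwm_apply_state() no longer short-circuits on a memcmp() of the whole pwm_state, because a byte-wise compare also looks at padding and at fields a caller never initialised; comparing the four meaningful members avoids both spurious mismatches and spurious matches. A userspace demonstration of the padding problem with a similar struct; the field layout below is illustrative, not the real struct pwm_state:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    struct state {
        unsigned int period;
        unsigned int duty_cycle;
        char polarity;              /* padding typically follows before 'enabled' */
        int enabled;
    };

    int main(void)
    {
        struct state a, b;

        memset(&a, 0xaa, sizeof(a));    /* simulate dirty stack contents */
        memset(&b, 0x55, sizeof(b));
        a.period = b.period = 1000000;
        a.duty_cycle = b.duty_cycle = 500000;
        a.polarity = b.polarity = 0;
        a.enabled = b.enabled = 1;

        bool memcmp_equal = !memcmp(&a, &b, sizeof(a));
        bool field_equal = a.period == b.period &&
                           a.duty_cycle == b.duty_cycle &&
                           a.polarity == b.polarity &&
                           a.enabled == b.enabled;

        /* memcmp sees the padding bytes and reports a difference; fields agree */
        printf("memcmp equal: %d, field equal: %d\n", memcmp_equal, field_equal);
        return 0;
    }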
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c index 530d7dc5f1b5..a9fd6f0d408c 100644 --- a/drivers/pwm/pwm-atmel.c +++ b/drivers/pwm/pwm-atmel.c | |||
| @@ -48,16 +48,6 @@ | |||
| 48 | #define PWMV2_CPRD 0x0C | 48 | #define PWMV2_CPRD 0x0C |
| 49 | #define PWMV2_CPRDUPD 0x10 | 49 | #define PWMV2_CPRDUPD 0x10 |
| 50 | 50 | ||
| 51 | /* | ||
| 52 | * Max value for duty and period | ||
| 53 | * | ||
| 54 | * Although the duty and period register is 32 bit, | ||
| 55 | * however only the LSB 16 bits are significant. | ||
| 56 | */ | ||
| 57 | #define PWM_MAX_DTY 0xFFFF | ||
| 58 | #define PWM_MAX_PRD 0xFFFF | ||
| 59 | #define PRD_MAX_PRES 10 | ||
| 60 | |||
| 61 | struct atmel_pwm_registers { | 51 | struct atmel_pwm_registers { |
| 62 | u8 period; | 52 | u8 period; |
| 63 | u8 period_upd; | 53 | u8 period_upd; |
| @@ -65,11 +55,21 @@ struct atmel_pwm_registers { | |||
| 65 | u8 duty_upd; | 55 | u8 duty_upd; |
| 66 | }; | 56 | }; |
| 67 | 57 | ||
| 58 | struct atmel_pwm_config { | ||
| 59 | u32 max_period; | ||
| 60 | u32 max_pres; | ||
| 61 | }; | ||
| 62 | |||
| 63 | struct atmel_pwm_data { | ||
| 64 | struct atmel_pwm_registers regs; | ||
| 65 | struct atmel_pwm_config cfg; | ||
| 66 | }; | ||
| 67 | |||
| 68 | struct atmel_pwm_chip { | 68 | struct atmel_pwm_chip { |
| 69 | struct pwm_chip chip; | 69 | struct pwm_chip chip; |
| 70 | struct clk *clk; | 70 | struct clk *clk; |
| 71 | void __iomem *base; | 71 | void __iomem *base; |
| 72 | const struct atmel_pwm_registers *regs; | 72 | const struct atmel_pwm_data *data; |
| 73 | 73 | ||
| 74 | unsigned int updated_pwms; | 74 | unsigned int updated_pwms; |
| 75 | /* ISR is cleared when read, ensure only one thread does that */ | 75 | /* ISR is cleared when read, ensure only one thread does that */ |
| @@ -121,10 +121,10 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip, | |||
| 121 | cycles *= clk_get_rate(atmel_pwm->clk); | 121 | cycles *= clk_get_rate(atmel_pwm->clk); |
| 122 | do_div(cycles, NSEC_PER_SEC); | 122 | do_div(cycles, NSEC_PER_SEC); |
| 123 | 123 | ||
| 124 | for (*pres = 0; cycles > PWM_MAX_PRD; cycles >>= 1) | 124 | for (*pres = 0; cycles > atmel_pwm->data->cfg.max_period; cycles >>= 1) |
| 125 | (*pres)++; | 125 | (*pres)++; |
| 126 | 126 | ||
| 127 | if (*pres > PRD_MAX_PRES) { | 127 | if (*pres > atmel_pwm->data->cfg.max_pres) { |
| 128 | dev_err(chip->dev, "pres exceeds the maximum value\n"); | 128 | dev_err(chip->dev, "pres exceeds the maximum value\n"); |
| 129 | return -EINVAL; | 129 | return -EINVAL; |
| 130 | } | 130 | } |
| @@ -150,15 +150,15 @@ static void atmel_pwm_update_cdty(struct pwm_chip *chip, struct pwm_device *pwm, | |||
| 150 | struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); | 150 | struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); |
| 151 | u32 val; | 151 | u32 val; |
| 152 | 152 | ||
| 153 | if (atmel_pwm->regs->duty_upd == | 153 | if (atmel_pwm->data->regs.duty_upd == |
| 154 | atmel_pwm->regs->period_upd) { | 154 | atmel_pwm->data->regs.period_upd) { |
| 155 | val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); | 155 | val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); |
| 156 | val &= ~PWM_CMR_UPD_CDTY; | 156 | val &= ~PWM_CMR_UPD_CDTY; |
| 157 | atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); | 157 | atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); |
| 158 | } | 158 | } |
| 159 | 159 | ||
| 160 | atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, | 160 | atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, |
| 161 | atmel_pwm->regs->duty_upd, cdty); | 161 | atmel_pwm->data->regs.duty_upd, cdty); |
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip, | 164 | static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip, |
| @@ -168,9 +168,9 @@ static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip, | |||
| 168 | struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); | 168 | struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); |
| 169 | 169 | ||
| 170 | atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, | 170 | atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, |
| 171 | atmel_pwm->regs->duty, cdty); | 171 | atmel_pwm->data->regs.duty, cdty); |
| 172 | atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, | 172 | atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, |
| 173 | atmel_pwm->regs->period, cprd); | 173 | atmel_pwm->data->regs.period, cprd); |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm, | 176 | static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm, |
| @@ -225,7 +225,7 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, | |||
| 225 | cstate.polarity == state->polarity && | 225 | cstate.polarity == state->polarity && |
| 226 | cstate.period == state->period) { | 226 | cstate.period == state->period) { |
| 227 | cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, | 227 | cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, |
| 228 | atmel_pwm->regs->period); | 228 | atmel_pwm->data->regs.period); |
| 229 | atmel_pwm_calculate_cdty(state, cprd, &cdty); | 229 | atmel_pwm_calculate_cdty(state, cprd, &cdty); |
| 230 | atmel_pwm_update_cdty(chip, pwm, cdty); | 230 | atmel_pwm_update_cdty(chip, pwm, cdty); |
| 231 | return 0; | 231 | return 0; |
| @@ -277,27 +277,55 @@ static const struct pwm_ops atmel_pwm_ops = { | |||
| 277 | .owner = THIS_MODULE, | 277 | .owner = THIS_MODULE, |
| 278 | }; | 278 | }; |
| 279 | 279 | ||
| 280 | static const struct atmel_pwm_registers atmel_pwm_regs_v1 = { | 280 | static const struct atmel_pwm_data atmel_sam9rl_pwm_data = { |
| 281 | .period = PWMV1_CPRD, | 281 | .regs = { |
| 282 | .period_upd = PWMV1_CUPD, | 282 | .period = PWMV1_CPRD, |
| 283 | .duty = PWMV1_CDTY, | 283 | .period_upd = PWMV1_CUPD, |
| 284 | .duty_upd = PWMV1_CUPD, | 284 | .duty = PWMV1_CDTY, |
| 285 | .duty_upd = PWMV1_CUPD, | ||
| 286 | }, | ||
| 287 | .cfg = { | ||
| 288 | /* 16 bits to keep period and duty. */ | ||
| 289 | .max_period = 0xffff, | ||
| 290 | .max_pres = 10, | ||
| 291 | }, | ||
| 292 | }; | ||
| 293 | |||
| 294 | static const struct atmel_pwm_data atmel_sama5_pwm_data = { | ||
| 295 | .regs = { | ||
| 296 | .period = PWMV2_CPRD, | ||
| 297 | .period_upd = PWMV2_CPRDUPD, | ||
| 298 | .duty = PWMV2_CDTY, | ||
| 299 | .duty_upd = PWMV2_CDTYUPD, | ||
| 300 | }, | ||
| 301 | .cfg = { | ||
| 302 | /* 16 bits to keep period and duty. */ | ||
| 303 | .max_period = 0xffff, | ||
| 304 | .max_pres = 10, | ||
| 305 | }, | ||
| 285 | }; | 306 | }; |
| 286 | 307 | ||
| 287 | static const struct atmel_pwm_registers atmel_pwm_regs_v2 = { | 308 | static const struct atmel_pwm_data mchp_sam9x60_pwm_data = { |
| 288 | .period = PWMV2_CPRD, | 309 | .regs = { |
| 289 | .period_upd = PWMV2_CPRDUPD, | 310 | .period = PWMV1_CPRD, |
| 290 | .duty = PWMV2_CDTY, | 311 | .period_upd = PWMV1_CUPD, |
| 291 | .duty_upd = PWMV2_CDTYUPD, | 312 | .duty = PWMV1_CDTY, |
| 313 | .duty_upd = PWMV1_CUPD, | ||
| 314 | }, | ||
| 315 | .cfg = { | ||
| 316 | /* 32 bits to keep period and duty. */ | ||
| 317 | .max_period = 0xffffffff, | ||
| 318 | .max_pres = 10, | ||
| 319 | }, | ||
| 292 | }; | 320 | }; |
| 293 | 321 | ||
| 294 | static const struct platform_device_id atmel_pwm_devtypes[] = { | 322 | static const struct platform_device_id atmel_pwm_devtypes[] = { |
| 295 | { | 323 | { |
| 296 | .name = "at91sam9rl-pwm", | 324 | .name = "at91sam9rl-pwm", |
| 297 | .driver_data = (kernel_ulong_t)&atmel_pwm_regs_v1, | 325 | .driver_data = (kernel_ulong_t)&atmel_sam9rl_pwm_data, |
| 298 | }, { | 326 | }, { |
| 299 | .name = "sama5d3-pwm", | 327 | .name = "sama5d3-pwm", |
| 300 | .driver_data = (kernel_ulong_t)&atmel_pwm_regs_v2, | 328 | .driver_data = (kernel_ulong_t)&atmel_sama5_pwm_data, |
| 301 | }, { | 329 | }, { |
| 302 | /* sentinel */ | 330 | /* sentinel */ |
| 303 | }, | 331 | }, |
| @@ -307,20 +335,23 @@ MODULE_DEVICE_TABLE(platform, atmel_pwm_devtypes); | |||
| 307 | static const struct of_device_id atmel_pwm_dt_ids[] = { | 335 | static const struct of_device_id atmel_pwm_dt_ids[] = { |
| 308 | { | 336 | { |
| 309 | .compatible = "atmel,at91sam9rl-pwm", | 337 | .compatible = "atmel,at91sam9rl-pwm", |
| 310 | .data = &atmel_pwm_regs_v1, | 338 | .data = &atmel_sam9rl_pwm_data, |
| 311 | }, { | 339 | }, { |
| 312 | .compatible = "atmel,sama5d3-pwm", | 340 | .compatible = "atmel,sama5d3-pwm", |
| 313 | .data = &atmel_pwm_regs_v2, | 341 | .data = &atmel_sama5_pwm_data, |
| 314 | }, { | 342 | }, { |
| 315 | .compatible = "atmel,sama5d2-pwm", | 343 | .compatible = "atmel,sama5d2-pwm", |
| 316 | .data = &atmel_pwm_regs_v2, | 344 | .data = &atmel_sama5_pwm_data, |
| 345 | }, { | ||
| 346 | .compatible = "microchip,sam9x60-pwm", | ||
| 347 | .data = &mchp_sam9x60_pwm_data, | ||
| 317 | }, { | 348 | }, { |
| 318 | /* sentinel */ | 349 | /* sentinel */ |
| 319 | }, | 350 | }, |
| 320 | }; | 351 | }; |
| 321 | MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids); | 352 | MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids); |
| 322 | 353 | ||
| 323 | static inline const struct atmel_pwm_registers * | 354 | static inline const struct atmel_pwm_data * |
| 324 | atmel_pwm_get_driver_data(struct platform_device *pdev) | 355 | atmel_pwm_get_driver_data(struct platform_device *pdev) |
| 325 | { | 356 | { |
| 326 | const struct platform_device_id *id; | 357 | const struct platform_device_id *id; |
| @@ -330,18 +361,18 @@ atmel_pwm_get_driver_data(struct platform_device *pdev) | |||
| 330 | 361 | ||
| 331 | id = platform_get_device_id(pdev); | 362 | id = platform_get_device_id(pdev); |
| 332 | 363 | ||
| 333 | return (struct atmel_pwm_registers *)id->driver_data; | 364 | return (struct atmel_pwm_data *)id->driver_data; |
| 334 | } | 365 | } |
| 335 | 366 | ||
| 336 | static int atmel_pwm_probe(struct platform_device *pdev) | 367 | static int atmel_pwm_probe(struct platform_device *pdev) |
| 337 | { | 368 | { |
| 338 | const struct atmel_pwm_registers *regs; | 369 | const struct atmel_pwm_data *data; |
| 339 | struct atmel_pwm_chip *atmel_pwm; | 370 | struct atmel_pwm_chip *atmel_pwm; |
| 340 | struct resource *res; | 371 | struct resource *res; |
| 341 | int ret; | 372 | int ret; |
| 342 | 373 | ||
| 343 | regs = atmel_pwm_get_driver_data(pdev); | 374 | data = atmel_pwm_get_driver_data(pdev); |
| 344 | if (!regs) | 375 | if (!data) |
| 345 | return -ENODEV; | 376 | return -ENODEV; |
| 346 | 377 | ||
| 347 | atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL); | 378 | atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL); |
| @@ -373,7 +404,7 @@ static int atmel_pwm_probe(struct platform_device *pdev) | |||
| 373 | 404 | ||
| 374 | atmel_pwm->chip.base = -1; | 405 | atmel_pwm->chip.base = -1; |
| 375 | atmel_pwm->chip.npwm = 4; | 406 | atmel_pwm->chip.npwm = 4; |
| 376 | atmel_pwm->regs = regs; | 407 | atmel_pwm->data = data; |
| 377 | atmel_pwm->updated_pwms = 0; | 408 | atmel_pwm->updated_pwms = 0; |
| 378 | mutex_init(&atmel_pwm->isr_lock); | 409 | mutex_init(&atmel_pwm->isr_lock); |
| 379 | 410 | ||
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c index 09a95aeb3a70..81da91df2529 100644 --- a/drivers/pwm/pwm-bcm-kona.c +++ b/drivers/pwm/pwm-bcm-kona.c | |||
| @@ -45,25 +45,25 @@ | |||
| 45 | * high or low depending on its state at that exact instant. | 45 | * high or low depending on its state at that exact instant. |
| 46 | */ | 46 | */ |
| 47 | 47 | ||
| 48 | #define PWM_CONTROL_OFFSET (0x00000000) | 48 | #define PWM_CONTROL_OFFSET 0x00000000 |
| 49 | #define PWM_CONTROL_SMOOTH_SHIFT(chan) (24 + (chan)) | 49 | #define PWM_CONTROL_SMOOTH_SHIFT(chan) (24 + (chan)) |
| 50 | #define PWM_CONTROL_TYPE_SHIFT(chan) (16 + (chan)) | 50 | #define PWM_CONTROL_TYPE_SHIFT(chan) (16 + (chan)) |
| 51 | #define PWM_CONTROL_POLARITY_SHIFT(chan) (8 + (chan)) | 51 | #define PWM_CONTROL_POLARITY_SHIFT(chan) (8 + (chan)) |
| 52 | #define PWM_CONTROL_TRIGGER_SHIFT(chan) (chan) | 52 | #define PWM_CONTROL_TRIGGER_SHIFT(chan) (chan) |
| 53 | 53 | ||
| 54 | #define PRESCALE_OFFSET (0x00000004) | 54 | #define PRESCALE_OFFSET 0x00000004 |
| 55 | #define PRESCALE_SHIFT(chan) ((chan) << 2) | 55 | #define PRESCALE_SHIFT(chan) ((chan) << 2) |
| 56 | #define PRESCALE_MASK(chan) (0x7 << PRESCALE_SHIFT(chan)) | 56 | #define PRESCALE_MASK(chan) (0x7 << PRESCALE_SHIFT(chan)) |
| 57 | #define PRESCALE_MIN (0x00000000) | 57 | #define PRESCALE_MIN 0x00000000 |
| 58 | #define PRESCALE_MAX (0x00000007) | 58 | #define PRESCALE_MAX 0x00000007 |
| 59 | 59 | ||
| 60 | #define PERIOD_COUNT_OFFSET(chan) (0x00000008 + ((chan) << 3)) | 60 | #define PERIOD_COUNT_OFFSET(chan) (0x00000008 + ((chan) << 3)) |
| 61 | #define PERIOD_COUNT_MIN (0x00000002) | 61 | #define PERIOD_COUNT_MIN 0x00000002 |
| 62 | #define PERIOD_COUNT_MAX (0x00ffffff) | 62 | #define PERIOD_COUNT_MAX 0x00ffffff |
| 63 | 63 | ||
| 64 | #define DUTY_CYCLE_HIGH_OFFSET(chan) (0x0000000c + ((chan) << 3)) | 64 | #define DUTY_CYCLE_HIGH_OFFSET(chan) (0x0000000c + ((chan) << 3)) |
| 65 | #define DUTY_CYCLE_HIGH_MIN (0x00000000) | 65 | #define DUTY_CYCLE_HIGH_MIN 0x00000000 |
| 66 | #define DUTY_CYCLE_HIGH_MAX (0x00ffffff) | 66 | #define DUTY_CYCLE_HIGH_MAX 0x00ffffff |
| 67 | 67 | ||
| 68 | struct kona_pwmc { | 68 | struct kona_pwmc { |
| 69 | struct pwm_chip chip; | 69 | struct pwm_chip chip; |
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c index 27c107e78d59..a0b09603d13d 100644 --- a/drivers/pwm/pwm-hibvt.c +++ b/drivers/pwm/pwm-hibvt.c | |||
| @@ -49,15 +49,30 @@ struct hibvt_pwm_chip { | |||
| 49 | struct clk *clk; | 49 | struct clk *clk; |
| 50 | void __iomem *base; | 50 | void __iomem *base; |
| 51 | struct reset_control *rstc; | 51 | struct reset_control *rstc; |
| 52 | const struct hibvt_pwm_soc *soc; | ||
| 52 | }; | 53 | }; |
| 53 | 54 | ||
| 54 | struct hibvt_pwm_soc { | 55 | struct hibvt_pwm_soc { |
| 55 | u32 num_pwms; | 56 | u32 num_pwms; |
| 57 | bool quirk_force_enable; | ||
| 56 | }; | 58 | }; |
| 57 | 59 | ||
| 58 | static const struct hibvt_pwm_soc pwm_soc[2] = { | 60 | static const struct hibvt_pwm_soc hi3516cv300_soc_info = { |
| 59 | { .num_pwms = 4 }, | 61 | .num_pwms = 4, |
| 60 | { .num_pwms = 8 }, | 62 | }; |
| 63 | |||
| 64 | static const struct hibvt_pwm_soc hi3519v100_soc_info = { | ||
| 65 | .num_pwms = 8, | ||
| 66 | }; | ||
| 67 | |||
| 68 | static const struct hibvt_pwm_soc hi3559v100_shub_soc_info = { | ||
| 69 | .num_pwms = 8, | ||
| 70 | .quirk_force_enable = true, | ||
| 71 | }; | ||
| 72 | |||
| 73 | static const struct hibvt_pwm_soc hi3559v100_soc_info = { | ||
| 74 | .num_pwms = 2, | ||
| 75 | .quirk_force_enable = true, | ||
| 61 | }; | 76 | }; |
| 62 | 77 | ||
| 63 | static inline struct hibvt_pwm_chip *to_hibvt_pwm_chip(struct pwm_chip *chip) | 78 | static inline struct hibvt_pwm_chip *to_hibvt_pwm_chip(struct pwm_chip *chip) |
| @@ -148,13 +163,23 @@ static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, | |||
| 148 | static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, | 163 | static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, |
| 149 | struct pwm_state *state) | 164 | struct pwm_state *state) |
| 150 | { | 165 | { |
| 166 | struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip); | ||
| 167 | |||
| 151 | if (state->polarity != pwm->state.polarity) | 168 | if (state->polarity != pwm->state.polarity) |
| 152 | hibvt_pwm_set_polarity(chip, pwm, state->polarity); | 169 | hibvt_pwm_set_polarity(chip, pwm, state->polarity); |
| 153 | 170 | ||
| 154 | if (state->period != pwm->state.period || | 171 | if (state->period != pwm->state.period || |
| 155 | state->duty_cycle != pwm->state.duty_cycle) | 172 | state->duty_cycle != pwm->state.duty_cycle) { |
| 156 | hibvt_pwm_config(chip, pwm, state->duty_cycle, state->period); | 173 | hibvt_pwm_config(chip, pwm, state->duty_cycle, state->period); |
| 157 | 174 | ||
| 175 | /* | ||
| 176 | * Some implementations require the PWM to be enabled twice | ||
| 177 | * each time the duty cycle is refreshed. | ||
| 178 | */ | ||
| 179 | if (hi_pwm_chip->soc->quirk_force_enable && state->enabled) | ||
| 180 | hibvt_pwm_enable(chip, pwm); | ||
| 181 | } | ||
| 182 | |||
| 158 | if (state->enabled != pwm->state.enabled) { | 183 | if (state->enabled != pwm->state.enabled) { |
| 159 | if (state->enabled) | 184 | if (state->enabled) |
| 160 | hibvt_pwm_enable(chip, pwm); | 185 | hibvt_pwm_enable(chip, pwm); |
| @@ -198,6 +223,7 @@ static int hibvt_pwm_probe(struct platform_device *pdev) | |||
| 198 | pwm_chip->chip.npwm = soc->num_pwms; | 223 | pwm_chip->chip.npwm = soc->num_pwms; |
| 199 | pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags; | 224 | pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags; |
| 200 | pwm_chip->chip.of_pwm_n_cells = 3; | 225 | pwm_chip->chip.of_pwm_n_cells = 3; |
| 226 | pwm_chip->soc = soc; | ||
| 201 | 227 | ||
| 202 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 228 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 203 | pwm_chip->base = devm_ioremap_resource(&pdev->dev, res); | 229 | pwm_chip->base = devm_ioremap_resource(&pdev->dev, res); |
| @@ -250,8 +276,14 @@ static int hibvt_pwm_remove(struct platform_device *pdev) | |||
| 250 | } | 276 | } |
| 251 | 277 | ||
| 252 | static const struct of_device_id hibvt_pwm_of_match[] = { | 278 | static const struct of_device_id hibvt_pwm_of_match[] = { |
| 253 | { .compatible = "hisilicon,hi3516cv300-pwm", .data = &pwm_soc[0] }, | 279 | { .compatible = "hisilicon,hi3516cv300-pwm", |
| 254 | { .compatible = "hisilicon,hi3519v100-pwm", .data = &pwm_soc[1] }, | 280 | .data = &hi3516cv300_soc_info }, |
| 281 | { .compatible = "hisilicon,hi3519v100-pwm", | ||
| 282 | .data = &hi3519v100_soc_info }, | ||
| 283 | { .compatible = "hisilicon,hi3559v100-shub-pwm", | ||
| 284 | .data = &hi3559v100_shub_soc_info }, | ||
| 285 | { .compatible = "hisilicon,hi3559v100-pwm", | ||
| 286 | .data = &hi3559v100_soc_info }, | ||
| 255 | { } | 287 | { } |
| 256 | }; | 288 | }; |
| 257 | MODULE_DEVICE_TABLE(of, hibvt_pwm_of_match); | 289 | MODULE_DEVICE_TABLE(of, hibvt_pwm_of_match); |
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c new file mode 100644 index 000000000000..f8b2c2e001a7 --- /dev/null +++ b/drivers/pwm/pwm-imx1.c | |||
| @@ -0,0 +1,199 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * simple driver for PWM (Pulse Width Modulator) controller | ||
| 4 | * | ||
| 5 | * Derived from pxa PWM driver by eric miao <eric.miao@marvell.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/bitfield.h> | ||
| 9 | #include <linux/bitops.h> | ||
| 10 | #include <linux/clk.h> | ||
| 11 | #include <linux/delay.h> | ||
| 12 | #include <linux/err.h> | ||
| 13 | #include <linux/io.h> | ||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/of.h> | ||
| 17 | #include <linux/of_device.h> | ||
| 18 | #include <linux/platform_device.h> | ||
| 19 | #include <linux/pwm.h> | ||
| 20 | #include <linux/slab.h> | ||
| 21 | |||
| 22 | #define MX1_PWMC 0x00 /* PWM Control Register */ | ||
| 23 | #define MX1_PWMS 0x04 /* PWM Sample Register */ | ||
| 24 | #define MX1_PWMP 0x08 /* PWM Period Register */ | ||
| 25 | |||
| 26 | #define MX1_PWMC_EN BIT(4) | ||
| 27 | |||
| 28 | struct pwm_imx1_chip { | ||
| 29 | struct clk *clk_ipg; | ||
| 30 | struct clk *clk_per; | ||
| 31 | void __iomem *mmio_base; | ||
| 32 | struct pwm_chip chip; | ||
| 33 | }; | ||
| 34 | |||
| 35 | #define to_pwm_imx1_chip(chip) container_of(chip, struct pwm_imx1_chip, chip) | ||
| 36 | |||
| 37 | static int pwm_imx1_clk_prepare_enable(struct pwm_chip *chip) | ||
| 38 | { | ||
| 39 | struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip); | ||
| 40 | int ret; | ||
| 41 | |||
| 42 | ret = clk_prepare_enable(imx->clk_ipg); | ||
| 43 | if (ret) | ||
| 44 | return ret; | ||
| 45 | |||
| 46 | ret = clk_prepare_enable(imx->clk_per); | ||
| 47 | if (ret) { | ||
| 48 | clk_disable_unprepare(imx->clk_ipg); | ||
| 49 | return ret; | ||
| 50 | } | ||
| 51 | |||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | static void pwm_imx1_clk_disable_unprepare(struct pwm_chip *chip) | ||
| 56 | { | ||
| 57 | struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip); | ||
| 58 | |||
| 59 | clk_disable_unprepare(imx->clk_per); | ||
| 60 | clk_disable_unprepare(imx->clk_ipg); | ||
| 61 | } | ||
| 62 | |||
| 63 | static int pwm_imx1_config(struct pwm_chip *chip, | ||
| 64 | struct pwm_device *pwm, int duty_ns, int period_ns) | ||
| 65 | { | ||
| 66 | struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip); | ||
| 67 | u32 max, p; | ||
| 68 | |||
| 69 | /* | ||
| 70 | * The PWM subsystem allows for exact frequencies. However, | ||
| 71 | * I cannot connect a scope on my device to the PWM line and | ||
| 72 | * thus cannot provide the program the PWM controller | ||
| 73 | * exactly. Instead, I'm relying on the fact that the | ||
| 74 | * Bootloader (u-boot or WinCE+haret) has programmed the PWM | ||
| 75 | * function group already. So I'll just modify the PWM sample | ||
| 76 | * register to follow the ratio of duty_ns vs. period_ns | ||
| 77 | * accordingly. | ||
| 78 | * | ||
| 79 | * This is good enough for programming the brightness of | ||
| 80 | * the LCD backlight. | ||
| 81 | * | ||
| 82 | * The real implementation would divide PERCLK[0] first by | ||
| 83 | * both the prescaler (/1 .. /128) and then by CLKSEL | ||
| 84 | * (/2 .. /16). | ||
| 85 | */ | ||
| 86 | max = readl(imx->mmio_base + MX1_PWMP); | ||
| 87 | p = max * duty_ns / period_ns; | ||
| 88 | |||
| 89 | writel(max - p, imx->mmio_base + MX1_PWMS); | ||
| 90 | |||
| 91 | return 0; | ||
| 92 | } | ||
| 93 | |||
| 94 | static int pwm_imx1_enable(struct pwm_chip *chip, struct pwm_device *pwm) | ||
| 95 | { | ||
| 96 | struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip); | ||
| 97 | u32 value; | ||
| 98 | int ret; | ||
| 99 | |||
| 100 | ret = pwm_imx1_clk_prepare_enable(chip); | ||
| 101 | if (ret < 0) | ||
| 102 | return ret; | ||
| 103 | |||
| 104 | value = readl(imx->mmio_base + MX1_PWMC); | ||
| 105 | value |= MX1_PWMC_EN; | ||
| 106 | writel(value, imx->mmio_base + MX1_PWMC); | ||
| 107 | |||
| 108 | return 0; | ||
| 109 | } | ||
| 110 | |||
| 111 | static void pwm_imx1_disable(struct pwm_chip *chip, struct pwm_device *pwm) | ||
| 112 | { | ||
| 113 | struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip); | ||
| 114 | u32 value; | ||
| 115 | |||
| 116 | value = readl(imx->mmio_base + MX1_PWMC); | ||
| 117 | value &= ~MX1_PWMC_EN; | ||
| 118 | writel(value, imx->mmio_base + MX1_PWMC); | ||
| 119 | |||
| 120 | pwm_imx1_clk_disable_unprepare(chip); | ||
| 121 | } | ||
| 122 | |||
| 123 | static const struct pwm_ops pwm_imx1_ops = { | ||
| 124 | .enable = pwm_imx1_enable, | ||
| 125 | .disable = pwm_imx1_disable, | ||
| 126 | .config = pwm_imx1_config, | ||
| 127 | .owner = THIS_MODULE, | ||
| 128 | }; | ||
| 129 | |||
| 130 | static const struct of_device_id pwm_imx1_dt_ids[] = { | ||
| 131 | { .compatible = "fsl,imx1-pwm", }, | ||
| 132 | { /* sentinel */ } | ||
| 133 | }; | ||
| 134 | MODULE_DEVICE_TABLE(of, pwm_imx1_dt_ids); | ||
| 135 | |||
| 136 | static int pwm_imx1_probe(struct platform_device *pdev) | ||
| 137 | { | ||
| 138 | struct pwm_imx1_chip *imx; | ||
| 139 | struct resource *r; | ||
| 140 | |||
| 141 | imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL); | ||
| 142 | if (!imx) | ||
| 143 | return -ENOMEM; | ||
| 144 | |||
| 145 | platform_set_drvdata(pdev, imx); | ||
| 146 | |||
| 147 | imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); | ||
| 148 | if (IS_ERR(imx->clk_ipg)) { | ||
| 149 | dev_err(&pdev->dev, "getting ipg clock failed with %ld\n", | ||
| 150 | PTR_ERR(imx->clk_ipg)); | ||
| 151 | return PTR_ERR(imx->clk_ipg); | ||
| 152 | } | ||
| 153 | |||
| 154 | imx->clk_per = devm_clk_get(&pdev->dev, "per"); | ||
| 155 | if (IS_ERR(imx->clk_per)) { | ||
| 156 | int ret = PTR_ERR(imx->clk_per); | ||
| 157 | |||
| 158 | if (ret != -EPROBE_DEFER) | ||
| 159 | dev_err(&pdev->dev, | ||
| 160 | "failed to get peripheral clock: %d\n", | ||
| 161 | ret); | ||
| 162 | |||
| 163 | return ret; | ||
| 164 | } | ||
| 165 | |||
| 166 | imx->chip.ops = &pwm_imx1_ops; | ||
| 167 | imx->chip.dev = &pdev->dev; | ||
| 168 | imx->chip.base = -1; | ||
| 169 | imx->chip.npwm = 1; | ||
| 170 | |||
| 171 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 172 | imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); | ||
| 173 | if (IS_ERR(imx->mmio_base)) | ||
| 174 | return PTR_ERR(imx->mmio_base); | ||
| 175 | |||
| 176 | return pwmchip_add(&imx->chip); | ||
| 177 | } | ||
| 178 | |||
| 179 | static int pwm_imx1_remove(struct platform_device *pdev) | ||
| 180 | { | ||
| 181 | struct pwm_imx1_chip *imx = platform_get_drvdata(pdev); | ||
| 182 | |||
| 183 | pwm_imx1_clk_disable_unprepare(&imx->chip); | ||
| 184 | |||
| 185 | return pwmchip_remove(&imx->chip); | ||
| 186 | } | ||
| 187 | |||
| 188 | static struct platform_driver pwm_imx1_driver = { | ||
| 189 | .driver = { | ||
| 190 | .name = "pwm-imx1", | ||
| 191 | .of_match_table = pwm_imx1_dt_ids, | ||
| 192 | }, | ||
| 193 | .probe = pwm_imx1_probe, | ||
| 194 | .remove = pwm_imx1_remove, | ||
| 195 | }; | ||
| 196 | module_platform_driver(pwm_imx1_driver); | ||
| 197 | |||
| 198 | MODULE_LICENSE("GPL v2"); | ||
| 199 | MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); | ||
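pwm_imx1_config() in the new file keeps whatever period the bootloader programmed into MX1_PWMP and only rescales the sample register, writing max - max * duty_ns / period_ns so a larger duty cycle yields a smaller sample value. A worked example of that arithmetic with made-up numbers; the period register value of 1000 is purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t max = 1000;                        /* pretend MX1_PWMP reads back 1000 */
        int duty_ns = 250000, period_ns = 1000000;  /* 25% duty cycle */

        uint32_t p = max * duty_ns / period_ns;     /* 250 */
        uint32_t sample = max - p;                  /* 750 would be written to MX1_PWMS */

        printf("MX1_PWMS = %u for %d/%d ns\n", sample, duty_ns, period_ns);
        return 0;
    }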
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx27.c index 55a3a363d5be..806130654211 100644 --- a/drivers/pwm/pwm-imx.c +++ b/drivers/pwm/pwm-imx27.c | |||
| @@ -19,16 +19,6 @@ | |||
| 19 | #include <linux/pwm.h> | 19 | #include <linux/pwm.h> |
| 20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 21 | 21 | ||
| 22 | /* i.MX1 and i.MX21 share the same PWM function block: */ | ||
| 23 | |||
| 24 | #define MX1_PWMC 0x00 /* PWM Control Register */ | ||
| 25 | #define MX1_PWMS 0x04 /* PWM Sample Register */ | ||
| 26 | #define MX1_PWMP 0x08 /* PWM Period Register */ | ||
| 27 | |||
| 28 | #define MX1_PWMC_EN BIT(4) | ||
| 29 | |||
| 30 | /* i.MX27, i.MX31, i.MX35 share the same PWM function block: */ | ||
| 31 | |||
| 32 | #define MX3_PWMCR 0x00 /* PWM Control Register */ | 22 | #define MX3_PWMCR 0x00 /* PWM Control Register */ |
| 33 | #define MX3_PWMSR 0x04 /* PWM Status Register */ | 23 | #define MX3_PWMSR 0x04 /* PWM Status Register */ |
| 34 | #define MX3_PWMSAR 0x0C /* PWM Sample Register */ | 24 | #define MX3_PWMSAR 0x0C /* PWM Sample Register */ |
| @@ -86,21 +76,18 @@ | |||
| 86 | /* PWMPR register value of 0xffff has the same effect as 0xfffe */ | 76 | /* PWMPR register value of 0xffff has the same effect as 0xfffe */ |
| 87 | #define MX3_PWMPR_MAX 0xfffe | 77 | #define MX3_PWMPR_MAX 0xfffe |
| 88 | 78 | ||
| 89 | struct imx_chip { | 79 | struct pwm_imx27_chip { |
| 90 | struct clk *clk_ipg; | 80 | struct clk *clk_ipg; |
| 91 | |||
| 92 | struct clk *clk_per; | 81 | struct clk *clk_per; |
| 93 | |||
| 94 | void __iomem *mmio_base; | 82 | void __iomem *mmio_base; |
| 95 | |||
| 96 | struct pwm_chip chip; | 83 | struct pwm_chip chip; |
| 97 | }; | 84 | }; |
| 98 | 85 | ||
| 99 | #define to_imx_chip(chip) container_of(chip, struct imx_chip, chip) | 86 | #define to_pwm_imx27_chip(chip) container_of(chip, struct pwm_imx27_chip, chip) |
| 100 | 87 | ||
| 101 | static int imx_pwm_clk_prepare_enable(struct pwm_chip *chip) | 88 | static int pwm_imx27_clk_prepare_enable(struct pwm_chip *chip) |
| 102 | { | 89 | { |
| 103 | struct imx_chip *imx = to_imx_chip(chip); | 90 | struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); |
| 104 | int ret; | 91 | int ret; |
| 105 | 92 | ||
| 106 | ret = clk_prepare_enable(imx->clk_ipg); | 93 | ret = clk_prepare_enable(imx->clk_ipg); |
| @@ -116,35 +103,32 @@ static int imx_pwm_clk_prepare_enable(struct pwm_chip *chip) | |||
| 116 | return 0; | 103 | return 0; |
| 117 | } | 104 | } |
| 118 | 105 | ||
| 119 | static void imx_pwm_clk_disable_unprepare(struct pwm_chip *chip) | 106 | static void pwm_imx27_clk_disable_unprepare(struct pwm_chip *chip) |
| 120 | { | 107 | { |
| 121 | struct imx_chip *imx = to_imx_chip(chip); | 108 | struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); |
| 122 | 109 | ||
| 123 | clk_disable_unprepare(imx->clk_per); | 110 | clk_disable_unprepare(imx->clk_per); |
| 124 | clk_disable_unprepare(imx->clk_ipg); | 111 | clk_disable_unprepare(imx->clk_ipg); |
| 125 | } | 112 | } |
| 126 | 113 | ||
| 127 | static void imx_pwm_get_state(struct pwm_chip *chip, | 114 | static void pwm_imx27_get_state(struct pwm_chip *chip, |
| 128 | struct pwm_device *pwm, struct pwm_state *state) | 115 | struct pwm_device *pwm, struct pwm_state *state) |
| 129 | { | 116 | { |
| 130 | struct imx_chip *imx = to_imx_chip(chip); | 117 | struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); |
| 131 | u32 period, prescaler, pwm_clk, ret, val; | 118 | u32 period, prescaler, pwm_clk, val; |
| 132 | u64 tmp; | 119 | u64 tmp; |
| 120 | int ret; | ||
| 133 | 121 | ||
| 134 | ret = imx_pwm_clk_prepare_enable(chip); | 122 | ret = pwm_imx27_clk_prepare_enable(chip); |
| 135 | if (ret < 0) | 123 | if (ret < 0) |
| 136 | return; | 124 | return; |
| 137 | 125 | ||
| 138 | val = readl(imx->mmio_base + MX3_PWMCR); | 126 | val = readl(imx->mmio_base + MX3_PWMCR); |
| 139 | 127 | ||
| 140 | if (val & MX3_PWMCR_EN) { | 128 | if (val & MX3_PWMCR_EN) |
| 141 | state->enabled = true; | 129 | state->enabled = true; |
| 142 | ret = imx_pwm_clk_prepare_enable(chip); | 130 | else |
| 143 | if (ret) | ||
| 144 | return; | ||
| 145 | } else { | ||
| 146 | state->enabled = false; | 131 | state->enabled = false; |
| 147 | } | ||
| 148 | 132 | ||
| 149 | switch (FIELD_GET(MX3_PWMCR_POUTC, val)) { | 133 | switch (FIELD_GET(MX3_PWMCR_POUTC, val)) { |
| 150 | case MX3_PWMCR_POUTC_NORMAL: | 134 | case MX3_PWMCR_POUTC_NORMAL: |
| @@ -176,70 +160,13 @@ static void imx_pwm_get_state(struct pwm_chip *chip, | |||
| 176 | state->duty_cycle = 0; | 160 | state->duty_cycle = 0; |
| 177 | } | 161 | } |
| 178 | 162 | ||
| 179 | imx_pwm_clk_disable_unprepare(chip); | 163 | if (!state->enabled) |
| 180 | } | 164 | pwm_imx27_clk_disable_unprepare(chip); |
| 181 | |||
| 182 | static int imx_pwm_config_v1(struct pwm_chip *chip, | ||
| 183 | struct pwm_device *pwm, int duty_ns, int period_ns) | ||
| 184 | { | ||
| 185 | struct imx_chip *imx = to_imx_chip(chip); | ||
| 186 | |||
| 187 | /* | ||
| 188 | * The PWM subsystem allows for exact frequencies. However, | ||
| 189 | * I cannot connect a scope on my device to the PWM line and | ||
| 190 | * thus cannot program the PWM controller | ||
| 191 | * exactly. Instead, I'm relying on the fact that the | ||
| 192 | * Bootloader (u-boot or WinCE+haret) has programmed the PWM | ||
| 193 | * function group already. So I'll just modify the PWM sample | ||
| 194 | * register to follow the ratio of duty_ns vs. period_ns | ||
| 195 | * accordingly. | ||
| 196 | * | ||
| 197 | * This is good enough for programming the brightness of | ||
| 198 | * the LCD backlight. | ||
| 199 | * | ||
| 200 | * The real implementation would divide PERCLK[0] first by | ||
| 201 | * both the prescaler (/1 .. /128) and then by CLKSEL | ||
| 202 | * (/2 .. /16). | ||
| 203 | */ | ||
| 204 | u32 max = readl(imx->mmio_base + MX1_PWMP); | ||
| 205 | u32 p = max * duty_ns / period_ns; | ||
| 206 | writel(max - p, imx->mmio_base + MX1_PWMS); | ||
| 207 | |||
| 208 | return 0; | ||
| 209 | } | ||
| 210 | |||
| 211 | static int imx_pwm_enable_v1(struct pwm_chip *chip, struct pwm_device *pwm) | ||
| 212 | { | ||
| 213 | struct imx_chip *imx = to_imx_chip(chip); | ||
| 214 | u32 val; | ||
| 215 | int ret; | ||
| 216 | |||
| 217 | ret = imx_pwm_clk_prepare_enable(chip); | ||
| 218 | if (ret < 0) | ||
| 219 | return ret; | ||
| 220 | |||
| 221 | val = readl(imx->mmio_base + MX1_PWMC); | ||
| 222 | val |= MX1_PWMC_EN; | ||
| 223 | writel(val, imx->mmio_base + MX1_PWMC); | ||
| 224 | |||
| 225 | return 0; | ||
| 226 | } | ||
| 227 | |||
| 228 | static void imx_pwm_disable_v1(struct pwm_chip *chip, struct pwm_device *pwm) | ||
| 229 | { | ||
| 230 | struct imx_chip *imx = to_imx_chip(chip); | ||
| 231 | u32 val; | ||
| 232 | |||
| 233 | val = readl(imx->mmio_base + MX1_PWMC); | ||
| 234 | val &= ~MX1_PWMC_EN; | ||
| 235 | writel(val, imx->mmio_base + MX1_PWMC); | ||
| 236 | |||
| 237 | imx_pwm_clk_disable_unprepare(chip); | ||
| 238 | } | 165 | } |
| 239 | 166 | ||
| 240 | static void imx_pwm_sw_reset(struct pwm_chip *chip) | 167 | static void pwm_imx27_sw_reset(struct pwm_chip *chip) |
| 241 | { | 168 | { |
| 242 | struct imx_chip *imx = to_imx_chip(chip); | 169 | struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); |
| 243 | struct device *dev = chip->dev; | 170 | struct device *dev = chip->dev; |
| 244 | int wait_count = 0; | 171 | int wait_count = 0; |
| 245 | u32 cr; | 172 | u32 cr; |
| @@ -255,10 +182,10 @@ static void imx_pwm_sw_reset(struct pwm_chip *chip) | |||
| 255 | dev_warn(dev, "software reset timeout\n"); | 182 | dev_warn(dev, "software reset timeout\n"); |
| 256 | } | 183 | } |
| 257 | 184 | ||
| 258 | static void imx_pwm_wait_fifo_slot(struct pwm_chip *chip, | 185 | static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip, |
| 259 | struct pwm_device *pwm) | 186 | struct pwm_device *pwm) |
| 260 | { | 187 | { |
| 261 | struct imx_chip *imx = to_imx_chip(chip); | 188 | struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); |
| 262 | struct device *dev = chip->dev; | 189 | struct device *dev = chip->dev; |
| 263 | unsigned int period_ms; | 190 | unsigned int period_ms; |
| 264 | int fifoav; | 191 | int fifoav; |
| @@ -277,11 +204,11 @@ static void imx_pwm_wait_fifo_slot(struct pwm_chip *chip, | |||
| 277 | } | 204 | } |
| 278 | } | 205 | } |
| 279 | 206 | ||
| 280 | static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm, | 207 | static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm, |
| 281 | struct pwm_state *state) | 208 | struct pwm_state *state) |
| 282 | { | 209 | { |
| 283 | unsigned long period_cycles, duty_cycles, prescale; | 210 | unsigned long period_cycles, duty_cycles, prescale; |
| 284 | struct imx_chip *imx = to_imx_chip(chip); | 211 | struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); |
| 285 | struct pwm_state cstate; | 212 | struct pwm_state cstate; |
| 286 | unsigned long long c; | 213 | unsigned long long c; |
| 287 | int ret; | 214 | int ret; |
| @@ -318,13 +245,13 @@ static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm, | |||
| 318 | * enabled. | 245 | * enabled. |
| 319 | */ | 246 | */ |
| 320 | if (cstate.enabled) { | 247 | if (cstate.enabled) { |
| 321 | imx_pwm_wait_fifo_slot(chip, pwm); | 248 | pwm_imx27_wait_fifo_slot(chip, pwm); |
| 322 | } else { | 249 | } else { |
| 323 | ret = imx_pwm_clk_prepare_enable(chip); | 250 | ret = pwm_imx27_clk_prepare_enable(chip); |
| 324 | if (ret) | 251 | if (ret) |
| 325 | return ret; | 252 | return ret; |
| 326 | 253 | ||
| 327 | imx_pwm_sw_reset(chip); | 254 | pwm_imx27_sw_reset(chip); |
| 328 | } | 255 | } |
| 329 | 256 | ||
| 330 | writel(duty_cycles, imx->mmio_base + MX3_PWMSAR); | 257 | writel(duty_cycles, imx->mmio_base + MX3_PWMSAR); |
| @@ -343,64 +270,35 @@ static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm, | |||
| 343 | } else if (cstate.enabled) { | 270 | } else if (cstate.enabled) { |
| 344 | writel(0, imx->mmio_base + MX3_PWMCR); | 271 | writel(0, imx->mmio_base + MX3_PWMCR); |
| 345 | 272 | ||
| 346 | imx_pwm_clk_disable_unprepare(chip); | 273 | pwm_imx27_clk_disable_unprepare(chip); |
| 347 | } | 274 | } |
| 348 | 275 | ||
| 349 | return 0; | 276 | return 0; |
| 350 | } | 277 | } |
| 351 | 278 | ||
| 352 | static const struct pwm_ops imx_pwm_ops_v1 = { | 279 | static const struct pwm_ops pwm_imx27_ops = { |
| 353 | .enable = imx_pwm_enable_v1, | 280 | .apply = pwm_imx27_apply, |
| 354 | .disable = imx_pwm_disable_v1, | 281 | .get_state = pwm_imx27_get_state, |
| 355 | .config = imx_pwm_config_v1, | ||
| 356 | .owner = THIS_MODULE, | 282 | .owner = THIS_MODULE, |
| 357 | }; | 283 | }; |
| 358 | 284 | ||
| 359 | static const struct pwm_ops imx_pwm_ops_v2 = { | 285 | static const struct of_device_id pwm_imx27_dt_ids[] = { |
| 360 | .apply = imx_pwm_apply_v2, | 286 | { .compatible = "fsl,imx27-pwm", }, |
| 361 | .get_state = imx_pwm_get_state, | ||
| 362 | .owner = THIS_MODULE, | ||
| 363 | }; | ||
| 364 | |||
| 365 | struct imx_pwm_data { | ||
| 366 | bool polarity_supported; | ||
| 367 | const struct pwm_ops *ops; | ||
| 368 | }; | ||
| 369 | |||
| 370 | static struct imx_pwm_data imx_pwm_data_v1 = { | ||
| 371 | .ops = &imx_pwm_ops_v1, | ||
| 372 | }; | ||
| 373 | |||
| 374 | static struct imx_pwm_data imx_pwm_data_v2 = { | ||
| 375 | .polarity_supported = true, | ||
| 376 | .ops = &imx_pwm_ops_v2, | ||
| 377 | }; | ||
| 378 | |||
| 379 | static const struct of_device_id imx_pwm_dt_ids[] = { | ||
| 380 | { .compatible = "fsl,imx1-pwm", .data = &imx_pwm_data_v1, }, | ||
| 381 | { .compatible = "fsl,imx27-pwm", .data = &imx_pwm_data_v2, }, | ||
| 382 | { /* sentinel */ } | 287 | { /* sentinel */ } |
| 383 | }; | 288 | }; |
| 384 | MODULE_DEVICE_TABLE(of, imx_pwm_dt_ids); | 289 | MODULE_DEVICE_TABLE(of, pwm_imx27_dt_ids); |
| 385 | 290 | ||
| 386 | static int imx_pwm_probe(struct platform_device *pdev) | 291 | static int pwm_imx27_probe(struct platform_device *pdev) |
| 387 | { | 292 | { |
| 388 | const struct of_device_id *of_id = | 293 | struct pwm_imx27_chip *imx; |
| 389 | of_match_device(imx_pwm_dt_ids, &pdev->dev); | ||
| 390 | const struct imx_pwm_data *data; | ||
| 391 | struct imx_chip *imx; | ||
| 392 | struct resource *r; | 294 | struct resource *r; |
| 393 | int ret = 0; | ||
| 394 | |||
| 395 | if (!of_id) | ||
| 396 | return -ENODEV; | ||
| 397 | |||
| 398 | data = of_id->data; | ||
| 399 | 295 | ||
| 400 | imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL); | 296 | imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL); |
| 401 | if (imx == NULL) | 297 | if (imx == NULL) |
| 402 | return -ENOMEM; | 298 | return -ENOMEM; |
| 403 | 299 | ||
| 300 | platform_set_drvdata(pdev, imx); | ||
| 301 | |||
| 404 | imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); | 302 | imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); |
| 405 | if (IS_ERR(imx->clk_ipg)) { | 303 | if (IS_ERR(imx->clk_ipg)) { |
| 406 | dev_err(&pdev->dev, "getting ipg clock failed with %ld\n", | 304 | dev_err(&pdev->dev, "getting ipg clock failed with %ld\n", |
| @@ -410,57 +308,51 @@ static int imx_pwm_probe(struct platform_device *pdev) | |||
| 410 | 308 | ||
| 411 | imx->clk_per = devm_clk_get(&pdev->dev, "per"); | 309 | imx->clk_per = devm_clk_get(&pdev->dev, "per"); |
| 412 | if (IS_ERR(imx->clk_per)) { | 310 | if (IS_ERR(imx->clk_per)) { |
| 413 | dev_err(&pdev->dev, "getting per clock failed with %ld\n", | 311 | int ret = PTR_ERR(imx->clk_per); |
| 414 | PTR_ERR(imx->clk_per)); | 312 | |
| 415 | return PTR_ERR(imx->clk_per); | 313 | if (ret != -EPROBE_DEFER) |
| 314 | dev_err(&pdev->dev, | ||
| 315 | "failed to get peripheral clock: %d\n", | ||
| 316 | ret); | ||
| 317 | |||
| 318 | return ret; | ||
| 416 | } | 319 | } |
| 417 | 320 | ||
| 418 | imx->chip.ops = data->ops; | 321 | imx->chip.ops = &pwm_imx27_ops; |
| 419 | imx->chip.dev = &pdev->dev; | 322 | imx->chip.dev = &pdev->dev; |
| 420 | imx->chip.base = -1; | 323 | imx->chip.base = -1; |
| 421 | imx->chip.npwm = 1; | 324 | imx->chip.npwm = 1; |
| 422 | 325 | ||
| 423 | if (data->polarity_supported) { | 326 | imx->chip.of_xlate = of_pwm_xlate_with_flags; |
| 424 | dev_dbg(&pdev->dev, "PWM supports output inversion\n"); | 327 | imx->chip.of_pwm_n_cells = 3; |
| 425 | imx->chip.of_xlate = of_pwm_xlate_with_flags; | ||
| 426 | imx->chip.of_pwm_n_cells = 3; | ||
| 427 | } | ||
| 428 | 328 | ||
| 429 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 329 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 430 | imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); | 330 | imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); |
| 431 | if (IS_ERR(imx->mmio_base)) | 331 | if (IS_ERR(imx->mmio_base)) |
| 432 | return PTR_ERR(imx->mmio_base); | 332 | return PTR_ERR(imx->mmio_base); |
| 433 | 333 | ||
| 434 | ret = pwmchip_add(&imx->chip); | 334 | return pwmchip_add(&imx->chip); |
| 435 | if (ret < 0) | ||
| 436 | return ret; | ||
| 437 | |||
| 438 | platform_set_drvdata(pdev, imx); | ||
| 439 | return 0; | ||
| 440 | } | 335 | } |
| 441 | 336 | ||
| 442 | static int imx_pwm_remove(struct platform_device *pdev) | 337 | static int pwm_imx27_remove(struct platform_device *pdev) |
| 443 | { | 338 | { |
| 444 | struct imx_chip *imx; | 339 | struct pwm_imx27_chip *imx; |
| 445 | 340 | ||
| 446 | imx = platform_get_drvdata(pdev); | 341 | imx = platform_get_drvdata(pdev); |
| 447 | if (imx == NULL) | ||
| 448 | return -ENODEV; | ||
| 449 | 342 | ||
| 450 | imx_pwm_clk_disable_unprepare(&imx->chip); | 343 | pwm_imx27_clk_disable_unprepare(&imx->chip); |
| 451 | 344 | ||
| 452 | return pwmchip_remove(&imx->chip); | 345 | return pwmchip_remove(&imx->chip); |
| 453 | } | 346 | } |
| 454 | 347 | ||
| 455 | static struct platform_driver imx_pwm_driver = { | 348 | static struct platform_driver imx_pwm_driver = { |
| 456 | .driver = { | 349 | .driver = { |
| 457 | .name = "imx-pwm", | 350 | .name = "pwm-imx27", |
| 458 | .of_match_table = imx_pwm_dt_ids, | 351 | .of_match_table = pwm_imx27_dt_ids, |
| 459 | }, | 352 | }, |
| 460 | .probe = imx_pwm_probe, | 353 | .probe = pwm_imx27_probe, |
| 461 | .remove = imx_pwm_remove, | 354 | .remove = pwm_imx27_remove, |
| 462 | }; | 355 | }; |
| 463 | |||
| 464 | module_platform_driver(imx_pwm_driver); | 356 | module_platform_driver(imx_pwm_driver); |
| 465 | 357 | ||
| 466 | MODULE_LICENSE("GPL v2"); | 358 | MODULE_LICENSE("GPL v2"); |
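The renamed to_pwm_imx27_chip() helper above is the usual container_of() pattern: given the embedded struct pwm_chip that the PWM core hands back, it recovers the enclosing driver-private structure. A minimal stand-alone sketch of that pattern, using simplified stand-in types rather than the kernel definitions:

#include <stddef.h>
#include <stdio.h>

/* simplified container_of(), equivalent in effect to the kernel macro */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pwm_chip { int npwm; };                          /* stand-in type */
struct pwm_imx27_chip { void *clk_per; struct pwm_chip chip; };

int main(void)
{
	struct pwm_imx27_chip imx = { .chip = { .npwm = 1 } };
	struct pwm_chip *chip = &imx.chip;              /* what the core passes to callbacks */
	struct pwm_imx27_chip *back =
		container_of(chip, struct pwm_imx27_chip, chip);

	printf("recovered %p, expected %p\n", (void *)back, (void *)&imx);
	return 0;
}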
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c index 893940d45f0d..15803c71fe80 100644 --- a/drivers/pwm/pwm-mtk-disp.c +++ b/drivers/pwm/pwm-mtk-disp.c | |||
| @@ -277,10 +277,21 @@ static const struct mtk_pwm_data mt8173_pwm_data = { | |||
| 277 | .commit_mask = 0x1, | 277 | .commit_mask = 0x1, |
| 278 | }; | 278 | }; |
| 279 | 279 | ||
| 280 | static const struct mtk_pwm_data mt8183_pwm_data = { | ||
| 281 | .enable_mask = BIT(0), | ||
| 282 | .con0 = 0x18, | ||
| 283 | .con0_sel = 0x0, | ||
| 284 | .con1 = 0x1c, | ||
| 285 | .has_commit = false, | ||
| 286 | .bls_debug = 0x80, | ||
| 287 | .bls_debug_mask = 0x3, | ||
| 288 | }; | ||
| 289 | |||
| 280 | static const struct of_device_id mtk_disp_pwm_of_match[] = { | 290 | static const struct of_device_id mtk_disp_pwm_of_match[] = { |
| 281 | { .compatible = "mediatek,mt2701-disp-pwm", .data = &mt2701_pwm_data}, | 291 | { .compatible = "mediatek,mt2701-disp-pwm", .data = &mt2701_pwm_data}, |
| 282 | { .compatible = "mediatek,mt6595-disp-pwm", .data = &mt8173_pwm_data}, | 292 | { .compatible = "mediatek,mt6595-disp-pwm", .data = &mt8173_pwm_data}, |
| 283 | { .compatible = "mediatek,mt8173-disp-pwm", .data = &mt8173_pwm_data}, | 293 | { .compatible = "mediatek,mt8173-disp-pwm", .data = &mt8173_pwm_data}, |
| 294 | { .compatible = "mediatek,mt8183-disp-pwm", .data = &mt8183_pwm_data}, | ||
| 284 | { } | 295 | { } |
| 285 | }; | 296 | }; |
| 286 | MODULE_DEVICE_TABLE(of, mtk_disp_pwm_of_match); | 297 | MODULE_DEVICE_TABLE(of, mtk_disp_pwm_of_match); |
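The new mt8183 entry is reached through the usual compatible-string lookup: at probe time the node's compatible is searched in mtk_disp_pwm_of_match and the attached mtk_pwm_data is used to configure the driver. A stand-alone sketch of that lookup, with simplified structures and only a couple of the fields shown in the hunk above:

#include <stdio.h>
#include <string.h>

struct mtk_pwm_data { int con0; int has_commit; };      /* trimmed fields */
struct of_device_id { const char *compatible; const void *data; };

static const struct mtk_pwm_data mt8183_pwm_data = { .con0 = 0x18, .has_commit = 0 };

static const struct of_device_id mtk_disp_pwm_of_match[] = {
	{ .compatible = "mediatek,mt8183-disp-pwm", .data = &mt8183_pwm_data },
	{ }                                             /* sentinel */
};

/* simplified stand-in for of_match_node(): walk the table until the sentinel */
static const void *match_data(const char *compatible)
{
	const struct of_device_id *id;

	for (id = mtk_disp_pwm_of_match; id->compatible; id++)
		if (!strcmp(id->compatible, compatible))
			return id->data;
	return NULL;
}

int main(void)
{
	const struct mtk_pwm_data *d = match_data("mediatek,mt8183-disp-pwm");

	if (d)
		printf("con0=0x%x has_commit=%d\n", d->con0, d->has_commit);
	return 0;
}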
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index a41812fc6f95..cfe7dd1b448e 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c | |||
| @@ -8,6 +8,8 @@ | |||
| 8 | #include <linux/clk.h> | 8 | #include <linux/clk.h> |
| 9 | #include <linux/err.h> | 9 | #include <linux/err.h> |
| 10 | #include <linux/io.h> | 10 | #include <linux/io.h> |
| 11 | #include <linux/log2.h> | ||
| 12 | #include <linux/math64.h> | ||
| 11 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 12 | #include <linux/of.h> | 14 | #include <linux/of.h> |
| 13 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
| @@ -68,19 +70,15 @@ static void rcar_pwm_update(struct rcar_pwm_chip *rp, u32 mask, u32 data, | |||
| 68 | static int rcar_pwm_get_clock_division(struct rcar_pwm_chip *rp, int period_ns) | 70 | static int rcar_pwm_get_clock_division(struct rcar_pwm_chip *rp, int period_ns) |
| 69 | { | 71 | { |
| 70 | unsigned long clk_rate = clk_get_rate(rp->clk); | 72 | unsigned long clk_rate = clk_get_rate(rp->clk); |
| 71 | unsigned long long max; /* max cycle / nanoseconds */ | 73 | u64 div, tmp; |
| 72 | unsigned int div; | ||
| 73 | 74 | ||
| 74 | if (clk_rate == 0) | 75 | if (clk_rate == 0) |
| 75 | return -EINVAL; | 76 | return -EINVAL; |
| 76 | 77 | ||
| 77 | for (div = 0; div <= RCAR_PWM_MAX_DIVISION; div++) { | 78 | div = (u64)NSEC_PER_SEC * RCAR_PWM_MAX_CYCLE; |
| 78 | max = (unsigned long long)NSEC_PER_SEC * RCAR_PWM_MAX_CYCLE * | 79 | tmp = (u64)period_ns * clk_rate + div - 1; |
| 79 | (1 << div); | 80 | tmp = div64_u64(tmp, div); |
| 80 | do_div(max, clk_rate); | 81 | div = ilog2(tmp - 1) + 1; |
| 81 | if (period_ns <= max) | ||
| 82 | break; | ||
| 83 | } | ||
| 84 | 82 | ||
| 85 | return (div <= RCAR_PWM_MAX_DIVISION) ? div : -ERANGE; | 83 | return (div <= RCAR_PWM_MAX_DIVISION) ? div : -ERANGE; |
| 86 | } | 84 | } |
| @@ -139,39 +137,8 @@ static void rcar_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) | |||
| 139 | pm_runtime_put(chip->dev); | 137 | pm_runtime_put(chip->dev); |
| 140 | } | 138 | } |
| 141 | 139 | ||
| 142 | static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, | 140 | static int rcar_pwm_enable(struct rcar_pwm_chip *rp) |
| 143 | int duty_ns, int period_ns) | ||
| 144 | { | 141 | { |
| 145 | struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip); | ||
| 146 | int div, ret; | ||
| 147 | |||
| 148 | div = rcar_pwm_get_clock_division(rp, period_ns); | ||
| 149 | if (div < 0) | ||
| 150 | return div; | ||
| 151 | |||
| 152 | /* | ||
| 153 | * Let the core driver set pwm->period if disabled and duty_ns == 0. | ||
| 154 | * But, this driver should prevent to set the new duty_ns if current | ||
| 155 | * duty_cycle is not set | ||
| 156 | */ | ||
| 157 | if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle) | ||
| 158 | return 0; | ||
| 159 | |||
| 160 | rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR); | ||
| 161 | |||
| 162 | ret = rcar_pwm_set_counter(rp, div, duty_ns, period_ns); | ||
| 163 | if (!ret) | ||
| 164 | rcar_pwm_set_clock_control(rp, div); | ||
| 165 | |||
| 166 | /* The SYNC should be set to 0 even if rcar_pwm_set_counter failed */ | ||
| 167 | rcar_pwm_update(rp, RCAR_PWMCR_SYNC, 0, RCAR_PWMCR); | ||
| 168 | |||
| 169 | return ret; | ||
| 170 | } | ||
| 171 | |||
| 172 | static int rcar_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) | ||
| 173 | { | ||
| 174 | struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip); | ||
| 175 | u32 value; | 142 | u32 value; |
| 176 | 143 | ||
| 177 | /* Don't enable the PWM device if CYC0 or PH0 is 0 */ | 144 | /* Don't enable the PWM device if CYC0 or PH0 is 0 */ |
| @@ -185,19 +152,51 @@ static int rcar_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) | |||
| 185 | return 0; | 152 | return 0; |
| 186 | } | 153 | } |
| 187 | 154 | ||
| 188 | static void rcar_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) | 155 | static void rcar_pwm_disable(struct rcar_pwm_chip *rp) |
| 156 | { | ||
| 157 | rcar_pwm_update(rp, RCAR_PWMCR_EN0, 0, RCAR_PWMCR); | ||
| 158 | } | ||
| 159 | |||
| 160 | static int rcar_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, | ||
| 161 | struct pwm_state *state) | ||
| 189 | { | 162 | { |
| 190 | struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip); | 163 | struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip); |
| 164 | struct pwm_state cur_state; | ||
| 165 | int div, ret; | ||
| 191 | 166 | ||
| 192 | rcar_pwm_update(rp, RCAR_PWMCR_EN0, 0, RCAR_PWMCR); | 167 | /* This HW/driver only supports normal polarity */ |
| 168 | pwm_get_state(pwm, &cur_state); | ||
| 169 | if (state->polarity != PWM_POLARITY_NORMAL) | ||
| 170 | return -ENOTSUPP; | ||
| 171 | |||
| 172 | if (!state->enabled) { | ||
| 173 | rcar_pwm_disable(rp); | ||
| 174 | return 0; | ||
| 175 | } | ||
| 176 | |||
| 177 | div = rcar_pwm_get_clock_division(rp, state->period); | ||
| 178 | if (div < 0) | ||
| 179 | return div; | ||
| 180 | |||
| 181 | rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR); | ||
| 182 | |||
| 183 | ret = rcar_pwm_set_counter(rp, div, state->duty_cycle, state->period); | ||
| 184 | if (!ret) | ||
| 185 | rcar_pwm_set_clock_control(rp, div); | ||
| 186 | |||
| 187 | /* The SYNC should be set to 0 even if rcar_pwm_set_counter failed */ | ||
| 188 | rcar_pwm_update(rp, RCAR_PWMCR_SYNC, 0, RCAR_PWMCR); | ||
| 189 | |||
| 190 | if (!ret && state->enabled) | ||
| 191 | ret = rcar_pwm_enable(rp); | ||
| 192 | |||
| 193 | return ret; | ||
| 193 | } | 194 | } |
| 194 | 195 | ||
| 195 | static const struct pwm_ops rcar_pwm_ops = { | 196 | static const struct pwm_ops rcar_pwm_ops = { |
| 196 | .request = rcar_pwm_request, | 197 | .request = rcar_pwm_request, |
| 197 | .free = rcar_pwm_free, | 198 | .free = rcar_pwm_free, |
| 198 | .config = rcar_pwm_config, | 199 | .apply = rcar_pwm_apply, |
| 199 | .enable = rcar_pwm_enable, | ||
| 200 | .disable = rcar_pwm_disable, | ||
| 201 | .owner = THIS_MODULE, | 200 | .owner = THIS_MODULE, |
| 202 | }; | 201 | }; |
| 203 | 202 | ||
| @@ -279,18 +278,16 @@ static int rcar_pwm_suspend(struct device *dev) | |||
| 279 | static int rcar_pwm_resume(struct device *dev) | 278 | static int rcar_pwm_resume(struct device *dev) |
| 280 | { | 279 | { |
| 281 | struct pwm_device *pwm = rcar_pwm_dev_to_pwm_dev(dev); | 280 | struct pwm_device *pwm = rcar_pwm_dev_to_pwm_dev(dev); |
| 281 | struct pwm_state state; | ||
| 282 | 282 | ||
| 283 | if (!test_bit(PWMF_REQUESTED, &pwm->flags)) | 283 | if (!test_bit(PWMF_REQUESTED, &pwm->flags)) |
| 284 | return 0; | 284 | return 0; |
| 285 | 285 | ||
| 286 | pm_runtime_get_sync(dev); | 286 | pm_runtime_get_sync(dev); |
| 287 | 287 | ||
| 288 | rcar_pwm_config(pwm->chip, pwm, pwm->state.duty_cycle, | 288 | pwm_get_state(pwm, &state); |
| 289 | pwm->state.period); | ||
| 290 | if (pwm_is_enabled(pwm)) | ||
| 291 | rcar_pwm_enable(pwm->chip, pwm); | ||
| 292 | 289 | ||
| 293 | return 0; | 290 | return rcar_pwm_apply(pwm->chip, pwm, &state); |
| 294 | } | 291 | } |
| 295 | #endif /* CONFIG_PM_SLEEP */ | 292 | #endif /* CONFIG_PM_SLEEP */ |
| 296 | static SIMPLE_DEV_PM_OPS(rcar_pwm_pm_ops, rcar_pwm_suspend, rcar_pwm_resume); | 293 | static SIMPLE_DEV_PM_OPS(rcar_pwm_pm_ops, rcar_pwm_suspend, rcar_pwm_resume); |
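The R-Car change replaces the linear search for a clock divider with a closed-form computation: the required cycle count comes from a rounding-up 64-bit division, and the divider exponent is its ceiling log2, expressed as ilog2(tmp - 1) + 1. A stand-alone sketch comparing the two forms; the RCAR_PWM_MAX_CYCLE and RCAR_PWM_MAX_DIVISION values here are assumed for illustration, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC          1000000000ULL
#define RCAR_PWM_MAX_CYCLE    1023ULL    /* assumed value for illustration */
#define RCAR_PWM_MAX_DIVISION 24         /* assumed value for illustration */

/* ceil(log2(x)) for x >= 1, mirroring ilog2(tmp - 1) + 1 in the patch */
static unsigned int ceil_log2(uint64_t x)
{
	unsigned int r = 0;

	while ((1ULL << r) < x)
		r++;
	return r;
}

/* old behaviour: walk the divider exponents until the period fits */
static int get_division_loop(uint64_t clk_rate, uint64_t period_ns)
{
	unsigned int div;

	for (div = 0; div <= RCAR_PWM_MAX_DIVISION; div++) {
		uint64_t max = NSEC_PER_SEC * RCAR_PWM_MAX_CYCLE *
			       (1ULL << div) / clk_rate;
		if (period_ns <= max)
			return div;
	}
	return -1;
}

/* new behaviour: rounding-up division, then ceiling log2 */
static int get_division_closed(uint64_t clk_rate, uint64_t period_ns)
{
	uint64_t den = NSEC_PER_SEC * RCAR_PWM_MAX_CYCLE;
	uint64_t tmp = (period_ns * clk_rate + den - 1) / den;
	unsigned int div = ceil_log2(tmp);

	return div <= RCAR_PWM_MAX_DIVISION ? (int)div : -1;
}

int main(void)
{
	uint64_t clk_rate = 33000000, period_ns = 1000000;   /* 33 MHz, 1 ms */

	printf("loop=%d closed-form=%d\n",
	       get_division_loop(clk_rate, period_ns),
	       get_division_closed(clk_rate, period_ns));
	return 0;
}

With the sample 33 MHz clock and 1 ms period, both forms pick divider exponent 6.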
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c index 79374d1de311..1f3ef9ee493c 100644 --- a/drivers/remoteproc/qcom_q6v5_adsp.c +++ b/drivers/remoteproc/qcom_q6v5_adsp.c | |||
| @@ -48,7 +48,7 @@ | |||
| 48 | 48 | ||
| 49 | /* list of clocks required by ADSP PIL */ | 49 | /* list of clocks required by ADSP PIL */ |
| 50 | static const char * const adsp_clk_id[] = { | 50 | static const char * const adsp_clk_id[] = { |
| 51 | "sway_cbcr", "lpass_aon", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr", | 51 | "sway_cbcr", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr", |
| 52 | "qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", | 52 | "qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", |
| 53 | }; | 53 | }; |
| 54 | 54 | ||
| @@ -439,6 +439,10 @@ static int adsp_probe(struct platform_device *pdev) | |||
| 439 | adsp->sysmon = qcom_add_sysmon_subdev(rproc, | 439 | adsp->sysmon = qcom_add_sysmon_subdev(rproc, |
| 440 | desc->sysmon_name, | 440 | desc->sysmon_name, |
| 441 | desc->ssctl_id); | 441 | desc->ssctl_id); |
| 442 | if (IS_ERR(adsp->sysmon)) { | ||
| 443 | ret = PTR_ERR(adsp->sysmon); | ||
| 444 | goto disable_pm; | ||
| 445 | } | ||
| 442 | 446 | ||
| 443 | ret = rproc_add(rproc); | 447 | ret = rproc_add(rproc); |
| 444 | if (ret) | 448 | if (ret) |
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 01be7314e176..eacdf10fcfaf 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c | |||
| @@ -25,6 +25,8 @@ | |||
| 25 | #include <linux/of_address.h> | 25 | #include <linux/of_address.h> |
| 26 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
| 27 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
| 28 | #include <linux/pm_domain.h> | ||
| 29 | #include <linux/pm_runtime.h> | ||
| 28 | #include <linux/regmap.h> | 30 | #include <linux/regmap.h> |
| 29 | #include <linux/regulator/consumer.h> | 31 | #include <linux/regulator/consumer.h> |
| 30 | #include <linux/remoteproc.h> | 32 | #include <linux/remoteproc.h> |
| @@ -131,6 +133,8 @@ struct rproc_hexagon_res { | |||
| 131 | char **proxy_clk_names; | 133 | char **proxy_clk_names; |
| 132 | char **reset_clk_names; | 134 | char **reset_clk_names; |
| 133 | char **active_clk_names; | 135 | char **active_clk_names; |
| 136 | char **active_pd_names; | ||
| 137 | char **proxy_pd_names; | ||
| 134 | int version; | 138 | int version; |
| 135 | bool need_mem_protection; | 139 | bool need_mem_protection; |
| 136 | bool has_alt_reset; | 140 | bool has_alt_reset; |
| @@ -156,9 +160,13 @@ struct q6v5 { | |||
| 156 | struct clk *active_clks[8]; | 160 | struct clk *active_clks[8]; |
| 157 | struct clk *reset_clks[4]; | 161 | struct clk *reset_clks[4]; |
| 158 | struct clk *proxy_clks[4]; | 162 | struct clk *proxy_clks[4]; |
| 163 | struct device *active_pds[1]; | ||
| 164 | struct device *proxy_pds[3]; | ||
| 159 | int active_clk_count; | 165 | int active_clk_count; |
| 160 | int reset_clk_count; | 166 | int reset_clk_count; |
| 161 | int proxy_clk_count; | 167 | int proxy_clk_count; |
| 168 | int active_pd_count; | ||
| 169 | int proxy_pd_count; | ||
| 162 | 170 | ||
| 163 | struct reg_info active_regs[1]; | 171 | struct reg_info active_regs[1]; |
| 164 | struct reg_info proxy_regs[3]; | 172 | struct reg_info proxy_regs[3]; |
| @@ -188,6 +196,7 @@ struct q6v5 { | |||
| 188 | bool has_alt_reset; | 196 | bool has_alt_reset; |
| 189 | int mpss_perm; | 197 | int mpss_perm; |
| 190 | int mba_perm; | 198 | int mba_perm; |
| 199 | const char *hexagon_mdt_image; | ||
| 191 | int version; | 200 | int version; |
| 192 | }; | 201 | }; |
| 193 | 202 | ||
| @@ -321,6 +330,41 @@ static void q6v5_clk_disable(struct device *dev, | |||
| 321 | clk_disable_unprepare(clks[i]); | 330 | clk_disable_unprepare(clks[i]); |
| 322 | } | 331 | } |
| 323 | 332 | ||
| 333 | static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds, | ||
| 334 | size_t pd_count) | ||
| 335 | { | ||
| 336 | int ret; | ||
| 337 | int i; | ||
| 338 | |||
| 339 | for (i = 0; i < pd_count; i++) { | ||
| 340 | dev_pm_genpd_set_performance_state(pds[i], INT_MAX); | ||
| 341 | ret = pm_runtime_get_sync(pds[i]); | ||
| 342 | if (ret < 0) | ||
| 343 | goto unroll_pd_votes; | ||
| 344 | } | ||
| 345 | |||
| 346 | return 0; | ||
| 347 | |||
| 348 | unroll_pd_votes: | ||
| 349 | for (i--; i >= 0; i--) { | ||
| 350 | dev_pm_genpd_set_performance_state(pds[i], 0); | ||
| 351 | pm_runtime_put(pds[i]); | ||
| 352 | } | ||
| 353 | |||
| 354 | return ret; | ||
| 355 | }; | ||
| 356 | |||
| 357 | static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds, | ||
| 358 | size_t pd_count) | ||
| 359 | { | ||
| 360 | int i; | ||
| 361 | |||
| 362 | for (i = 0; i < pd_count; i++) { | ||
| 363 | dev_pm_genpd_set_performance_state(pds[i], 0); | ||
| 364 | pm_runtime_put(pds[i]); | ||
| 365 | } | ||
| 366 | } | ||
| 367 | |||
| 324 | static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, | 368 | static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, |
| 325 | bool remote_owner, phys_addr_t addr, | 369 | bool remote_owner, phys_addr_t addr, |
| 326 | size_t size) | 370 | size_t size) |
| @@ -690,11 +734,23 @@ static int q6v5_mba_load(struct q6v5 *qproc) | |||
| 690 | 734 | ||
| 691 | qcom_q6v5_prepare(&qproc->q6v5); | 735 | qcom_q6v5_prepare(&qproc->q6v5); |
| 692 | 736 | ||
| 737 | ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count); | ||
| 738 | if (ret < 0) { | ||
| 739 | dev_err(qproc->dev, "failed to enable active power domains\n"); | ||
| 740 | goto disable_irqs; | ||
| 741 | } | ||
| 742 | |||
| 743 | ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); | ||
| 744 | if (ret < 0) { | ||
| 745 | dev_err(qproc->dev, "failed to enable proxy power domains\n"); | ||
| 746 | goto disable_active_pds; | ||
| 747 | } | ||
| 748 | |||
| 693 | ret = q6v5_regulator_enable(qproc, qproc->proxy_regs, | 749 | ret = q6v5_regulator_enable(qproc, qproc->proxy_regs, |
| 694 | qproc->proxy_reg_count); | 750 | qproc->proxy_reg_count); |
| 695 | if (ret) { | 751 | if (ret) { |
| 696 | dev_err(qproc->dev, "failed to enable proxy supplies\n"); | 752 | dev_err(qproc->dev, "failed to enable proxy supplies\n"); |
| 697 | goto disable_irqs; | 753 | goto disable_proxy_pds; |
| 698 | } | 754 | } |
| 699 | 755 | ||
| 700 | ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks, | 756 | ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks, |
| @@ -791,6 +847,10 @@ disable_proxy_clk: | |||
| 791 | disable_proxy_reg: | 847 | disable_proxy_reg: |
| 792 | q6v5_regulator_disable(qproc, qproc->proxy_regs, | 848 | q6v5_regulator_disable(qproc, qproc->proxy_regs, |
| 793 | qproc->proxy_reg_count); | 849 | qproc->proxy_reg_count); |
| 850 | disable_proxy_pds: | ||
| 851 | q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); | ||
| 852 | disable_active_pds: | ||
| 853 | q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count); | ||
| 794 | disable_irqs: | 854 | disable_irqs: |
| 795 | qcom_q6v5_unprepare(&qproc->q6v5); | 855 | qcom_q6v5_unprepare(&qproc->q6v5); |
| 796 | 856 | ||
| @@ -830,6 +890,7 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc) | |||
| 830 | qproc->active_clk_count); | 890 | qproc->active_clk_count); |
| 831 | q6v5_regulator_disable(qproc, qproc->active_regs, | 891 | q6v5_regulator_disable(qproc, qproc->active_regs, |
| 832 | qproc->active_reg_count); | 892 | qproc->active_reg_count); |
| 893 | q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count); | ||
| 833 | 894 | ||
| 834 | /* In case of failure or coredump scenario where reclaiming MBA memory | 895 | /* In case of failure or coredump scenario where reclaiming MBA memory |
| 835 | * could not happen reclaim it here. | 896 | * could not happen reclaim it here. |
| @@ -841,6 +902,8 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc) | |||
| 841 | 902 | ||
| 842 | ret = qcom_q6v5_unprepare(&qproc->q6v5); | 903 | ret = qcom_q6v5_unprepare(&qproc->q6v5); |
| 843 | if (ret) { | 904 | if (ret) { |
| 905 | q6v5_pds_disable(qproc, qproc->proxy_pds, | ||
| 906 | qproc->proxy_pd_count); | ||
| 844 | q6v5_clk_disable(qproc->dev, qproc->proxy_clks, | 907 | q6v5_clk_disable(qproc->dev, qproc->proxy_clks, |
| 845 | qproc->proxy_clk_count); | 908 | qproc->proxy_clk_count); |
| 846 | q6v5_regulator_disable(qproc, qproc->proxy_regs, | 909 | q6v5_regulator_disable(qproc, qproc->proxy_regs, |
| @@ -860,17 +923,26 @@ static int q6v5_mpss_load(struct q6v5 *qproc) | |||
| 860 | phys_addr_t min_addr = PHYS_ADDR_MAX; | 923 | phys_addr_t min_addr = PHYS_ADDR_MAX; |
| 861 | phys_addr_t max_addr = 0; | 924 | phys_addr_t max_addr = 0; |
| 862 | bool relocate = false; | 925 | bool relocate = false; |
| 863 | char seg_name[10]; | 926 | char *fw_name; |
| 927 | size_t fw_name_len; | ||
| 864 | ssize_t offset; | 928 | ssize_t offset; |
| 865 | size_t size = 0; | 929 | size_t size = 0; |
| 866 | void *ptr; | 930 | void *ptr; |
| 867 | int ret; | 931 | int ret; |
| 868 | int i; | 932 | int i; |
| 869 | 933 | ||
| 870 | ret = request_firmware(&fw, "modem.mdt", qproc->dev); | 934 | fw_name_len = strlen(qproc->hexagon_mdt_image); |
| 935 | if (fw_name_len <= 4) | ||
| 936 | return -EINVAL; | ||
| 937 | |||
| 938 | fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL); | ||
| 939 | if (!fw_name) | ||
| 940 | return -ENOMEM; | ||
| 941 | |||
| 942 | ret = request_firmware(&fw, fw_name, qproc->dev); | ||
| 871 | if (ret < 0) { | 943 | if (ret < 0) { |
| 872 | dev_err(qproc->dev, "unable to load modem.mdt\n"); | 944 | dev_err(qproc->dev, "unable to load %s\n", fw_name); |
| 873 | return ret; | 945 | goto out; |
| 874 | } | 946 | } |
| 875 | 947 | ||
| 876 | /* Initialize the RMB validator */ | 948 | /* Initialize the RMB validator */ |
| @@ -918,10 +990,11 @@ static int q6v5_mpss_load(struct q6v5 *qproc) | |||
| 918 | ptr = qproc->mpss_region + offset; | 990 | ptr = qproc->mpss_region + offset; |
| 919 | 991 | ||
| 920 | if (phdr->p_filesz) { | 992 | if (phdr->p_filesz) { |
| 921 | snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i); | 993 | /* Replace "xxx.xxx" with "xxx.bxx" */ |
| 922 | ret = request_firmware(&seg_fw, seg_name, qproc->dev); | 994 | sprintf(fw_name + fw_name_len - 3, "b%02d", i); |
| 995 | ret = request_firmware(&seg_fw, fw_name, qproc->dev); | ||
| 923 | if (ret) { | 996 | if (ret) { |
| 924 | dev_err(qproc->dev, "failed to load %s\n", seg_name); | 997 | dev_err(qproc->dev, "failed to load %s\n", fw_name); |
| 925 | goto release_firmware; | 998 | goto release_firmware; |
| 926 | } | 999 | } |
| 927 | 1000 | ||
| @@ -960,6 +1033,8 @@ static int q6v5_mpss_load(struct q6v5 *qproc) | |||
| 960 | 1033 | ||
| 961 | release_firmware: | 1034 | release_firmware: |
| 962 | release_firmware(fw); | 1035 | release_firmware(fw); |
| 1036 | out: | ||
| 1037 | kfree(fw_name); | ||
| 963 | 1038 | ||
| 964 | return ret < 0 ? ret : 0; | 1039 | return ret < 0 ? ret : 0; |
| 965 | } | 1040 | } |
| @@ -1075,9 +1150,10 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc, | |||
| 1075 | unsigned long i; | 1150 | unsigned long i; |
| 1076 | int ret; | 1151 | int ret; |
| 1077 | 1152 | ||
| 1078 | ret = request_firmware(&fw, "modem.mdt", qproc->dev); | 1153 | ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev); |
| 1079 | if (ret < 0) { | 1154 | if (ret < 0) { |
| 1080 | dev_err(qproc->dev, "unable to load modem.mdt\n"); | 1155 | dev_err(qproc->dev, "unable to load %s\n", |
| 1156 | qproc->hexagon_mdt_image); | ||
| 1081 | return ret; | 1157 | return ret; |
| 1082 | } | 1158 | } |
| 1083 | 1159 | ||
| @@ -1121,6 +1197,7 @@ static void qcom_msa_handover(struct qcom_q6v5 *q6v5) | |||
| 1121 | qproc->proxy_clk_count); | 1197 | qproc->proxy_clk_count); |
| 1122 | q6v5_regulator_disable(qproc, qproc->proxy_regs, | 1198 | q6v5_regulator_disable(qproc, qproc->proxy_regs, |
| 1123 | qproc->proxy_reg_count); | 1199 | qproc->proxy_reg_count); |
| 1200 | q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); | ||
| 1124 | } | 1201 | } |
| 1125 | 1202 | ||
| 1126 | static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev) | 1203 | static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev) |
| @@ -1181,6 +1258,45 @@ static int q6v5_init_clocks(struct device *dev, struct clk **clks, | |||
| 1181 | return i; | 1258 | return i; |
| 1182 | } | 1259 | } |
| 1183 | 1260 | ||
| 1261 | static int q6v5_pds_attach(struct device *dev, struct device **devs, | ||
| 1262 | char **pd_names) | ||
| 1263 | { | ||
| 1264 | size_t num_pds = 0; | ||
| 1265 | int ret; | ||
| 1266 | int i; | ||
| 1267 | |||
| 1268 | if (!pd_names) | ||
| 1269 | return 0; | ||
| 1270 | |||
| 1271 | while (pd_names[num_pds]) | ||
| 1272 | num_pds++; | ||
| 1273 | |||
| 1274 | for (i = 0; i < num_pds; i++) { | ||
| 1275 | devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); | ||
| 1276 | if (IS_ERR(devs[i])) { | ||
| 1277 | ret = PTR_ERR(devs[i]); | ||
| 1278 | goto unroll_attach; | ||
| 1279 | } | ||
| 1280 | } | ||
| 1281 | |||
| 1282 | return num_pds; | ||
| 1283 | |||
| 1284 | unroll_attach: | ||
| 1285 | for (i--; i >= 0; i--) | ||
| 1286 | dev_pm_domain_detach(devs[i], false); | ||
| 1287 | |||
| 1288 | return ret; | ||
| 1289 | }; | ||
| 1290 | |||
| 1291 | static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds, | ||
| 1292 | size_t pd_count) | ||
| 1293 | { | ||
| 1294 | int i; | ||
| 1295 | |||
| 1296 | for (i = 0; i < pd_count; i++) | ||
| 1297 | dev_pm_domain_detach(pds[i], false); | ||
| 1298 | } | ||
| 1299 | |||
| 1184 | static int q6v5_init_reset(struct q6v5 *qproc) | 1300 | static int q6v5_init_reset(struct q6v5 *qproc) |
| 1185 | { | 1301 | { |
| 1186 | qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev, | 1302 | qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev, |
| @@ -1253,6 +1369,7 @@ static int q6v5_probe(struct platform_device *pdev) | |||
| 1253 | const struct rproc_hexagon_res *desc; | 1369 | const struct rproc_hexagon_res *desc; |
| 1254 | struct q6v5 *qproc; | 1370 | struct q6v5 *qproc; |
| 1255 | struct rproc *rproc; | 1371 | struct rproc *rproc; |
| 1372 | const char *mba_image; | ||
| 1256 | int ret; | 1373 | int ret; |
| 1257 | 1374 | ||
| 1258 | desc = of_device_get_match_data(&pdev->dev); | 1375 | desc = of_device_get_match_data(&pdev->dev); |
| @@ -1262,16 +1379,30 @@ static int q6v5_probe(struct platform_device *pdev) | |||
| 1262 | if (desc->need_mem_protection && !qcom_scm_is_available()) | 1379 | if (desc->need_mem_protection && !qcom_scm_is_available()) |
| 1263 | return -EPROBE_DEFER; | 1380 | return -EPROBE_DEFER; |
| 1264 | 1381 | ||
| 1382 | mba_image = desc->hexagon_mba_image; | ||
| 1383 | ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", | ||
| 1384 | 0, &mba_image); | ||
| 1385 | if (ret < 0 && ret != -EINVAL) | ||
| 1386 | return ret; | ||
| 1387 | |||
| 1265 | rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, | 1388 | rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, |
| 1266 | desc->hexagon_mba_image, sizeof(*qproc)); | 1389 | mba_image, sizeof(*qproc)); |
| 1267 | if (!rproc) { | 1390 | if (!rproc) { |
| 1268 | dev_err(&pdev->dev, "failed to allocate rproc\n"); | 1391 | dev_err(&pdev->dev, "failed to allocate rproc\n"); |
| 1269 | return -ENOMEM; | 1392 | return -ENOMEM; |
| 1270 | } | 1393 | } |
| 1271 | 1394 | ||
| 1395 | rproc->auto_boot = false; | ||
| 1396 | |||
| 1272 | qproc = (struct q6v5 *)rproc->priv; | 1397 | qproc = (struct q6v5 *)rproc->priv; |
| 1273 | qproc->dev = &pdev->dev; | 1398 | qproc->dev = &pdev->dev; |
| 1274 | qproc->rproc = rproc; | 1399 | qproc->rproc = rproc; |
| 1400 | qproc->hexagon_mdt_image = "modem.mdt"; | ||
| 1401 | ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", | ||
| 1402 | 1, &qproc->hexagon_mdt_image); | ||
| 1403 | if (ret < 0 && ret != -EINVAL) | ||
| 1404 | return ret; | ||
| 1405 | |||
| 1275 | platform_set_drvdata(pdev, qproc); | 1406 | platform_set_drvdata(pdev, qproc); |
| 1276 | 1407 | ||
| 1277 | ret = q6v5_init_mem(qproc, pdev); | 1408 | ret = q6v5_init_mem(qproc, pdev); |
| @@ -1322,10 +1453,26 @@ static int q6v5_probe(struct platform_device *pdev) | |||
| 1322 | } | 1453 | } |
| 1323 | qproc->active_reg_count = ret; | 1454 | qproc->active_reg_count = ret; |
| 1324 | 1455 | ||
| 1456 | ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds, | ||
| 1457 | desc->active_pd_names); | ||
| 1458 | if (ret < 0) { | ||
| 1459 | dev_err(&pdev->dev, "Failed to attach active power domains\n"); | ||
| 1460 | goto free_rproc; | ||
| 1461 | } | ||
| 1462 | qproc->active_pd_count = ret; | ||
| 1463 | |||
| 1464 | ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds, | ||
| 1465 | desc->proxy_pd_names); | ||
| 1466 | if (ret < 0) { | ||
| 1467 | dev_err(&pdev->dev, "Failed to init power domains\n"); | ||
| 1468 | goto detach_active_pds; | ||
| 1469 | } | ||
| 1470 | qproc->proxy_pd_count = ret; | ||
| 1471 | |||
| 1325 | qproc->has_alt_reset = desc->has_alt_reset; | 1472 | qproc->has_alt_reset = desc->has_alt_reset; |
| 1326 | ret = q6v5_init_reset(qproc); | 1473 | ret = q6v5_init_reset(qproc); |
| 1327 | if (ret) | 1474 | if (ret) |
| 1328 | goto free_rproc; | 1475 | goto detach_proxy_pds; |
| 1329 | 1476 | ||
| 1330 | qproc->version = desc->version; | 1477 | qproc->version = desc->version; |
| 1331 | qproc->need_mem_protection = desc->need_mem_protection; | 1478 | qproc->need_mem_protection = desc->need_mem_protection; |
| @@ -1333,7 +1480,7 @@ static int q6v5_probe(struct platform_device *pdev) | |||
| 1333 | ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, | 1480 | ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, |
| 1334 | qcom_msa_handover); | 1481 | qcom_msa_handover); |
| 1335 | if (ret) | 1482 | if (ret) |
| 1336 | goto free_rproc; | 1483 | goto detach_proxy_pds; |
| 1337 | 1484 | ||
| 1338 | qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS); | 1485 | qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS); |
| 1339 | qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS); | 1486 | qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS); |
| @@ -1341,13 +1488,21 @@ static int q6v5_probe(struct platform_device *pdev) | |||
| 1341 | qcom_add_smd_subdev(rproc, &qproc->smd_subdev); | 1488 | qcom_add_smd_subdev(rproc, &qproc->smd_subdev); |
| 1342 | qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); | 1489 | qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); |
| 1343 | qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); | 1490 | qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); |
| 1491 | if (IS_ERR(qproc->sysmon)) { | ||
| 1492 | ret = PTR_ERR(qproc->sysmon); | ||
| 1493 | goto detach_proxy_pds; | ||
| 1494 | } | ||
| 1344 | 1495 | ||
| 1345 | ret = rproc_add(rproc); | 1496 | ret = rproc_add(rproc); |
| 1346 | if (ret) | 1497 | if (ret) |
| 1347 | goto free_rproc; | 1498 | goto detach_proxy_pds; |
| 1348 | 1499 | ||
| 1349 | return 0; | 1500 | return 0; |
| 1350 | 1501 | ||
| 1502 | detach_proxy_pds: | ||
| 1503 | q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); | ||
| 1504 | detach_active_pds: | ||
| 1505 | q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count); | ||
| 1351 | free_rproc: | 1506 | free_rproc: |
| 1352 | rproc_free(rproc); | 1507 | rproc_free(rproc); |
| 1353 | 1508 | ||
| @@ -1364,6 +1519,10 @@ static int q6v5_remove(struct platform_device *pdev) | |||
| 1364 | qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev); | 1519 | qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev); |
| 1365 | qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev); | 1520 | qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev); |
| 1366 | qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev); | 1521 | qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev); |
| 1522 | |||
| 1523 | q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count); | ||
| 1524 | q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); | ||
| 1525 | |||
| 1367 | rproc_free(qproc->rproc); | 1526 | rproc_free(qproc->rproc); |
| 1368 | 1527 | ||
| 1369 | return 0; | 1528 | return 0; |
| @@ -1388,6 +1547,16 @@ static const struct rproc_hexagon_res sdm845_mss = { | |||
| 1388 | "mnoc_axi", | 1547 | "mnoc_axi", |
| 1389 | NULL | 1548 | NULL |
| 1390 | }, | 1549 | }, |
| 1550 | .active_pd_names = (char*[]){ | ||
| 1551 | "load_state", | ||
| 1552 | NULL | ||
| 1553 | }, | ||
| 1554 | .proxy_pd_names = (char*[]){ | ||
| 1555 | "cx", | ||
| 1556 | "mx", | ||
| 1557 | "mss", | ||
| 1558 | NULL | ||
| 1559 | }, | ||
| 1391 | .need_mem_protection = true, | 1560 | .need_mem_protection = true, |
| 1392 | .has_alt_reset = true, | 1561 | .has_alt_reset = true, |
| 1393 | .version = MSS_SDM845, | 1562 | .version = MSS_SDM845, |
| @@ -1395,16 +1564,26 @@ static const struct rproc_hexagon_res sdm845_mss = { | |||
| 1395 | 1564 | ||
| 1396 | static const struct rproc_hexagon_res msm8996_mss = { | 1565 | static const struct rproc_hexagon_res msm8996_mss = { |
| 1397 | .hexagon_mba_image = "mba.mbn", | 1566 | .hexagon_mba_image = "mba.mbn", |
| 1567 | .proxy_supply = (struct qcom_mss_reg_res[]) { | ||
| 1568 | { | ||
| 1569 | .supply = "pll", | ||
| 1570 | .uA = 100000, | ||
| 1571 | }, | ||
| 1572 | {} | ||
| 1573 | }, | ||
| 1398 | .proxy_clk_names = (char*[]){ | 1574 | .proxy_clk_names = (char*[]){ |
| 1399 | "xo", | 1575 | "xo", |
| 1400 | "pnoc", | 1576 | "pnoc", |
| 1577 | "qdss", | ||
| 1401 | NULL | 1578 | NULL |
| 1402 | }, | 1579 | }, |
| 1403 | .active_clk_names = (char*[]){ | 1580 | .active_clk_names = (char*[]){ |
| 1404 | "iface", | 1581 | "iface", |
| 1405 | "bus", | 1582 | "bus", |
| 1406 | "mem", | 1583 | "mem", |
| 1407 | "gpll0_mss_clk", | 1584 | "gpll0_mss", |
| 1585 | "snoc_axi", | ||
| 1586 | "mnoc_axi", | ||
| 1408 | NULL | 1587 | NULL |
| 1409 | }, | 1588 | }, |
| 1410 | .need_mem_protection = true, | 1589 | .need_mem_protection = true, |
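The MSS loader now derives per-segment firmware names from the configurable .mdt name instead of a hard-coded "modem.bNN": the last three characters of a private copy of the name are overwritten with "bNN" for each ELF segment. A stand-alone sketch of that rewrite using plain libc strdup()/sprintf() in place of the kernel helpers; the three-segment loop is purely illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *hexagon_mdt_image = "modem.mdt";    /* default .mdt name */
	size_t fw_name_len = strlen(hexagon_mdt_image);
	char *fw_name;
	int i;

	if (fw_name_len <= 4)           /* same sanity check as the driver */
		return 1;

	fw_name = strdup(hexagon_mdt_image);            /* kstrdup() stand-in */
	if (!fw_name)
		return 1;

	for (i = 0; i < 3; i++) {
		/* Replace "xxx.xxx" with "xxx.bxx" */
		sprintf(fw_name + fw_name_len - 3, "b%02d", i);
		printf("segment %d -> %s\n", i, fw_name);   /* modem.b00, modem.b01, ... */
	}

	free(fw_name);
	return 0;
}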
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index b1e63fcd5fdf..f280f196d007 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c | |||
| @@ -258,6 +258,7 @@ static int adsp_probe(struct platform_device *pdev) | |||
| 258 | const struct adsp_data *desc; | 258 | const struct adsp_data *desc; |
| 259 | struct qcom_adsp *adsp; | 259 | struct qcom_adsp *adsp; |
| 260 | struct rproc *rproc; | 260 | struct rproc *rproc; |
| 261 | const char *fw_name; | ||
| 261 | int ret; | 262 | int ret; |
| 262 | 263 | ||
| 263 | desc = of_device_get_match_data(&pdev->dev); | 264 | desc = of_device_get_match_data(&pdev->dev); |
| @@ -267,8 +268,14 @@ static int adsp_probe(struct platform_device *pdev) | |||
| 267 | if (!qcom_scm_is_available()) | 268 | if (!qcom_scm_is_available()) |
| 268 | return -EPROBE_DEFER; | 269 | return -EPROBE_DEFER; |
| 269 | 270 | ||
| 271 | fw_name = desc->firmware_name; | ||
| 272 | ret = of_property_read_string(pdev->dev.of_node, "firmware-name", | ||
| 273 | &fw_name); | ||
| 274 | if (ret < 0 && ret != -EINVAL) | ||
| 275 | return ret; | ||
| 276 | |||
| 270 | rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops, | 277 | rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops, |
| 271 | desc->firmware_name, sizeof(*adsp)); | 278 | fw_name, sizeof(*adsp)); |
| 272 | if (!rproc) { | 279 | if (!rproc) { |
| 273 | dev_err(&pdev->dev, "unable to allocate remoteproc\n"); | 280 | dev_err(&pdev->dev, "unable to allocate remoteproc\n"); |
| 274 | return -ENOMEM; | 281 | return -ENOMEM; |
| @@ -304,6 +311,10 @@ static int adsp_probe(struct platform_device *pdev) | |||
| 304 | adsp->sysmon = qcom_add_sysmon_subdev(rproc, | 311 | adsp->sysmon = qcom_add_sysmon_subdev(rproc, |
| 305 | desc->sysmon_name, | 312 | desc->sysmon_name, |
| 306 | desc->ssctl_id); | 313 | desc->ssctl_id); |
| 314 | if (IS_ERR(adsp->sysmon)) { | ||
| 315 | ret = PTR_ERR(adsp->sysmon); | ||
| 316 | goto free_rproc; | ||
| 317 | } | ||
| 307 | 318 | ||
| 308 | ret = rproc_add(rproc); | 319 | ret = rproc_add(rproc); |
| 309 | if (ret) | 320 | if (ret) |
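The PAS probe change follows the common optional-DT-property pattern: keep the default firmware name from the match data unless the node provides "firmware-name", tolerate -EINVAL (property absent), and fail on any other error. A small sketch of that control flow; read_firmware_name() is a stub standing in for of_property_read_string(), and the "adsp.mdt" default is an assumed placeholder:

#include <errno.h>
#include <stdio.h>

/* Stub standing in for of_property_read_string(); pretend the node has no
 * "firmware-name" property, which the kernel reports as -EINVAL. */
static int read_firmware_name(const char **out)
{
	(void)out;
	return -EINVAL;
}

int main(void)
{
	const char *fw_name = "adsp.mdt";   /* assumed default from match data */
	int ret = read_firmware_name(&fw_name);

	if (ret < 0 && ret != -EINVAL) {    /* only real lookup errors are fatal */
		fprintf(stderr, "firmware-name lookup failed: %d\n", ret);
		return 1;
	}

	printf("requesting firmware %s\n", fw_name);
	return 0;
}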
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c index e976a602b015..c231314eab66 100644 --- a/drivers/remoteproc/qcom_sysmon.c +++ b/drivers/remoteproc/qcom_sysmon.c | |||
| @@ -6,8 +6,9 @@ | |||
| 6 | #include <linux/module.h> | 6 | #include <linux/module.h> |
| 7 | #include <linux/notifier.h> | 7 | #include <linux/notifier.h> |
| 8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
| 9 | #include <linux/interrupt.h> | ||
| 9 | #include <linux/io.h> | 10 | #include <linux/io.h> |
| 10 | #include <linux/notifier.h> | 11 | #include <linux/of_irq.h> |
| 11 | #include <linux/of_platform.h> | 12 | #include <linux/of_platform.h> |
| 12 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
| 13 | #include <linux/remoteproc/qcom_rproc.h> | 14 | #include <linux/remoteproc/qcom_rproc.h> |
| @@ -25,6 +26,7 @@ struct qcom_sysmon { | |||
| 25 | 26 | ||
| 26 | const char *name; | 27 | const char *name; |
| 27 | 28 | ||
| 29 | int shutdown_irq; | ||
| 28 | int ssctl_version; | 30 | int ssctl_version; |
| 29 | int ssctl_instance; | 31 | int ssctl_instance; |
| 30 | 32 | ||
| @@ -34,6 +36,8 @@ struct qcom_sysmon { | |||
| 34 | 36 | ||
| 35 | struct rpmsg_endpoint *ept; | 37 | struct rpmsg_endpoint *ept; |
| 36 | struct completion comp; | 38 | struct completion comp; |
| 39 | struct completion ind_comp; | ||
| 40 | struct completion shutdown_comp; | ||
| 37 | struct mutex lock; | 41 | struct mutex lock; |
| 38 | 42 | ||
| 39 | bool ssr_ack; | 43 | bool ssr_ack; |
| @@ -137,6 +141,7 @@ static int sysmon_callback(struct rpmsg_device *rpdev, void *data, int count, | |||
| 137 | } | 141 | } |
| 138 | 142 | ||
| 139 | #define SSCTL_SHUTDOWN_REQ 0x21 | 143 | #define SSCTL_SHUTDOWN_REQ 0x21 |
| 144 | #define SSCTL_SHUTDOWN_READY_IND 0x21 | ||
| 140 | #define SSCTL_SUBSYS_EVENT_REQ 0x23 | 145 | #define SSCTL_SUBSYS_EVENT_REQ 0x23 |
| 141 | 146 | ||
| 142 | #define SSCTL_MAX_MSG_LEN 7 | 147 | #define SSCTL_MAX_MSG_LEN 7 |
| @@ -252,6 +257,29 @@ static struct qmi_elem_info ssctl_subsys_event_resp_ei[] = { | |||
| 252 | {} | 257 | {} |
| 253 | }; | 258 | }; |
| 254 | 259 | ||
| 260 | static struct qmi_elem_info ssctl_shutdown_ind_ei[] = { | ||
| 261 | {} | ||
| 262 | }; | ||
| 263 | |||
| 264 | static void sysmon_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, | ||
| 265 | struct qmi_txn *txn, const void *data) | ||
| 266 | { | ||
| 267 | struct qcom_sysmon *sysmon = container_of(qmi, struct qcom_sysmon, qmi); | ||
| 268 | |||
| 269 | complete(&sysmon->ind_comp); | ||
| 270 | } | ||
| 271 | |||
| 272 | static struct qmi_msg_handler qmi_indication_handler[] = { | ||
| 273 | { | ||
| 274 | .type = QMI_INDICATION, | ||
| 275 | .msg_id = SSCTL_SHUTDOWN_READY_IND, | ||
| 276 | .ei = ssctl_shutdown_ind_ei, | ||
| 277 | .decoded_size = 0, | ||
| 278 | .fn = sysmon_ind_cb | ||
| 279 | }, | ||
| 280 | {} | ||
| 281 | }; | ||
| 282 | |||
| 255 | /** | 283 | /** |
| 256 | * ssctl_request_shutdown() - request shutdown via SSCTL QMI service | 284 | * ssctl_request_shutdown() - request shutdown via SSCTL QMI service |
| 257 | * @sysmon: sysmon context | 285 | * @sysmon: sysmon context |
| @@ -262,6 +290,8 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon) | |||
| 262 | struct qmi_txn txn; | 290 | struct qmi_txn txn; |
| 263 | int ret; | 291 | int ret; |
| 264 | 292 | ||
| 293 | reinit_completion(&sysmon->ind_comp); | ||
| 294 | reinit_completion(&sysmon->shutdown_comp); | ||
| 265 | ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_shutdown_resp_ei, &resp); | 295 | ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_shutdown_resp_ei, &resp); |
| 266 | if (ret < 0) { | 296 | if (ret < 0) { |
| 267 | dev_err(sysmon->dev, "failed to allocate QMI txn\n"); | 297 | dev_err(sysmon->dev, "failed to allocate QMI txn\n"); |
| @@ -283,6 +313,17 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon) | |||
| 283 | dev_err(sysmon->dev, "shutdown request failed\n"); | 313 | dev_err(sysmon->dev, "shutdown request failed\n"); |
| 284 | else | 314 | else |
| 285 | dev_dbg(sysmon->dev, "shutdown request completed\n"); | 315 | dev_dbg(sysmon->dev, "shutdown request completed\n"); |
| 316 | |||
| 317 | if (sysmon->shutdown_irq > 0) { | ||
| 318 | ret = wait_for_completion_timeout(&sysmon->shutdown_comp, | ||
| 319 | 10 * HZ); | ||
| 320 | if (!ret) { | ||
| 321 | ret = try_wait_for_completion(&sysmon->ind_comp); | ||
| 322 | if (!ret) | ||
| 323 | dev_err(sysmon->dev, | ||
| 324 | "timeout waiting for shutdown ack\n"); | ||
| 325 | } | ||
| 326 | } | ||
| 286 | } | 327 | } |
| 287 | 328 | ||
| 288 | /** | 329 | /** |
| @@ -432,6 +473,15 @@ static int sysmon_notify(struct notifier_block *nb, unsigned long event, | |||
| 432 | return NOTIFY_DONE; | 473 | return NOTIFY_DONE; |
| 433 | } | 474 | } |
| 434 | 475 | ||
| 476 | static irqreturn_t sysmon_shutdown_interrupt(int irq, void *data) | ||
| 477 | { | ||
| 478 | struct qcom_sysmon *sysmon = data; | ||
| 479 | |||
| 480 | complete(&sysmon->shutdown_comp); | ||
| 481 | |||
| 482 | return IRQ_HANDLED; | ||
| 483 | } | ||
| 484 | |||
| 435 | /** | 485 | /** |
| 436 | * qcom_add_sysmon_subdev() - create a sysmon subdev for the given remoteproc | 486 | * qcom_add_sysmon_subdev() - create a sysmon subdev for the given remoteproc |
| 437 | * @rproc: rproc context to associate the subdev with | 487 | * @rproc: rproc context to associate the subdev with |
| @@ -449,7 +499,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, | |||
| 449 | 499 | ||
| 450 | sysmon = kzalloc(sizeof(*sysmon), GFP_KERNEL); | 500 | sysmon = kzalloc(sizeof(*sysmon), GFP_KERNEL); |
| 451 | if (!sysmon) | 501 | if (!sysmon) |
| 452 | return NULL; | 502 | return ERR_PTR(-ENOMEM); |
| 453 | 503 | ||
| 454 | sysmon->dev = rproc->dev.parent; | 504 | sysmon->dev = rproc->dev.parent; |
| 455 | sysmon->rproc = rproc; | 505 | sysmon->rproc = rproc; |
| @@ -458,13 +508,37 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc, | |||
| 458 | sysmon->ssctl_instance = ssctl_instance; | 508 | sysmon->ssctl_instance = ssctl_instance; |
| 459 | 509 | ||
| 460 | init_completion(&sysmon->comp); | 510 | init_completion(&sysmon->comp); |
| 511 | init_completion(&sysmon->ind_comp); | ||
| 512 | init_completion(&sysmon->shutdown_comp); | ||
| 461 | mutex_init(&sysmon->lock); | 513 | mutex_init(&sysmon->lock); |
| 462 | 514 | ||
| 463 | ret = qmi_handle_init(&sysmon->qmi, SSCTL_MAX_MSG_LEN, &ssctl_ops, NULL); | 515 | sysmon->shutdown_irq = of_irq_get_byname(sysmon->dev->of_node, |
| 516 | "shutdown-ack"); | ||
| 517 | if (sysmon->shutdown_irq < 0) { | ||
| 518 | if (sysmon->shutdown_irq != -ENODATA) { | ||
| 519 | dev_err(sysmon->dev, | ||
| 520 | "failed to retrieve shutdown-ack IRQ\n"); | ||
| 521 | return ERR_PTR(sysmon->shutdown_irq); | ||
| 522 | } | ||
| 523 | } else { | ||
| 524 | ret = devm_request_threaded_irq(sysmon->dev, | ||
| 525 | sysmon->shutdown_irq, | ||
| 526 | NULL, sysmon_shutdown_interrupt, | ||
| 527 | IRQF_TRIGGER_RISING | IRQF_ONESHOT, | ||
| 528 | "q6v5 shutdown-ack", sysmon); | ||
| 529 | if (ret) { | ||
| 530 | dev_err(sysmon->dev, | ||
| 531 | "failed to acquire shutdown-ack IRQ\n"); | ||
| 532 | return ERR_PTR(ret); | ||
| 533 | } | ||
| 534 | } | ||
| 535 | |||
| 536 | ret = qmi_handle_init(&sysmon->qmi, SSCTL_MAX_MSG_LEN, &ssctl_ops, | ||
| 537 | qmi_indication_handler); | ||
| 464 | if (ret < 0) { | 538 | if (ret < 0) { |
| 465 | dev_err(sysmon->dev, "failed to initialize qmi handle\n"); | 539 | dev_err(sysmon->dev, "failed to initialize qmi handle\n"); |
| 466 | kfree(sysmon); | 540 | kfree(sysmon); |
| 467 | return NULL; | 541 | return ERR_PTR(ret); |
| 468 | } | 542 | } |
| 469 | 543 | ||
| 470 | qmi_add_lookup(&sysmon->qmi, 43, 0, 0); | 544 | qmi_add_lookup(&sysmon->qmi, 43, 0, 0); |
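qcom_add_sysmon_subdev() now reports failures as error pointers instead of NULL, which is why the adsp, mss, pas and wcnss probe paths in this series gained IS_ERR()/PTR_ERR() checks. A minimal sketch of the error-pointer convention, with simplified, non-kernel definitions of the helpers:

#include <stdio.h>

#define MAX_ERRNO 4095
#define ENOMEM    12

/* simplified versions of the kernel helpers, for illustration only */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int dummy_subdev;

/* stand-in for qcom_add_sysmon_subdev(): a valid pointer or an ERR_PTR() */
static void *add_sysmon_subdev(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy_subdev;
}

int main(void)
{
	void *sysmon = add_sysmon_subdev(1);

	if (IS_ERR(sysmon)) {
		printf("probe would bail out with %ld\n", PTR_ERR(sysmon));
		return 1;
	}

	printf("sysmon subdev registered\n");
	return 0;
}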
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index b0e07e9f42d5..adcce523971e 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c | |||
| @@ -553,6 +553,10 @@ static int wcnss_probe(struct platform_device *pdev) | |||
| 553 | 553 | ||
| 554 | qcom_add_smd_subdev(rproc, &wcnss->smd_subdev); | 554 | qcom_add_smd_subdev(rproc, &wcnss->smd_subdev); |
| 555 | wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID); | 555 | wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID); |
| 556 | if (IS_ERR(wcnss->sysmon)) { | ||
| 557 | ret = PTR_ERR(wcnss->sysmon); | ||
| 558 | goto free_rproc; | ||
| 559 | } | ||
| 556 | 560 | ||
| 557 | ret = rproc_add(rproc); | 561 | ret = rproc_add(rproc); |
| 558 | if (ret) | 562 | if (ret) |
| @@ -622,5 +626,5 @@ static void __exit wcnss_exit(void) | |||
| 622 | } | 626 | } |
| 623 | module_exit(wcnss_exit); | 627 | module_exit(wcnss_exit); |
| 624 | 628 | ||
| 625 | MODULE_DESCRIPTION("Qualcomm Peripherial Image Loader for Wireless Subsystem"); | 629 | MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem"); |
| 626 | MODULE_LICENSE("GPL v2"); | 630 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 54ec38fc5dca..48feebd6d0a2 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c | |||
| @@ -39,12 +39,16 @@ | |||
| 39 | #include <linux/idr.h> | 39 | #include <linux/idr.h> |
| 40 | #include <linux/elf.h> | 40 | #include <linux/elf.h> |
| 41 | #include <linux/crc32.h> | 41 | #include <linux/crc32.h> |
| 42 | #include <linux/of_reserved_mem.h> | ||
| 42 | #include <linux/virtio_ids.h> | 43 | #include <linux/virtio_ids.h> |
| 43 | #include <linux/virtio_ring.h> | 44 | #include <linux/virtio_ring.h> |
| 44 | #include <asm/byteorder.h> | 45 | #include <asm/byteorder.h> |
| 46 | #include <linux/platform_device.h> | ||
| 45 | 47 | ||
| 46 | #include "remoteproc_internal.h" | 48 | #include "remoteproc_internal.h" |
| 47 | 49 | ||
| 50 | #define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL | ||
| 51 | |||
| 48 | static DEFINE_MUTEX(rproc_list_mutex); | 52 | static DEFINE_MUTEX(rproc_list_mutex); |
| 49 | static LIST_HEAD(rproc_list); | 53 | static LIST_HEAD(rproc_list); |
| 50 | 54 | ||
| @@ -145,7 +149,7 @@ static void rproc_disable_iommu(struct rproc *rproc) | |||
| 145 | iommu_domain_free(domain); | 149 | iommu_domain_free(domain); |
| 146 | } | 150 | } |
| 147 | 151 | ||
| 148 | static phys_addr_t rproc_va_to_pa(void *cpu_addr) | 152 | phys_addr_t rproc_va_to_pa(void *cpu_addr) |
| 149 | { | 153 | { |
| 150 | /* | 154 | /* |
| 151 | * Return physical address according to virtual address location | 155 | * Return physical address according to virtual address location |
| @@ -160,6 +164,7 @@ static phys_addr_t rproc_va_to_pa(void *cpu_addr) | |||
| 160 | WARN_ON(!virt_addr_valid(cpu_addr)); | 164 | WARN_ON(!virt_addr_valid(cpu_addr)); |
| 161 | return virt_to_phys(cpu_addr); | 165 | return virt_to_phys(cpu_addr); |
| 162 | } | 166 | } |
| 167 | EXPORT_SYMBOL(rproc_va_to_pa); | ||
| 163 | 168 | ||
| 164 | /** | 169 | /** |
| 165 | * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address | 170 | * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address |
| @@ -204,6 +209,10 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, int len) | |||
| 204 | list_for_each_entry(carveout, &rproc->carveouts, node) { | 209 | list_for_each_entry(carveout, &rproc->carveouts, node) { |
| 205 | int offset = da - carveout->da; | 210 | int offset = da - carveout->da; |
| 206 | 211 | ||
| 212 | /* Verify that carveout is allocated */ | ||
| 213 | if (!carveout->va) | ||
| 214 | continue; | ||
| 215 | |||
| 207 | /* try next carveout if da is too small */ | 216 | /* try next carveout if da is too small */ |
| 208 | if (offset < 0) | 217 | if (offset < 0) |
| 209 | continue; | 218 | continue; |
| @@ -272,25 +281,27 @@ rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...) | |||
| 272 | * @len: associated area size | 281 | * @len: associated area size |
| 273 | * | 282 | * |
| 274 | * This function is a helper function to verify requested device area (couple | 283 | * This function is a helper function to verify requested device area (couple |
| 275 | * da, len) is part of specified carevout. | 284 | * da, len) is part of specified carveout. |
| 285 | * If da is not set (defined as FW_RSC_ADDR_ANY), only requested length is | ||
| 286 | * checked. | ||
| 276 | * | 287 | * |
| 277 | * Return: 0 if carveout match request else -ENOMEM | 288 | * Return: 0 if carveout matches request else error |
| 278 | */ | 289 | */ |
| 279 | int rproc_check_carveout_da(struct rproc *rproc, struct rproc_mem_entry *mem, | 290 | static int rproc_check_carveout_da(struct rproc *rproc, |
| 280 | u32 da, u32 len) | 291 | struct rproc_mem_entry *mem, u32 da, u32 len) |
| 281 | { | 292 | { |
| 282 | struct device *dev = &rproc->dev; | 293 | struct device *dev = &rproc->dev; |
| 283 | int delta = 0; | 294 | int delta; |
| 284 | 295 | ||
| 285 | /* Check requested resource length */ | 296 | /* Check requested resource length */ |
| 286 | if (len > mem->len) { | 297 | if (len > mem->len) { |
| 287 | dev_err(dev, "Registered carveout doesn't fit len request\n"); | 298 | dev_err(dev, "Registered carveout doesn't fit len request\n"); |
| 288 | return -ENOMEM; | 299 | return -EINVAL; |
| 289 | } | 300 | } |
| 290 | 301 | ||
| 291 | if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) { | 302 | if (da != FW_RSC_ADDR_ANY && mem->da == FW_RSC_ADDR_ANY) { |
| 292 | /* Update existing carveout da */ | 303 | /* Address doesn't match registered carveout configuration */ |
| 293 | mem->da = da; | 304 | return -EINVAL; |
| 294 | } else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) { | 305 | } else if (da != FW_RSC_ADDR_ANY && mem->da != FW_RSC_ADDR_ANY) { |
| 295 | delta = da - mem->da; | 306 | delta = da - mem->da; |
| 296 | 307 | ||
| @@ -298,13 +309,13 @@ int rproc_check_carveout_da(struct rproc *rproc, struct rproc_mem_entry *mem, | |||
| 298 | if (delta < 0) { | 309 | if (delta < 0) { |
| 299 | dev_err(dev, | 310 | dev_err(dev, |
| 300 | "Registered carveout doesn't fit da request\n"); | 311 | "Registered carveout doesn't fit da request\n"); |
| 301 | return -ENOMEM; | 312 | return -EINVAL; |
| 302 | } | 313 | } |
| 303 | 314 | ||
| 304 | if (delta + len > mem->len) { | 315 | if (delta + len > mem->len) { |
| 305 | dev_err(dev, | 316 | dev_err(dev, |
| 306 | "Registered carveout doesn't fit len request\n"); | 317 | "Registered carveout doesn't fit len request\n"); |
| 307 | return -ENOMEM; | 318 | return -EINVAL; |
| 308 | } | 319 | } |
| 309 | } | 320 | } |
| 310 | 321 | ||
| @@ -418,8 +429,25 @@ static int rproc_vdev_do_start(struct rproc_subdev *subdev) | |||
| 418 | static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed) | 429 | static void rproc_vdev_do_stop(struct rproc_subdev *subdev, bool crashed) |
| 419 | { | 430 | { |
| 420 | struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev); | 431 | struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev, subdev); |
| 432 | int ret; | ||
| 421 | 433 | ||
| 422 | rproc_remove_virtio_dev(rvdev); | 434 | ret = device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev); |
| 435 | if (ret) | ||
| 436 | dev_warn(&rvdev->dev, "can't remove vdev child device: %d\n", ret); | ||
| 437 | } | ||
| 438 | |||
| 439 | /** | ||
| 440 | * rproc_rvdev_release() - release the existence of a rvdev | ||
| 441 | * | ||
| 442 | * @dev: the subdevice's dev | ||
| 443 | */ | ||
| 444 | static void rproc_rvdev_release(struct device *dev) | ||
| 445 | { | ||
| 446 | struct rproc_vdev *rvdev = container_of(dev, struct rproc_vdev, dev); | ||
| 447 | |||
| 448 | of_reserved_mem_device_release(dev); | ||
| 449 | |||
| 450 | kfree(rvdev); | ||
| 423 | } | 451 | } |
| 424 | 452 | ||
| 425 | /** | 453 | /** |
| @@ -455,6 +483,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, | |||
| 455 | struct device *dev = &rproc->dev; | 483 | struct device *dev = &rproc->dev; |
| 456 | struct rproc_vdev *rvdev; | 484 | struct rproc_vdev *rvdev; |
| 457 | int i, ret; | 485 | int i, ret; |
| 486 | char name[16]; | ||
| 458 | 487 | ||
| 459 | /* make sure resource isn't truncated */ | 488 | /* make sure resource isn't truncated */ |
| 460 | if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) | 489 | if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) |
| @@ -488,6 +517,29 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, | |||
| 488 | rvdev->rproc = rproc; | 517 | rvdev->rproc = rproc; |
| 489 | rvdev->index = rproc->nb_vdev++; | 518 | rvdev->index = rproc->nb_vdev++; |
| 490 | 519 | ||
| 520 | /* Initialise vdev subdevice */ | ||
| 521 | snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index); | ||
| 522 | rvdev->dev.parent = rproc->dev.parent; | ||
| 523 | rvdev->dev.release = rproc_rvdev_release; | ||
| 524 | dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name); | ||
| 525 | dev_set_drvdata(&rvdev->dev, rvdev); | ||
| 526 | |||
| 527 | ret = device_register(&rvdev->dev); | ||
| 528 | if (ret) { | ||
| 529 | put_device(&rvdev->dev); | ||
| 530 | return ret; | ||
| 531 | } | ||
| 532 | /* Make device dma capable by inheriting from parent's capabilities */ | ||
| 533 | set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent)); | ||
| 534 | |||
| 535 | ret = dma_coerce_mask_and_coherent(&rvdev->dev, | ||
| 536 | dma_get_mask(rproc->dev.parent)); | ||
| 537 | if (ret) { | ||
| 538 | dev_warn(dev, | ||
| 539 | "Failed to set DMA mask %llx. Trying to continue... %x\n", | ||
| 540 | dma_get_mask(rproc->dev.parent), ret); | ||
| 541 | } | ||
| 542 | |||
| 491 | /* parse the vrings */ | 543 | /* parse the vrings */ |
| 492 | for (i = 0; i < rsc->num_of_vrings; i++) { | 544 | for (i = 0; i < rsc->num_of_vrings; i++) { |
| 493 | ret = rproc_parse_vring(rvdev, rsc, i); | 545 | ret = rproc_parse_vring(rvdev, rsc, i); |
| @@ -518,7 +570,7 @@ unwind_vring_allocations: | |||
| 518 | for (i--; i >= 0; i--) | 570 | for (i--; i >= 0; i--) |
| 519 | rproc_free_vring(&rvdev->vring[i]); | 571 | rproc_free_vring(&rvdev->vring[i]); |
| 520 | free_rvdev: | 572 | free_rvdev: |
| 521 | kfree(rvdev); | 573 | device_unregister(&rvdev->dev); |
| 522 | return ret; | 574 | return ret; |
| 523 | } | 575 | } |
| 524 | 576 | ||
| @@ -536,7 +588,7 @@ void rproc_vdev_release(struct kref *ref) | |||
| 536 | 588 | ||
| 537 | rproc_remove_subdev(rproc, &rvdev->subdev); | 589 | rproc_remove_subdev(rproc, &rvdev->subdev); |
| 538 | list_del(&rvdev->node); | 590 | list_del(&rvdev->node); |
| 539 | kfree(rvdev); | 591 | device_unregister(&rvdev->dev); |
| 540 | } | 592 | } |
| 541 | 593 | ||
| 542 | /** | 594 | /** |
| @@ -558,9 +610,8 @@ void rproc_vdev_release(struct kref *ref) | |||
| 558 | static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, | 610 | static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, |
| 559 | int offset, int avail) | 611 | int offset, int avail) |
| 560 | { | 612 | { |
| 561 | struct rproc_mem_entry *trace; | 613 | struct rproc_debug_trace *trace; |
| 562 | struct device *dev = &rproc->dev; | 614 | struct device *dev = &rproc->dev; |
| 563 | void *ptr; | ||
| 564 | char name[15]; | 615 | char name[15]; |
| 565 | 616 | ||
| 566 | if (sizeof(*rsc) > avail) { | 617 | if (sizeof(*rsc) > avail) { |
| @@ -574,28 +625,23 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, | |||
| 574 | return -EINVAL; | 625 | return -EINVAL; |
| 575 | } | 626 | } |
| 576 | 627 | ||
| 577 | /* what's the kernel address of this resource ? */ | ||
| 578 | ptr = rproc_da_to_va(rproc, rsc->da, rsc->len); | ||
| 579 | if (!ptr) { | ||
| 580 | dev_err(dev, "erroneous trace resource entry\n"); | ||
| 581 | return -EINVAL; | ||
| 582 | } | ||
| 583 | |||
| 584 | trace = kzalloc(sizeof(*trace), GFP_KERNEL); | 628 | trace = kzalloc(sizeof(*trace), GFP_KERNEL); |
| 585 | if (!trace) | 629 | if (!trace) |
| 586 | return -ENOMEM; | 630 | return -ENOMEM; |
| 587 | 631 | ||
| 588 | /* set the trace buffer dma properties */ | 632 | /* set the trace buffer dma properties */ |
| 589 | trace->len = rsc->len; | 633 | trace->trace_mem.len = rsc->len; |
| 590 | trace->va = ptr; | 634 | trace->trace_mem.da = rsc->da; |
| 635 | |||
| 636 | /* set pointer on rproc device */ | ||
| 637 | trace->rproc = rproc; | ||
| 591 | 638 | ||
| 592 | /* make sure snprintf always null terminates, even if truncating */ | 639 | /* make sure snprintf always null terminates, even if truncating */ |
| 593 | snprintf(name, sizeof(name), "trace%d", rproc->num_traces); | 640 | snprintf(name, sizeof(name), "trace%d", rproc->num_traces); |
| 594 | 641 | ||
| 595 | /* create the debugfs entry */ | 642 | /* create the debugfs entry */ |
| 596 | trace->priv = rproc_create_trace_file(name, rproc, trace); | 643 | trace->tfile = rproc_create_trace_file(name, rproc, trace); |
| 597 | if (!trace->priv) { | 644 | if (!trace->tfile) { |
| 598 | trace->va = NULL; | ||
| 599 | kfree(trace); | 645 | kfree(trace); |
| 600 | return -EINVAL; | 646 | return -EINVAL; |
| 601 | } | 647 | } |
| @@ -604,8 +650,8 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, | |||
| 604 | 650 | ||
| 605 | rproc->num_traces++; | 651 | rproc->num_traces++; |
| 606 | 652 | ||
| 607 | dev_dbg(dev, "%s added: va %pK, da 0x%x, len 0x%x\n", | 653 | dev_dbg(dev, "%s added: da 0x%x, len 0x%x\n", |
| 608 | name, ptr, rsc->da, rsc->len); | 654 | name, rsc->da, rsc->len); |
| 609 | 655 | ||
| 610 | return 0; | 656 | return 0; |
| 611 | } | 657 | } |
| @@ -715,6 +761,18 @@ static int rproc_alloc_carveout(struct rproc *rproc, | |||
| 715 | dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n", | 761 | dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n", |
| 716 | va, &dma, mem->len); | 762 | va, &dma, mem->len); |
| 717 | 763 | ||
| 764 | if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) { | ||
| 765 | /* | ||
| 766 | * Check requested da is equal to dma address | ||
| 767 | * and print a warning in case of misalignment. | ||
| 768 | * Don't stop rproc_start sequence as coprocessor may | ||
| 769 | * build pa to da translation on its side. | ||
| 770 | */ | ||
| 771 | if (mem->da != (u32)dma) | ||
| 772 | dev_warn(dev->parent, | ||
| 773 | "Allocated carveout doesn't fit device address request\n"); | ||
| 774 | } | ||
| 775 | |||
| 718 | /* | 776 | /* |
| 719 | * Ok, this is non-standard. | 777 | * Ok, this is non-standard. |
| 720 | * | 778 | * |
| @@ -732,15 +790,7 @@ static int rproc_alloc_carveout(struct rproc *rproc, | |||
| 732 | * to use the iommu-based DMA API: we expect 'dma' to contain the | 790 | * to use the iommu-based DMA API: we expect 'dma' to contain the |
| 733 | * physical address in this case. | 791 | * physical address in this case. |
| 734 | */ | 792 | */ |
| 735 | 793 | if (mem->da != FW_RSC_ADDR_ANY && rproc->domain) { | |
| 736 | if (mem->da != FW_RSC_ADDR_ANY) { | ||
| 737 | if (!rproc->domain) { | ||
| 738 | dev_err(dev->parent, | ||
| 739 | "Bad carveout rsc configuration\n"); | ||
| 740 | ret = -ENOMEM; | ||
| 741 | goto dma_free; | ||
| 742 | } | ||
| 743 | |||
| 744 | mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); | 794 | mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); |
| 745 | if (!mapping) { | 795 | if (!mapping) { |
| 746 | ret = -ENOMEM; | 796 | ret = -ENOMEM; |
| @@ -767,11 +817,17 @@ static int rproc_alloc_carveout(struct rproc *rproc, | |||
| 767 | 817 | ||
| 768 | dev_dbg(dev, "carveout mapped 0x%x to %pad\n", | 818 | dev_dbg(dev, "carveout mapped 0x%x to %pad\n", |
| 769 | mem->da, &dma); | 819 | mem->da, &dma); |
| 770 | } else { | 820 | } |
| 821 | |||
| 822 | if (mem->da == FW_RSC_ADDR_ANY) { | ||
| 823 | /* Update device address as undefined by requester */ | ||
| 824 | if ((u64)dma & HIGH_BITS_MASK) | ||
| 825 | dev_warn(dev, "DMA address cast in 32bit to fit resource table format\n"); | ||
| 826 | |||
| 771 | mem->da = (u32)dma; | 827 | mem->da = (u32)dma; |
| 772 | } | 828 | } |
| 773 | 829 | ||
| 774 | mem->dma = (u32)dma; | 830 | mem->dma = dma; |
| 775 | mem->va = va; | 831 | mem->va = va; |
| 776 | 832 | ||
| 777 | return 0; | 833 | return 0; |
| @@ -900,7 +956,8 @@ EXPORT_SYMBOL(rproc_add_carveout); | |||
| 900 | * @dma: dma address | 956 | * @dma: dma address |
| 901 | * @len: memory carveout length | 957 | * @len: memory carveout length |
| 902 | * @da: device address | 958 | * @da: device address |
| 903 | * @release: memory carveout function | 959 | * @alloc: memory carveout allocation function |
| 960 | * @release: memory carveout release function | ||
| 904 | * @name: carveout name | 961 | * @name: carveout name |
| 905 | * | 962 | * |
| 906 | * This function allocates a rproc_mem_entry struct and fill it with parameters | 963 | * This function allocates a rproc_mem_entry struct and fill it with parameters |
| @@ -1110,6 +1167,7 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc) | |||
| 1110 | struct rproc_mem_entry *entry, *tmp; | 1167 | struct rproc_mem_entry *entry, *tmp; |
| 1111 | struct fw_rsc_carveout *rsc; | 1168 | struct fw_rsc_carveout *rsc; |
| 1112 | struct device *dev = &rproc->dev; | 1169 | struct device *dev = &rproc->dev; |
| 1170 | u64 pa; | ||
| 1113 | int ret; | 1171 | int ret; |
| 1114 | 1172 | ||
| 1115 | list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) { | 1173 | list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) { |
| @@ -1146,10 +1204,15 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc) | |||
| 1146 | 1204 | ||
| 1147 | /* Use va if defined else dma to generate pa */ | 1205 | /* Use va if defined else dma to generate pa */ |
| 1148 | if (entry->va) | 1206 | if (entry->va) |
| 1149 | rsc->pa = (u32)rproc_va_to_pa(entry->va); | 1207 | pa = (u64)rproc_va_to_pa(entry->va); |
| 1150 | else | 1208 | else |
| 1151 | rsc->pa = (u32)entry->dma; | 1209 | pa = (u64)entry->dma; |
| 1210 | |||
| 1211 | if (((u64)pa) & HIGH_BITS_MASK) | ||
| 1212 | dev_warn(dev, | ||
| 1213 | "Physical address cast in 32bit to fit resource table format\n"); | ||
| 1152 | 1214 | ||
| 1215 | rsc->pa = (u32)pa; | ||
| 1153 | rsc->da = entry->da; | 1216 | rsc->da = entry->da; |
| 1154 | rsc->len = entry->len; | 1217 | rsc->len = entry->len; |
| 1155 | } | 1218 | } |
| @@ -1182,15 +1245,16 @@ static void rproc_coredump_cleanup(struct rproc *rproc) | |||
| 1182 | static void rproc_resource_cleanup(struct rproc *rproc) | 1245 | static void rproc_resource_cleanup(struct rproc *rproc) |
| 1183 | { | 1246 | { |
| 1184 | struct rproc_mem_entry *entry, *tmp; | 1247 | struct rproc_mem_entry *entry, *tmp; |
| 1248 | struct rproc_debug_trace *trace, *ttmp; | ||
| 1185 | struct rproc_vdev *rvdev, *rvtmp; | 1249 | struct rproc_vdev *rvdev, *rvtmp; |
| 1186 | struct device *dev = &rproc->dev; | 1250 | struct device *dev = &rproc->dev; |
| 1187 | 1251 | ||
| 1188 | /* clean up debugfs trace entries */ | 1252 | /* clean up debugfs trace entries */ |
| 1189 | list_for_each_entry_safe(entry, tmp, &rproc->traces, node) { | 1253 | list_for_each_entry_safe(trace, ttmp, &rproc->traces, node) { |
| 1190 | rproc_remove_trace_file(entry->priv); | 1254 | rproc_remove_trace_file(trace->tfile); |
| 1191 | rproc->num_traces--; | 1255 | rproc->num_traces--; |
| 1192 | list_del(&entry->node); | 1256 | list_del(&trace->node); |
| 1193 | kfree(entry); | 1257 | kfree(trace); |
| 1194 | } | 1258 | } |
| 1195 | 1259 | ||
| 1196 | /* clean up iommu mapping entries */ | 1260 | /* clean up iommu mapping entries */ |
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index e90135c64af0..6da934b8dc4b 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c | |||
| @@ -47,10 +47,23 @@ static struct dentry *rproc_dbg; | |||
| 47 | static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf, | 47 | static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf, |
| 48 | size_t count, loff_t *ppos) | 48 | size_t count, loff_t *ppos) |
| 49 | { | 49 | { |
| 50 | struct rproc_mem_entry *trace = filp->private_data; | 50 | struct rproc_debug_trace *data = filp->private_data; |
| 51 | int len = strnlen(trace->va, trace->len); | 51 | struct rproc_mem_entry *trace = &data->trace_mem; |
| 52 | void *va; | ||
| 53 | char buf[100]; | ||
| 54 | int len; | ||
| 55 | |||
| 56 | va = rproc_da_to_va(data->rproc, trace->da, trace->len); | ||
| 52 | 57 | ||
| 53 | return simple_read_from_buffer(userbuf, count, ppos, trace->va, len); | 58 | if (!va) { |
| 59 | len = scnprintf(buf, sizeof(buf), "Trace %s not available\n", | ||
| 60 | trace->name); | ||
| 61 | va = buf; | ||
| 62 | } else { | ||
| 63 | len = strnlen(va, trace->len); | ||
| 64 | } | ||
| 65 | |||
| 66 | return simple_read_from_buffer(userbuf, count, ppos, va, len); | ||
| 54 | } | 67 | } |
| 55 | 68 | ||
| 56 | static const struct file_operations trace_rproc_ops = { | 69 | static const struct file_operations trace_rproc_ops = { |
| @@ -155,6 +168,30 @@ static const struct file_operations rproc_recovery_ops = { | |||
| 155 | .llseek = generic_file_llseek, | 168 | .llseek = generic_file_llseek, |
| 156 | }; | 169 | }; |
| 157 | 170 | ||
| 171 | /* expose the crash trigger via debugfs */ | ||
| 172 | static ssize_t | ||
| 173 | rproc_crash_write(struct file *filp, const char __user *user_buf, | ||
| 174 | size_t count, loff_t *ppos) | ||
| 175 | { | ||
| 176 | struct rproc *rproc = filp->private_data; | ||
| 177 | unsigned int type; | ||
| 178 | int ret; | ||
| 179 | |||
| 180 | ret = kstrtouint_from_user(user_buf, count, 0, &type); | ||
| 181 | if (ret < 0) | ||
| 182 | return ret; | ||
| 183 | |||
| 184 | rproc_report_crash(rproc, type); | ||
| 185 | |||
| 186 | return count; | ||
| 187 | } | ||
| 188 | |||
| 189 | static const struct file_operations rproc_crash_ops = { | ||
| 190 | .write = rproc_crash_write, | ||
| 191 | .open = simple_open, | ||
| 192 | .llseek = generic_file_llseek, | ||
| 193 | }; | ||
| 194 | |||
| 158 | /* Expose resource table content via debugfs */ | 195 | /* Expose resource table content via debugfs */ |
| 159 | static int rproc_rsc_table_show(struct seq_file *seq, void *p) | 196 | static int rproc_rsc_table_show(struct seq_file *seq, void *p) |
| 160 | { | 197 | { |
| @@ -288,7 +325,7 @@ void rproc_remove_trace_file(struct dentry *tfile) | |||
| 288 | } | 325 | } |
| 289 | 326 | ||
| 290 | struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc, | 327 | struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc, |
| 291 | struct rproc_mem_entry *trace) | 328 | struct rproc_debug_trace *trace) |
| 292 | { | 329 | { |
| 293 | struct dentry *tfile; | 330 | struct dentry *tfile; |
| 294 | 331 | ||
| @@ -325,6 +362,8 @@ void rproc_create_debug_dir(struct rproc *rproc) | |||
| 325 | rproc, &rproc_name_ops); | 362 | rproc, &rproc_name_ops); |
| 326 | debugfs_create_file("recovery", 0400, rproc->dbg_dir, | 363 | debugfs_create_file("recovery", 0400, rproc->dbg_dir, |
| 327 | rproc, &rproc_recovery_ops); | 364 | rproc, &rproc_recovery_ops); |
| 365 | debugfs_create_file("crash", 0200, rproc->dbg_dir, | ||
| 366 | rproc, &rproc_crash_ops); | ||
| 328 | debugfs_create_file("resource_table", 0400, rproc->dbg_dir, | 367 | debugfs_create_file("resource_table", 0400, rproc->dbg_dir, |
| 329 | rproc, &rproc_rsc_table_ops); | 368 | rproc, &rproc_rsc_table_ops); |
| 330 | debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir, | 369 | debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir, |
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index f6cad243d7ca..45ff76a06c72 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h | |||
| @@ -25,6 +25,13 @@ | |||
| 25 | 25 | ||
| 26 | struct rproc; | 26 | struct rproc; |
| 27 | 27 | ||
| 28 | struct rproc_debug_trace { | ||
| 29 | struct rproc *rproc; | ||
| 30 | struct dentry *tfile; | ||
| 31 | struct list_head node; | ||
| 32 | struct rproc_mem_entry trace_mem; | ||
| 33 | }; | ||
| 34 | |||
| 28 | /* from remoteproc_core.c */ | 35 | /* from remoteproc_core.c */ |
| 29 | void rproc_release(struct kref *kref); | 36 | void rproc_release(struct kref *kref); |
| 30 | irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id); | 37 | irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id); |
| @@ -32,12 +39,12 @@ void rproc_vdev_release(struct kref *ref); | |||
| 32 | 39 | ||
| 33 | /* from remoteproc_virtio.c */ | 40 | /* from remoteproc_virtio.c */ |
| 34 | int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id); | 41 | int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id); |
| 35 | void rproc_remove_virtio_dev(struct rproc_vdev *rvdev); | 42 | int rproc_remove_virtio_dev(struct device *dev, void *data); |
| 36 | 43 | ||
| 37 | /* from remoteproc_debugfs.c */ | 44 | /* from remoteproc_debugfs.c */ |
| 38 | void rproc_remove_trace_file(struct dentry *tfile); | 45 | void rproc_remove_trace_file(struct dentry *tfile); |
| 39 | struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc, | 46 | struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc, |
| 40 | struct rproc_mem_entry *trace); | 47 | struct rproc_debug_trace *trace); |
| 41 | void rproc_delete_debug_dir(struct rproc *rproc); | 48 | void rproc_delete_debug_dir(struct rproc *rproc); |
| 42 | void rproc_create_debug_dir(struct rproc *rproc); | 49 | void rproc_create_debug_dir(struct rproc *rproc); |
| 43 | void rproc_init_debugfs(void); | 50 | void rproc_init_debugfs(void); |
| @@ -52,6 +59,7 @@ void rproc_free_vring(struct rproc_vring *rvring); | |||
| 52 | int rproc_alloc_vring(struct rproc_vdev *rvdev, int i); | 59 | int rproc_alloc_vring(struct rproc_vdev *rvdev, int i); |
| 53 | 60 | ||
| 54 | void *rproc_da_to_va(struct rproc *rproc, u64 da, int len); | 61 | void *rproc_da_to_va(struct rproc *rproc, u64 da, int len); |
| 62 | phys_addr_t rproc_va_to_pa(void *cpu_addr); | ||
| 55 | int rproc_trigger_recovery(struct rproc *rproc); | 63 | int rproc_trigger_recovery(struct rproc *rproc); |
| 56 | 64 | ||
| 57 | int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw); | 65 | int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw); |
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 2d7cd344f3bf..44774de6f17b 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c | |||
| @@ -17,7 +17,9 @@ | |||
| 17 | * GNU General Public License for more details. | 17 | * GNU General Public License for more details. |
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #include <linux/dma-mapping.h> | ||
| 20 | #include <linux/export.h> | 21 | #include <linux/export.h> |
| 22 | #include <linux/of_reserved_mem.h> | ||
| 21 | #include <linux/remoteproc.h> | 23 | #include <linux/remoteproc.h> |
| 22 | #include <linux/virtio.h> | 24 | #include <linux/virtio.h> |
| 23 | #include <linux/virtio_config.h> | 25 | #include <linux/virtio_config.h> |
| @@ -316,6 +318,8 @@ static void rproc_virtio_dev_release(struct device *dev) | |||
| 316 | struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); | 318 | struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); |
| 317 | struct rproc *rproc = vdev_to_rproc(vdev); | 319 | struct rproc *rproc = vdev_to_rproc(vdev); |
| 318 | 320 | ||
| 321 | kfree(vdev); | ||
| 322 | |||
| 319 | kref_put(&rvdev->refcount, rproc_vdev_release); | 323 | kref_put(&rvdev->refcount, rproc_vdev_release); |
| 320 | 324 | ||
| 321 | put_device(&rproc->dev); | 325 | put_device(&rproc->dev); |
| @@ -333,10 +337,53 @@ static void rproc_virtio_dev_release(struct device *dev) | |||
| 333 | int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id) | 337 | int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id) |
| 334 | { | 338 | { |
| 335 | struct rproc *rproc = rvdev->rproc; | 339 | struct rproc *rproc = rvdev->rproc; |
| 336 | struct device *dev = &rproc->dev; | 340 | struct device *dev = &rvdev->dev; |
| 337 | struct virtio_device *vdev = &rvdev->vdev; | 341 | struct virtio_device *vdev; |
| 342 | struct rproc_mem_entry *mem; | ||
| 338 | int ret; | 343 | int ret; |
| 339 | 344 | ||
| 345 | /* Try to find dedicated vdev buffer carveout */ | ||
| 346 | mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index); | ||
| 347 | if (mem) { | ||
| 348 | phys_addr_t pa; | ||
| 349 | |||
| 350 | if (mem->of_resm_idx != -1) { | ||
| 351 | struct device_node *np = rproc->dev.parent->of_node; | ||
| 352 | |||
| 353 | /* Associate reserved memory to vdev device */ | ||
| 354 | ret = of_reserved_mem_device_init_by_idx(dev, np, | ||
| 355 | mem->of_resm_idx); | ||
| 356 | if (ret) { | ||
| 357 | dev_err(dev, "Can't associate reserved memory\n"); | ||
| 358 | goto out; | ||
| 359 | } | ||
| 360 | } else { | ||
| 361 | if (mem->va) { | ||
| 362 | dev_warn(dev, "vdev %d buffer already mapped\n", | ||
| 363 | rvdev->index); | ||
| 364 | pa = rproc_va_to_pa(mem->va); | ||
| 365 | } else { | ||
| 366 | /* Use dma address as carveout is not memory-mapped yet */ | ||
| 367 | pa = (phys_addr_t)mem->dma; | ||
| 368 | } | ||
| 369 | |||
| 370 | /* Associate vdev buffer memory pool to vdev subdev */ | ||
| 371 | ret = dma_declare_coherent_memory(dev, pa, | ||
| 372 | mem->da, | ||
| 373 | mem->len); | ||
| 374 | if (ret < 0) { | ||
| 375 | dev_err(dev, "Failed to associate buffer\n"); | ||
| 376 | goto out; | ||
| 377 | } | ||
| 378 | } | ||
| 379 | } | ||
| 380 | |||
| 381 | /* Allocate virtio device */ | ||
| 382 | vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); | ||
| 383 | if (!vdev) { | ||
| 384 | ret = -ENOMEM; | ||
| 385 | goto out; | ||
| 386 | } | ||
| 340 | vdev->id.device = id, | 387 | vdev->id.device = id, |
| 341 | vdev->config = &rproc_virtio_config_ops, | 388 | vdev->config = &rproc_virtio_config_ops, |
| 342 | vdev->dev.parent = dev; | 389 | vdev->dev.parent = dev; |
| @@ -370,11 +417,15 @@ out: | |||
| 370 | 417 | ||
| 371 | /** | 418 | /** |
| 372 | * rproc_remove_virtio_dev() - remove an rproc-induced virtio device | 419 | * rproc_remove_virtio_dev() - remove an rproc-induced virtio device |
| 373 | * @rvdev: the remote vdev | 420 | * @dev: the virtio device |
| 421 | * @data: must be null | ||
| 374 | * | 422 | * |
| 375 | * This function unregisters an existing virtio device. | 423 | * This function unregisters an existing virtio device. |
| 376 | */ | 424 | */ |
| 377 | void rproc_remove_virtio_dev(struct rproc_vdev *rvdev) | 425 | int rproc_remove_virtio_dev(struct device *dev, void *data) |
| 378 | { | 426 | { |
| 379 | unregister_virtio_device(&rvdev->vdev); | 427 | struct virtio_device *vdev = dev_to_virtio(dev); |
| 428 | |||
| 429 | unregister_virtio_device(vdev); | ||
| 430 | return 0; | ||
| 380 | } | 431 | } |
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c index aacef0ea3b90..51049d17b1e5 100644 --- a/drivers/remoteproc/st_remoteproc.c +++ b/drivers/remoteproc/st_remoteproc.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/mfd/syscon.h> | 19 | #include <linux/mfd/syscon.h> |
| 20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| 21 | #include <linux/of.h> | 21 | #include <linux/of.h> |
| 22 | #include <linux/of_address.h> | ||
| 22 | #include <linux/of_device.h> | 23 | #include <linux/of_device.h> |
| 23 | #include <linux/of_reserved_mem.h> | 24 | #include <linux/of_reserved_mem.h> |
| 24 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
| @@ -91,6 +92,77 @@ static void st_rproc_kick(struct rproc *rproc, int vqid) | |||
| 91 | dev_err(dev, "failed to send message via mbox: %d\n", ret); | 92 | dev_err(dev, "failed to send message via mbox: %d\n", ret); |
| 92 | } | 93 | } |
| 93 | 94 | ||
| 95 | static int st_rproc_mem_alloc(struct rproc *rproc, | ||
| 96 | struct rproc_mem_entry *mem) | ||
| 97 | { | ||
| 98 | struct device *dev = rproc->dev.parent; | ||
| 99 | void *va; | ||
| 100 | |||
| 101 | va = ioremap_wc(mem->dma, mem->len); | ||
| 102 | if (!va) { | ||
| 103 | dev_err(dev, "Unable to map memory region: %pa+%zx\n", | ||
| 104 | &mem->dma, mem->len); | ||
| 105 | return -ENOMEM; | ||
| 106 | } | ||
| 107 | |||
| 108 | /* Update memory entry va */ | ||
| 109 | mem->va = va; | ||
| 110 | |||
| 111 | return 0; | ||
| 112 | } | ||
| 113 | |||
| 114 | static int st_rproc_mem_release(struct rproc *rproc, | ||
| 115 | struct rproc_mem_entry *mem) | ||
| 116 | { | ||
| 117 | iounmap(mem->va); | ||
| 118 | |||
| 119 | return 0; | ||
| 120 | } | ||
| 121 | |||
| 122 | static int st_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) | ||
| 123 | { | ||
| 124 | struct device *dev = rproc->dev.parent; | ||
| 125 | struct device_node *np = dev->of_node; | ||
| 126 | struct rproc_mem_entry *mem; | ||
| 127 | struct reserved_mem *rmem; | ||
| 128 | struct of_phandle_iterator it; | ||
| 129 | int index = 0; | ||
| 130 | |||
| 131 | of_phandle_iterator_init(&it, np, "memory-region", NULL, 0); | ||
| 132 | while (of_phandle_iterator_next(&it) == 0) { | ||
| 133 | rmem = of_reserved_mem_lookup(it.node); | ||
| 134 | if (!rmem) { | ||
| 135 | dev_err(dev, "unable to acquire memory-region\n"); | ||
| 136 | return -EINVAL; | ||
| 137 | } | ||
| 138 | |||
| 139 | /* No need to map vdev buffer */ | ||
| 140 | if (strcmp(it.node->name, "vdev0buffer")) { | ||
| 141 | /* Register memory region */ | ||
| 142 | mem = rproc_mem_entry_init(dev, NULL, | ||
| 143 | (dma_addr_t)rmem->base, | ||
| 144 | rmem->size, rmem->base, | ||
| 145 | st_rproc_mem_alloc, | ||
| 146 | st_rproc_mem_release, | ||
| 147 | it.node->name); | ||
| 148 | } else { | ||
| 149 | /* Register reserved memory for vdev buffer allocation */ | ||
| 150 | mem = rproc_of_resm_mem_entry_init(dev, index, | ||
| 151 | rmem->size, | ||
| 152 | rmem->base, | ||
| 153 | it.node->name); | ||
| 154 | } | ||
| 155 | |||
| 156 | if (!mem) | ||
| 157 | return -ENOMEM; | ||
| 158 | |||
| 159 | rproc_add_carveout(rproc, mem); | ||
| 160 | index++; | ||
| 161 | } | ||
| 162 | |||
| 163 | return rproc_elf_load_rsc_table(rproc, fw); | ||
| 164 | } | ||
| 165 | |||
| 94 | static int st_rproc_start(struct rproc *rproc) | 166 | static int st_rproc_start(struct rproc *rproc) |
| 95 | { | 167 | { |
| 96 | struct st_rproc *ddata = rproc->priv; | 168 | struct st_rproc *ddata = rproc->priv; |
| @@ -158,9 +230,14 @@ static int st_rproc_stop(struct rproc *rproc) | |||
| 158 | } | 230 | } |
| 159 | 231 | ||
| 160 | static const struct rproc_ops st_rproc_ops = { | 232 | static const struct rproc_ops st_rproc_ops = { |
| 161 | .kick = st_rproc_kick, | 233 | .kick = st_rproc_kick, |
| 162 | .start = st_rproc_start, | 234 | .start = st_rproc_start, |
| 163 | .stop = st_rproc_stop, | 235 | .stop = st_rproc_stop, |
| 236 | .parse_fw = st_rproc_parse_fw, | ||
| 237 | .load = rproc_elf_load_segments, | ||
| 238 | .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table, | ||
| 239 | .sanity_check = rproc_elf_sanity_check, | ||
| 240 | .get_boot_addr = rproc_elf_get_boot_addr, | ||
| 164 | }; | 241 | }; |
| 165 | 242 | ||
| 166 | /* | 243 | /* |
| @@ -254,12 +331,6 @@ static int st_rproc_parse_dt(struct platform_device *pdev) | |||
| 254 | return -EINVAL; | 331 | return -EINVAL; |
| 255 | } | 332 | } |
| 256 | 333 | ||
| 257 | err = of_reserved_mem_device_init(dev); | ||
| 258 | if (err) { | ||
| 259 | dev_err(dev, "Failed to obtain shared memory\n"); | ||
| 260 | return err; | ||
| 261 | } | ||
| 262 | |||
| 263 | err = clk_prepare(ddata->clk); | 334 | err = clk_prepare(ddata->clk); |
| 264 | if (err) | 335 | if (err) |
| 265 | dev_err(dev, "failed to get clock\n"); | 336 | dev_err(dev, "failed to get clock\n"); |
| @@ -387,8 +458,6 @@ static int st_rproc_remove(struct platform_device *pdev) | |||
| 387 | 458 | ||
| 388 | clk_disable_unprepare(ddata->clk); | 459 | clk_disable_unprepare(ddata->clk); |
| 389 | 460 | ||
| 390 | of_reserved_mem_device_release(&pdev->dev); | ||
| 391 | |||
| 392 | for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++) | 461 | for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++) |
| 393 | mbox_free_channel(ddata->mbox_chan[i]); | 462 | mbox_free_channel(ddata->mbox_chan[i]); |
| 394 | 463 | ||
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 664f957012cd..5d3685bd76a2 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c | |||
| @@ -11,21 +11,21 @@ | |||
| 11 | 11 | ||
| 12 | #define pr_fmt(fmt) "%s: " fmt, __func__ | 12 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
| 13 | 13 | ||
| 14 | #include <linux/dma-mapping.h> | ||
| 15 | #include <linux/idr.h> | ||
| 16 | #include <linux/jiffies.h> | ||
| 14 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
| 15 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 16 | #include <linux/virtio.h> | 19 | #include <linux/mutex.h> |
| 17 | #include <linux/virtio_ids.h> | 20 | #include <linux/of_device.h> |
| 18 | #include <linux/virtio_config.h> | 21 | #include <linux/rpmsg.h> |
| 19 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
| 20 | #include <linux/dma-mapping.h> | ||
| 21 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
| 22 | #include <linux/idr.h> | ||
| 23 | #include <linux/jiffies.h> | ||
| 24 | #include <linux/sched.h> | 24 | #include <linux/sched.h> |
| 25 | #include <linux/virtio.h> | ||
| 26 | #include <linux/virtio_ids.h> | ||
| 27 | #include <linux/virtio_config.h> | ||
| 25 | #include <linux/wait.h> | 28 | #include <linux/wait.h> |
| 26 | #include <linux/rpmsg.h> | ||
| 27 | #include <linux/mutex.h> | ||
| 28 | #include <linux/of_device.h> | ||
| 29 | 29 | ||
| 30 | #include "rpmsg_internal.h" | 30 | #include "rpmsg_internal.h" |
| 31 | 31 | ||
| @@ -912,7 +912,7 @@ static int rpmsg_probe(struct virtio_device *vdev) | |||
| 912 | total_buf_space = vrp->num_bufs * vrp->buf_size; | 912 | total_buf_space = vrp->num_bufs * vrp->buf_size; |
| 913 | 913 | ||
| 914 | /* allocate coherent memory for the buffers */ | 914 | /* allocate coherent memory for the buffers */ |
| 915 | bufs_va = dma_alloc_coherent(vdev->dev.parent->parent, | 915 | bufs_va = dma_alloc_coherent(vdev->dev.parent, |
| 916 | total_buf_space, &vrp->bufs_dma, | 916 | total_buf_space, &vrp->bufs_dma, |
| 917 | GFP_KERNEL); | 917 | GFP_KERNEL); |
| 918 | if (!bufs_va) { | 918 | if (!bufs_va) { |
| @@ -980,7 +980,7 @@ static int rpmsg_probe(struct virtio_device *vdev) | |||
| 980 | return 0; | 980 | return 0; |
| 981 | 981 | ||
| 982 | free_coherent: | 982 | free_coherent: |
| 983 | dma_free_coherent(vdev->dev.parent->parent, total_buf_space, | 983 | dma_free_coherent(vdev->dev.parent, total_buf_space, |
| 984 | bufs_va, vrp->bufs_dma); | 984 | bufs_va, vrp->bufs_dma); |
| 985 | vqs_del: | 985 | vqs_del: |
| 986 | vdev->config->del_vqs(vrp->vdev); | 986 | vdev->config->del_vqs(vrp->vdev); |
| @@ -1015,7 +1015,7 @@ static void rpmsg_remove(struct virtio_device *vdev) | |||
| 1015 | 1015 | ||
| 1016 | vdev->config->del_vqs(vrp->vdev); | 1016 | vdev->config->del_vqs(vrp->vdev); |
| 1017 | 1017 | ||
| 1018 | dma_free_coherent(vdev->dev.parent->parent, total_buf_space, | 1018 | dma_free_coherent(vdev->dev.parent, total_buf_space, |
| 1019 | vrp->rbufs, vrp->bufs_dma); | 1019 | vrp->rbufs, vrp->bufs_dma); |
| 1020 | 1020 | ||
| 1021 | kfree(vrp); | 1021 | kfree(vrp); |
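Taken together with the remoteproc changes above, these hunks move the rpmsg ring buffers into the per-vdev memory pool: the core now registers a dedicated "vdev%dbuffer" child device for each vdev resource, remoteproc_virtio backs that device with either the reserved-memory region or the carveout via dma_declare_coherent_memory(), and virtio_rpmsg_bus allocates from vdev->dev.parent, which is now that buffer device rather than the remoteproc's grandparent. A condensed sketch of the resulting flow (simplified from the hunks above, not a literal excerpt):

	/* remoteproc core: one child device per vdev resource */
	snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
	dev_set_name(&rvdev->dev, "%s#%s", dev_name(rproc->dev.parent), name);

	/* remoteproc_virtio: back that device with the dedicated carveout */
	ret = dma_declare_coherent_memory(&rvdev->dev, pa, mem->da, mem->len);

	/* virtio_rpmsg_bus: buffers now come from the vdev buffer device */
	bufs_va = dma_alloc_coherent(vdev->dev.parent, total_buf_space,
				     &vrp->bufs_dma, GFP_KERNEL);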
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c index 98dbc796353f..53ca9ba6ab4b 100644 --- a/drivers/tty/serial/8250/8250_lpss.c +++ b/drivers/tty/serial/8250/8250_lpss.c | |||
| @@ -153,7 +153,6 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) | |||
| 153 | #ifdef CONFIG_SERIAL_8250_DMA | 153 | #ifdef CONFIG_SERIAL_8250_DMA |
| 154 | static const struct dw_dma_platform_data qrk_serial_dma_pdata = { | 154 | static const struct dw_dma_platform_data qrk_serial_dma_pdata = { |
| 155 | .nr_channels = 2, | 155 | .nr_channels = 2, |
| 156 | .is_private = true, | ||
| 157 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, | 156 | .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, |
| 158 | .chan_priority = CHAN_PRIORITY_ASCENDING, | 157 | .chan_priority = CHAN_PRIORITY_ASCENDING, |
| 159 | .block_size = 4095, | 158 | .block_size = 4095, |
