diff options
Diffstat (limited to 'drivers')
50 files changed, 1396 insertions, 409 deletions
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 217a782c3e55..1b207fca1420 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c | |||
| @@ -1051,6 +1051,48 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) | |||
| 1051 | } | 1051 | } |
| 1052 | 1052 | ||
| 1053 | /** | 1053 | /** |
| 1054 | * cppc_get_desired_perf - Get the value of desired performance register. | ||
| 1055 | * @cpunum: CPU from which to get desired performance. | ||
| 1056 | * @desired_perf: address of a variable to store the returned desired performance | ||
| 1057 | * | ||
| 1058 | * Return: 0 for success, -EIO otherwise. | ||
| 1059 | */ | ||
| 1060 | int cppc_get_desired_perf(int cpunum, u64 *desired_perf) | ||
| 1061 | { | ||
| 1062 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); | ||
| 1063 | int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); | ||
| 1064 | struct cpc_register_resource *desired_reg; | ||
| 1065 | struct cppc_pcc_data *pcc_ss_data = NULL; | ||
| 1066 | |||
| 1067 | desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; | ||
| 1068 | |||
| 1069 | if (CPC_IN_PCC(desired_reg)) { | ||
| 1070 | int ret = 0; | ||
| 1071 | |||
| 1072 | if (pcc_ss_id < 0) | ||
| 1073 | return -EIO; | ||
| 1074 | |||
| 1075 | pcc_ss_data = pcc_data[pcc_ss_id]; | ||
| 1076 | |||
| 1077 | down_write(&pcc_ss_data->pcc_lock); | ||
| 1078 | |||
| 1079 | if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) | ||
| 1080 | cpc_read(cpunum, desired_reg, desired_perf); | ||
| 1081 | else | ||
| 1082 | ret = -EIO; | ||
| 1083 | |||
| 1084 | up_write(&pcc_ss_data->pcc_lock); | ||
| 1085 | |||
| 1086 | return ret; | ||
| 1087 | } | ||
| 1088 | |||
| 1089 | cpc_read(cpunum, desired_reg, desired_perf); | ||
| 1090 | |||
| 1091 | return 0; | ||
| 1092 | } | ||
| 1093 | EXPORT_SYMBOL_GPL(cppc_get_desired_perf); | ||
| 1094 | |||
| 1095 | /** | ||
| 1054 | * cppc_get_perf_caps - Get a CPUs performance capabilities. | 1096 | * cppc_get_perf_caps - Get a CPUs performance capabilities. |
| 1055 | * @cpunum: CPU from which to get capabilities info. | 1097 | * @cpunum: CPU from which to get capabilities info. |
| 1056 | * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h | 1098 | * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index b2131c4ea124..98d4ec5bf450 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
| @@ -282,6 +282,13 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) | |||
| 282 | pr->power.states[ACPI_STATE_C2].address, | 282 | pr->power.states[ACPI_STATE_C2].address, |
| 283 | pr->power.states[ACPI_STATE_C3].address)); | 283 | pr->power.states[ACPI_STATE_C3].address)); |
| 284 | 284 | ||
| 285 | snprintf(pr->power.states[ACPI_STATE_C2].desc, | ||
| 286 | ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x", | ||
| 287 | pr->power.states[ACPI_STATE_C2].address); | ||
| 288 | snprintf(pr->power.states[ACPI_STATE_C3].desc, | ||
| 289 | ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x", | ||
| 290 | pr->power.states[ACPI_STATE_C3].address); | ||
| 291 | |||
| 285 | return 0; | 292 | return 0; |
| 286 | } | 293 | } |
| 287 | 294 | ||
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index eb9443d5bae1..6ce93a52bf3f 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
| @@ -427,6 +427,7 @@ __cpu_device_create(struct device *parent, void *drvdata, | |||
| 427 | dev->parent = parent; | 427 | dev->parent = parent; |
| 428 | dev->groups = groups; | 428 | dev->groups = groups; |
| 429 | dev->release = device_create_release; | 429 | dev->release = device_create_release; |
| 430 | device_set_pm_not_required(dev); | ||
| 430 | dev_set_drvdata(dev, drvdata); | 431 | dev_set_drvdata(dev, drvdata); |
| 431 | 432 | ||
| 432 | retval = kobject_set_name_vargs(&dev->kobj, fmt, args); | 433 | retval = kobject_set_name_vargs(&dev->kobj, fmt, args); |
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 5a42ae4078c2..365ad751ce0f 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
| @@ -65,10 +65,15 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) | |||
| 65 | if (IS_ERR(ce->clk)) { | 65 | if (IS_ERR(ce->clk)) { |
| 66 | ce->status = PCE_STATUS_ERROR; | 66 | ce->status = PCE_STATUS_ERROR; |
| 67 | } else { | 67 | } else { |
| 68 | clk_prepare(ce->clk); | 68 | if (clk_prepare(ce->clk)) { |
| 69 | ce->status = PCE_STATUS_ACQUIRED; | 69 | ce->status = PCE_STATUS_ERROR; |
| 70 | dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n", | 70 | dev_err(dev, "clk_prepare() failed\n"); |
| 71 | ce->clk, ce->con_id); | 71 | } else { |
| 72 | ce->status = PCE_STATUS_ACQUIRED; | ||
| 73 | dev_dbg(dev, | ||
| 74 | "Clock %pC con_id %s managed by runtime PM.\n", | ||
| 75 | ce->clk, ce->con_id); | ||
| 76 | } | ||
| 72 | } | 77 | } |
| 73 | } | 78 | } |
| 74 | 79 | ||
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index b413951c6abc..22aedb28aad7 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c | |||
| @@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id); | |||
| 160 | * For a detailed function description, see dev_pm_domain_attach_by_id(). | 160 | * For a detailed function description, see dev_pm_domain_attach_by_id(). |
| 161 | */ | 161 | */ |
| 162 | struct device *dev_pm_domain_attach_by_name(struct device *dev, | 162 | struct device *dev_pm_domain_attach_by_name(struct device *dev, |
| 163 | char *name) | 163 | const char *name) |
| 164 | { | 164 | { |
| 165 | if (dev->pm_domain) | 165 | if (dev->pm_domain) |
| 166 | return ERR_PTR(-EEXIST); | 166 | return ERR_PTR(-EEXIST); |
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 500de1dee967..2c334c01fc43 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
| @@ -2483,7 +2483,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); | |||
| 2483 | * power-domain-names DT property. For further description see | 2483 | * power-domain-names DT property. For further description see |
| 2484 | * genpd_dev_pm_attach_by_id(). | 2484 | * genpd_dev_pm_attach_by_id(). |
| 2485 | */ | 2485 | */ |
| 2486 | struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name) | 2486 | struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name) |
| 2487 | { | 2487 | { |
| 2488 | int index; | 2488 | int index; |
| 2489 | 2489 | ||
| @@ -2948,18 +2948,11 @@ static int __init genpd_debug_init(void) | |||
| 2948 | 2948 | ||
| 2949 | genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); | 2949 | genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); |
| 2950 | 2950 | ||
| 2951 | if (!genpd_debugfs_dir) | 2951 | debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir, |
| 2952 | return -ENOMEM; | 2952 | NULL, &summary_fops); |
| 2953 | |||
| 2954 | d = debugfs_create_file("pm_genpd_summary", S_IRUGO, | ||
| 2955 | genpd_debugfs_dir, NULL, &summary_fops); | ||
| 2956 | if (!d) | ||
| 2957 | return -ENOMEM; | ||
| 2958 | 2953 | ||
| 2959 | list_for_each_entry(genpd, &gpd_list, gpd_list_node) { | 2954 | list_for_each_entry(genpd, &gpd_list, gpd_list_node) { |
| 2960 | d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); | 2955 | d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); |
| 2961 | if (!d) | ||
| 2962 | return -ENOMEM; | ||
| 2963 | 2956 | ||
| 2964 | debugfs_create_file("current_state", 0444, | 2957 | debugfs_create_file("current_state", 0444, |
| 2965 | d, genpd, &status_fops); | 2958 | d, genpd, &status_fops); |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 0992e67e862b..893ae464bfd6 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
| @@ -124,6 +124,10 @@ void device_pm_unlock(void) | |||
| 124 | */ | 124 | */ |
| 125 | void device_pm_add(struct device *dev) | 125 | void device_pm_add(struct device *dev) |
| 126 | { | 126 | { |
| 127 | /* Skip PM setup/initialization. */ | ||
| 128 | if (device_pm_not_required(dev)) | ||
| 129 | return; | ||
| 130 | |||
| 127 | pr_debug("PM: Adding info for %s:%s\n", | 131 | pr_debug("PM: Adding info for %s:%s\n", |
| 128 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); | 132 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); |
| 129 | device_pm_check_callbacks(dev); | 133 | device_pm_check_callbacks(dev); |
| @@ -142,6 +146,9 @@ void device_pm_add(struct device *dev) | |||
| 142 | */ | 146 | */ |
| 143 | void device_pm_remove(struct device *dev) | 147 | void device_pm_remove(struct device *dev) |
| 144 | { | 148 | { |
| 149 | if (device_pm_not_required(dev)) | ||
| 150 | return; | ||
| 151 | |||
| 145 | pr_debug("PM: Removing info for %s:%s\n", | 152 | pr_debug("PM: Removing info for %s:%s\n", |
| 146 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); | 153 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); |
| 147 | complete_all(&dev->power.completion); | 154 | complete_all(&dev->power.completion); |
| @@ -1741,8 +1748,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
| 1741 | if (dev->power.direct_complete) { | 1748 | if (dev->power.direct_complete) { |
| 1742 | if (pm_runtime_status_suspended(dev)) { | 1749 | if (pm_runtime_status_suspended(dev)) { |
| 1743 | pm_runtime_disable(dev); | 1750 | pm_runtime_disable(dev); |
| 1744 | if (pm_runtime_status_suspended(dev)) | 1751 | if (pm_runtime_status_suspended(dev)) { |
| 1752 | pm_dev_dbg(dev, state, "direct-complete "); | ||
| 1745 | goto Complete; | 1753 | goto Complete; |
| 1754 | } | ||
| 1746 | 1755 | ||
| 1747 | pm_runtime_enable(dev); | 1756 | pm_runtime_enable(dev); |
| 1748 | } | 1757 | } |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index ccd296dbb95c..78937c45278c 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
| @@ -66,20 +66,30 @@ static int rpm_suspend(struct device *dev, int rpmflags); | |||
| 66 | */ | 66 | */ |
| 67 | void update_pm_runtime_accounting(struct device *dev) | 67 | void update_pm_runtime_accounting(struct device *dev) |
| 68 | { | 68 | { |
| 69 | unsigned long now = jiffies; | 69 | u64 now, last, delta; |
| 70 | unsigned long delta; | ||
| 71 | 70 | ||
| 72 | delta = now - dev->power.accounting_timestamp; | 71 | if (dev->power.disable_depth > 0) |
| 72 | return; | ||
| 73 | |||
| 74 | last = dev->power.accounting_timestamp; | ||
| 73 | 75 | ||
| 76 | now = ktime_get_mono_fast_ns(); | ||
| 74 | dev->power.accounting_timestamp = now; | 77 | dev->power.accounting_timestamp = now; |
| 75 | 78 | ||
| 76 | if (dev->power.disable_depth > 0) | 79 | /* |
| 80 | * Because ktime_get_mono_fast_ns() is not monotonic during | ||
| 81 | * timekeeping updates, ensure that 'now' is after the last saved | ||
| 82 | * timesptamp. | ||
| 83 | */ | ||
| 84 | if (now < last) | ||
| 77 | return; | 85 | return; |
| 78 | 86 | ||
| 87 | delta = now - last; | ||
| 88 | |||
| 79 | if (dev->power.runtime_status == RPM_SUSPENDED) | 89 | if (dev->power.runtime_status == RPM_SUSPENDED) |
| 80 | dev->power.suspended_jiffies += delta; | 90 | dev->power.suspended_time += delta; |
| 81 | else | 91 | else |
| 82 | dev->power.active_jiffies += delta; | 92 | dev->power.active_time += delta; |
| 83 | } | 93 | } |
| 84 | 94 | ||
| 85 | static void __update_runtime_status(struct device *dev, enum rpm_status status) | 95 | static void __update_runtime_status(struct device *dev, enum rpm_status status) |
| @@ -88,6 +98,22 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status) | |||
| 88 | dev->power.runtime_status = status; | 98 | dev->power.runtime_status = status; |
| 89 | } | 99 | } |
| 90 | 100 | ||
| 101 | u64 pm_runtime_suspended_time(struct device *dev) | ||
| 102 | { | ||
| 103 | u64 time; | ||
| 104 | unsigned long flags; | ||
| 105 | |||
| 106 | spin_lock_irqsave(&dev->power.lock, flags); | ||
| 107 | |||
| 108 | update_pm_runtime_accounting(dev); | ||
| 109 | time = dev->power.suspended_time; | ||
| 110 | |||
| 111 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
| 112 | |||
| 113 | return time; | ||
| 114 | } | ||
| 115 | EXPORT_SYMBOL_GPL(pm_runtime_suspended_time); | ||
| 116 | |||
| 91 | /** | 117 | /** |
| 92 | * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. | 118 | * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. |
| 93 | * @dev: Device to handle. | 119 | * @dev: Device to handle. |
| @@ -129,24 +155,21 @@ static void pm_runtime_cancel_pending(struct device *dev) | |||
| 129 | u64 pm_runtime_autosuspend_expiration(struct device *dev) | 155 | u64 pm_runtime_autosuspend_expiration(struct device *dev) |
| 130 | { | 156 | { |
| 131 | int autosuspend_delay; | 157 | int autosuspend_delay; |
| 132 | u64 last_busy, expires = 0; | 158 | u64 expires; |
| 133 | u64 now = ktime_get_mono_fast_ns(); | ||
| 134 | 159 | ||
| 135 | if (!dev->power.use_autosuspend) | 160 | if (!dev->power.use_autosuspend) |
| 136 | goto out; | 161 | return 0; |
| 137 | 162 | ||
| 138 | autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); | 163 | autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); |
| 139 | if (autosuspend_delay < 0) | 164 | if (autosuspend_delay < 0) |
| 140 | goto out; | 165 | return 0; |
| 141 | |||
| 142 | last_busy = READ_ONCE(dev->power.last_busy); | ||
| 143 | 166 | ||
| 144 | expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC; | 167 | expires = READ_ONCE(dev->power.last_busy); |
| 145 | if (expires <= now) | 168 | expires += (u64)autosuspend_delay * NSEC_PER_MSEC; |
| 146 | expires = 0; /* Already expired. */ | 169 | if (expires > ktime_get_mono_fast_ns()) |
| 170 | return expires; /* Expires in the future */ | ||
| 147 | 171 | ||
| 148 | out: | 172 | return 0; |
| 149 | return expires; | ||
| 150 | } | 173 | } |
| 151 | EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); | 174 | EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); |
| 152 | 175 | ||
| @@ -1276,6 +1299,9 @@ void __pm_runtime_disable(struct device *dev, bool check_resume) | |||
| 1276 | pm_runtime_put_noidle(dev); | 1299 | pm_runtime_put_noidle(dev); |
| 1277 | } | 1300 | } |
| 1278 | 1301 | ||
| 1302 | /* Update time accounting before disabling PM-runtime. */ | ||
| 1303 | update_pm_runtime_accounting(dev); | ||
| 1304 | |||
| 1279 | if (!dev->power.disable_depth++) | 1305 | if (!dev->power.disable_depth++) |
| 1280 | __pm_runtime_barrier(dev); | 1306 | __pm_runtime_barrier(dev); |
| 1281 | 1307 | ||
| @@ -1294,10 +1320,15 @@ void pm_runtime_enable(struct device *dev) | |||
| 1294 | 1320 | ||
| 1295 | spin_lock_irqsave(&dev->power.lock, flags); | 1321 | spin_lock_irqsave(&dev->power.lock, flags); |
| 1296 | 1322 | ||
| 1297 | if (dev->power.disable_depth > 0) | 1323 | if (dev->power.disable_depth > 0) { |
| 1298 | dev->power.disable_depth--; | 1324 | dev->power.disable_depth--; |
| 1299 | else | 1325 | |
| 1326 | /* About to enable runtime pm, set accounting_timestamp to now */ | ||
| 1327 | if (!dev->power.disable_depth) | ||
| 1328 | dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); | ||
| 1329 | } else { | ||
| 1300 | dev_warn(dev, "Unbalanced %s!\n", __func__); | 1330 | dev_warn(dev, "Unbalanced %s!\n", __func__); |
| 1331 | } | ||
| 1301 | 1332 | ||
| 1302 | WARN(!dev->power.disable_depth && | 1333 | WARN(!dev->power.disable_depth && |
| 1303 | dev->power.runtime_status == RPM_SUSPENDED && | 1334 | dev->power.runtime_status == RPM_SUSPENDED && |
| @@ -1494,7 +1525,6 @@ void pm_runtime_init(struct device *dev) | |||
| 1494 | dev->power.request_pending = false; | 1525 | dev->power.request_pending = false; |
| 1495 | dev->power.request = RPM_REQ_NONE; | 1526 | dev->power.request = RPM_REQ_NONE; |
| 1496 | dev->power.deferred_resume = false; | 1527 | dev->power.deferred_resume = false; |
| 1497 | dev->power.accounting_timestamp = jiffies; | ||
| 1498 | INIT_WORK(&dev->power.work, pm_runtime_work); | 1528 | INIT_WORK(&dev->power.work, pm_runtime_work); |
| 1499 | 1529 | ||
| 1500 | dev->power.timer_expires = 0; | 1530 | dev->power.timer_expires = 0; |
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index d713738ce796..c6bf76124184 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
| @@ -125,9 +125,12 @@ static ssize_t runtime_active_time_show(struct device *dev, | |||
| 125 | struct device_attribute *attr, char *buf) | 125 | struct device_attribute *attr, char *buf) |
| 126 | { | 126 | { |
| 127 | int ret; | 127 | int ret; |
| 128 | u64 tmp; | ||
| 128 | spin_lock_irq(&dev->power.lock); | 129 | spin_lock_irq(&dev->power.lock); |
| 129 | update_pm_runtime_accounting(dev); | 130 | update_pm_runtime_accounting(dev); |
| 130 | ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies)); | 131 | tmp = dev->power.active_time; |
| 132 | do_div(tmp, NSEC_PER_MSEC); | ||
| 133 | ret = sprintf(buf, "%llu\n", tmp); | ||
| 131 | spin_unlock_irq(&dev->power.lock); | 134 | spin_unlock_irq(&dev->power.lock); |
| 132 | return ret; | 135 | return ret; |
| 133 | } | 136 | } |
| @@ -138,10 +141,12 @@ static ssize_t runtime_suspended_time_show(struct device *dev, | |||
| 138 | struct device_attribute *attr, char *buf) | 141 | struct device_attribute *attr, char *buf) |
| 139 | { | 142 | { |
| 140 | int ret; | 143 | int ret; |
| 144 | u64 tmp; | ||
| 141 | spin_lock_irq(&dev->power.lock); | 145 | spin_lock_irq(&dev->power.lock); |
| 142 | update_pm_runtime_accounting(dev); | 146 | update_pm_runtime_accounting(dev); |
| 143 | ret = sprintf(buf, "%i\n", | 147 | tmp = dev->power.suspended_time; |
| 144 | jiffies_to_msecs(dev->power.suspended_jiffies)); | 148 | do_div(tmp, NSEC_PER_MSEC); |
| 149 | ret = sprintf(buf, "%llu\n", tmp); | ||
| 145 | spin_unlock_irq(&dev->power.lock); | 150 | spin_unlock_irq(&dev->power.lock); |
| 146 | return ret; | 151 | return ret; |
| 147 | } | 152 | } |
| @@ -648,6 +653,10 @@ int dpm_sysfs_add(struct device *dev) | |||
| 648 | { | 653 | { |
| 649 | int rc; | 654 | int rc; |
| 650 | 655 | ||
| 656 | /* No need to create PM sysfs if explicitly disabled. */ | ||
| 657 | if (device_pm_not_required(dev)) | ||
| 658 | return 0; | ||
| 659 | |||
| 651 | rc = sysfs_create_group(&dev->kobj, &pm_attr_group); | 660 | rc = sysfs_create_group(&dev->kobj, &pm_attr_group); |
| 652 | if (rc) | 661 | if (rc) |
| 653 | return rc; | 662 | return rc; |
| @@ -727,6 +736,8 @@ void rpm_sysfs_remove(struct device *dev) | |||
| 727 | 736 | ||
| 728 | void dpm_sysfs_remove(struct device *dev) | 737 | void dpm_sysfs_remove(struct device *dev) |
| 729 | { | 738 | { |
| 739 | if (device_pm_not_required(dev)) | ||
| 740 | return; | ||
| 730 | sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); | 741 | sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); |
| 731 | dev_pm_qos_constraints_destroy(dev); | 742 | dev_pm_qos_constraints_destroy(dev); |
| 732 | rpm_sysfs_remove(dev); | 743 | rpm_sysfs_remove(dev); |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 5fa1898755a3..f1fee72ed970 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
| @@ -783,7 +783,7 @@ void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard) | |||
| 783 | EXPORT_SYMBOL_GPL(pm_wakeup_ws_event); | 783 | EXPORT_SYMBOL_GPL(pm_wakeup_ws_event); |
| 784 | 784 | ||
| 785 | /** | 785 | /** |
| 786 | * pm_wakeup_event - Notify the PM core of a wakeup event. | 786 | * pm_wakeup_dev_event - Notify the PM core of a wakeup event. |
| 787 | * @dev: Device the wakeup event is related to. | 787 | * @dev: Device the wakeup event is related to. |
| 788 | * @msec: Anticipated event processing time (in milliseconds). | 788 | * @msec: Anticipated event processing time (in milliseconds). |
| 789 | * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. | 789 | * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. |
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 608af20a3494..b22e6bba71f1 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig | |||
| @@ -207,8 +207,6 @@ comment "CPU frequency scaling drivers" | |||
| 207 | config CPUFREQ_DT | 207 | config CPUFREQ_DT |
| 208 | tristate "Generic DT based cpufreq driver" | 208 | tristate "Generic DT based cpufreq driver" |
| 209 | depends on HAVE_CLK && OF | 209 | depends on HAVE_CLK && OF |
| 210 | # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y: | ||
| 211 | depends on !CPU_THERMAL || THERMAL | ||
| 212 | select CPUFREQ_DT_PLATDEV | 210 | select CPUFREQ_DT_PLATDEV |
| 213 | select PM_OPP | 211 | select PM_OPP |
| 214 | help | 212 | help |
| @@ -327,7 +325,6 @@ endif | |||
| 327 | config QORIQ_CPUFREQ | 325 | config QORIQ_CPUFREQ |
| 328 | tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" | 326 | tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" |
| 329 | depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64) | 327 | depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64) |
| 330 | depends on !CPU_THERMAL || THERMAL | ||
| 331 | select CLK_QORIQ | 328 | select CLK_QORIQ |
| 332 | help | 329 | help |
| 333 | This adds the CPUFreq driver support for Freescale QorIQ SoCs | 330 | This adds the CPUFreq driver support for Freescale QorIQ SoCs |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 1a6778e81f90..179a1d302f48 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
| @@ -25,12 +25,21 @@ config ARM_ARMADA_37XX_CPUFREQ | |||
| 25 | This adds the CPUFreq driver support for Marvell Armada 37xx SoCs. | 25 | This adds the CPUFreq driver support for Marvell Armada 37xx SoCs. |
| 26 | The Armada 37xx PMU supports 4 frequency and VDD levels. | 26 | The Armada 37xx PMU supports 4 frequency and VDD levels. |
| 27 | 27 | ||
| 28 | config ARM_ARMADA_8K_CPUFREQ | ||
| 29 | tristate "Armada 8K CPUFreq driver" | ||
| 30 | depends on ARCH_MVEBU && CPUFREQ_DT | ||
| 31 | help | ||
| 32 | This enables the CPUFreq driver support for Marvell | ||
| 33 | Armada8k SOCs. | ||
| 34 | Armada8K device has the AP806 which supports scaling | ||
| 35 | to any full integer divider. | ||
| 36 | |||
| 37 | If in doubt, say N. | ||
| 38 | |||
| 28 | # big LITTLE core layer and glue drivers | 39 | # big LITTLE core layer and glue drivers |
| 29 | config ARM_BIG_LITTLE_CPUFREQ | 40 | config ARM_BIG_LITTLE_CPUFREQ |
| 30 | tristate "Generic ARM big LITTLE CPUfreq driver" | 41 | tristate "Generic ARM big LITTLE CPUfreq driver" |
| 31 | depends on ARM_CPU_TOPOLOGY && HAVE_CLK | 42 | depends on ARM_CPU_TOPOLOGY && HAVE_CLK |
| 32 | # if CPU_THERMAL is on and THERMAL=m, ARM_BIT_LITTLE_CPUFREQ cannot be =y | ||
| 33 | depends on !CPU_THERMAL || THERMAL | ||
| 34 | select PM_OPP | 43 | select PM_OPP |
| 35 | help | 44 | help |
| 36 | This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. | 45 | This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. |
| @@ -38,7 +47,6 @@ config ARM_BIG_LITTLE_CPUFREQ | |||
| 38 | config ARM_SCPI_CPUFREQ | 47 | config ARM_SCPI_CPUFREQ |
| 39 | tristate "SCPI based CPUfreq driver" | 48 | tristate "SCPI based CPUfreq driver" |
| 40 | depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI | 49 | depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI |
| 41 | depends on !CPU_THERMAL || THERMAL | ||
| 42 | help | 50 | help |
| 43 | This adds the CPUfreq driver support for ARM platforms using SCPI | 51 | This adds the CPUfreq driver support for ARM platforms using SCPI |
| 44 | protocol for CPU power management. | 52 | protocol for CPU power management. |
| @@ -93,7 +101,6 @@ config ARM_KIRKWOOD_CPUFREQ | |||
| 93 | config ARM_MEDIATEK_CPUFREQ | 101 | config ARM_MEDIATEK_CPUFREQ |
| 94 | tristate "CPU Frequency scaling support for MediaTek SoCs" | 102 | tristate "CPU Frequency scaling support for MediaTek SoCs" |
| 95 | depends on ARCH_MEDIATEK && REGULATOR | 103 | depends on ARCH_MEDIATEK && REGULATOR |
| 96 | depends on !CPU_THERMAL || THERMAL | ||
| 97 | select PM_OPP | 104 | select PM_OPP |
| 98 | help | 105 | help |
| 99 | This adds the CPUFreq driver support for MediaTek SoCs. | 106 | This adds the CPUFreq driver support for MediaTek SoCs. |
| @@ -233,7 +240,6 @@ config ARM_SA1110_CPUFREQ | |||
| 233 | config ARM_SCMI_CPUFREQ | 240 | config ARM_SCMI_CPUFREQ |
| 234 | tristate "SCMI based CPUfreq driver" | 241 | tristate "SCMI based CPUfreq driver" |
| 235 | depends on ARM_SCMI_PROTOCOL || COMPILE_TEST | 242 | depends on ARM_SCMI_PROTOCOL || COMPILE_TEST |
| 236 | depends on !CPU_THERMAL || THERMAL | ||
| 237 | select PM_OPP | 243 | select PM_OPP |
| 238 | help | 244 | help |
| 239 | This adds the CPUfreq driver support for ARM platforms using SCMI | 245 | This adds the CPUfreq driver support for ARM platforms using SCMI |
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 08c071be2491..689b26c6f949 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile | |||
| @@ -50,6 +50,7 @@ obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o | |||
| 50 | obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o | 50 | obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o |
| 51 | 51 | ||
| 52 | obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o | 52 | obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o |
| 53 | obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o | ||
| 53 | obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o | 54 | obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o |
| 54 | obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o | 55 | obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o |
| 55 | obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o | 56 | obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o |
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index d62fd374d5c7..c72258a44ba4 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c | |||
| @@ -916,8 +916,10 @@ static void __init acpi_cpufreq_boost_init(void) | |||
| 916 | { | 916 | { |
| 917 | int ret; | 917 | int ret; |
| 918 | 918 | ||
| 919 | if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) | 919 | if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) { |
| 920 | pr_debug("Boost capabilities not present in the processor\n"); | ||
| 920 | return; | 921 | return; |
| 922 | } | ||
| 921 | 923 | ||
| 922 | acpi_cpufreq_driver.set_boost = set_boost; | 924 | acpi_cpufreq_driver.set_boost = set_boost; |
| 923 | acpi_cpufreq_driver.boost_enabled = boost_state(0); | 925 | acpi_cpufreq_driver.boost_enabled = boost_state(0); |
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index cf62a1f64dd7..7fe52fcddcf1 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c | |||
| @@ -487,6 +487,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) | |||
| 487 | policy->cpuinfo.transition_latency = | 487 | policy->cpuinfo.transition_latency = |
| 488 | arm_bL_ops->get_transition_latency(cpu_dev); | 488 | arm_bL_ops->get_transition_latency(cpu_dev); |
| 489 | 489 | ||
| 490 | dev_pm_opp_of_register_em(policy->cpus); | ||
| 491 | |||
| 490 | if (is_bL_switching_enabled()) | 492 | if (is_bL_switching_enabled()) |
| 491 | per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); | 493 | per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); |
| 492 | 494 | ||
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c new file mode 100644 index 000000000000..b3f4bd647e9b --- /dev/null +++ b/drivers/cpufreq/armada-8k-cpufreq.c | |||
| @@ -0,0 +1,206 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * CPUFreq support for Armada 8K | ||
| 4 | * | ||
| 5 | * Copyright (C) 2018 Marvell | ||
| 6 | * | ||
| 7 | * Omri Itach <omrii@marvell.com> | ||
| 8 | * Gregory Clement <gregory.clement@bootlin.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 12 | |||
| 13 | #include <linux/clk.h> | ||
| 14 | #include <linux/cpu.h> | ||
| 15 | #include <linux/err.h> | ||
| 16 | #include <linux/init.h> | ||
| 17 | #include <linux/kernel.h> | ||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/of.h> | ||
| 20 | #include <linux/platform_device.h> | ||
| 21 | #include <linux/pm_opp.h> | ||
| 22 | #include <linux/slab.h> | ||
| 23 | |||
| 24 | /* | ||
| 25 | * Setup the opps list with the divider for the max frequency, that | ||
| 26 | * will be filled at runtime. | ||
| 27 | */ | ||
| 28 | static const int opps_div[] __initconst = {1, 2, 3, 4}; | ||
| 29 | |||
| 30 | static struct platform_device *armada_8k_pdev; | ||
| 31 | |||
| 32 | struct freq_table { | ||
| 33 | struct device *cpu_dev; | ||
| 34 | unsigned int freq[ARRAY_SIZE(opps_div)]; | ||
| 35 | }; | ||
| 36 | |||
| 37 | /* If the CPUs share the same clock, then they are in the same cluster. */ | ||
| 38 | static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk, | ||
| 39 | struct cpumask *cpumask) | ||
| 40 | { | ||
| 41 | int cpu; | ||
| 42 | |||
| 43 | for_each_possible_cpu(cpu) { | ||
| 44 | struct device *cpu_dev; | ||
| 45 | struct clk *clk; | ||
| 46 | |||
| 47 | cpu_dev = get_cpu_device(cpu); | ||
| 48 | if (!cpu_dev) { | ||
| 49 | pr_warn("Failed to get cpu%d device\n", cpu); | ||
| 50 | continue; | ||
| 51 | } | ||
| 52 | |||
| 53 | clk = clk_get(cpu_dev, 0); | ||
| 54 | if (IS_ERR(clk)) { | ||
| 55 | pr_warn("Cannot get clock for CPU %d\n", cpu); | ||
| 56 | } else { | ||
| 57 | if (clk_is_match(clk, cur_clk)) | ||
| 58 | cpumask_set_cpu(cpu, cpumask); | ||
| 59 | |||
| 60 | clk_put(clk); | ||
| 61 | } | ||
| 62 | } | ||
| 63 | } | ||
| 64 | |||
| 65 | static int __init armada_8k_add_opp(struct clk *clk, struct device *cpu_dev, | ||
| 66 | struct freq_table *freq_tables, | ||
| 67 | int opps_index) | ||
| 68 | { | ||
| 69 | unsigned int cur_frequency; | ||
| 70 | unsigned int freq; | ||
| 71 | int i, ret; | ||
| 72 | |||
| 73 | /* Get nominal (current) CPU frequency. */ | ||
| 74 | cur_frequency = clk_get_rate(clk); | ||
| 75 | if (!cur_frequency) { | ||
| 76 | dev_err(cpu_dev, "Failed to get clock rate for this CPU\n"); | ||
| 77 | return -EINVAL; | ||
| 78 | } | ||
| 79 | |||
| 80 | freq_tables[opps_index].cpu_dev = cpu_dev; | ||
| 81 | |||
| 82 | for (i = 0; i < ARRAY_SIZE(opps_div); i++) { | ||
| 83 | freq = cur_frequency / opps_div[i]; | ||
| 84 | |||
| 85 | ret = dev_pm_opp_add(cpu_dev, freq, 0); | ||
| 86 | if (ret) | ||
| 87 | return ret; | ||
| 88 | |||
| 89 | freq_tables[opps_index].freq[i] = freq; | ||
| 90 | } | ||
| 91 | |||
| 92 | return 0; | ||
| 93 | } | ||
| 94 | |||
| 95 | static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables) | ||
| 96 | { | ||
| 97 | int opps_index, nb_cpus = num_possible_cpus(); | ||
| 98 | |||
| 99 | for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) { | ||
| 100 | int i; | ||
| 101 | |||
| 102 | /* If cpu_dev is NULL then we reached the end of the array */ | ||
| 103 | if (!freq_tables[opps_index].cpu_dev) | ||
| 104 | break; | ||
| 105 | |||
| 106 | for (i = 0; i < ARRAY_SIZE(opps_div); i++) { | ||
| 107 | /* | ||
| 108 | * A 0Hz frequency is not valid, this meant | ||
| 109 | * that it was not yet initialized so there is | ||
| 110 | * no more opp to free | ||
| 111 | */ | ||
| 112 | if (freq_tables[opps_index].freq[i] == 0) | ||
| 113 | break; | ||
| 114 | |||
| 115 | dev_pm_opp_remove(freq_tables[opps_index].cpu_dev, | ||
| 116 | freq_tables[opps_index].freq[i]); | ||
| 117 | } | ||
| 118 | } | ||
| 119 | |||
| 120 | kfree(freq_tables); | ||
| 121 | } | ||
| 122 | |||
| 123 | static int __init armada_8k_cpufreq_init(void) | ||
| 124 | { | ||
| 125 | int ret = 0, opps_index = 0, cpu, nb_cpus; | ||
| 126 | struct freq_table *freq_tables; | ||
| 127 | struct device_node *node; | ||
| 128 | struct cpumask cpus; | ||
| 129 | |||
| 130 | node = of_find_compatible_node(NULL, NULL, "marvell,ap806-cpu-clock"); | ||
| 131 | if (!node || !of_device_is_available(node)) { | ||
| 132 | of_node_put(node); | ||
| 133 | return -ENODEV; | ||
| 134 | } | ||
| 135 | |||
| 136 | nb_cpus = num_possible_cpus(); | ||
| 137 | freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL); | ||
| 138 | cpumask_copy(&cpus, cpu_possible_mask); | ||
| 139 | |||
| 140 | /* | ||
| 141 | * For each CPU, this loop registers the operating points | ||
| 142 | * supported (which are the nominal CPU frequency and full integer | ||
| 143 | * divisions of it). | ||
| 144 | */ | ||
| 145 | for_each_cpu(cpu, &cpus) { | ||
| 146 | struct cpumask shared_cpus; | ||
| 147 | struct device *cpu_dev; | ||
| 148 | struct clk *clk; | ||
| 149 | |||
| 150 | cpu_dev = get_cpu_device(cpu); | ||
| 151 | |||
| 152 | if (!cpu_dev) { | ||
| 153 | pr_err("Cannot get CPU %d\n", cpu); | ||
| 154 | continue; | ||
| 155 | } | ||
| 156 | |||
| 157 | clk = clk_get(cpu_dev, 0); | ||
| 158 | |||
| 159 | if (IS_ERR(clk)) { | ||
| 160 | pr_err("Cannot get clock for CPU %d\n", cpu); | ||
| 161 | ret = PTR_ERR(clk); | ||
| 162 | goto remove_opp; | ||
| 163 | } | ||
| 164 | |||
| 165 | ret = armada_8k_add_opp(clk, cpu_dev, freq_tables, opps_index); | ||
| 166 | if (ret) { | ||
| 167 | clk_put(clk); | ||
| 168 | goto remove_opp; | ||
| 169 | } | ||
| 170 | |||
| 171 | opps_index++; | ||
| 172 | cpumask_clear(&shared_cpus); | ||
| 173 | armada_8k_get_sharing_cpus(clk, &shared_cpus); | ||
| 174 | dev_pm_opp_set_sharing_cpus(cpu_dev, &shared_cpus); | ||
| 175 | cpumask_andnot(&cpus, &cpus, &shared_cpus); | ||
| 176 | clk_put(clk); | ||
| 177 | } | ||
| 178 | |||
| 179 | armada_8k_pdev = platform_device_register_simple("cpufreq-dt", -1, | ||
| 180 | NULL, 0); | ||
| 181 | ret = PTR_ERR_OR_ZERO(armada_8k_pdev); | ||
| 182 | if (ret) | ||
| 183 | goto remove_opp; | ||
| 184 | |||
| 185 | platform_set_drvdata(armada_8k_pdev, freq_tables); | ||
| 186 | |||
| 187 | return 0; | ||
| 188 | |||
| 189 | remove_opp: | ||
| 190 | armada_8k_cpufreq_free_table(freq_tables); | ||
| 191 | return ret; | ||
| 192 | } | ||
| 193 | module_init(armada_8k_cpufreq_init); | ||
| 194 | |||
| 195 | static void __exit armada_8k_cpufreq_exit(void) | ||
| 196 | { | ||
| 197 | struct freq_table *freq_tables = platform_get_drvdata(armada_8k_pdev); | ||
| 198 | |||
| 199 | platform_device_unregister(armada_8k_pdev); | ||
| 200 | armada_8k_cpufreq_free_table(freq_tables); | ||
| 201 | } | ||
| 202 | module_exit(armada_8k_cpufreq_exit); | ||
| 203 | |||
| 204 | MODULE_AUTHOR("Gregory Clement <gregory.clement@bootlin.com>"); | ||
| 205 | MODULE_DESCRIPTION("Armada 8K cpufreq driver"); | ||
| 206 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index fd25c21cee72..2ae978d27e61 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c | |||
| @@ -42,6 +42,66 @@ | |||
| 42 | */ | 42 | */ |
| 43 | static struct cppc_cpudata **all_cpu_data; | 43 | static struct cppc_cpudata **all_cpu_data; |
| 44 | 44 | ||
/*
 * ACPI OEM identification of a firmware build that needs the HiSilicon
 * CPPC workaround (see hisi_cppc_cpufreq_get_rate()).  Compared against
 * the PCCT header by cppc_check_hisi_workaround().
 */
struct cppc_workaround_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE +1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

/* True when the running platform matches an entry in wa_info[]. */
static bool apply_hisi_workaround;

/*
 * Known-affected platforms.  NOTE(review): the OEM id/table-id strings are
 * fixed-width, space-padded ACPI fields compared with memcmp() over the
 * full field size — the exact trailing-space padding must match the
 * firmware tables; confirm against the upstream patch.
 */
static struct cppc_workaround_oem_info wa_info[] = {
	{
		.oem_id = "HISI ",
		.oem_table_id = "HIP07 ",
		.oem_revision = 0,
	}, {
		.oem_id = "HISI ",
		.oem_table_id = "HIP08 ",
		.oem_revision = 0,
	}
};
| 64 | |||
| 65 | static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu, | ||
| 66 | unsigned int perf); | ||
| 67 | |||
| 68 | /* | ||
| 69 | * HISI platform does not support delivered performance counter and | ||
| 70 | * reference performance counter. It can calculate the performance using the | ||
| 71 | * platform specific mechanism. We reuse the desired performance register to | ||
| 72 | * store the real performance calculated by the platform. | ||
| 73 | */ | ||
/*
 * HISI platform does not support delivered performance counter and
 * reference performance counter. It can calculate the performance using the
 * platform specific mechanism. We reuse the desired performance register to
 * store the real performance calculated by the platform.
 *
 * NOTE(review): on failure this returns -EIO from an unsigned-int ->get()
 * callback, i.e. a very large frequency value; cpufreq ->get() conventionally
 * signals failure with 0 — confirm the intended failure value.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum)
{
	struct cppc_cpudata *cpudata = all_cpu_data[cpunum];
	u64 desired_perf;
	int ret;

	/* cppc_get_desired_perf() returns 0 on success, -EIO otherwise. */
	ret = cppc_get_desired_perf(cpunum, &desired_perf);
	if (ret < 0)
		return -EIO;

	/* Convert the abstract performance value to a frequency in KHz. */
	return cppc_cpufreq_perf_to_khz(cpudata, desired_perf);
}
| 86 | |||
| 87 | static void cppc_check_hisi_workaround(void) | ||
| 88 | { | ||
| 89 | struct acpi_table_header *tbl; | ||
| 90 | acpi_status status = AE_OK; | ||
| 91 | int i; | ||
| 92 | |||
| 93 | status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl); | ||
| 94 | if (ACPI_FAILURE(status) || !tbl) | ||
| 95 | return; | ||
| 96 | |||
| 97 | for (i = 0; i < ARRAY_SIZE(wa_info); i++) { | ||
| 98 | if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && | ||
| 99 | !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && | ||
| 100 | wa_info[i].oem_revision == tbl->oem_revision) | ||
| 101 | apply_hisi_workaround = true; | ||
| 102 | } | ||
| 103 | } | ||
| 104 | |||
| 45 | /* Callback function used to retrieve the max frequency from DMI */ | 105 | /* Callback function used to retrieve the max frequency from DMI */ |
| 46 | static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) | 106 | static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) |
| 47 | { | 107 | { |
| @@ -334,6 +394,9 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum) | |||
| 334 | struct cppc_cpudata *cpu = all_cpu_data[cpunum]; | 394 | struct cppc_cpudata *cpu = all_cpu_data[cpunum]; |
| 335 | int ret; | 395 | int ret; |
| 336 | 396 | ||
| 397 | if (apply_hisi_workaround) | ||
| 398 | return hisi_cppc_cpufreq_get_rate(cpunum); | ||
| 399 | |||
| 337 | ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0); | 400 | ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0); |
| 338 | if (ret) | 401 | if (ret) |
| 339 | return ret; | 402 | return ret; |
| @@ -386,6 +449,8 @@ static int __init cppc_cpufreq_init(void) | |||
| 386 | goto out; | 449 | goto out; |
| 387 | } | 450 | } |
| 388 | 451 | ||
| 452 | cppc_check_hisi_workaround(); | ||
| 453 | |||
| 389 | ret = cpufreq_register_driver(&cppc_cpufreq_driver); | 454 | ret = cpufreq_register_driver(&cppc_cpufreq_driver); |
| 390 | if (ret) | 455 | if (ret) |
| 391 | goto out; | 456 | goto out; |
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index e58bfcb1169e..bde28878725b 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c | |||
| @@ -13,7 +13,6 @@ | |||
| 13 | 13 | ||
| 14 | #include <linux/clk.h> | 14 | #include <linux/clk.h> |
| 15 | #include <linux/cpu.h> | 15 | #include <linux/cpu.h> |
| 16 | #include <linux/cpu_cooling.h> | ||
| 17 | #include <linux/cpufreq.h> | 16 | #include <linux/cpufreq.h> |
| 18 | #include <linux/cpumask.h> | 17 | #include <linux/cpumask.h> |
| 19 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| @@ -30,7 +29,6 @@ | |||
| 30 | struct private_data { | 29 | struct private_data { |
| 31 | struct opp_table *opp_table; | 30 | struct opp_table *opp_table; |
| 32 | struct device *cpu_dev; | 31 | struct device *cpu_dev; |
| 33 | struct thermal_cooling_device *cdev; | ||
| 34 | const char *reg_name; | 32 | const char *reg_name; |
| 35 | bool have_static_opps; | 33 | bool have_static_opps; |
| 36 | }; | 34 | }; |
| @@ -280,6 +278,8 @@ static int cpufreq_init(struct cpufreq_policy *policy) | |||
| 280 | policy->cpuinfo.transition_latency = transition_latency; | 278 | policy->cpuinfo.transition_latency = transition_latency; |
| 281 | policy->dvfs_possible_from_any_cpu = true; | 279 | policy->dvfs_possible_from_any_cpu = true; |
| 282 | 280 | ||
| 281 | dev_pm_opp_of_register_em(policy->cpus); | ||
| 282 | |||
| 283 | return 0; | 283 | return 0; |
| 284 | 284 | ||
| 285 | out_free_cpufreq_table: | 285 | out_free_cpufreq_table: |
| @@ -297,11 +297,25 @@ out_put_clk: | |||
| 297 | return ret; | 297 | return ret; |
| 298 | } | 298 | } |
| 299 | 299 | ||
| 300 | static int cpufreq_online(struct cpufreq_policy *policy) | ||
| 301 | { | ||
| 302 | /* We did light-weight tear down earlier, nothing to do here */ | ||
| 303 | return 0; | ||
| 304 | } | ||
| 305 | |||
| 306 | static int cpufreq_offline(struct cpufreq_policy *policy) | ||
| 307 | { | ||
| 308 | /* | ||
| 309 | * Preserve policy->driver_data and don't free resources on light-weight | ||
| 310 | * tear down. | ||
| 311 | */ | ||
| 312 | return 0; | ||
| 313 | } | ||
| 314 | |||
| 300 | static int cpufreq_exit(struct cpufreq_policy *policy) | 315 | static int cpufreq_exit(struct cpufreq_policy *policy) |
| 301 | { | 316 | { |
| 302 | struct private_data *priv = policy->driver_data; | 317 | struct private_data *priv = policy->driver_data; |
| 303 | 318 | ||
| 304 | cpufreq_cooling_unregister(priv->cdev); | ||
| 305 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); | 319 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); |
| 306 | if (priv->have_static_opps) | 320 | if (priv->have_static_opps) |
| 307 | dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); | 321 | dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); |
| @@ -314,21 +328,16 @@ static int cpufreq_exit(struct cpufreq_policy *policy) | |||
| 314 | return 0; | 328 | return 0; |
| 315 | } | 329 | } |
| 316 | 330 | ||
| 317 | static void cpufreq_ready(struct cpufreq_policy *policy) | ||
| 318 | { | ||
| 319 | struct private_data *priv = policy->driver_data; | ||
| 320 | |||
| 321 | priv->cdev = of_cpufreq_cooling_register(policy); | ||
| 322 | } | ||
| 323 | |||
| 324 | static struct cpufreq_driver dt_cpufreq_driver = { | 331 | static struct cpufreq_driver dt_cpufreq_driver = { |
| 325 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, | 332 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | |
| 333 | CPUFREQ_IS_COOLING_DEV, | ||
| 326 | .verify = cpufreq_generic_frequency_table_verify, | 334 | .verify = cpufreq_generic_frequency_table_verify, |
| 327 | .target_index = set_target, | 335 | .target_index = set_target, |
| 328 | .get = cpufreq_generic_get, | 336 | .get = cpufreq_generic_get, |
| 329 | .init = cpufreq_init, | 337 | .init = cpufreq_init, |
| 330 | .exit = cpufreq_exit, | 338 | .exit = cpufreq_exit, |
| 331 | .ready = cpufreq_ready, | 339 | .online = cpufreq_online, |
| 340 | .offline = cpufreq_offline, | ||
| 332 | .name = "cpufreq-dt", | 341 | .name = "cpufreq-dt", |
| 333 | .attr = cpufreq_dt_attr, | 342 | .attr = cpufreq_dt_attr, |
| 334 | .suspend = cpufreq_generic_suspend, | 343 | .suspend = cpufreq_generic_suspend, |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index e35a886e00bc..0e626b00053b 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | 19 | ||
| 20 | #include <linux/cpu.h> | 20 | #include <linux/cpu.h> |
| 21 | #include <linux/cpufreq.h> | 21 | #include <linux/cpufreq.h> |
| 22 | #include <linux/cpu_cooling.h> | ||
| 22 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
| 23 | #include <linux/device.h> | 24 | #include <linux/device.h> |
| 24 | #include <linux/init.h> | 25 | #include <linux/init.h> |
| @@ -545,13 +546,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us); | |||
| 545 | * SYSFS INTERFACE * | 546 | * SYSFS INTERFACE * |
| 546 | *********************************************************************/ | 547 | *********************************************************************/ |
| 547 | static ssize_t show_boost(struct kobject *kobj, | 548 | static ssize_t show_boost(struct kobject *kobj, |
| 548 | struct attribute *attr, char *buf) | 549 | struct kobj_attribute *attr, char *buf) |
| 549 | { | 550 | { |
| 550 | return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); | 551 | return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); |
| 551 | } | 552 | } |
| 552 | 553 | ||
| 553 | static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, | 554 | static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr, |
| 554 | const char *buf, size_t count) | 555 | const char *buf, size_t count) |
| 555 | { | 556 | { |
| 556 | int ret, enable; | 557 | int ret, enable; |
| 557 | 558 | ||
| @@ -1200,28 +1201,39 @@ static int cpufreq_online(unsigned int cpu) | |||
| 1200 | return -ENOMEM; | 1201 | return -ENOMEM; |
| 1201 | } | 1202 | } |
| 1202 | 1203 | ||
| 1203 | cpumask_copy(policy->cpus, cpumask_of(cpu)); | 1204 | if (!new_policy && cpufreq_driver->online) { |
| 1205 | ret = cpufreq_driver->online(policy); | ||
| 1206 | if (ret) { | ||
| 1207 | pr_debug("%s: %d: initialization failed\n", __func__, | ||
| 1208 | __LINE__); | ||
| 1209 | goto out_exit_policy; | ||
| 1210 | } | ||
| 1204 | 1211 | ||
| 1205 | /* call driver. From then on the cpufreq must be able | 1212 | /* Recover policy->cpus using related_cpus */ |
| 1206 | * to accept all calls to ->verify and ->setpolicy for this CPU | 1213 | cpumask_copy(policy->cpus, policy->related_cpus); |
| 1207 | */ | 1214 | } else { |
| 1208 | ret = cpufreq_driver->init(policy); | 1215 | cpumask_copy(policy->cpus, cpumask_of(cpu)); |
| 1209 | if (ret) { | ||
| 1210 | pr_debug("initialization failed\n"); | ||
| 1211 | goto out_free_policy; | ||
| 1212 | } | ||
| 1213 | 1216 | ||
| 1214 | ret = cpufreq_table_validate_and_sort(policy); | 1217 | /* |
| 1215 | if (ret) | 1218 | * Call driver. From then on the cpufreq must be able |
| 1216 | goto out_exit_policy; | 1219 | * to accept all calls to ->verify and ->setpolicy for this CPU. |
| 1220 | */ | ||
| 1221 | ret = cpufreq_driver->init(policy); | ||
| 1222 | if (ret) { | ||
| 1223 | pr_debug("%s: %d: initialization failed\n", __func__, | ||
| 1224 | __LINE__); | ||
| 1225 | goto out_free_policy; | ||
| 1226 | } | ||
| 1217 | 1227 | ||
| 1218 | down_write(&policy->rwsem); | 1228 | ret = cpufreq_table_validate_and_sort(policy); |
| 1229 | if (ret) | ||
| 1230 | goto out_exit_policy; | ||
| 1219 | 1231 | ||
| 1220 | if (new_policy) { | ||
| 1221 | /* related_cpus should at least include policy->cpus. */ | 1232 | /* related_cpus should at least include policy->cpus. */ |
| 1222 | cpumask_copy(policy->related_cpus, policy->cpus); | 1233 | cpumask_copy(policy->related_cpus, policy->cpus); |
| 1223 | } | 1234 | } |
| 1224 | 1235 | ||
| 1236 | down_write(&policy->rwsem); | ||
| 1225 | /* | 1237 | /* |
| 1226 | * affected cpus must always be the one, which are online. We aren't | 1238 | * affected cpus must always be the one, which are online. We aren't |
| 1227 | * managing offline cpus here. | 1239 | * managing offline cpus here. |
| @@ -1305,8 +1317,6 @@ static int cpufreq_online(unsigned int cpu) | |||
| 1305 | if (ret) { | 1317 | if (ret) { |
| 1306 | pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", | 1318 | pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", |
| 1307 | __func__, cpu, ret); | 1319 | __func__, cpu, ret); |
| 1308 | /* cpufreq_policy_free() will notify based on this */ | ||
| 1309 | new_policy = false; | ||
| 1310 | goto out_destroy_policy; | 1320 | goto out_destroy_policy; |
| 1311 | } | 1321 | } |
| 1312 | 1322 | ||
| @@ -1318,6 +1328,10 @@ static int cpufreq_online(unsigned int cpu) | |||
| 1318 | if (cpufreq_driver->ready) | 1328 | if (cpufreq_driver->ready) |
| 1319 | cpufreq_driver->ready(policy); | 1329 | cpufreq_driver->ready(policy); |
| 1320 | 1330 | ||
| 1331 | if (IS_ENABLED(CONFIG_CPU_THERMAL) && | ||
| 1332 | cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV) | ||
| 1333 | policy->cdev = of_cpufreq_cooling_register(policy); | ||
| 1334 | |||
| 1321 | pr_debug("initialization complete\n"); | 1335 | pr_debug("initialization complete\n"); |
| 1322 | 1336 | ||
| 1323 | return 0; | 1337 | return 0; |
| @@ -1405,6 +1419,12 @@ static int cpufreq_offline(unsigned int cpu) | |||
| 1405 | goto unlock; | 1419 | goto unlock; |
| 1406 | } | 1420 | } |
| 1407 | 1421 | ||
| 1422 | if (IS_ENABLED(CONFIG_CPU_THERMAL) && | ||
| 1423 | cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV) { | ||
| 1424 | cpufreq_cooling_unregister(policy->cdev); | ||
| 1425 | policy->cdev = NULL; | ||
| 1426 | } | ||
| 1427 | |||
| 1408 | if (cpufreq_driver->stop_cpu) | 1428 | if (cpufreq_driver->stop_cpu) |
| 1409 | cpufreq_driver->stop_cpu(policy); | 1429 | cpufreq_driver->stop_cpu(policy); |
| 1410 | 1430 | ||
| @@ -1412,11 +1432,12 @@ static int cpufreq_offline(unsigned int cpu) | |||
| 1412 | cpufreq_exit_governor(policy); | 1432 | cpufreq_exit_governor(policy); |
| 1413 | 1433 | ||
| 1414 | /* | 1434 | /* |
| 1415 | * Perform the ->exit() even during light-weight tear-down, | 1435 | * Perform the ->offline() during light-weight tear-down, as |
| 1416 | * since this is a core component, and is essential for the | 1436 | * that allows fast recovery when the CPU comes back. |
| 1417 | * subsequent light-weight ->init() to succeed. | ||
| 1418 | */ | 1437 | */ |
| 1419 | if (cpufreq_driver->exit) { | 1438 | if (cpufreq_driver->offline) { |
| 1439 | cpufreq_driver->offline(policy); | ||
| 1440 | } else if (cpufreq_driver->exit) { | ||
| 1420 | cpufreq_driver->exit(policy); | 1441 | cpufreq_driver->exit(policy); |
| 1421 | policy->freq_table = NULL; | 1442 | policy->freq_table = NULL; |
| 1422 | } | 1443 | } |
| @@ -1445,8 +1466,13 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) | |||
| 1445 | cpumask_clear_cpu(cpu, policy->real_cpus); | 1466 | cpumask_clear_cpu(cpu, policy->real_cpus); |
| 1446 | remove_cpu_dev_symlink(policy, dev); | 1467 | remove_cpu_dev_symlink(policy, dev); |
| 1447 | 1468 | ||
| 1448 | if (cpumask_empty(policy->real_cpus)) | 1469 | if (cpumask_empty(policy->real_cpus)) { |
| 1470 | /* We did light-weight exit earlier, do full tear down now */ | ||
| 1471 | if (cpufreq_driver->offline) | ||
| 1472 | cpufreq_driver->exit(policy); | ||
| 1473 | |||
| 1449 | cpufreq_policy_free(policy); | 1474 | cpufreq_policy_free(policy); |
| 1475 | } | ||
| 1450 | } | 1476 | } |
| 1451 | 1477 | ||
| 1452 | /** | 1478 | /** |
| @@ -2192,12 +2218,25 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) | |||
| 2192 | } | 2218 | } |
| 2193 | EXPORT_SYMBOL(cpufreq_get_policy); | 2219 | EXPORT_SYMBOL(cpufreq_get_policy); |
| 2194 | 2220 | ||
| 2195 | /* | 2221 | /** |
| 2196 | * policy : current policy. | 2222 | * cpufreq_set_policy - Modify cpufreq policy parameters. |
| 2197 | * new_policy: policy to be set. | 2223 | * @policy: Policy object to modify. |
| 2224 | * @new_policy: New policy data. | ||
| 2225 | * | ||
| 2226 | * Pass @new_policy to the cpufreq driver's ->verify() callback, run the | ||
| 2227 | * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to | ||
| 2228 | * the driver's ->verify() callback again and run the notifiers for it again | ||
| 2229 | * with the CPUFREQ_NOTIFY value. Next, copy the min and max parameters | ||
| 2230 | * of @new_policy to @policy and either invoke the driver's ->setpolicy() | ||
| 2231 | * callback (if present) or carry out a governor update for @policy. That is, | ||
| 2232 | * run the current governor's ->limits() callback (if the governor field in | ||
| 2233 | * @new_policy points to the same object as the one in @policy) or replace the | ||
| 2234 | * governor for @policy with the new one stored in @new_policy. | ||
| 2235 | * | ||
| 2236 | * The cpuinfo part of @policy is not updated by this function. | ||
| 2198 | */ | 2237 | */ |
| 2199 | static int cpufreq_set_policy(struct cpufreq_policy *policy, | 2238 | static int cpufreq_set_policy(struct cpufreq_policy *policy, |
| 2200 | struct cpufreq_policy *new_policy) | 2239 | struct cpufreq_policy *new_policy) |
| 2201 | { | 2240 | { |
| 2202 | struct cpufreq_governor *old_gov; | 2241 | struct cpufreq_governor *old_gov; |
| 2203 | int ret; | 2242 | int ret; |
| @@ -2247,11 +2286,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, | |||
| 2247 | if (cpufreq_driver->setpolicy) { | 2286 | if (cpufreq_driver->setpolicy) { |
| 2248 | policy->policy = new_policy->policy; | 2287 | policy->policy = new_policy->policy; |
| 2249 | pr_debug("setting range\n"); | 2288 | pr_debug("setting range\n"); |
| 2250 | return cpufreq_driver->setpolicy(new_policy); | 2289 | return cpufreq_driver->setpolicy(policy); |
| 2251 | } | 2290 | } |
| 2252 | 2291 | ||
| 2253 | if (new_policy->governor == policy->governor) { | 2292 | if (new_policy->governor == policy->governor) { |
| 2254 | pr_debug("cpufreq: governor limits update\n"); | 2293 | pr_debug("governor limits update\n"); |
| 2255 | cpufreq_governor_limits(policy); | 2294 | cpufreq_governor_limits(policy); |
| 2256 | return 0; | 2295 | return 0; |
| 2257 | } | 2296 | } |
| @@ -2272,7 +2311,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, | |||
| 2272 | if (!ret) { | 2311 | if (!ret) { |
| 2273 | ret = cpufreq_start_governor(policy); | 2312 | ret = cpufreq_start_governor(policy); |
| 2274 | if (!ret) { | 2313 | if (!ret) { |
| 2275 | pr_debug("cpufreq: governor change\n"); | 2314 | pr_debug("governor change\n"); |
| 2276 | sched_cpufreq_governor_change(policy, old_gov); | 2315 | sched_cpufreq_governor_change(policy, old_gov); |
| 2277 | return 0; | 2316 | return 0; |
| 2278 | } | 2317 | } |
| @@ -2293,11 +2332,14 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, | |||
| 2293 | } | 2332 | } |
| 2294 | 2333 | ||
| 2295 | /** | 2334 | /** |
| 2296 | * cpufreq_update_policy - re-evaluate an existing cpufreq policy | 2335 | * cpufreq_update_policy - Re-evaluate an existing cpufreq policy. |
| 2297 | * @cpu: CPU which shall be re-evaluated | 2336 | * @cpu: CPU to re-evaluate the policy for. |
| 2298 | * | 2337 | * |
| 2299 | * Useful for policy notifiers which have different necessities | 2338 | * Update the current frequency for the cpufreq policy of @cpu and use |
| 2300 | * at different times. | 2339 | * cpufreq_set_policy() to re-apply the min and max limits saved in the |
| 2340 | * user_policy sub-structure of that policy, which triggers the evaluation | ||
| 2341 | * of policy notifiers and the cpufreq driver's ->verify() callback for the | ||
| 2342 | * policy in question, among other things. | ||
| 2301 | */ | 2343 | */ |
| 2302 | void cpufreq_update_policy(unsigned int cpu) | 2344 | void cpufreq_update_policy(unsigned int cpu) |
| 2303 | { | 2345 | { |
| @@ -2312,23 +2354,18 @@ void cpufreq_update_policy(unsigned int cpu) | |||
| 2312 | if (policy_is_inactive(policy)) | 2354 | if (policy_is_inactive(policy)) |
| 2313 | goto unlock; | 2355 | goto unlock; |
| 2314 | 2356 | ||
| 2315 | pr_debug("updating policy for CPU %u\n", cpu); | ||
| 2316 | memcpy(&new_policy, policy, sizeof(*policy)); | ||
| 2317 | new_policy.min = policy->user_policy.min; | ||
| 2318 | new_policy.max = policy->user_policy.max; | ||
| 2319 | |||
| 2320 | /* | 2357 | /* |
| 2321 | * BIOS might change freq behind our back | 2358 | * BIOS might change freq behind our back |
| 2322 | * -> ask driver for current freq and notify governors about a change | 2359 | * -> ask driver for current freq and notify governors about a change |
| 2323 | */ | 2360 | */ |
| 2324 | if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { | 2361 | if (cpufreq_driver->get && !cpufreq_driver->setpolicy && |
| 2325 | if (cpufreq_suspended) | 2362 | (cpufreq_suspended || WARN_ON(!cpufreq_update_current_freq(policy)))) |
| 2326 | goto unlock; | 2363 | goto unlock; |
| 2327 | 2364 | ||
| 2328 | new_policy.cur = cpufreq_update_current_freq(policy); | 2365 | pr_debug("updating policy for CPU %u\n", cpu); |
| 2329 | if (WARN_ON(!new_policy.cur)) | 2366 | memcpy(&new_policy, policy, sizeof(*policy)); |
| 2330 | goto unlock; | 2367 | new_policy.min = policy->user_policy.min; |
| 2331 | } | 2368 | new_policy.max = policy->user_policy.max; |
| 2332 | 2369 | ||
| 2333 | cpufreq_set_policy(policy, &new_policy); | 2370 | cpufreq_set_policy(policy, &new_policy); |
| 2334 | 2371 | ||
| @@ -2479,7 +2516,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
| 2479 | driver_data->target) || | 2516 | driver_data->target) || |
| 2480 | (driver_data->setpolicy && (driver_data->target_index || | 2517 | (driver_data->setpolicy && (driver_data->target_index || |
| 2481 | driver_data->target)) || | 2518 | driver_data->target)) || |
| 2482 | (!!driver_data->get_intermediate != !!driver_data->target_intermediate)) | 2519 | (!driver_data->get_intermediate != !driver_data->target_intermediate) || |
| 2520 | (!driver_data->online != !driver_data->offline)) | ||
| 2483 | return -EINVAL; | 2521 | return -EINVAL; |
| 2484 | 2522 | ||
| 2485 | pr_debug("trying to register driver %s\n", driver_data->name); | 2523 | pr_debug("trying to register driver %s\n", driver_data->name); |
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 1572129844a5..e2db5581489a 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
| @@ -31,26 +31,27 @@ static void cpufreq_stats_update(struct cpufreq_stats *stats) | |||
| 31 | { | 31 | { |
| 32 | unsigned long long cur_time = get_jiffies_64(); | 32 | unsigned long long cur_time = get_jiffies_64(); |
| 33 | 33 | ||
| 34 | spin_lock(&cpufreq_stats_lock); | ||
| 35 | stats->time_in_state[stats->last_index] += cur_time - stats->last_time; | 34 | stats->time_in_state[stats->last_index] += cur_time - stats->last_time; |
| 36 | stats->last_time = cur_time; | 35 | stats->last_time = cur_time; |
| 37 | spin_unlock(&cpufreq_stats_lock); | ||
| 38 | } | 36 | } |
| 39 | 37 | ||
| 40 | static void cpufreq_stats_clear_table(struct cpufreq_stats *stats) | 38 | static void cpufreq_stats_clear_table(struct cpufreq_stats *stats) |
| 41 | { | 39 | { |
| 42 | unsigned int count = stats->max_state; | 40 | unsigned int count = stats->max_state; |
| 43 | 41 | ||
| 42 | spin_lock(&cpufreq_stats_lock); | ||
| 44 | memset(stats->time_in_state, 0, count * sizeof(u64)); | 43 | memset(stats->time_in_state, 0, count * sizeof(u64)); |
| 45 | memset(stats->trans_table, 0, count * count * sizeof(int)); | 44 | memset(stats->trans_table, 0, count * count * sizeof(int)); |
| 46 | stats->last_time = get_jiffies_64(); | 45 | stats->last_time = get_jiffies_64(); |
| 47 | stats->total_trans = 0; | 46 | stats->total_trans = 0; |
| 47 | spin_unlock(&cpufreq_stats_lock); | ||
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf) | 50 | static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf) |
| 51 | { | 51 | { |
| 52 | return sprintf(buf, "%d\n", policy->stats->total_trans); | 52 | return sprintf(buf, "%d\n", policy->stats->total_trans); |
| 53 | } | 53 | } |
| 54 | cpufreq_freq_attr_ro(total_trans); | ||
| 54 | 55 | ||
| 55 | static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) | 56 | static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) |
| 56 | { | 57 | { |
| @@ -61,7 +62,10 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) | |||
| 61 | if (policy->fast_switch_enabled) | 62 | if (policy->fast_switch_enabled) |
| 62 | return 0; | 63 | return 0; |
| 63 | 64 | ||
| 65 | spin_lock(&cpufreq_stats_lock); | ||
| 64 | cpufreq_stats_update(stats); | 66 | cpufreq_stats_update(stats); |
| 67 | spin_unlock(&cpufreq_stats_lock); | ||
| 68 | |||
| 65 | for (i = 0; i < stats->state_num; i++) { | 69 | for (i = 0; i < stats->state_num; i++) { |
| 66 | len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i], | 70 | len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i], |
| 67 | (unsigned long long) | 71 | (unsigned long long) |
| @@ -69,6 +73,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) | |||
| 69 | } | 73 | } |
| 70 | return len; | 74 | return len; |
| 71 | } | 75 | } |
| 76 | cpufreq_freq_attr_ro(time_in_state); | ||
| 72 | 77 | ||
| 73 | static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf, | 78 | static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf, |
| 74 | size_t count) | 79 | size_t count) |
| @@ -77,6 +82,7 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf, | |||
| 77 | cpufreq_stats_clear_table(policy->stats); | 82 | cpufreq_stats_clear_table(policy->stats); |
| 78 | return count; | 83 | return count; |
| 79 | } | 84 | } |
| 85 | cpufreq_freq_attr_wo(reset); | ||
| 80 | 86 | ||
| 81 | static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) | 87 | static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) |
| 82 | { | 88 | { |
| @@ -126,10 +132,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) | |||
| 126 | } | 132 | } |
| 127 | cpufreq_freq_attr_ro(trans_table); | 133 | cpufreq_freq_attr_ro(trans_table); |
| 128 | 134 | ||
| 129 | cpufreq_freq_attr_ro(total_trans); | ||
| 130 | cpufreq_freq_attr_ro(time_in_state); | ||
| 131 | cpufreq_freq_attr_wo(reset); | ||
| 132 | |||
| 133 | static struct attribute *default_attrs[] = { | 135 | static struct attribute *default_attrs[] = { |
| 134 | &total_trans.attr, | 136 | &total_trans.attr, |
| 135 | &time_in_state.attr, | 137 | &time_in_state.attr, |
| @@ -240,9 +242,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy, | |||
| 240 | if (old_index == -1 || new_index == -1 || old_index == new_index) | 242 | if (old_index == -1 || new_index == -1 || old_index == new_index) |
| 241 | return; | 243 | return; |
| 242 | 244 | ||
| 245 | spin_lock(&cpufreq_stats_lock); | ||
| 243 | cpufreq_stats_update(stats); | 246 | cpufreq_stats_update(stats); |
| 244 | 247 | ||
| 245 | stats->last_index = new_index; | 248 | stats->last_index = new_index; |
| 246 | stats->trans_table[old_index * stats->max_state + new_index]++; | 249 | stats->trans_table[old_index * stats->max_state + new_index]++; |
| 247 | stats->total_trans++; | 250 | stats->total_trans++; |
| 251 | spin_unlock(&cpufreq_stats_lock); | ||
| 248 | } | 252 | } |
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c index d54a27c99121..940fe85db97a 100644 --- a/drivers/cpufreq/davinci-cpufreq.c +++ b/drivers/cpufreq/davinci-cpufreq.c | |||
| @@ -23,13 +23,10 @@ | |||
| 23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
| 24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
| 25 | #include <linux/clk.h> | 25 | #include <linux/clk.h> |
| 26 | #include <linux/platform_data/davinci-cpufreq.h> | ||
| 26 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
| 27 | #include <linux/export.h> | 28 | #include <linux/export.h> |
| 28 | 29 | ||
| 29 | #include <mach/hardware.h> | ||
| 30 | #include <mach/cpufreq.h> | ||
| 31 | #include <mach/common.h> | ||
| 32 | |||
| 33 | struct davinci_cpufreq { | 30 | struct davinci_cpufreq { |
| 34 | struct device *dev; | 31 | struct device *dev; |
| 35 | struct clk *armclk; | 32 | struct clk *armclk; |
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index 60bea302abbe..2d3ef208dd70 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c | |||
| @@ -323,9 +323,8 @@ static int eps_cpu_init(struct cpufreq_policy *policy) | |||
| 323 | states = 2; | 323 | states = 2; |
| 324 | 324 | ||
| 325 | /* Allocate private data and frequency table for current cpu */ | 325 | /* Allocate private data and frequency table for current cpu */ |
| 326 | centaur = kzalloc(sizeof(*centaur) | 326 | centaur = kzalloc(struct_size(centaur, freq_table, states + 1), |
| 327 | + (states + 1) * sizeof(struct cpufreq_frequency_table), | 327 | GFP_KERNEL); |
| 328 | GFP_KERNEL); | ||
| 329 | if (!centaur) | 328 | if (!centaur) |
| 330 | return -ENOMEM; | 329 | return -ENOMEM; |
| 331 | eps_cpu[0] = centaur; | 330 | eps_cpu[0] = centaur; |
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index 9fedf627e000..a4ff09f91c8f 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c | |||
| @@ -9,7 +9,6 @@ | |||
| 9 | #include <linux/clk.h> | 9 | #include <linux/clk.h> |
| 10 | #include <linux/cpu.h> | 10 | #include <linux/cpu.h> |
| 11 | #include <linux/cpufreq.h> | 11 | #include <linux/cpufreq.h> |
| 12 | #include <linux/cpu_cooling.h> | ||
| 13 | #include <linux/err.h> | 12 | #include <linux/err.h> |
| 14 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 15 | #include <linux/nvmem-consumer.h> | 14 | #include <linux/nvmem-consumer.h> |
| @@ -52,7 +51,6 @@ static struct clk_bulk_data clks[] = { | |||
| 52 | }; | 51 | }; |
| 53 | 52 | ||
| 54 | static struct device *cpu_dev; | 53 | static struct device *cpu_dev; |
| 55 | static struct thermal_cooling_device *cdev; | ||
| 56 | static bool free_opp; | 54 | static bool free_opp; |
| 57 | static struct cpufreq_frequency_table *freq_table; | 55 | static struct cpufreq_frequency_table *freq_table; |
| 58 | static unsigned int max_freq; | 56 | static unsigned int max_freq; |
| @@ -193,16 +191,6 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) | |||
| 193 | return 0; | 191 | return 0; |
| 194 | } | 192 | } |
| 195 | 193 | ||
| 196 | static void imx6q_cpufreq_ready(struct cpufreq_policy *policy) | ||
| 197 | { | ||
| 198 | cdev = of_cpufreq_cooling_register(policy); | ||
| 199 | |||
| 200 | if (!cdev) | ||
| 201 | dev_err(cpu_dev, | ||
| 202 | "running cpufreq without cooling device: %ld\n", | ||
| 203 | PTR_ERR(cdev)); | ||
| 204 | } | ||
| 205 | |||
| 206 | static int imx6q_cpufreq_init(struct cpufreq_policy *policy) | 194 | static int imx6q_cpufreq_init(struct cpufreq_policy *policy) |
| 207 | { | 195 | { |
| 208 | int ret; | 196 | int ret; |
| @@ -210,26 +198,19 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy) | |||
| 210 | policy->clk = clks[ARM].clk; | 198 | policy->clk = clks[ARM].clk; |
| 211 | ret = cpufreq_generic_init(policy, freq_table, transition_latency); | 199 | ret = cpufreq_generic_init(policy, freq_table, transition_latency); |
| 212 | policy->suspend_freq = max_freq; | 200 | policy->suspend_freq = max_freq; |
| 201 | dev_pm_opp_of_register_em(policy->cpus); | ||
| 213 | 202 | ||
| 214 | return ret; | 203 | return ret; |
| 215 | } | 204 | } |
| 216 | 205 | ||
| 217 | static int imx6q_cpufreq_exit(struct cpufreq_policy *policy) | ||
| 218 | { | ||
| 219 | cpufreq_cooling_unregister(cdev); | ||
| 220 | |||
| 221 | return 0; | ||
| 222 | } | ||
| 223 | |||
| 224 | static struct cpufreq_driver imx6q_cpufreq_driver = { | 206 | static struct cpufreq_driver imx6q_cpufreq_driver = { |
| 225 | .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, | 207 | .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK | |
| 208 | CPUFREQ_IS_COOLING_DEV, | ||
| 226 | .verify = cpufreq_generic_frequency_table_verify, | 209 | .verify = cpufreq_generic_frequency_table_verify, |
| 227 | .target_index = imx6q_set_target, | 210 | .target_index = imx6q_set_target, |
| 228 | .get = cpufreq_generic_get, | 211 | .get = cpufreq_generic_get, |
| 229 | .init = imx6q_cpufreq_init, | 212 | .init = imx6q_cpufreq_init, |
| 230 | .exit = imx6q_cpufreq_exit, | ||
| 231 | .name = "imx6q-cpufreq", | 213 | .name = "imx6q-cpufreq", |
| 232 | .ready = imx6q_cpufreq_ready, | ||
| 233 | .attr = cpufreq_generic_attr, | 214 | .attr = cpufreq_generic_attr, |
| 234 | .suspend = cpufreq_generic_suspend, | 215 | .suspend = cpufreq_generic_suspend, |
| 235 | }; | 216 | }; |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index dd66decf2087..002f5169d4eb 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -50,6 +50,8 @@ | |||
| 50 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) | 50 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) |
| 51 | #define fp_toint(X) ((X) >> FRAC_BITS) | 51 | #define fp_toint(X) ((X) >> FRAC_BITS) |
| 52 | 52 | ||
| 53 | #define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3)) | ||
| 54 | |||
| 53 | #define EXT_BITS 6 | 55 | #define EXT_BITS 6 |
| 54 | #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS) | 56 | #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS) |
| 55 | #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS) | 57 | #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS) |
| @@ -895,7 +897,7 @@ static void intel_pstate_update_policies(void) | |||
| 895 | /************************** sysfs begin ************************/ | 897 | /************************** sysfs begin ************************/ |
| 896 | #define show_one(file_name, object) \ | 898 | #define show_one(file_name, object) \ |
| 897 | static ssize_t show_##file_name \ | 899 | static ssize_t show_##file_name \ |
| 898 | (struct kobject *kobj, struct attribute *attr, char *buf) \ | 900 | (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ |
| 899 | { \ | 901 | { \ |
| 900 | return sprintf(buf, "%u\n", global.object); \ | 902 | return sprintf(buf, "%u\n", global.object); \ |
| 901 | } | 903 | } |
| @@ -904,7 +906,7 @@ static ssize_t intel_pstate_show_status(char *buf); | |||
| 904 | static int intel_pstate_update_status(const char *buf, size_t size); | 906 | static int intel_pstate_update_status(const char *buf, size_t size); |
| 905 | 907 | ||
| 906 | static ssize_t show_status(struct kobject *kobj, | 908 | static ssize_t show_status(struct kobject *kobj, |
| 907 | struct attribute *attr, char *buf) | 909 | struct kobj_attribute *attr, char *buf) |
| 908 | { | 910 | { |
| 909 | ssize_t ret; | 911 | ssize_t ret; |
| 910 | 912 | ||
| @@ -915,7 +917,7 @@ static ssize_t show_status(struct kobject *kobj, | |||
| 915 | return ret; | 917 | return ret; |
| 916 | } | 918 | } |
| 917 | 919 | ||
| 918 | static ssize_t store_status(struct kobject *a, struct attribute *b, | 920 | static ssize_t store_status(struct kobject *a, struct kobj_attribute *b, |
| 919 | const char *buf, size_t count) | 921 | const char *buf, size_t count) |
| 920 | { | 922 | { |
| 921 | char *p = memchr(buf, '\n', count); | 923 | char *p = memchr(buf, '\n', count); |
| @@ -929,7 +931,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b, | |||
| 929 | } | 931 | } |
| 930 | 932 | ||
| 931 | static ssize_t show_turbo_pct(struct kobject *kobj, | 933 | static ssize_t show_turbo_pct(struct kobject *kobj, |
| 932 | struct attribute *attr, char *buf) | 934 | struct kobj_attribute *attr, char *buf) |
| 933 | { | 935 | { |
| 934 | struct cpudata *cpu; | 936 | struct cpudata *cpu; |
| 935 | int total, no_turbo, turbo_pct; | 937 | int total, no_turbo, turbo_pct; |
| @@ -955,7 +957,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj, | |||
| 955 | } | 957 | } |
| 956 | 958 | ||
| 957 | static ssize_t show_num_pstates(struct kobject *kobj, | 959 | static ssize_t show_num_pstates(struct kobject *kobj, |
| 958 | struct attribute *attr, char *buf) | 960 | struct kobj_attribute *attr, char *buf) |
| 959 | { | 961 | { |
| 960 | struct cpudata *cpu; | 962 | struct cpudata *cpu; |
| 961 | int total; | 963 | int total; |
| @@ -976,7 +978,7 @@ static ssize_t show_num_pstates(struct kobject *kobj, | |||
| 976 | } | 978 | } |
| 977 | 979 | ||
| 978 | static ssize_t show_no_turbo(struct kobject *kobj, | 980 | static ssize_t show_no_turbo(struct kobject *kobj, |
| 979 | struct attribute *attr, char *buf) | 981 | struct kobj_attribute *attr, char *buf) |
| 980 | { | 982 | { |
| 981 | ssize_t ret; | 983 | ssize_t ret; |
| 982 | 984 | ||
| @@ -998,7 +1000,7 @@ static ssize_t show_no_turbo(struct kobject *kobj, | |||
| 998 | return ret; | 1000 | return ret; |
| 999 | } | 1001 | } |
| 1000 | 1002 | ||
| 1001 | static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, | 1003 | static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, |
| 1002 | const char *buf, size_t count) | 1004 | const char *buf, size_t count) |
| 1003 | { | 1005 | { |
| 1004 | unsigned int input; | 1006 | unsigned int input; |
| @@ -1045,7 +1047,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, | |||
| 1045 | return count; | 1047 | return count; |
| 1046 | } | 1048 | } |
| 1047 | 1049 | ||
| 1048 | static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, | 1050 | static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, |
| 1049 | const char *buf, size_t count) | 1051 | const char *buf, size_t count) |
| 1050 | { | 1052 | { |
| 1051 | unsigned int input; | 1053 | unsigned int input; |
| @@ -1075,7 +1077,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, | |||
| 1075 | return count; | 1077 | return count; |
| 1076 | } | 1078 | } |
| 1077 | 1079 | ||
| 1078 | static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, | 1080 | static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, |
| 1079 | const char *buf, size_t count) | 1081 | const char *buf, size_t count) |
| 1080 | { | 1082 | { |
| 1081 | unsigned int input; | 1083 | unsigned int input; |
| @@ -1107,12 +1109,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, | |||
| 1107 | } | 1109 | } |
| 1108 | 1110 | ||
| 1109 | static ssize_t show_hwp_dynamic_boost(struct kobject *kobj, | 1111 | static ssize_t show_hwp_dynamic_boost(struct kobject *kobj, |
| 1110 | struct attribute *attr, char *buf) | 1112 | struct kobj_attribute *attr, char *buf) |
| 1111 | { | 1113 | { |
| 1112 | return sprintf(buf, "%u\n", hwp_boost); | 1114 | return sprintf(buf, "%u\n", hwp_boost); |
| 1113 | } | 1115 | } |
| 1114 | 1116 | ||
| 1115 | static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b, | 1117 | static ssize_t store_hwp_dynamic_boost(struct kobject *a, |
| 1118 | struct kobj_attribute *b, | ||
| 1116 | const char *buf, size_t count) | 1119 | const char *buf, size_t count) |
| 1117 | { | 1120 | { |
| 1118 | unsigned int input; | 1121 | unsigned int input; |
| @@ -1444,12 +1447,6 @@ static int knl_get_turbo_pstate(void) | |||
| 1444 | return ret; | 1447 | return ret; |
| 1445 | } | 1448 | } |
| 1446 | 1449 | ||
| 1447 | static int intel_pstate_get_base_pstate(struct cpudata *cpu) | ||
| 1448 | { | ||
| 1449 | return global.no_turbo || global.turbo_disabled ? | ||
| 1450 | cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; | ||
| 1451 | } | ||
| 1452 | |||
| 1453 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) | 1450 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) |
| 1454 | { | 1451 | { |
| 1455 | trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); | 1452 | trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); |
| @@ -1470,11 +1467,9 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu) | |||
| 1470 | 1467 | ||
| 1471 | static void intel_pstate_max_within_limits(struct cpudata *cpu) | 1468 | static void intel_pstate_max_within_limits(struct cpudata *cpu) |
| 1472 | { | 1469 | { |
| 1473 | int pstate; | 1470 | int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); |
| 1474 | 1471 | ||
| 1475 | update_turbo_state(); | 1472 | update_turbo_state(); |
| 1476 | pstate = intel_pstate_get_base_pstate(cpu); | ||
| 1477 | pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); | ||
| 1478 | intel_pstate_set_pstate(cpu, pstate); | 1473 | intel_pstate_set_pstate(cpu, pstate); |
| 1479 | } | 1474 | } |
| 1480 | 1475 | ||
| @@ -1678,17 +1673,14 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu) | |||
| 1678 | static inline int32_t get_target_pstate(struct cpudata *cpu) | 1673 | static inline int32_t get_target_pstate(struct cpudata *cpu) |
| 1679 | { | 1674 | { |
| 1680 | struct sample *sample = &cpu->sample; | 1675 | struct sample *sample = &cpu->sample; |
| 1681 | int32_t busy_frac, boost; | 1676 | int32_t busy_frac; |
| 1682 | int target, avg_pstate; | 1677 | int target, avg_pstate; |
| 1683 | 1678 | ||
| 1684 | busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, | 1679 | busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, |
| 1685 | sample->tsc); | 1680 | sample->tsc); |
| 1686 | 1681 | ||
| 1687 | boost = cpu->iowait_boost; | 1682 | if (busy_frac < cpu->iowait_boost) |
| 1688 | cpu->iowait_boost >>= 1; | 1683 | busy_frac = cpu->iowait_boost; |
| 1689 | |||
| 1690 | if (busy_frac < boost) | ||
| 1691 | busy_frac = boost; | ||
| 1692 | 1684 | ||
| 1693 | sample->busy_scaled = busy_frac * 100; | 1685 | sample->busy_scaled = busy_frac * 100; |
| 1694 | 1686 | ||
| @@ -1715,11 +1707,9 @@ static inline int32_t get_target_pstate(struct cpudata *cpu) | |||
| 1715 | 1707 | ||
| 1716 | static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) | 1708 | static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) |
| 1717 | { | 1709 | { |
| 1718 | int max_pstate = intel_pstate_get_base_pstate(cpu); | 1710 | int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); |
| 1719 | int min_pstate; | 1711 | int max_pstate = max(min_pstate, cpu->max_perf_ratio); |
| 1720 | 1712 | ||
| 1721 | min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); | ||
| 1722 | max_pstate = max(min_pstate, cpu->max_perf_ratio); | ||
| 1723 | return clamp_t(int, pstate, min_pstate, max_pstate); | 1713 | return clamp_t(int, pstate, min_pstate, max_pstate); |
| 1724 | } | 1714 | } |
| 1725 | 1715 | ||
| @@ -1767,29 +1757,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time, | |||
| 1767 | if (smp_processor_id() != cpu->cpu) | 1757 | if (smp_processor_id() != cpu->cpu) |
| 1768 | return; | 1758 | return; |
| 1769 | 1759 | ||
| 1760 | delta_ns = time - cpu->last_update; | ||
| 1770 | if (flags & SCHED_CPUFREQ_IOWAIT) { | 1761 | if (flags & SCHED_CPUFREQ_IOWAIT) { |
| 1771 | cpu->iowait_boost = int_tofp(1); | 1762 | /* Start over if the CPU may have been idle. */ |
| 1772 | cpu->last_update = time; | 1763 | if (delta_ns > TICK_NSEC) { |
| 1773 | /* | 1764 | cpu->iowait_boost = ONE_EIGHTH_FP; |
| 1774 | * The last time the busy was 100% so P-state was max anyway | 1765 | } else if (cpu->iowait_boost) { |
| 1775 | * so avoid overhead of computation. | 1766 | cpu->iowait_boost <<= 1; |
| 1776 | */ | 1767 | if (cpu->iowait_boost > int_tofp(1)) |
| 1777 | if (fp_toint(cpu->sample.busy_scaled) == 100) | 1768 | cpu->iowait_boost = int_tofp(1); |
| 1778 | return; | 1769 | } else { |
| 1779 | 1770 | cpu->iowait_boost = ONE_EIGHTH_FP; | |
| 1780 | goto set_pstate; | 1771 | } |
| 1781 | } else if (cpu->iowait_boost) { | 1772 | } else if (cpu->iowait_boost) { |
| 1782 | /* Clear iowait_boost if the CPU may have been idle. */ | 1773 | /* Clear iowait_boost if the CPU may have been idle. */ |
| 1783 | delta_ns = time - cpu->last_update; | ||
| 1784 | if (delta_ns > TICK_NSEC) | 1774 | if (delta_ns > TICK_NSEC) |
| 1785 | cpu->iowait_boost = 0; | 1775 | cpu->iowait_boost = 0; |
| 1776 | else | ||
| 1777 | cpu->iowait_boost >>= 1; | ||
| 1786 | } | 1778 | } |
| 1787 | cpu->last_update = time; | 1779 | cpu->last_update = time; |
| 1788 | delta_ns = time - cpu->sample.time; | 1780 | delta_ns = time - cpu->sample.time; |
| 1789 | if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) | 1781 | if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) |
| 1790 | return; | 1782 | return; |
| 1791 | 1783 | ||
| 1792 | set_pstate: | ||
| 1793 | if (intel_pstate_sample(cpu, time)) | 1784 | if (intel_pstate_sample(cpu, time)) |
| 1794 | intel_pstate_adjust_pstate(cpu); | 1785 | intel_pstate_adjust_pstate(cpu); |
| 1795 | } | 1786 | } |
| @@ -1976,7 +1967,8 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, | |||
| 1976 | if (hwp_active) { | 1967 | if (hwp_active) { |
| 1977 | intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state); | 1968 | intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state); |
| 1978 | } else { | 1969 | } else { |
| 1979 | max_state = intel_pstate_get_base_pstate(cpu); | 1970 | max_state = global.no_turbo || global.turbo_disabled ? |
| 1971 | cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; | ||
| 1980 | turbo_max = cpu->pstate.turbo_pstate; | 1972 | turbo_max = cpu->pstate.turbo_pstate; |
| 1981 | } | 1973 | } |
| 1982 | 1974 | ||
| @@ -2475,6 +2467,7 @@ static bool __init intel_pstate_no_acpi_pss(void) | |||
| 2475 | kfree(pss); | 2467 | kfree(pss); |
| 2476 | } | 2468 | } |
| 2477 | 2469 | ||
| 2470 | pr_debug("ACPI _PSS not found\n"); | ||
| 2478 | return true; | 2471 | return true; |
| 2479 | } | 2472 | } |
| 2480 | 2473 | ||
| @@ -2485,9 +2478,14 @@ static bool __init intel_pstate_no_acpi_pcch(void) | |||
| 2485 | 2478 | ||
| 2486 | status = acpi_get_handle(NULL, "\\_SB", &handle); | 2479 | status = acpi_get_handle(NULL, "\\_SB", &handle); |
| 2487 | if (ACPI_FAILURE(status)) | 2480 | if (ACPI_FAILURE(status)) |
| 2488 | return true; | 2481 | goto not_found; |
| 2482 | |||
| 2483 | if (acpi_has_method(handle, "PCCH")) | ||
| 2484 | return false; | ||
| 2489 | 2485 | ||
| 2490 | return !acpi_has_method(handle, "PCCH"); | 2486 | not_found: |
| 2487 | pr_debug("ACPI PCCH not found\n"); | ||
| 2488 | return true; | ||
| 2491 | } | 2489 | } |
| 2492 | 2490 | ||
| 2493 | static bool __init intel_pstate_has_acpi_ppc(void) | 2491 | static bool __init intel_pstate_has_acpi_ppc(void) |
| @@ -2502,6 +2500,7 @@ static bool __init intel_pstate_has_acpi_ppc(void) | |||
| 2502 | if (acpi_has_method(pr->handle, "_PPC")) | 2500 | if (acpi_has_method(pr->handle, "_PPC")) |
| 2503 | return true; | 2501 | return true; |
| 2504 | } | 2502 | } |
| 2503 | pr_debug("ACPI _PPC not found\n"); | ||
| 2505 | return false; | 2504 | return false; |
| 2506 | } | 2505 | } |
| 2507 | 2506 | ||
| @@ -2539,8 +2538,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void) | |||
| 2539 | id = x86_match_cpu(intel_pstate_cpu_oob_ids); | 2538 | id = x86_match_cpu(intel_pstate_cpu_oob_ids); |
| 2540 | if (id) { | 2539 | if (id) { |
| 2541 | rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); | 2540 | rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); |
| 2542 | if ( misc_pwr & (1 << 8)) | 2541 | if (misc_pwr & (1 << 8)) { |
| 2542 | pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n"); | ||
| 2543 | return true; | 2543 | return true; |
| 2544 | } | ||
| 2544 | } | 2545 | } |
| 2545 | 2546 | ||
| 2546 | idx = acpi_match_platform_list(plat_info); | 2547 | idx = acpi_match_platform_list(plat_info); |
| @@ -2606,22 +2607,28 @@ static int __init intel_pstate_init(void) | |||
| 2606 | } | 2607 | } |
| 2607 | } else { | 2608 | } else { |
| 2608 | id = x86_match_cpu(intel_pstate_cpu_ids); | 2609 | id = x86_match_cpu(intel_pstate_cpu_ids); |
| 2609 | if (!id) | 2610 | if (!id) { |
| 2611 | pr_info("CPU ID not supported\n"); | ||
| 2610 | return -ENODEV; | 2612 | return -ENODEV; |
| 2613 | } | ||
| 2611 | 2614 | ||
| 2612 | copy_cpu_funcs((struct pstate_funcs *)id->driver_data); | 2615 | copy_cpu_funcs((struct pstate_funcs *)id->driver_data); |
| 2613 | } | 2616 | } |
| 2614 | 2617 | ||
| 2615 | if (intel_pstate_msrs_not_valid()) | 2618 | if (intel_pstate_msrs_not_valid()) { |
| 2619 | pr_info("Invalid MSRs\n"); | ||
| 2616 | return -ENODEV; | 2620 | return -ENODEV; |
| 2621 | } | ||
| 2617 | 2622 | ||
| 2618 | hwp_cpu_matched: | 2623 | hwp_cpu_matched: |
| 2619 | /* | 2624 | /* |
| 2620 | * The Intel pstate driver will be ignored if the platform | 2625 | * The Intel pstate driver will be ignored if the platform |
| 2621 | * firmware has its own power management modes. | 2626 | * firmware has its own power management modes. |
| 2622 | */ | 2627 | */ |
| 2623 | if (intel_pstate_platform_pwr_mgmt_exists()) | 2628 | if (intel_pstate_platform_pwr_mgmt_exists()) { |
| 2629 | pr_info("P-states controlled by the platform\n"); | ||
| 2624 | return -ENODEV; | 2630 | return -ENODEV; |
| 2631 | } | ||
| 2625 | 2632 | ||
| 2626 | if (!hwp_active && hwp_only) | 2633 | if (!hwp_active && hwp_only) |
| 2627 | return -ENOTSUPP; | 2634 | return -ENOTSUPP; |
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 279bd9e9fa95..fb546e0d0356 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c | |||
| @@ -851,7 +851,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) | |||
| 851 | case TYPE_POWERSAVER: | 851 | case TYPE_POWERSAVER: |
| 852 | pr_cont("Powersaver supported\n"); | 852 | pr_cont("Powersaver supported\n"); |
| 853 | break; | 853 | break; |
| 854 | }; | 854 | } |
| 855 | 855 | ||
| 856 | /* Doesn't hurt */ | 856 | /* Doesn't hurt */ |
| 857 | longhaul_setup_southbridge(); | 857 | longhaul_setup_southbridge(); |
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index eb8920d39818..48e9829274c6 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | 14 | ||
| 15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
| 16 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
| 17 | #include <linux/cpu_cooling.h> | ||
| 18 | #include <linux/cpufreq.h> | 17 | #include <linux/cpufreq.h> |
| 19 | #include <linux/cpumask.h> | 18 | #include <linux/cpumask.h> |
| 20 | #include <linux/module.h> | 19 | #include <linux/module.h> |
| @@ -48,7 +47,6 @@ struct mtk_cpu_dvfs_info { | |||
| 48 | struct regulator *sram_reg; | 47 | struct regulator *sram_reg; |
| 49 | struct clk *cpu_clk; | 48 | struct clk *cpu_clk; |
| 50 | struct clk *inter_clk; | 49 | struct clk *inter_clk; |
| 51 | struct thermal_cooling_device *cdev; | ||
| 52 | struct list_head list_head; | 50 | struct list_head list_head; |
| 53 | int intermediate_voltage; | 51 | int intermediate_voltage; |
| 54 | bool need_voltage_tracking; | 52 | bool need_voltage_tracking; |
| @@ -307,13 +305,6 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy, | |||
| 307 | 305 | ||
| 308 | #define DYNAMIC_POWER "dynamic-power-coefficient" | 306 | #define DYNAMIC_POWER "dynamic-power-coefficient" |
| 309 | 307 | ||
| 310 | static void mtk_cpufreq_ready(struct cpufreq_policy *policy) | ||
| 311 | { | ||
| 312 | struct mtk_cpu_dvfs_info *info = policy->driver_data; | ||
| 313 | |||
| 314 | info->cdev = of_cpufreq_cooling_register(policy); | ||
| 315 | } | ||
| 316 | |||
| 317 | static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) | 308 | static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) |
| 318 | { | 309 | { |
| 319 | struct device *cpu_dev; | 310 | struct device *cpu_dev; |
| @@ -465,6 +456,8 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy) | |||
| 465 | policy->driver_data = info; | 456 | policy->driver_data = info; |
| 466 | policy->clk = info->cpu_clk; | 457 | policy->clk = info->cpu_clk; |
| 467 | 458 | ||
| 459 | dev_pm_opp_of_register_em(policy->cpus); | ||
| 460 | |||
| 468 | return 0; | 461 | return 0; |
| 469 | } | 462 | } |
| 470 | 463 | ||
| @@ -472,7 +465,6 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy) | |||
| 472 | { | 465 | { |
| 473 | struct mtk_cpu_dvfs_info *info = policy->driver_data; | 466 | struct mtk_cpu_dvfs_info *info = policy->driver_data; |
| 474 | 467 | ||
| 475 | cpufreq_cooling_unregister(info->cdev); | ||
| 476 | dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table); | 468 | dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table); |
| 477 | 469 | ||
| 478 | return 0; | 470 | return 0; |
| @@ -480,13 +472,13 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy) | |||
| 480 | 472 | ||
| 481 | static struct cpufreq_driver mtk_cpufreq_driver = { | 473 | static struct cpufreq_driver mtk_cpufreq_driver = { |
| 482 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | | 474 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | |
| 483 | CPUFREQ_HAVE_GOVERNOR_PER_POLICY, | 475 | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | |
| 476 | CPUFREQ_IS_COOLING_DEV, | ||
| 484 | .verify = cpufreq_generic_frequency_table_verify, | 477 | .verify = cpufreq_generic_frequency_table_verify, |
| 485 | .target_index = mtk_cpufreq_set_target, | 478 | .target_index = mtk_cpufreq_set_target, |
| 486 | .get = cpufreq_generic_get, | 479 | .get = cpufreq_generic_get, |
| 487 | .init = mtk_cpufreq_init, | 480 | .init = mtk_cpufreq_init, |
| 488 | .exit = mtk_cpufreq_exit, | 481 | .exit = mtk_cpufreq_exit, |
| 489 | .ready = mtk_cpufreq_ready, | ||
| 490 | .name = "mtk-cpufreq", | 482 | .name = "mtk-cpufreq", |
| 491 | .attr = cpufreq_generic_attr, | 483 | .attr = cpufreq_generic_attr, |
| 492 | }; | 484 | }; |
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 71e81bbf031b..68052b74d28f 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c | |||
| @@ -133,8 +133,10 @@ static int omap_cpu_init(struct cpufreq_policy *policy) | |||
| 133 | 133 | ||
| 134 | /* FIXME: what's the actual transition time? */ | 134 | /* FIXME: what's the actual transition time? */ |
| 135 | result = cpufreq_generic_init(policy, freq_table, 300 * 1000); | 135 | result = cpufreq_generic_init(policy, freq_table, 300 * 1000); |
| 136 | if (!result) | 136 | if (!result) { |
| 137 | dev_pm_opp_of_register_em(policy->cpus); | ||
| 137 | return 0; | 138 | return 0; |
| 139 | } | ||
| 138 | 140 | ||
| 139 | freq_table_free(); | 141 | freq_table_free(); |
| 140 | fail: | 142 | fail: |
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 099a849396f6..1e5e64643c3a 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c | |||
| @@ -268,7 +268,7 @@ static int pcc_get_offset(int cpu) | |||
| 268 | if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) { | 268 | if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) { |
| 269 | ret = -ENODEV; | 269 | ret = -ENODEV; |
| 270 | goto out_free; | 270 | goto out_free; |
| 271 | }; | 271 | } |
| 272 | 272 | ||
| 273 | offset = &(pccp->package.elements[0]); | 273 | offset = &(pccp->package.elements[0]); |
| 274 | if (!offset || offset->type != ACPI_TYPE_INTEGER) { | 274 | if (!offset || offset->type != ACPI_TYPE_INTEGER) { |
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 7e7ad3879c4e..d2230812fa4b 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c | |||
| @@ -244,6 +244,7 @@ static int init_powernv_pstates(void) | |||
| 244 | u32 len_ids, len_freqs; | 244 | u32 len_ids, len_freqs; |
| 245 | u32 pstate_min, pstate_max, pstate_nominal; | 245 | u32 pstate_min, pstate_max, pstate_nominal; |
| 246 | u32 pstate_turbo, pstate_ultra_turbo; | 246 | u32 pstate_turbo, pstate_ultra_turbo; |
| 247 | int rc = -ENODEV; | ||
| 247 | 248 | ||
| 248 | power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); | 249 | power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); |
| 249 | if (!power_mgt) { | 250 | if (!power_mgt) { |
| @@ -327,8 +328,11 @@ next: | |||
| 327 | powernv_freqs[i].frequency = freq * 1000; /* kHz */ | 328 | powernv_freqs[i].frequency = freq * 1000; /* kHz */ |
| 328 | powernv_freqs[i].driver_data = id & 0xFF; | 329 | powernv_freqs[i].driver_data = id & 0xFF; |
| 329 | 330 | ||
| 330 | revmap_data = (struct pstate_idx_revmap_data *) | 331 | revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL); |
| 331 | kmalloc(sizeof(*revmap_data), GFP_KERNEL); | 332 | if (!revmap_data) { |
| 333 | rc = -ENOMEM; | ||
| 334 | goto out; | ||
| 335 | } | ||
| 332 | 336 | ||
| 333 | revmap_data->pstate_id = id & 0xFF; | 337 | revmap_data->pstate_id = id & 0xFF; |
| 334 | revmap_data->cpufreq_table_idx = i; | 338 | revmap_data->cpufreq_table_idx = i; |
| @@ -357,7 +361,7 @@ next: | |||
| 357 | return 0; | 361 | return 0; |
| 358 | out: | 362 | out: |
| 359 | of_node_put(power_mgt); | 363 | of_node_put(power_mgt); |
| 360 | return -ENODEV; | 364 | return rc; |
| 361 | } | 365 | } |
| 362 | 366 | ||
| 363 | /* Returns the CPU frequency corresponding to the pstate_id. */ | 367 | /* Returns the CPU frequency corresponding to the pstate_id. */ |
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index d83939a1b3d4..4b0b50403901 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c | |||
| @@ -10,18 +10,21 @@ | |||
| 10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| 11 | #include <linux/of_address.h> | 11 | #include <linux/of_address.h> |
| 12 | #include <linux/of_platform.h> | 12 | #include <linux/of_platform.h> |
| 13 | #include <linux/pm_opp.h> | ||
| 13 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
| 14 | 15 | ||
| 15 | #define LUT_MAX_ENTRIES 40U | 16 | #define LUT_MAX_ENTRIES 40U |
| 16 | #define LUT_SRC GENMASK(31, 30) | 17 | #define LUT_SRC GENMASK(31, 30) |
| 17 | #define LUT_L_VAL GENMASK(7, 0) | 18 | #define LUT_L_VAL GENMASK(7, 0) |
| 18 | #define LUT_CORE_COUNT GENMASK(18, 16) | 19 | #define LUT_CORE_COUNT GENMASK(18, 16) |
| 20 | #define LUT_VOLT GENMASK(11, 0) | ||
| 19 | #define LUT_ROW_SIZE 32 | 21 | #define LUT_ROW_SIZE 32 |
| 20 | #define CLK_HW_DIV 2 | 22 | #define CLK_HW_DIV 2 |
| 21 | 23 | ||
| 22 | /* Register offsets */ | 24 | /* Register offsets */ |
| 23 | #define REG_ENABLE 0x0 | 25 | #define REG_ENABLE 0x0 |
| 24 | #define REG_LUT_TABLE 0x110 | 26 | #define REG_FREQ_LUT 0x110 |
| 27 | #define REG_VOLT_LUT 0x114 | ||
| 25 | #define REG_PERF_STATE 0x920 | 28 | #define REG_PERF_STATE 0x920 |
| 26 | 29 | ||
| 27 | static unsigned long cpu_hw_rate, xo_rate; | 30 | static unsigned long cpu_hw_rate, xo_rate; |
| @@ -70,11 +73,12 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy, | |||
| 70 | return policy->freq_table[index].frequency; | 73 | return policy->freq_table[index].frequency; |
| 71 | } | 74 | } |
| 72 | 75 | ||
| 73 | static int qcom_cpufreq_hw_read_lut(struct device *dev, | 76 | static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, |
| 74 | struct cpufreq_policy *policy, | 77 | struct cpufreq_policy *policy, |
| 75 | void __iomem *base) | 78 | void __iomem *base) |
| 76 | { | 79 | { |
| 77 | u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq; | 80 | u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq; |
| 81 | u32 volt; | ||
| 78 | unsigned int max_cores = cpumask_weight(policy->cpus); | 82 | unsigned int max_cores = cpumask_weight(policy->cpus); |
| 79 | struct cpufreq_frequency_table *table; | 83 | struct cpufreq_frequency_table *table; |
| 80 | 84 | ||
| @@ -83,23 +87,28 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev, | |||
| 83 | return -ENOMEM; | 87 | return -ENOMEM; |
| 84 | 88 | ||
| 85 | for (i = 0; i < LUT_MAX_ENTRIES; i++) { | 89 | for (i = 0; i < LUT_MAX_ENTRIES; i++) { |
| 86 | data = readl_relaxed(base + REG_LUT_TABLE + i * LUT_ROW_SIZE); | 90 | data = readl_relaxed(base + REG_FREQ_LUT + |
| 91 | i * LUT_ROW_SIZE); | ||
| 87 | src = FIELD_GET(LUT_SRC, data); | 92 | src = FIELD_GET(LUT_SRC, data); |
| 88 | lval = FIELD_GET(LUT_L_VAL, data); | 93 | lval = FIELD_GET(LUT_L_VAL, data); |
| 89 | core_count = FIELD_GET(LUT_CORE_COUNT, data); | 94 | core_count = FIELD_GET(LUT_CORE_COUNT, data); |
| 90 | 95 | ||
| 96 | data = readl_relaxed(base + REG_VOLT_LUT + | ||
| 97 | i * LUT_ROW_SIZE); | ||
| 98 | volt = FIELD_GET(LUT_VOLT, data) * 1000; | ||
| 99 | |||
| 91 | if (src) | 100 | if (src) |
| 92 | freq = xo_rate * lval / 1000; | 101 | freq = xo_rate * lval / 1000; |
| 93 | else | 102 | else |
| 94 | freq = cpu_hw_rate / 1000; | 103 | freq = cpu_hw_rate / 1000; |
| 95 | 104 | ||
| 96 | /* Ignore boosts in the middle of the table */ | 105 | if (freq != prev_freq && core_count == max_cores) { |
| 97 | if (core_count != max_cores) { | ||
| 98 | table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
| 99 | } else { | ||
| 100 | table[i].frequency = freq; | 106 | table[i].frequency = freq; |
| 101 | dev_dbg(dev, "index=%d freq=%d, core_count %d\n", i, | 107 | dev_pm_opp_add(cpu_dev, freq * 1000, volt); |
| 108 | dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i, | ||
| 102 | freq, core_count); | 109 | freq, core_count); |
| 110 | } else { | ||
| 111 | table[i].frequency = CPUFREQ_ENTRY_INVALID; | ||
| 103 | } | 112 | } |
| 104 | 113 | ||
| 105 | /* | 114 | /* |
| @@ -116,6 +125,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev, | |||
| 116 | if (prev_cc != max_cores) { | 125 | if (prev_cc != max_cores) { |
| 117 | prev->frequency = prev_freq; | 126 | prev->frequency = prev_freq; |
| 118 | prev->flags = CPUFREQ_BOOST_FREQ; | 127 | prev->flags = CPUFREQ_BOOST_FREQ; |
| 128 | dev_pm_opp_add(cpu_dev, prev_freq * 1000, volt); | ||
| 119 | } | 129 | } |
| 120 | 130 | ||
| 121 | break; | 131 | break; |
| @@ -127,6 +137,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev, | |||
| 127 | 137 | ||
| 128 | table[i].frequency = CPUFREQ_TABLE_END; | 138 | table[i].frequency = CPUFREQ_TABLE_END; |
| 129 | policy->freq_table = table; | 139 | policy->freq_table = table; |
| 140 | dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus); | ||
| 130 | 141 | ||
| 131 | return 0; | 142 | return 0; |
| 132 | } | 143 | } |
| @@ -159,10 +170,18 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) | |||
| 159 | struct device *dev = &global_pdev->dev; | 170 | struct device *dev = &global_pdev->dev; |
| 160 | struct of_phandle_args args; | 171 | struct of_phandle_args args; |
| 161 | struct device_node *cpu_np; | 172 | struct device_node *cpu_np; |
| 173 | struct device *cpu_dev; | ||
| 162 | struct resource *res; | 174 | struct resource *res; |
| 163 | void __iomem *base; | 175 | void __iomem *base; |
| 164 | int ret, index; | 176 | int ret, index; |
| 165 | 177 | ||
| 178 | cpu_dev = get_cpu_device(policy->cpu); | ||
| 179 | if (!cpu_dev) { | ||
| 180 | pr_err("%s: failed to get cpu%d device\n", __func__, | ||
| 181 | policy->cpu); | ||
| 182 | return -ENODEV; | ||
| 183 | } | ||
| 184 | |||
| 166 | cpu_np = of_cpu_device_node_get(policy->cpu); | 185 | cpu_np = of_cpu_device_node_get(policy->cpu); |
| 167 | if (!cpu_np) | 186 | if (!cpu_np) |
| 168 | return -EINVAL; | 187 | return -EINVAL; |
| @@ -199,12 +218,21 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) | |||
| 199 | 218 | ||
| 200 | policy->driver_data = base + REG_PERF_STATE; | 219 | policy->driver_data = base + REG_PERF_STATE; |
| 201 | 220 | ||
| 202 | ret = qcom_cpufreq_hw_read_lut(dev, policy, base); | 221 | ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy, base); |
| 203 | if (ret) { | 222 | if (ret) { |
| 204 | dev_err(dev, "Domain-%d failed to read LUT\n", index); | 223 | dev_err(dev, "Domain-%d failed to read LUT\n", index); |
| 205 | goto error; | 224 | goto error; |
| 206 | } | 225 | } |
| 207 | 226 | ||
| 227 | ret = dev_pm_opp_get_opp_count(cpu_dev); | ||
| 228 | if (ret <= 0) { | ||
| 229 | dev_err(cpu_dev, "Failed to add OPPs\n"); | ||
| 230 | ret = -ENODEV; | ||
| 231 | goto error; | ||
| 232 | } | ||
| 233 | |||
| 234 | dev_pm_opp_of_register_em(policy->cpus); | ||
| 235 | |||
| 208 | policy->fast_switch_possible = true; | 236 | policy->fast_switch_possible = true; |
| 209 | 237 | ||
| 210 | return 0; | 238 | return 0; |
| @@ -215,8 +243,10 @@ error: | |||
| 215 | 243 | ||
| 216 | static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) | 244 | static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) |
| 217 | { | 245 | { |
| 246 | struct device *cpu_dev = get_cpu_device(policy->cpu); | ||
| 218 | void __iomem *base = policy->driver_data - REG_PERF_STATE; | 247 | void __iomem *base = policy->driver_data - REG_PERF_STATE; |
| 219 | 248 | ||
| 249 | dev_pm_opp_remove_all_dynamic(cpu_dev); | ||
| 220 | kfree(policy->freq_table); | 250 | kfree(policy->freq_table); |
| 221 | devm_iounmap(&global_pdev->dev, base); | 251 | devm_iounmap(&global_pdev->dev, base); |
| 222 | 252 | ||
| @@ -231,7 +261,8 @@ static struct freq_attr *qcom_cpufreq_hw_attr[] = { | |||
| 231 | 261 | ||
| 232 | static struct cpufreq_driver cpufreq_qcom_hw_driver = { | 262 | static struct cpufreq_driver cpufreq_qcom_hw_driver = { |
| 233 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | | 263 | .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | |
| 234 | CPUFREQ_HAVE_GOVERNOR_PER_POLICY, | 264 | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | |
| 265 | CPUFREQ_IS_COOLING_DEV, | ||
| 235 | .verify = cpufreq_generic_frequency_table_verify, | 266 | .verify = cpufreq_generic_frequency_table_verify, |
| 236 | .target_index = qcom_cpufreq_hw_target_index, | 267 | .target_index = qcom_cpufreq_hw_target_index, |
| 237 | .get = qcom_cpufreq_hw_get, | 268 | .get = qcom_cpufreq_hw_get, |
| @@ -296,7 +327,7 @@ static int __init qcom_cpufreq_hw_init(void) | |||
| 296 | { | 327 | { |
| 297 | return platform_driver_register(&qcom_cpufreq_hw_driver); | 328 | return platform_driver_register(&qcom_cpufreq_hw_driver); |
| 298 | } | 329 | } |
| 299 | subsys_initcall(qcom_cpufreq_hw_init); | 330 | device_initcall(qcom_cpufreq_hw_init); |
| 300 | 331 | ||
| 301 | static void __exit qcom_cpufreq_hw_exit(void) | 332 | static void __exit qcom_cpufreq_hw_exit(void) |
| 302 | { | 333 | { |
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c index 2a3675c24032..dd64dcf89c74 100644 --- a/drivers/cpufreq/qcom-cpufreq-kryo.c +++ b/drivers/cpufreq/qcom-cpufreq-kryo.c | |||
| @@ -42,7 +42,7 @@ enum _msm8996_version { | |||
| 42 | NUM_OF_MSM8996_VERSIONS, | 42 | NUM_OF_MSM8996_VERSIONS, |
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; | 45 | static struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; |
| 46 | 46 | ||
| 47 | static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void) | 47 | static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void) |
| 48 | { | 48 | { |
| @@ -75,7 +75,7 @@ static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void) | |||
| 75 | 75 | ||
| 76 | static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) | 76 | static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) |
| 77 | { | 77 | { |
| 78 | struct opp_table *opp_tables[NR_CPUS] = {0}; | 78 | struct opp_table **opp_tables; |
| 79 | enum _msm8996_version msm8996_version; | 79 | enum _msm8996_version msm8996_version; |
| 80 | struct nvmem_cell *speedbin_nvmem; | 80 | struct nvmem_cell *speedbin_nvmem; |
| 81 | struct device_node *np; | 81 | struct device_node *np; |
| @@ -133,6 +133,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) | |||
| 133 | } | 133 | } |
| 134 | kfree(speedbin); | 134 | kfree(speedbin); |
| 135 | 135 | ||
| 136 | opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL); | ||
| 137 | if (!opp_tables) | ||
| 138 | return -ENOMEM; | ||
| 139 | |||
| 136 | for_each_possible_cpu(cpu) { | 140 | for_each_possible_cpu(cpu) { |
| 137 | cpu_dev = get_cpu_device(cpu); | 141 | cpu_dev = get_cpu_device(cpu); |
| 138 | if (NULL == cpu_dev) { | 142 | if (NULL == cpu_dev) { |
| @@ -151,8 +155,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) | |||
| 151 | 155 | ||
| 152 | cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1, | 156 | cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1, |
| 153 | NULL, 0); | 157 | NULL, 0); |
| 154 | if (!IS_ERR(cpufreq_dt_pdev)) | 158 | if (!IS_ERR(cpufreq_dt_pdev)) { |
| 159 | platform_set_drvdata(pdev, opp_tables); | ||
| 155 | return 0; | 160 | return 0; |
| 161 | } | ||
| 156 | 162 | ||
| 157 | ret = PTR_ERR(cpufreq_dt_pdev); | 163 | ret = PTR_ERR(cpufreq_dt_pdev); |
| 158 | dev_err(cpu_dev, "Failed to register platform device\n"); | 164 | dev_err(cpu_dev, "Failed to register platform device\n"); |
| @@ -163,13 +169,23 @@ free_opp: | |||
| 163 | break; | 169 | break; |
| 164 | dev_pm_opp_put_supported_hw(opp_tables[cpu]); | 170 | dev_pm_opp_put_supported_hw(opp_tables[cpu]); |
| 165 | } | 171 | } |
| 172 | kfree(opp_tables); | ||
| 166 | 173 | ||
| 167 | return ret; | 174 | return ret; |
| 168 | } | 175 | } |
| 169 | 176 | ||
| 170 | static int qcom_cpufreq_kryo_remove(struct platform_device *pdev) | 177 | static int qcom_cpufreq_kryo_remove(struct platform_device *pdev) |
| 171 | { | 178 | { |
| 179 | struct opp_table **opp_tables = platform_get_drvdata(pdev); | ||
| 180 | unsigned int cpu; | ||
| 181 | |||
| 172 | platform_device_unregister(cpufreq_dt_pdev); | 182 | platform_device_unregister(cpufreq_dt_pdev); |
| 183 | |||
| 184 | for_each_possible_cpu(cpu) | ||
| 185 | dev_pm_opp_put_supported_hw(opp_tables[cpu]); | ||
| 186 | |||
| 187 | kfree(opp_tables); | ||
| 188 | |||
| 173 | return 0; | 189 | return 0; |
| 174 | } | 190 | } |
| 175 | 191 | ||
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index 3d773f64b4df..4295e5476264 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c | |||
| @@ -13,7 +13,6 @@ | |||
| 13 | #include <linux/clk.h> | 13 | #include <linux/clk.h> |
| 14 | #include <linux/clk-provider.h> | 14 | #include <linux/clk-provider.h> |
| 15 | #include <linux/cpufreq.h> | 15 | #include <linux/cpufreq.h> |
| 16 | #include <linux/cpu_cooling.h> | ||
| 17 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
| 18 | #include <linux/init.h> | 17 | #include <linux/init.h> |
| 19 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
| @@ -31,7 +30,6 @@ | |||
| 31 | struct cpu_data { | 30 | struct cpu_data { |
| 32 | struct clk **pclk; | 31 | struct clk **pclk; |
| 33 | struct cpufreq_frequency_table *table; | 32 | struct cpufreq_frequency_table *table; |
| 34 | struct thermal_cooling_device *cdev; | ||
| 35 | }; | 33 | }; |
| 36 | 34 | ||
| 37 | /* | 35 | /* |
| @@ -239,7 +237,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
| 239 | { | 237 | { |
| 240 | struct cpu_data *data = policy->driver_data; | 238 | struct cpu_data *data = policy->driver_data; |
| 241 | 239 | ||
| 242 | cpufreq_cooling_unregister(data->cdev); | ||
| 243 | kfree(data->pclk); | 240 | kfree(data->pclk); |
| 244 | kfree(data->table); | 241 | kfree(data->table); |
| 245 | kfree(data); | 242 | kfree(data); |
| @@ -258,23 +255,15 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy, | |||
| 258 | return clk_set_parent(policy->clk, parent); | 255 | return clk_set_parent(policy->clk, parent); |
| 259 | } | 256 | } |
| 260 | 257 | ||
| 261 | |||
| 262 | static void qoriq_cpufreq_ready(struct cpufreq_policy *policy) | ||
| 263 | { | ||
| 264 | struct cpu_data *cpud = policy->driver_data; | ||
| 265 | |||
| 266 | cpud->cdev = of_cpufreq_cooling_register(policy); | ||
| 267 | } | ||
| 268 | |||
| 269 | static struct cpufreq_driver qoriq_cpufreq_driver = { | 258 | static struct cpufreq_driver qoriq_cpufreq_driver = { |
| 270 | .name = "qoriq_cpufreq", | 259 | .name = "qoriq_cpufreq", |
| 271 | .flags = CPUFREQ_CONST_LOOPS, | 260 | .flags = CPUFREQ_CONST_LOOPS | |
| 261 | CPUFREQ_IS_COOLING_DEV, | ||
| 272 | .init = qoriq_cpufreq_cpu_init, | 262 | .init = qoriq_cpufreq_cpu_init, |
| 273 | .exit = qoriq_cpufreq_cpu_exit, | 263 | .exit = qoriq_cpufreq_cpu_exit, |
| 274 | .verify = cpufreq_generic_frequency_table_verify, | 264 | .verify = cpufreq_generic_frequency_table_verify, |
| 275 | .target_index = qoriq_cpufreq_target, | 265 | .target_index = qoriq_cpufreq_target, |
| 276 | .get = cpufreq_generic_get, | 266 | .get = cpufreq_generic_get, |
| 277 | .ready = qoriq_cpufreq_ready, | ||
| 278 | .attr = cpufreq_generic_attr, | 267 | .attr = cpufreq_generic_attr, |
| 279 | }; | 268 | }; |
| 280 | 269 | ||
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index dbecd7667db2..5b4289460bc9 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c | |||
| @@ -584,7 +584,7 @@ static struct notifier_block s5pv210_cpufreq_reboot_notifier = { | |||
| 584 | static int s5pv210_cpufreq_probe(struct platform_device *pdev) | 584 | static int s5pv210_cpufreq_probe(struct platform_device *pdev) |
| 585 | { | 585 | { |
| 586 | struct device_node *np; | 586 | struct device_node *np; |
| 587 | int id; | 587 | int id, result = 0; |
| 588 | 588 | ||
| 589 | /* | 589 | /* |
| 590 | * HACK: This is a temporary workaround to get access to clock | 590 | * HACK: This is a temporary workaround to get access to clock |
| @@ -594,18 +594,39 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) | |||
| 594 | * this whole driver as soon as S5PV210 gets migrated to use | 594 | * this whole driver as soon as S5PV210 gets migrated to use |
| 595 | * cpufreq-dt driver. | 595 | * cpufreq-dt driver. |
| 596 | */ | 596 | */ |
| 597 | arm_regulator = regulator_get(NULL, "vddarm"); | ||
| 598 | if (IS_ERR(arm_regulator)) { | ||
| 599 | if (PTR_ERR(arm_regulator) == -EPROBE_DEFER) | ||
| 600 | pr_debug("vddarm regulator not ready, defer\n"); | ||
| 601 | else | ||
| 602 | pr_err("failed to get regulator vddarm\n"); | ||
| 603 | return PTR_ERR(arm_regulator); | ||
| 604 | } | ||
| 605 | |||
| 606 | int_regulator = regulator_get(NULL, "vddint"); | ||
| 607 | if (IS_ERR(int_regulator)) { | ||
| 608 | if (PTR_ERR(int_regulator) == -EPROBE_DEFER) | ||
| 609 | pr_debug("vddint regulator not ready, defer\n"); | ||
| 610 | else | ||
| 611 | pr_err("failed to get regulator vddint\n"); | ||
| 612 | result = PTR_ERR(int_regulator); | ||
| 613 | goto err_int_regulator; | ||
| 614 | } | ||
| 615 | |||
| 597 | np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock"); | 616 | np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock"); |
| 598 | if (!np) { | 617 | if (!np) { |
| 599 | pr_err("%s: failed to find clock controller DT node\n", | 618 | pr_err("%s: failed to find clock controller DT node\n", |
| 600 | __func__); | 619 | __func__); |
| 601 | return -ENODEV; | 620 | result = -ENODEV; |
| 621 | goto err_clock; | ||
| 602 | } | 622 | } |
| 603 | 623 | ||
| 604 | clk_base = of_iomap(np, 0); | 624 | clk_base = of_iomap(np, 0); |
| 605 | of_node_put(np); | 625 | of_node_put(np); |
| 606 | if (!clk_base) { | 626 | if (!clk_base) { |
| 607 | pr_err("%s: failed to map clock registers\n", __func__); | 627 | pr_err("%s: failed to map clock registers\n", __func__); |
| 608 | return -EFAULT; | 628 | result = -EFAULT; |
| 629 | goto err_clock; | ||
| 609 | } | 630 | } |
| 610 | 631 | ||
| 611 | for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") { | 632 | for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") { |
| @@ -614,7 +635,8 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) | |||
| 614 | pr_err("%s: failed to get alias of dmc node '%pOFn'\n", | 635 | pr_err("%s: failed to get alias of dmc node '%pOFn'\n", |
| 615 | __func__, np); | 636 | __func__, np); |
| 616 | of_node_put(np); | 637 | of_node_put(np); |
| 617 | return id; | 638 | result = id; |
| 639 | goto err_clk_base; | ||
| 618 | } | 640 | } |
| 619 | 641 | ||
| 620 | dmc_base[id] = of_iomap(np, 0); | 642 | dmc_base[id] = of_iomap(np, 0); |
| @@ -622,33 +644,40 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) | |||
| 622 | pr_err("%s: failed to map dmc%d registers\n", | 644 | pr_err("%s: failed to map dmc%d registers\n", |
| 623 | __func__, id); | 645 | __func__, id); |
| 624 | of_node_put(np); | 646 | of_node_put(np); |
| 625 | return -EFAULT; | 647 | result = -EFAULT; |
| 648 | goto err_dmc; | ||
| 626 | } | 649 | } |
| 627 | } | 650 | } |
| 628 | 651 | ||
| 629 | for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) { | 652 | for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) { |
| 630 | if (!dmc_base[id]) { | 653 | if (!dmc_base[id]) { |
| 631 | pr_err("%s: failed to find dmc%d node\n", __func__, id); | 654 | pr_err("%s: failed to find dmc%d node\n", __func__, id); |
| 632 | return -ENODEV; | 655 | result = -ENODEV; |
| 656 | goto err_dmc; | ||
| 633 | } | 657 | } |
| 634 | } | 658 | } |
| 635 | 659 | ||
| 636 | arm_regulator = regulator_get(NULL, "vddarm"); | ||
| 637 | if (IS_ERR(arm_regulator)) { | ||
| 638 | pr_err("failed to get regulator vddarm\n"); | ||
| 639 | return PTR_ERR(arm_regulator); | ||
| 640 | } | ||
| 641 | |||
| 642 | int_regulator = regulator_get(NULL, "vddint"); | ||
| 643 | if (IS_ERR(int_regulator)) { | ||
| 644 | pr_err("failed to get regulator vddint\n"); | ||
| 645 | regulator_put(arm_regulator); | ||
| 646 | return PTR_ERR(int_regulator); | ||
| 647 | } | ||
| 648 | |||
| 649 | register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier); | 660 | register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier); |
| 650 | 661 | ||
| 651 | return cpufreq_register_driver(&s5pv210_driver); | 662 | return cpufreq_register_driver(&s5pv210_driver); |
| 663 | |||
| 664 | err_dmc: | ||
| 665 | for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) | ||
| 666 | if (dmc_base[id]) { | ||
| 667 | iounmap(dmc_base[id]); | ||
| 668 | dmc_base[id] = NULL; | ||
| 669 | } | ||
| 670 | |||
| 671 | err_clk_base: | ||
| 672 | iounmap(clk_base); | ||
| 673 | |||
| 674 | err_clock: | ||
| 675 | regulator_put(int_regulator); | ||
| 676 | |||
| 677 | err_int_regulator: | ||
| 678 | regulator_put(arm_regulator); | ||
| 679 | |||
| 680 | return result; | ||
| 652 | } | 681 | } |
| 653 | 682 | ||
| 654 | static struct platform_driver s5pv210_cpufreq_platdrv = { | 683 | static struct platform_driver s5pv210_cpufreq_platdrv = { |
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 9ed46d188cb5..e6182c89df79 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c | |||
| @@ -11,7 +11,7 @@ | |||
| 11 | #include <linux/cpu.h> | 11 | #include <linux/cpu.h> |
| 12 | #include <linux/cpufreq.h> | 12 | #include <linux/cpufreq.h> |
| 13 | #include <linux/cpumask.h> | 13 | #include <linux/cpumask.h> |
| 14 | #include <linux/cpu_cooling.h> | 14 | #include <linux/energy_model.h> |
| 15 | #include <linux/export.h> | 15 | #include <linux/export.h> |
| 16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
| 17 | #include <linux/pm_opp.h> | 17 | #include <linux/pm_opp.h> |
| @@ -22,7 +22,6 @@ | |||
| 22 | struct scmi_data { | 22 | struct scmi_data { |
| 23 | int domain_id; | 23 | int domain_id; |
| 24 | struct device *cpu_dev; | 24 | struct device *cpu_dev; |
| 25 | struct thermal_cooling_device *cdev; | ||
| 26 | }; | 25 | }; |
| 27 | 26 | ||
| 28 | static const struct scmi_handle *handle; | 27 | static const struct scmi_handle *handle; |
| @@ -103,13 +102,42 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) | |||
| 103 | return 0; | 102 | return 0; |
| 104 | } | 103 | } |
| 105 | 104 | ||
| 105 | static int __maybe_unused | ||
| 106 | scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu) | ||
| 107 | { | ||
| 108 | struct device *cpu_dev = get_cpu_device(cpu); | ||
| 109 | unsigned long Hz; | ||
| 110 | int ret, domain; | ||
| 111 | |||
| 112 | if (!cpu_dev) { | ||
| 113 | pr_err("failed to get cpu%d device\n", cpu); | ||
| 114 | return -ENODEV; | ||
| 115 | } | ||
| 116 | |||
| 117 | domain = handle->perf_ops->device_domain_id(cpu_dev); | ||
| 118 | if (domain < 0) | ||
| 119 | return domain; | ||
| 120 | |||
| 121 | /* Get the power cost of the performance domain. */ | ||
| 122 | Hz = *KHz * 1000; | ||
| 123 | ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power); | ||
| 124 | if (ret) | ||
| 125 | return ret; | ||
| 126 | |||
| 127 | /* The EM framework specifies the frequency in KHz. */ | ||
| 128 | *KHz = Hz / 1000; | ||
| 129 | |||
| 130 | return 0; | ||
| 131 | } | ||
| 132 | |||
| 106 | static int scmi_cpufreq_init(struct cpufreq_policy *policy) | 133 | static int scmi_cpufreq_init(struct cpufreq_policy *policy) |
| 107 | { | 134 | { |
| 108 | int ret; | 135 | int ret, nr_opp; |
| 109 | unsigned int latency; | 136 | unsigned int latency; |
| 110 | struct device *cpu_dev; | 137 | struct device *cpu_dev; |
| 111 | struct scmi_data *priv; | 138 | struct scmi_data *priv; |
| 112 | struct cpufreq_frequency_table *freq_table; | 139 | struct cpufreq_frequency_table *freq_table; |
| 140 | struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power); | ||
| 113 | 141 | ||
| 114 | cpu_dev = get_cpu_device(policy->cpu); | 142 | cpu_dev = get_cpu_device(policy->cpu); |
| 115 | if (!cpu_dev) { | 143 | if (!cpu_dev) { |
| @@ -136,8 +164,8 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) | |||
| 136 | return ret; | 164 | return ret; |
| 137 | } | 165 | } |
| 138 | 166 | ||
| 139 | ret = dev_pm_opp_get_opp_count(cpu_dev); | 167 | nr_opp = dev_pm_opp_get_opp_count(cpu_dev); |
| 140 | if (ret <= 0) { | 168 | if (nr_opp <= 0) { |
| 141 | dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n"); | 169 | dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n"); |
| 142 | ret = -EPROBE_DEFER; | 170 | ret = -EPROBE_DEFER; |
| 143 | goto out_free_opp; | 171 | goto out_free_opp; |
| @@ -171,6 +199,9 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) | |||
| 171 | policy->cpuinfo.transition_latency = latency; | 199 | policy->cpuinfo.transition_latency = latency; |
| 172 | 200 | ||
| 173 | policy->fast_switch_possible = true; | 201 | policy->fast_switch_possible = true; |
| 202 | |||
| 203 | em_register_perf_domain(policy->cpus, nr_opp, &em_cb); | ||
| 204 | |||
| 174 | return 0; | 205 | return 0; |
| 175 | 206 | ||
| 176 | out_free_priv: | 207 | out_free_priv: |
| @@ -185,7 +216,6 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy) | |||
| 185 | { | 216 | { |
| 186 | struct scmi_data *priv = policy->driver_data; | 217 | struct scmi_data *priv = policy->driver_data; |
| 187 | 218 | ||
| 188 | cpufreq_cooling_unregister(priv->cdev); | ||
| 189 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); | 219 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); |
| 190 | dev_pm_opp_remove_all_dynamic(priv->cpu_dev); | 220 | dev_pm_opp_remove_all_dynamic(priv->cpu_dev); |
| 191 | kfree(priv); | 221 | kfree(priv); |
| @@ -193,17 +223,11 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy) | |||
| 193 | return 0; | 223 | return 0; |
| 194 | } | 224 | } |
| 195 | 225 | ||
| 196 | static void scmi_cpufreq_ready(struct cpufreq_policy *policy) | ||
| 197 | { | ||
| 198 | struct scmi_data *priv = policy->driver_data; | ||
| 199 | |||
| 200 | priv->cdev = of_cpufreq_cooling_register(policy); | ||
| 201 | } | ||
| 202 | |||
| 203 | static struct cpufreq_driver scmi_cpufreq_driver = { | 226 | static struct cpufreq_driver scmi_cpufreq_driver = { |
| 204 | .name = "scmi", | 227 | .name = "scmi", |
| 205 | .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | | 228 | .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | |
| 206 | CPUFREQ_NEED_INITIAL_FREQ_CHECK, | 229 | CPUFREQ_NEED_INITIAL_FREQ_CHECK | |
| 230 | CPUFREQ_IS_COOLING_DEV, | ||
| 207 | .verify = cpufreq_generic_frequency_table_verify, | 231 | .verify = cpufreq_generic_frequency_table_verify, |
| 208 | .attr = cpufreq_generic_attr, | 232 | .attr = cpufreq_generic_attr, |
| 209 | .target_index = scmi_cpufreq_set_target, | 233 | .target_index = scmi_cpufreq_set_target, |
| @@ -211,7 +235,6 @@ static struct cpufreq_driver scmi_cpufreq_driver = { | |||
| 211 | .get = scmi_cpufreq_get_rate, | 235 | .get = scmi_cpufreq_get_rate, |
| 212 | .init = scmi_cpufreq_init, | 236 | .init = scmi_cpufreq_init, |
| 213 | .exit = scmi_cpufreq_exit, | 237 | .exit = scmi_cpufreq_exit, |
| 214 | .ready = scmi_cpufreq_ready, | ||
| 215 | }; | 238 | }; |
| 216 | 239 | ||
| 217 | static int scmi_cpufreq_probe(struct scmi_device *sdev) | 240 | static int scmi_cpufreq_probe(struct scmi_device *sdev) |
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 99449738faa4..3f49427766b8 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
| 23 | #include <linux/cpufreq.h> | 23 | #include <linux/cpufreq.h> |
| 24 | #include <linux/cpumask.h> | 24 | #include <linux/cpumask.h> |
| 25 | #include <linux/cpu_cooling.h> | ||
| 26 | #include <linux/export.h> | 25 | #include <linux/export.h> |
| 27 | #include <linux/module.h> | 26 | #include <linux/module.h> |
| 28 | #include <linux/of_platform.h> | 27 | #include <linux/of_platform.h> |
| @@ -34,7 +33,6 @@ | |||
| 34 | struct scpi_data { | 33 | struct scpi_data { |
| 35 | struct clk *clk; | 34 | struct clk *clk; |
| 36 | struct device *cpu_dev; | 35 | struct device *cpu_dev; |
| 37 | struct thermal_cooling_device *cdev; | ||
| 38 | }; | 36 | }; |
| 39 | 37 | ||
| 40 | static struct scpi_ops *scpi_ops; | 38 | static struct scpi_ops *scpi_ops; |
| @@ -170,6 +168,9 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy) | |||
| 170 | policy->cpuinfo.transition_latency = latency; | 168 | policy->cpuinfo.transition_latency = latency; |
| 171 | 169 | ||
| 172 | policy->fast_switch_possible = false; | 170 | policy->fast_switch_possible = false; |
| 171 | |||
| 172 | dev_pm_opp_of_register_em(policy->cpus); | ||
| 173 | |||
| 173 | return 0; | 174 | return 0; |
| 174 | 175 | ||
| 175 | out_free_cpufreq_table: | 176 | out_free_cpufreq_table: |
| @@ -186,7 +187,6 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy) | |||
| 186 | { | 187 | { |
| 187 | struct scpi_data *priv = policy->driver_data; | 188 | struct scpi_data *priv = policy->driver_data; |
| 188 | 189 | ||
| 189 | cpufreq_cooling_unregister(priv->cdev); | ||
| 190 | clk_put(priv->clk); | 190 | clk_put(priv->clk); |
| 191 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); | 191 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); |
| 192 | kfree(priv); | 192 | kfree(priv); |
| @@ -195,23 +195,16 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy) | |||
| 195 | return 0; | 195 | return 0; |
| 196 | } | 196 | } |
| 197 | 197 | ||
| 198 | static void scpi_cpufreq_ready(struct cpufreq_policy *policy) | ||
| 199 | { | ||
| 200 | struct scpi_data *priv = policy->driver_data; | ||
| 201 | |||
| 202 | priv->cdev = of_cpufreq_cooling_register(policy); | ||
| 203 | } | ||
| 204 | |||
| 205 | static struct cpufreq_driver scpi_cpufreq_driver = { | 198 | static struct cpufreq_driver scpi_cpufreq_driver = { |
| 206 | .name = "scpi-cpufreq", | 199 | .name = "scpi-cpufreq", |
| 207 | .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | | 200 | .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | |
| 208 | CPUFREQ_NEED_INITIAL_FREQ_CHECK, | 201 | CPUFREQ_NEED_INITIAL_FREQ_CHECK | |
| 202 | CPUFREQ_IS_COOLING_DEV, | ||
| 209 | .verify = cpufreq_generic_frequency_table_verify, | 203 | .verify = cpufreq_generic_frequency_table_verify, |
| 210 | .attr = cpufreq_generic_attr, | 204 | .attr = cpufreq_generic_attr, |
| 211 | .get = scpi_cpufreq_get_rate, | 205 | .get = scpi_cpufreq_get_rate, |
| 212 | .init = scpi_cpufreq_init, | 206 | .init = scpi_cpufreq_init, |
| 213 | .exit = scpi_cpufreq_exit, | 207 | .exit = scpi_cpufreq_exit, |
| 214 | .ready = scpi_cpufreq_ready, | ||
| 215 | .target_index = scpi_cpufreq_set_target, | 208 | .target_index = scpi_cpufreq_set_target, |
| 216 | }; | 209 | }; |
| 217 | 210 | ||
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index fbbcb88db061..5d8a09b82efb 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c | |||
| @@ -243,8 +243,7 @@ static unsigned int speedstep_get(unsigned int cpu) | |||
| 243 | unsigned int speed; | 243 | unsigned int speed; |
| 244 | 244 | ||
| 245 | /* You're supposed to ensure CPU is online. */ | 245 | /* You're supposed to ensure CPU is online. */ |
| 246 | if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0) | 246 | BUG_ON(smp_call_function_single(cpu, get_freq_data, &speed, 1)); |
| 247 | BUG(); | ||
| 248 | 247 | ||
| 249 | pr_debug("detected %u kHz as current frequency\n", speed); | 248 | pr_debug("detected %u kHz as current frequency\n", speed); |
| 250 | return speed; | 249 | return speed; |
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c index ba3795e13ac6..5e748c8a5c9a 100644 --- a/drivers/cpufreq/tegra124-cpufreq.c +++ b/drivers/cpufreq/tegra124-cpufreq.c | |||
| @@ -118,6 +118,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev) | |||
| 118 | 118 | ||
| 119 | platform_set_drvdata(pdev, priv); | 119 | platform_set_drvdata(pdev, priv); |
| 120 | 120 | ||
| 121 | of_node_put(np); | ||
| 122 | |||
| 121 | return 0; | 123 | return 0; |
| 122 | 124 | ||
| 123 | out_put_pllp_clk: | 125 | out_put_pllp_clk: |
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index 7e48eb5bf0a7..8caccbbd7353 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig | |||
| @@ -4,7 +4,7 @@ config CPU_IDLE | |||
| 4 | bool "CPU idle PM support" | 4 | bool "CPU idle PM support" |
| 5 | default y if ACPI || PPC_PSERIES | 5 | default y if ACPI || PPC_PSERIES |
| 6 | select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE) | 6 | select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE) |
| 7 | select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE) | 7 | select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE) && !CPU_IDLE_GOV_TEO |
| 8 | help | 8 | help |
| 9 | CPU idle is a generic framework for supporting software-controlled | 9 | CPU idle is a generic framework for supporting software-controlled |
| 10 | idle processor power management. It includes modular cross-platform | 10 | idle processor power management. It includes modular cross-platform |
| @@ -23,6 +23,15 @@ config CPU_IDLE_GOV_LADDER | |||
| 23 | config CPU_IDLE_GOV_MENU | 23 | config CPU_IDLE_GOV_MENU |
| 24 | bool "Menu governor (for tickless system)" | 24 | bool "Menu governor (for tickless system)" |
| 25 | 25 | ||
| 26 | config CPU_IDLE_GOV_TEO | ||
| 27 | bool "Timer events oriented (TEO) governor (for tickless systems)" | ||
| 28 | help | ||
| 29 | This governor implements a simplified idle state selection method | ||
| 30 | focused on timer events and does not do any interactivity boosting. | ||
| 31 | |||
| 32 | Some workloads benefit from using it and it generally should be safe | ||
| 33 | to use. Say Y here if you are not happy with the alternatives. | ||
| 34 | |||
| 26 | config DT_IDLE_STATES | 35 | config DT_IDLE_STATES |
| 27 | bool | 36 | bool |
| 28 | 37 | ||
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index 53342b7f1010..add9569636b5 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c | |||
| @@ -22,16 +22,12 @@ | |||
| 22 | #include "dt_idle_states.h" | 22 | #include "dt_idle_states.h" |
| 23 | 23 | ||
| 24 | static int init_state_node(struct cpuidle_state *idle_state, | 24 | static int init_state_node(struct cpuidle_state *idle_state, |
| 25 | const struct of_device_id *matches, | 25 | const struct of_device_id *match_id, |
| 26 | struct device_node *state_node) | 26 | struct device_node *state_node) |
| 27 | { | 27 | { |
| 28 | int err; | 28 | int err; |
| 29 | const struct of_device_id *match_id; | ||
| 30 | const char *desc; | 29 | const char *desc; |
| 31 | 30 | ||
| 32 | match_id = of_match_node(matches, state_node); | ||
| 33 | if (!match_id) | ||
| 34 | return -ENODEV; | ||
| 35 | /* | 31 | /* |
| 36 | * CPUidle drivers are expected to initialize the const void *data | 32 | * CPUidle drivers are expected to initialize the const void *data |
| 37 | * pointer of the passed in struct of_device_id array to the idle | 33 | * pointer of the passed in struct of_device_id array to the idle |
| @@ -160,6 +156,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, | |||
| 160 | { | 156 | { |
| 161 | struct cpuidle_state *idle_state; | 157 | struct cpuidle_state *idle_state; |
| 162 | struct device_node *state_node, *cpu_node; | 158 | struct device_node *state_node, *cpu_node; |
| 159 | const struct of_device_id *match_id; | ||
| 163 | int i, err = 0; | 160 | int i, err = 0; |
| 164 | const cpumask_t *cpumask; | 161 | const cpumask_t *cpumask; |
| 165 | unsigned int state_idx = start_idx; | 162 | unsigned int state_idx = start_idx; |
| @@ -180,6 +177,12 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, | |||
| 180 | if (!state_node) | 177 | if (!state_node) |
| 181 | break; | 178 | break; |
| 182 | 179 | ||
| 180 | match_id = of_match_node(matches, state_node); | ||
| 181 | if (!match_id) { | ||
| 182 | err = -ENODEV; | ||
| 183 | break; | ||
| 184 | } | ||
| 185 | |||
| 183 | if (!of_device_is_available(state_node)) { | 186 | if (!of_device_is_available(state_node)) { |
| 184 | of_node_put(state_node); | 187 | of_node_put(state_node); |
| 185 | continue; | 188 | continue; |
| @@ -198,7 +201,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, | |||
| 198 | } | 201 | } |
| 199 | 202 | ||
| 200 | idle_state = &drv->states[state_idx++]; | 203 | idle_state = &drv->states[state_idx++]; |
| 201 | err = init_state_node(idle_state, matches, state_node); | 204 | err = init_state_node(idle_state, match_id, state_node); |
| 202 | if (err) { | 205 | if (err) { |
| 203 | pr_err("Parsing idle state node %pOF failed with err %d\n", | 206 | pr_err("Parsing idle state node %pOF failed with err %d\n", |
| 204 | state_node, err); | 207 | state_node, err); |
diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile index 1b512722689f..4d8aff5248a8 100644 --- a/drivers/cpuidle/governors/Makefile +++ b/drivers/cpuidle/governors/Makefile | |||
| @@ -4,3 +4,4 @@ | |||
| 4 | 4 | ||
| 5 | obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o | 5 | obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o |
| 6 | obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o | 6 | obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o |
| 7 | obj-$(CONFIG_CPU_IDLE_GOV_TEO) += teo.o | ||
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c new file mode 100644 index 000000000000..7d05efdbd3c6 --- /dev/null +++ b/drivers/cpuidle/governors/teo.c | |||
| @@ -0,0 +1,444 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Timer events oriented CPU idle governor | ||
| 4 | * | ||
| 5 | * Copyright (C) 2018 Intel Corporation | ||
| 6 | * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> | ||
| 7 | * | ||
| 8 | * The idea of this governor is based on the observation that on many systems | ||
| 9 | * timer events are two or more orders of magnitude more frequent than any | ||
| 10 | * other interrupts, so they are likely to be the most significant source of CPU | ||
| 11 | * wakeups from idle states. Moreover, information about what happened in the | ||
| 12 | * (relatively recent) past can be used to estimate whether or not the deepest | ||
| 13 | * idle state with target residency within the time to the closest timer is | ||
| 14 | * likely to be suitable for the upcoming idle time of the CPU and, if not, then | ||
| 15 | * which of the shallower idle states to choose. | ||
| 16 | * | ||
| 17 | * Of course, non-timer wakeup sources are more important in some use cases and | ||
| 18 | * they can be covered by taking a few most recent idle time intervals of the | ||
| 19 | * CPU into account. However, even in that case it is not necessary to consider | ||
| 20 | * idle duration values greater than the time till the closest timer, as the | ||
| 21 | * patterns that they may belong to produce average values close enough to | ||
| 22 | * the time till the closest timer (sleep length) anyway. | ||
| 23 | * | ||
| 24 | * Thus this governor estimates whether or not the upcoming idle time of the CPU | ||
| 25 | * is likely to be significantly shorter than the sleep length and selects an | ||
| 26 | * idle state for it in accordance with that, as follows: | ||
| 27 | * | ||
| 28 | * - Find an idle state on the basis of the sleep length and state statistics | ||
| 29 | * collected over time: | ||
| 30 | * | ||
| 31 | * o Find the deepest idle state whose target residency is less than or equal | ||
| 32 | * to the sleep length. | ||
| 33 | * | ||
| 34 | * o Select it if it matched both the sleep length and the observed idle | ||
| 35 | * duration in the past more often than it matched the sleep length alone | ||
| 36 | * (i.e. the observed idle duration was significantly shorter than the sleep | ||
| 37 | * length matched by it). | ||
| 38 | * | ||
| 39 | * o Otherwise, select the shallower state with the greatest matched "early" | ||
| 40 | * wakeups metric. | ||
| 41 | * | ||
| 42 | * - If the majority of the most recent idle duration values are below the | ||
| 43 | * target residency of the idle state selected so far, use those values to | ||
| 44 | * compute the new expected idle duration and find an idle state matching it | ||
| 45 | * (which has to be shallower than the one selected so far). | ||
| 46 | */ | ||
| 47 | |||
| 48 | #include <linux/cpuidle.h> | ||
| 49 | #include <linux/jiffies.h> | ||
| 50 | #include <linux/kernel.h> | ||
| 51 | #include <linux/sched/clock.h> | ||
| 52 | #include <linux/tick.h> | ||
| 53 | |||
| 54 | /* | ||
| 55 | * The PULSE value is added to metrics when they grow and the DECAY_SHIFT value | ||
| 56 | * is used for decreasing metrics on a regular basis. | ||
| 57 | */ | ||
| 58 | #define PULSE 1024 | ||
| 59 | #define DECAY_SHIFT 3 | ||
| 60 | |||
| 61 | /* | ||
| 62 | * Number of the most recent idle duration values to take into consideration for | ||
| 63 | * the detection of wakeup patterns. | ||
| 64 | */ | ||
| 65 | #define INTERVALS 8 | ||
| 66 | |||
/**
 * struct teo_idle_state - Idle state data used by the TEO cpuidle governor.
 * @early_hits: "Early" CPU wakeups "matching" this state.
 * @hits: "On time" CPU wakeups "matching" this state.
 * @misses: CPU wakeups "missing" this state.
 *
 * A CPU wakeup is "matched" by a given idle state if the idle duration measured
 * after the wakeup is between the target residency of that state and the target
 * residency of the next one (or if this is the deepest available idle state, it
 * "matches" a CPU wakeup when the measured idle duration is at least equal to
 * its target residency).
 *
 * Also, from the TEO governor perspective, a CPU wakeup from idle is "early" if
 * it occurs significantly earlier than the closest expected timer event (that
 * is, early enough to match an idle state shallower than the one matching the
 * time till the closest timer event).  Otherwise, the wakeup is "on time", or
 * it is a "hit".
 *
 * A "miss" occurs when the given state doesn't match the wakeup, but it matches
 * the time till the closest timer event used for idle state selection.
 *
 * All three counters are decayed (see DECAY_SHIFT) and bumped by PULSE in
 * teo_update(), so they are exponentially-weighted moving tallies rather
 * than absolute counts.
 */
struct teo_idle_state {
	unsigned int early_hits;
	unsigned int hits;
	unsigned int misses;
};
| 93 | |||
/**
 * struct teo_cpu - CPU data used by the TEO cpuidle governor.
 * @time_span_ns: Time between idle state selection and post-wakeup update.
 * @sleep_length_ns: Time till the closest timer event (at the selection time).
 * @states: Idle states data corresponding to this CPU.
 * @last_state: Idle state entered by the CPU last time.
 * @interval_idx: Index of the most recent saved idle interval.
 * @intervals: Saved idle duration values (in microseconds).
 *
 * @last_state doubles as a flag: teo_select() calls teo_update() only when it
 * is >= 0 and resets it to -1 afterwards, so a negative value means "no wakeup
 * data pending"; teo_reflect() sets it to the state that was entered.
 *
 * An @intervals entry of UINT_MAX marks a (likely) timer wakeup, which is
 * excluded from pattern detection by the range checks in teo_select().
 */
struct teo_cpu {
	u64 time_span_ns;
	u64 sleep_length_ns;
	struct teo_idle_state states[CPUIDLE_STATE_MAX];
	int last_state;
	int interval_idx;
	unsigned int intervals[INTERVALS];
};

static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
| 113 | |||
| 114 | /** | ||
| 115 | * teo_update - Update CPU data after wakeup. | ||
| 116 | * @drv: cpuidle driver containing state data. | ||
| 117 | * @dev: Target CPU. | ||
| 118 | */ | ||
| 119 | static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) | ||
| 120 | { | ||
| 121 | struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); | ||
| 122 | unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns); | ||
| 123 | int i, idx_hit = -1, idx_timer = -1; | ||
| 124 | unsigned int measured_us; | ||
| 125 | |||
| 126 | if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) { | ||
| 127 | /* | ||
| 128 | * One of the safety nets has triggered or this was a timer | ||
| 129 | * wakeup (or equivalent). | ||
| 130 | */ | ||
| 131 | measured_us = sleep_length_us; | ||
| 132 | } else { | ||
| 133 | unsigned int lat = drv->states[cpu_data->last_state].exit_latency; | ||
| 134 | |||
| 135 | measured_us = ktime_to_us(cpu_data->time_span_ns); | ||
| 136 | /* | ||
| 137 | * The delay between the wakeup and the first instruction | ||
| 138 | * executed by the CPU is not likely to be worst-case every | ||
| 139 | * time, so take 1/2 of the exit latency as a very rough | ||
| 140 | * approximation of the average of it. | ||
| 141 | */ | ||
| 142 | if (measured_us >= lat) | ||
| 143 | measured_us -= lat / 2; | ||
| 144 | else | ||
| 145 | measured_us /= 2; | ||
| 146 | } | ||
| 147 | |||
| 148 | /* | ||
| 149 | * Decay the "early hits" metric for all of the states and find the | ||
| 150 | * states matching the sleep length and the measured idle duration. | ||
| 151 | */ | ||
| 152 | for (i = 0; i < drv->state_count; i++) { | ||
| 153 | unsigned int early_hits = cpu_data->states[i].early_hits; | ||
| 154 | |||
| 155 | cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT; | ||
| 156 | |||
| 157 | if (drv->states[i].target_residency <= sleep_length_us) { | ||
| 158 | idx_timer = i; | ||
| 159 | if (drv->states[i].target_residency <= measured_us) | ||
| 160 | idx_hit = i; | ||
| 161 | } | ||
| 162 | } | ||
| 163 | |||
| 164 | /* | ||
| 165 | * Update the "hits" and "misses" data for the state matching the sleep | ||
| 166 | * length. If it matches the measured idle duration too, this is a hit, | ||
| 167 | * so increase the "hits" metric for it then. Otherwise, this is a | ||
| 168 | * miss, so increase the "misses" metric for it. In the latter case | ||
| 169 | * also increase the "early hits" metric for the state that actually | ||
| 170 | * matches the measured idle duration. | ||
| 171 | */ | ||
| 172 | if (idx_timer >= 0) { | ||
| 173 | unsigned int hits = cpu_data->states[idx_timer].hits; | ||
| 174 | unsigned int misses = cpu_data->states[idx_timer].misses; | ||
| 175 | |||
| 176 | hits -= hits >> DECAY_SHIFT; | ||
| 177 | misses -= misses >> DECAY_SHIFT; | ||
| 178 | |||
| 179 | if (idx_timer > idx_hit) { | ||
| 180 | misses += PULSE; | ||
| 181 | if (idx_hit >= 0) | ||
| 182 | cpu_data->states[idx_hit].early_hits += PULSE; | ||
| 183 | } else { | ||
| 184 | hits += PULSE; | ||
| 185 | } | ||
| 186 | |||
| 187 | cpu_data->states[idx_timer].misses = misses; | ||
| 188 | cpu_data->states[idx_timer].hits = hits; | ||
| 189 | } | ||
| 190 | |||
| 191 | /* | ||
| 192 | * If the total time span between idle state selection and the "reflect" | ||
| 193 | * callback is greater than or equal to the sleep length determined at | ||
| 194 | * the idle state selection time, the wakeup is likely to be due to a | ||
| 195 | * timer event. | ||
| 196 | */ | ||
| 197 | if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) | ||
| 198 | measured_us = UINT_MAX; | ||
| 199 | |||
| 200 | /* | ||
| 201 | * Save idle duration values corresponding to non-timer wakeups for | ||
| 202 | * pattern detection. | ||
| 203 | */ | ||
| 204 | cpu_data->intervals[cpu_data->interval_idx++] = measured_us; | ||
| 205 | if (cpu_data->interval_idx > INTERVALS) | ||
| 206 | cpu_data->interval_idx = 0; | ||
| 207 | } | ||
| 208 | |||
| 209 | /** | ||
| 210 | * teo_find_shallower_state - Find shallower idle state matching given duration. | ||
| 211 | * @drv: cpuidle driver containing state data. | ||
| 212 | * @dev: Target CPU. | ||
| 213 | * @state_idx: Index of the capping idle state. | ||
| 214 | * @duration_us: Idle duration value to match. | ||
| 215 | */ | ||
| 216 | static int teo_find_shallower_state(struct cpuidle_driver *drv, | ||
| 217 | struct cpuidle_device *dev, int state_idx, | ||
| 218 | unsigned int duration_us) | ||
| 219 | { | ||
| 220 | int i; | ||
| 221 | |||
| 222 | for (i = state_idx - 1; i >= 0; i--) { | ||
| 223 | if (drv->states[i].disabled || dev->states_usage[i].disable) | ||
| 224 | continue; | ||
| 225 | |||
| 226 | state_idx = i; | ||
| 227 | if (drv->states[i].target_residency <= duration_us) | ||
| 228 | break; | ||
| 229 | } | ||
| 230 | return state_idx; | ||
| 231 | } | ||
| 232 | |||
| 233 | /** | ||
| 234 | * teo_select - Selects the next idle state to enter. | ||
| 235 | * @drv: cpuidle driver containing state data. | ||
| 236 | * @dev: Target CPU. | ||
| 237 | * @stop_tick: Indication on whether or not to stop the scheduler tick. | ||
| 238 | */ | ||
| 239 | static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, | ||
| 240 | bool *stop_tick) | ||
| 241 | { | ||
| 242 | struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); | ||
| 243 | int latency_req = cpuidle_governor_latency_req(dev->cpu); | ||
| 244 | unsigned int duration_us, count; | ||
| 245 | int max_early_idx, idx, i; | ||
| 246 | ktime_t delta_tick; | ||
| 247 | |||
| 248 | if (cpu_data->last_state >= 0) { | ||
| 249 | teo_update(drv, dev); | ||
| 250 | cpu_data->last_state = -1; | ||
| 251 | } | ||
| 252 | |||
| 253 | cpu_data->time_span_ns = local_clock(); | ||
| 254 | |||
| 255 | cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick); | ||
| 256 | duration_us = ktime_to_us(cpu_data->sleep_length_ns); | ||
| 257 | |||
| 258 | count = 0; | ||
| 259 | max_early_idx = -1; | ||
| 260 | idx = -1; | ||
| 261 | |||
| 262 | for (i = 0; i < drv->state_count; i++) { | ||
| 263 | struct cpuidle_state *s = &drv->states[i]; | ||
| 264 | struct cpuidle_state_usage *su = &dev->states_usage[i]; | ||
| 265 | |||
| 266 | if (s->disabled || su->disable) { | ||
| 267 | /* | ||
| 268 | * If the "early hits" metric of a disabled state is | ||
| 269 | * greater than the current maximum, it should be taken | ||
| 270 | * into account, because it would be a mistake to select | ||
| 271 | * a deeper state with lower "early hits" metric. The | ||
| 272 | * index cannot be changed to point to it, however, so | ||
| 273 | * just increase the max count alone and let the index | ||
| 274 | * still point to a shallower idle state. | ||
| 275 | */ | ||
| 276 | if (max_early_idx >= 0 && | ||
| 277 | count < cpu_data->states[i].early_hits) | ||
| 278 | count = cpu_data->states[i].early_hits; | ||
| 279 | |||
| 280 | continue; | ||
| 281 | } | ||
| 282 | |||
| 283 | if (idx < 0) | ||
| 284 | idx = i; /* first enabled state */ | ||
| 285 | |||
| 286 | if (s->target_residency > duration_us) | ||
| 287 | break; | ||
| 288 | |||
| 289 | if (s->exit_latency > latency_req) { | ||
| 290 | /* | ||
| 291 | * If we break out of the loop for latency reasons, use | ||
| 292 | * the target residency of the selected state as the | ||
| 293 | * expected idle duration to avoid stopping the tick | ||
| 294 | * as long as that target residency is low enough. | ||
| 295 | */ | ||
| 296 | duration_us = drv->states[idx].target_residency; | ||
| 297 | goto refine; | ||
| 298 | } | ||
| 299 | |||
| 300 | idx = i; | ||
| 301 | |||
| 302 | if (count < cpu_data->states[i].early_hits && | ||
| 303 | !(tick_nohz_tick_stopped() && | ||
| 304 | drv->states[i].target_residency < TICK_USEC)) { | ||
| 305 | count = cpu_data->states[i].early_hits; | ||
| 306 | max_early_idx = i; | ||
| 307 | } | ||
| 308 | } | ||
| 309 | |||
| 310 | /* | ||
| 311 | * If the "hits" metric of the idle state matching the sleep length is | ||
| 312 | * greater than its "misses" metric, that is the one to use. Otherwise, | ||
| 313 | * it is more likely that one of the shallower states will match the | ||
| 314 | * idle duration observed after wakeup, so take the one with the maximum | ||
| 315 | * "early hits" metric, but if that cannot be determined, just use the | ||
| 316 | * state selected so far. | ||
| 317 | */ | ||
| 318 | if (cpu_data->states[idx].hits <= cpu_data->states[idx].misses && | ||
| 319 | max_early_idx >= 0) { | ||
| 320 | idx = max_early_idx; | ||
| 321 | duration_us = drv->states[idx].target_residency; | ||
| 322 | } | ||
| 323 | |||
| 324 | refine: | ||
| 325 | if (idx < 0) { | ||
| 326 | idx = 0; /* No states enabled. Must use 0. */ | ||
| 327 | } else if (idx > 0) { | ||
| 328 | u64 sum = 0; | ||
| 329 | |||
| 330 | count = 0; | ||
| 331 | |||
| 332 | /* | ||
| 333 | * Count and sum the most recent idle duration values less than | ||
| 334 | * the target residency of the state selected so far, find the | ||
| 335 | * max. | ||
| 336 | */ | ||
| 337 | for (i = 0; i < INTERVALS; i++) { | ||
| 338 | unsigned int val = cpu_data->intervals[i]; | ||
| 339 | |||
| 340 | if (val >= drv->states[idx].target_residency) | ||
| 341 | continue; | ||
| 342 | |||
| 343 | count++; | ||
| 344 | sum += val; | ||
| 345 | } | ||
| 346 | |||
| 347 | /* | ||
| 348 | * Give up unless the majority of the most recent idle duration | ||
| 349 | * values are in the interesting range. | ||
| 350 | */ | ||
| 351 | if (count > INTERVALS / 2) { | ||
| 352 | unsigned int avg_us = div64_u64(sum, count); | ||
| 353 | |||
| 354 | /* | ||
| 355 | * Avoid spending too much time in an idle state that | ||
| 356 | * would be too shallow. | ||
| 357 | */ | ||
| 358 | if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) { | ||
| 359 | idx = teo_find_shallower_state(drv, dev, idx, avg_us); | ||
| 360 | duration_us = avg_us; | ||
| 361 | } | ||
| 362 | } | ||
| 363 | } | ||
| 364 | |||
| 365 | /* | ||
| 366 | * Don't stop the tick if the selected state is a polling one or if the | ||
| 367 | * expected idle duration is shorter than the tick period length. | ||
| 368 | */ | ||
| 369 | if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) || | ||
| 370 | duration_us < TICK_USEC) && !tick_nohz_tick_stopped()) { | ||
| 371 | unsigned int delta_tick_us = ktime_to_us(delta_tick); | ||
| 372 | |||
| 373 | *stop_tick = false; | ||
| 374 | |||
| 375 | /* | ||
| 376 | * The tick is not going to be stopped, so if the target | ||
| 377 | * residency of the state to be returned is not within the time | ||
| 378 | * till the closest timer including the tick, try to correct | ||
| 379 | * that. | ||
| 380 | */ | ||
| 381 | if (idx > 0 && drv->states[idx].target_residency > delta_tick_us) | ||
| 382 | idx = teo_find_shallower_state(drv, dev, idx, delta_tick_us); | ||
| 383 | } | ||
| 384 | |||
| 385 | return idx; | ||
| 386 | } | ||
| 387 | |||
| 388 | /** | ||
| 389 | * teo_reflect - Note that governor data for the CPU need to be updated. | ||
| 390 | * @dev: Target CPU. | ||
| 391 | * @state: Entered state. | ||
| 392 | */ | ||
| 393 | static void teo_reflect(struct cpuidle_device *dev, int state) | ||
| 394 | { | ||
| 395 | struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); | ||
| 396 | |||
| 397 | cpu_data->last_state = state; | ||
| 398 | /* | ||
| 399 | * If the wakeup was not "natural", but triggered by one of the safety | ||
| 400 | * nets, assume that the CPU might have been idle for the entire sleep | ||
| 401 | * length time. | ||
| 402 | */ | ||
| 403 | if (dev->poll_time_limit || | ||
| 404 | (tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) { | ||
| 405 | dev->poll_time_limit = false; | ||
| 406 | cpu_data->time_span_ns = cpu_data->sleep_length_ns; | ||
| 407 | } else { | ||
| 408 | cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns; | ||
| 409 | } | ||
| 410 | } | ||
| 411 | |||
| 412 | /** | ||
| 413 | * teo_enable_device - Initialize the governor's data for the target CPU. | ||
| 414 | * @drv: cpuidle driver (not used). | ||
| 415 | * @dev: Target CPU. | ||
| 416 | */ | ||
| 417 | static int teo_enable_device(struct cpuidle_driver *drv, | ||
| 418 | struct cpuidle_device *dev) | ||
| 419 | { | ||
| 420 | struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); | ||
| 421 | int i; | ||
| 422 | |||
| 423 | memset(cpu_data, 0, sizeof(*cpu_data)); | ||
| 424 | |||
| 425 | for (i = 0; i < INTERVALS; i++) | ||
| 426 | cpu_data->intervals[i] = UINT_MAX; | ||
| 427 | |||
| 428 | return 0; | ||
| 429 | } | ||
| 430 | |||
/*
 * Governor descriptor.  NOTE(review): rating 19 presumably ranks TEO just
 * below the default menu governor so it is opt-in — confirm against the
 * other governors' rating values.
 */
static struct cpuidle_governor teo_governor = {
	.name =		"teo",
	.rating =	19,
	.enable =	teo_enable_device,
	.select =	teo_select,
	.reflect =	teo_reflect,
};

/* Register the governor with the cpuidle core during early boot. */
static int __init teo_governor_init(void)
{
	return cpuidle_register_governor(&teo_governor);
}

postcore_initcall(teo_governor_init);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 017fc602a10e..cf7c66bb3ed9 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #include <linux/irq.h> | 7 | #include <linux/irq.h> |
| 8 | #include <linux/pm_runtime.h> | ||
| 8 | #include "i915_pmu.h" | 9 | #include "i915_pmu.h" |
| 9 | #include "intel_ringbuffer.h" | 10 | #include "intel_ringbuffer.h" |
| 10 | #include "i915_drv.h" | 11 | #include "i915_drv.h" |
| @@ -478,7 +479,6 @@ static u64 get_rc6(struct drm_i915_private *i915) | |||
| 478 | * counter value. | 479 | * counter value. |
| 479 | */ | 480 | */ |
| 480 | spin_lock_irqsave(&i915->pmu.lock, flags); | 481 | spin_lock_irqsave(&i915->pmu.lock, flags); |
| 481 | spin_lock(&kdev->power.lock); | ||
| 482 | 482 | ||
| 483 | /* | 483 | /* |
| 484 | * After the above branch intel_runtime_pm_get_if_in_use failed | 484 | * After the above branch intel_runtime_pm_get_if_in_use failed |
| @@ -491,16 +491,13 @@ static u64 get_rc6(struct drm_i915_private *i915) | |||
| 491 | * suspended and if not we cannot do better than report the last | 491 | * suspended and if not we cannot do better than report the last |
| 492 | * known RC6 value. | 492 | * known RC6 value. |
| 493 | */ | 493 | */ |
| 494 | if (kdev->power.runtime_status == RPM_SUSPENDED) { | 494 | if (pm_runtime_status_suspended(kdev)) { |
| 495 | if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) | 495 | val = pm_runtime_suspended_time(kdev); |
| 496 | i915->pmu.suspended_jiffies_last = | ||
| 497 | kdev->power.suspended_jiffies; | ||
| 498 | 496 | ||
| 499 | val = kdev->power.suspended_jiffies - | 497 | if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) |
| 500 | i915->pmu.suspended_jiffies_last; | 498 | i915->pmu.suspended_time_last = val; |
| 501 | val += jiffies - kdev->power.accounting_timestamp; | ||
| 502 | 499 | ||
| 503 | val = jiffies_to_nsecs(val); | 500 | val -= i915->pmu.suspended_time_last; |
| 504 | val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; | 501 | val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; |
| 505 | 502 | ||
| 506 | i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; | 503 | i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; |
| @@ -510,7 +507,6 @@ static u64 get_rc6(struct drm_i915_private *i915) | |||
| 510 | val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; | 507 | val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; |
| 511 | } | 508 | } |
| 512 | 509 | ||
| 513 | spin_unlock(&kdev->power.lock); | ||
| 514 | spin_unlock_irqrestore(&i915->pmu.lock, flags); | 510 | spin_unlock_irqrestore(&i915->pmu.lock, flags); |
| 515 | } | 511 | } |
| 516 | 512 | ||
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index b3728c5f13e7..4fc4f2478301 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h | |||
| @@ -97,9 +97,9 @@ struct i915_pmu { | |||
| 97 | */ | 97 | */ |
| 98 | struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; | 98 | struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; |
| 99 | /** | 99 | /** |
| 100 | * @suspended_jiffies_last: Cached suspend time from PM core. | 100 | * @suspended_time_last: Cached suspend time from PM core. |
| 101 | */ | 101 | */ |
| 102 | unsigned long suspended_jiffies_last; | 102 | u64 suspended_time_last; |
| 103 | /** | 103 | /** |
| 104 | * @i915_attr: Memory block holding device attributes. | 104 | * @i915_attr: Memory block holding device attributes. |
| 105 | */ | 105 | */ |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 8b5d85c91e9d..b8647b5c3d4d 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
| @@ -1103,6 +1103,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { | |||
| 1103 | INTEL_CPU_FAM6(ATOM_GOLDMONT, idle_cpu_bxt), | 1103 | INTEL_CPU_FAM6(ATOM_GOLDMONT, idle_cpu_bxt), |
| 1104 | INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, idle_cpu_bxt), | 1104 | INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, idle_cpu_bxt), |
| 1105 | INTEL_CPU_FAM6(ATOM_GOLDMONT_X, idle_cpu_dnv), | 1105 | INTEL_CPU_FAM6(ATOM_GOLDMONT_X, idle_cpu_dnv), |
| 1106 | INTEL_CPU_FAM6(ATOM_TREMONT_X, idle_cpu_dnv), | ||
| 1106 | {} | 1107 | {} |
| 1107 | }; | 1108 | }; |
| 1108 | 1109 | ||
diff --git a/drivers/opp/core.c b/drivers/opp/core.c index e06a0ab05ad6..d7f97167cac3 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c | |||
| @@ -551,9 +551,8 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg, | |||
| 551 | return ret; | 551 | return ret; |
| 552 | } | 552 | } |
| 553 | 553 | ||
| 554 | static inline int | 554 | static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk, |
| 555 | _generic_set_opp_clk_only(struct device *dev, struct clk *clk, | 555 | unsigned long freq) |
| 556 | unsigned long old_freq, unsigned long freq) | ||
| 557 | { | 556 | { |
| 558 | int ret; | 557 | int ret; |
| 559 | 558 | ||
| @@ -590,7 +589,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table, | |||
| 590 | } | 589 | } |
| 591 | 590 | ||
| 592 | /* Change frequency */ | 591 | /* Change frequency */ |
| 593 | ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq); | 592 | ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq); |
| 594 | if (ret) | 593 | if (ret) |
| 595 | goto restore_voltage; | 594 | goto restore_voltage; |
| 596 | 595 | ||
| @@ -604,7 +603,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table, | |||
| 604 | return 0; | 603 | return 0; |
| 605 | 604 | ||
| 606 | restore_freq: | 605 | restore_freq: |
| 607 | if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq)) | 606 | if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq)) |
| 608 | dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", | 607 | dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", |
| 609 | __func__, old_freq); | 608 | __func__, old_freq); |
| 610 | restore_voltage: | 609 | restore_voltage: |
| @@ -777,7 +776,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) | |||
| 777 | opp->supplies); | 776 | opp->supplies); |
| 778 | } else { | 777 | } else { |
| 779 | /* Only frequency scaling */ | 778 | /* Only frequency scaling */ |
| 780 | ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq); | 779 | ret = _generic_set_opp_clk_only(dev, clk, freq); |
| 781 | } | 780 | } |
| 782 | 781 | ||
| 783 | /* Scaling down? Configure required OPPs after frequency */ | 782 | /* Scaling down? Configure required OPPs after frequency */ |
| @@ -811,7 +810,6 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev, | |||
| 811 | struct opp_table *opp_table) | 810 | struct opp_table *opp_table) |
| 812 | { | 811 | { |
| 813 | struct opp_device *opp_dev; | 812 | struct opp_device *opp_dev; |
| 814 | int ret; | ||
| 815 | 813 | ||
| 816 | opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL); | 814 | opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL); |
| 817 | if (!opp_dev) | 815 | if (!opp_dev) |
| @@ -823,10 +821,7 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev, | |||
| 823 | list_add(&opp_dev->node, &opp_table->dev_list); | 821 | list_add(&opp_dev->node, &opp_table->dev_list); |
| 824 | 822 | ||
| 825 | /* Create debugfs entries for the opp_table */ | 823 | /* Create debugfs entries for the opp_table */ |
| 826 | ret = opp_debug_register(opp_dev, opp_table); | 824 | opp_debug_register(opp_dev, opp_table); |
| 827 | if (ret) | ||
| 828 | dev_err(dev, "%s: Failed to register opp debugfs (%d)\n", | ||
| 829 | __func__, ret); | ||
| 830 | 825 | ||
| 831 | return opp_dev; | 826 | return opp_dev; |
| 832 | } | 827 | } |
| @@ -1247,10 +1242,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, | |||
| 1247 | new_opp->opp_table = opp_table; | 1242 | new_opp->opp_table = opp_table; |
| 1248 | kref_init(&new_opp->kref); | 1243 | kref_init(&new_opp->kref); |
| 1249 | 1244 | ||
| 1250 | ret = opp_debug_create_one(new_opp, opp_table); | 1245 | opp_debug_create_one(new_opp, opp_table); |
| 1251 | if (ret) | ||
| 1252 | dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n", | ||
| 1253 | __func__, ret); | ||
| 1254 | 1246 | ||
| 1255 | if (!_opp_supported_by_regulators(new_opp, opp_table)) { | 1247 | if (!_opp_supported_by_regulators(new_opp, opp_table)) { |
| 1256 | new_opp->available = false; | 1248 | new_opp->available = false; |
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c index e6828e5f81b0..a1c57fe14de4 100644 --- a/drivers/opp/debugfs.c +++ b/drivers/opp/debugfs.c | |||
| @@ -35,7 +35,7 @@ void opp_debug_remove_one(struct dev_pm_opp *opp) | |||
| 35 | debugfs_remove_recursive(opp->dentry); | 35 | debugfs_remove_recursive(opp->dentry); |
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | static bool opp_debug_create_supplies(struct dev_pm_opp *opp, | 38 | static void opp_debug_create_supplies(struct dev_pm_opp *opp, |
| 39 | struct opp_table *opp_table, | 39 | struct opp_table *opp_table, |
| 40 | struct dentry *pdentry) | 40 | struct dentry *pdentry) |
| 41 | { | 41 | { |
| @@ -50,30 +50,21 @@ static bool opp_debug_create_supplies(struct dev_pm_opp *opp, | |||
| 50 | /* Create per-opp directory */ | 50 | /* Create per-opp directory */ |
| 51 | d = debugfs_create_dir(name, pdentry); | 51 | d = debugfs_create_dir(name, pdentry); |
| 52 | 52 | ||
| 53 | if (!d) | 53 | debugfs_create_ulong("u_volt_target", S_IRUGO, d, |
| 54 | return false; | 54 | &opp->supplies[i].u_volt); |
| 55 | 55 | ||
| 56 | if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, | 56 | debugfs_create_ulong("u_volt_min", S_IRUGO, d, |
| 57 | &opp->supplies[i].u_volt)) | 57 | &opp->supplies[i].u_volt_min); |
| 58 | return false; | ||
| 59 | 58 | ||
| 60 | if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, | 59 | debugfs_create_ulong("u_volt_max", S_IRUGO, d, |
| 61 | &opp->supplies[i].u_volt_min)) | 60 | &opp->supplies[i].u_volt_max); |
| 62 | return false; | ||
| 63 | 61 | ||
| 64 | if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, | 62 | debugfs_create_ulong("u_amp", S_IRUGO, d, |
| 65 | &opp->supplies[i].u_volt_max)) | 63 | &opp->supplies[i].u_amp); |
| 66 | return false; | ||
| 67 | |||
| 68 | if (!debugfs_create_ulong("u_amp", S_IRUGO, d, | ||
| 69 | &opp->supplies[i].u_amp)) | ||
| 70 | return false; | ||
| 71 | } | 64 | } |
| 72 | |||
| 73 | return true; | ||
| 74 | } | 65 | } |
| 75 | 66 | ||
| 76 | int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) | 67 | void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) |
| 77 | { | 68 | { |
| 78 | struct dentry *pdentry = opp_table->dentry; | 69 | struct dentry *pdentry = opp_table->dentry; |
| 79 | struct dentry *d; | 70 | struct dentry *d; |
| @@ -95,40 +86,23 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) | |||
| 95 | 86 | ||
| 96 | /* Create per-opp directory */ | 87 | /* Create per-opp directory */ |
| 97 | d = debugfs_create_dir(name, pdentry); | 88 | d = debugfs_create_dir(name, pdentry); |
| 98 | if (!d) | ||
| 99 | return -ENOMEM; | ||
| 100 | |||
| 101 | if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available)) | ||
| 102 | return -ENOMEM; | ||
| 103 | |||
| 104 | if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic)) | ||
| 105 | return -ENOMEM; | ||
| 106 | |||
| 107 | if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo)) | ||
| 108 | return -ENOMEM; | ||
| 109 | |||
| 110 | if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend)) | ||
| 111 | return -ENOMEM; | ||
| 112 | |||
| 113 | if (!debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate)) | ||
| 114 | return -ENOMEM; | ||
| 115 | 89 | ||
| 116 | if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate)) | 90 | debugfs_create_bool("available", S_IRUGO, d, &opp->available); |
| 117 | return -ENOMEM; | 91 | debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic); |
| 92 | debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo); | ||
| 93 | debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend); | ||
| 94 | debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate); | ||
| 95 | debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate); | ||
| 96 | debugfs_create_ulong("clock_latency_ns", S_IRUGO, d, | ||
| 97 | &opp->clock_latency_ns); | ||
| 118 | 98 | ||
| 119 | if (!opp_debug_create_supplies(opp, opp_table, d)) | 99 | opp_debug_create_supplies(opp, opp_table, d); |
| 120 | return -ENOMEM; | ||
| 121 | |||
| 122 | if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d, | ||
| 123 | &opp->clock_latency_ns)) | ||
| 124 | return -ENOMEM; | ||
| 125 | 100 | ||
| 126 | opp->dentry = d; | 101 | opp->dentry = d; |
| 127 | return 0; | ||
| 128 | } | 102 | } |
| 129 | 103 | ||
| 130 | static int opp_list_debug_create_dir(struct opp_device *opp_dev, | 104 | static void opp_list_debug_create_dir(struct opp_device *opp_dev, |
| 131 | struct opp_table *opp_table) | 105 | struct opp_table *opp_table) |
| 132 | { | 106 | { |
| 133 | const struct device *dev = opp_dev->dev; | 107 | const struct device *dev = opp_dev->dev; |
| 134 | struct dentry *d; | 108 | struct dentry *d; |
| @@ -137,36 +111,21 @@ static int opp_list_debug_create_dir(struct opp_device *opp_dev, | |||
| 137 | 111 | ||
| 138 | /* Create device specific directory */ | 112 | /* Create device specific directory */ |
| 139 | d = debugfs_create_dir(opp_table->dentry_name, rootdir); | 113 | d = debugfs_create_dir(opp_table->dentry_name, rootdir); |
| 140 | if (!d) { | ||
| 141 | dev_err(dev, "%s: Failed to create debugfs dir\n", __func__); | ||
| 142 | return -ENOMEM; | ||
| 143 | } | ||
| 144 | 114 | ||
| 145 | opp_dev->dentry = d; | 115 | opp_dev->dentry = d; |
| 146 | opp_table->dentry = d; | 116 | opp_table->dentry = d; |
| 147 | |||
| 148 | return 0; | ||
| 149 | } | 117 | } |
| 150 | 118 | ||
| 151 | static int opp_list_debug_create_link(struct opp_device *opp_dev, | 119 | static void opp_list_debug_create_link(struct opp_device *opp_dev, |
| 152 | struct opp_table *opp_table) | 120 | struct opp_table *opp_table) |
| 153 | { | 121 | { |
| 154 | const struct device *dev = opp_dev->dev; | ||
| 155 | char name[NAME_MAX]; | 122 | char name[NAME_MAX]; |
| 156 | struct dentry *d; | ||
| 157 | 123 | ||
| 158 | opp_set_dev_name(opp_dev->dev, name); | 124 | opp_set_dev_name(opp_dev->dev, name); |
| 159 | 125 | ||
| 160 | /* Create device specific directory link */ | 126 | /* Create device specific directory link */ |
| 161 | d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name); | 127 | opp_dev->dentry = debugfs_create_symlink(name, rootdir, |
| 162 | if (!d) { | 128 | opp_table->dentry_name); |
| 163 | dev_err(dev, "%s: Failed to create link\n", __func__); | ||
| 164 | return -ENOMEM; | ||
| 165 | } | ||
| 166 | |||
| 167 | opp_dev->dentry = d; | ||
| 168 | |||
| 169 | return 0; | ||
| 170 | } | 129 | } |
| 171 | 130 | ||
| 172 | /** | 131 | /** |
| @@ -177,20 +136,13 @@ static int opp_list_debug_create_link(struct opp_device *opp_dev, | |||
| 177 | * Dynamically adds device specific directory in debugfs 'opp' directory. If the | 136 | * Dynamically adds device specific directory in debugfs 'opp' directory. If the |
| 178 | * device-opp is shared with other devices, then links will be created for all | 137 | * device-opp is shared with other devices, then links will be created for all |
| 179 | * devices except the first. | 138 | * devices except the first. |
| 180 | * | ||
| 181 | * Return: 0 on success, otherwise negative error. | ||
| 182 | */ | 139 | */ |
| 183 | int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table) | 140 | void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table) |
| 184 | { | 141 | { |
| 185 | if (!rootdir) { | ||
| 186 | pr_debug("%s: Uninitialized rootdir\n", __func__); | ||
| 187 | return -EINVAL; | ||
| 188 | } | ||
| 189 | |||
| 190 | if (opp_table->dentry) | 142 | if (opp_table->dentry) |
| 191 | return opp_list_debug_create_link(opp_dev, opp_table); | 143 | opp_list_debug_create_link(opp_dev, opp_table); |
| 192 | 144 | else | |
| 193 | return opp_list_debug_create_dir(opp_dev, opp_table); | 145 | opp_list_debug_create_dir(opp_dev, opp_table); |
| 194 | } | 146 | } |
| 195 | 147 | ||
| 196 | static void opp_migrate_dentry(struct opp_device *opp_dev, | 148 | static void opp_migrate_dentry(struct opp_device *opp_dev, |
| @@ -252,10 +204,6 @@ static int __init opp_debug_init(void) | |||
| 252 | { | 204 | { |
| 253 | /* Create /sys/kernel/debug/opp directory */ | 205 | /* Create /sys/kernel/debug/opp directory */ |
| 254 | rootdir = debugfs_create_dir("opp", NULL); | 206 | rootdir = debugfs_create_dir("opp", NULL); |
| 255 | if (!rootdir) { | ||
| 256 | pr_err("%s: Failed to create root directory\n", __func__); | ||
| 257 | return -ENOMEM; | ||
| 258 | } | ||
| 259 | 207 | ||
| 260 | return 0; | 208 | return 0; |
| 261 | } | 209 | } |
diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 1779f2c93291..62504b18f198 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/pm_domain.h> | 20 | #include <linux/pm_domain.h> |
| 21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
| 22 | #include <linux/export.h> | 22 | #include <linux/export.h> |
| 23 | #include <linux/energy_model.h> | ||
| 23 | 24 | ||
| 24 | #include "opp.h" | 25 | #include "opp.h" |
| 25 | 26 | ||
| @@ -1049,3 +1050,101 @@ struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) | |||
| 1049 | return of_node_get(opp->np); | 1050 | return of_node_get(opp->np); |
| 1050 | } | 1051 | } |
| 1051 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node); | 1052 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node); |
| 1053 | |||
| 1054 | /* | ||
| 1055 | * Callback function provided to the Energy Model framework upon registration. | ||
| 1056 | * This computes the power estimated by @CPU at @kHz if it is the frequency | ||
| 1057 | * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise | ||
| 1058 | * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled | ||
| 1059 | * frequency and @mW to the associated power. The power is estimated as | ||
| 1060 | * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively | ||
| 1061 | * the voltage and frequency of the OPP. | ||
| 1062 | * | ||
| 1063 | * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power | ||
| 1064 | * calculation failed because of missing parameters, 0 otherwise. | ||
| 1065 | */ | ||
| 1066 | static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, | ||
| 1067 | int cpu) | ||
| 1068 | { | ||
| 1069 | struct device *cpu_dev; | ||
| 1070 | struct dev_pm_opp *opp; | ||
| 1071 | struct device_node *np; | ||
| 1072 | unsigned long mV, Hz; | ||
| 1073 | u32 cap; | ||
| 1074 | u64 tmp; | ||
| 1075 | int ret; | ||
| 1076 | |||
| 1077 | cpu_dev = get_cpu_device(cpu); | ||
| 1078 | if (!cpu_dev) | ||
| 1079 | return -ENODEV; | ||
| 1080 | |||
| 1081 | np = of_node_get(cpu_dev->of_node); | ||
| 1082 | if (!np) | ||
| 1083 | return -EINVAL; | ||
| 1084 | |||
| 1085 | ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap); | ||
| 1086 | of_node_put(np); | ||
| 1087 | if (ret) | ||
| 1088 | return -EINVAL; | ||
| 1089 | |||
| 1090 | Hz = *kHz * 1000; | ||
| 1091 | opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz); | ||
| 1092 | if (IS_ERR(opp)) | ||
| 1093 | return -EINVAL; | ||
| 1094 | |||
| 1095 | mV = dev_pm_opp_get_voltage(opp) / 1000; | ||
| 1096 | dev_pm_opp_put(opp); | ||
| 1097 | if (!mV) | ||
| 1098 | return -EINVAL; | ||
| 1099 | |||
| 1100 | tmp = (u64)cap * mV * mV * (Hz / 1000000); | ||
| 1101 | do_div(tmp, 1000000000); | ||
| 1102 | |||
| 1103 | *mW = (unsigned long)tmp; | ||
| 1104 | *kHz = Hz / 1000; | ||
| 1105 | |||
| 1106 | return 0; | ||
| 1107 | } | ||
| 1108 | |||
| 1109 | /** | ||
| 1110 | * dev_pm_opp_of_register_em() - Attempt to register an Energy Model | ||
| 1111 | * @cpus : CPUs for which an Energy Model has to be registered | ||
| 1112 | * | ||
| 1113 | * This checks whether the "dynamic-power-coefficient" devicetree property has | ||
| 1114 | * been specified, and tries to register an Energy Model with it if it has. | ||
| 1115 | */ | ||
| 1116 | void dev_pm_opp_of_register_em(struct cpumask *cpus) | ||
| 1117 | { | ||
| 1118 | struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power); | ||
| 1119 | int ret, nr_opp, cpu = cpumask_first(cpus); | ||
| 1120 | struct device *cpu_dev; | ||
| 1121 | struct device_node *np; | ||
| 1122 | u32 cap; | ||
| 1123 | |||
| 1124 | cpu_dev = get_cpu_device(cpu); | ||
| 1125 | if (!cpu_dev) | ||
| 1126 | return; | ||
| 1127 | |||
| 1128 | nr_opp = dev_pm_opp_get_opp_count(cpu_dev); | ||
| 1129 | if (nr_opp <= 0) | ||
| 1130 | return; | ||
| 1131 | |||
| 1132 | np = of_node_get(cpu_dev->of_node); | ||
| 1133 | if (!np) | ||
| 1134 | return; | ||
| 1135 | |||
| 1136 | /* | ||
| 1137 | * Register an EM only if the 'dynamic-power-coefficient' property is | ||
| 1138 | * set in devicetree. It is assumed the voltage values are known if that | ||
| 1139 | * property is set since it is useless otherwise. If voltages are not | ||
| 1140 | * known, just let the EM registration fail with an error to alert the | ||
| 1141 | * user about the inconsistent configuration. | ||
| 1142 | */ | ||
| 1143 | ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap); | ||
| 1144 | of_node_put(np); | ||
| 1145 | if (ret || !cap) | ||
| 1146 | return; | ||
| 1147 | |||
| 1148 | em_register_perf_domain(cpus, nr_opp, &em_cb); | ||
| 1149 | } | ||
| 1150 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em); | ||
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index 4458175aa661..569b3525aa67 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h | |||
| @@ -238,18 +238,17 @@ static inline void _of_opp_free_required_opps(struct opp_table *opp_table, | |||
| 238 | 238 | ||
| 239 | #ifdef CONFIG_DEBUG_FS | 239 | #ifdef CONFIG_DEBUG_FS |
| 240 | void opp_debug_remove_one(struct dev_pm_opp *opp); | 240 | void opp_debug_remove_one(struct dev_pm_opp *opp); |
| 241 | int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table); | 241 | void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table); |
| 242 | int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table); | 242 | void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table); |
| 243 | void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table); | 243 | void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table); |
| 244 | #else | 244 | #else |
| 245 | static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {} | 245 | static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {} |
| 246 | 246 | ||
| 247 | static inline int opp_debug_create_one(struct dev_pm_opp *opp, | 247 | static inline void opp_debug_create_one(struct dev_pm_opp *opp, |
| 248 | struct opp_table *opp_table) | 248 | struct opp_table *opp_table) { } |
| 249 | { return 0; } | 249 | |
| 250 | static inline int opp_debug_register(struct opp_device *opp_dev, | 250 | static inline void opp_debug_register(struct opp_device *opp_dev, |
| 251 | struct opp_table *opp_table) | 251 | struct opp_table *opp_table) { } |
| 252 | { return 0; } | ||
| 253 | 252 | ||
| 254 | static inline void opp_debug_unregister(struct opp_device *opp_dev, | 253 | static inline void opp_debug_unregister(struct opp_device *opp_dev, |
| 255 | struct opp_table *opp_table) | 254 | struct opp_table *opp_table) |
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 6cdb2c14eee4..4347f15165f8 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c | |||
| @@ -1156,6 +1156,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { | |||
| 1156 | INTEL_CPU_FAM6(KABYLAKE_MOBILE, rapl_defaults_core), | 1156 | INTEL_CPU_FAM6(KABYLAKE_MOBILE, rapl_defaults_core), |
| 1157 | INTEL_CPU_FAM6(KABYLAKE_DESKTOP, rapl_defaults_core), | 1157 | INTEL_CPU_FAM6(KABYLAKE_DESKTOP, rapl_defaults_core), |
| 1158 | INTEL_CPU_FAM6(CANNONLAKE_MOBILE, rapl_defaults_core), | 1158 | INTEL_CPU_FAM6(CANNONLAKE_MOBILE, rapl_defaults_core), |
| 1159 | INTEL_CPU_FAM6(ICELAKE_MOBILE, rapl_defaults_core), | ||
| 1159 | 1160 | ||
| 1160 | INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt), | 1161 | INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt), |
| 1161 | INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht), | 1162 | INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht), |
| @@ -1164,6 +1165,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { | |||
| 1164 | INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core), | 1165 | INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core), |
| 1165 | INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core), | 1166 | INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core), |
| 1166 | INTEL_CPU_FAM6(ATOM_GOLDMONT_X, rapl_defaults_core), | 1167 | INTEL_CPU_FAM6(ATOM_GOLDMONT_X, rapl_defaults_core), |
| 1168 | INTEL_CPU_FAM6(ATOM_TREMONT_X, rapl_defaults_core), | ||
| 1167 | 1169 | ||
| 1168 | INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server), | 1170 | INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server), |
| 1169 | INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server), | 1171 | INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server), |
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 30323426902e..58bb7d72dc2b 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
| @@ -152,6 +152,7 @@ config CPU_THERMAL | |||
| 152 | bool "generic cpu cooling support" | 152 | bool "generic cpu cooling support" |
| 153 | depends on CPU_FREQ | 153 | depends on CPU_FREQ |
| 154 | depends on THERMAL_OF | 154 | depends on THERMAL_OF |
| 155 | depends on THERMAL=y | ||
| 155 | help | 156 | help |
| 156 | This implements the generic cpu cooling mechanism through frequency | 157 | This implements the generic cpu cooling mechanism through frequency |
| 157 | reduction. An ACPI version of this already exists | 158 | reduction. An ACPI version of this already exists |
