author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2018-06-13 05:04:16 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2018-06-13 05:04:16 -0400
commit	2652df3af7317ebd88cd4493a0683e52d5199694 (patch)
tree	ec98d2ebad43c3467e449900eb33646d0e8948ad
parent	b06c0b2f087ab498d51d50f5ae353133b602f614 (diff)
parent	0aa9abd4c212fc1cd111cc0a9fc571f0d86e63cf (diff)
Merge branch 'pm-cpufreq'
Additional cpufreq updates for 4.18-rc1: fixes and cleanups in the core
and drivers and intel_pstate extension to do iowait boosting on systems
with HWP that improves performance quite a bit.

* pm-cpufreq:
  cpufreq: imx6q: check speed grades for i.MX6ULL
  cpufreq: governors: Fix long idle detection logic in load calculation
  cpufreq: intel_pstate: enable boost for Skylake Xeon
  cpufreq: intel_pstate: New sysfs entry to control HWP boost
  cpufreq: intel_pstate: HWP boost performance on IO wakeup
  cpufreq: intel_pstate: Add HWP boost utility and sched util hooks
  cpufreq: ti-cpufreq: Use devres managed API in probe()
  cpufreq: ti-cpufreq: Fix an incorrect error return value
  cpufreq: ACPI: make function acpi_cpufreq_fast_switch() static
  cpufreq: kryo: allow building as a loadable module
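As a quick reference for this series: the knob added by "cpufreq: intel_pstate: New sysfs entry to control HWP boost" is created by intel_pstate_sysfs_expose_params() as /sys/devices/system/cpu/intel_pstate/hwp_dynamic_boost when HWP is active. The sketch below is not part of the series; it shows one way to toggle the knob from user space, assuming root privileges and intel_pstate running in active mode on an HWP-capable system.

/*
 * Minimal user-space sketch for flipping the hwp_dynamic_boost knob.
 * Assumes the sysfs attribute exists (intel_pstate in active mode with
 * HWP) and that we run as root; the written value is parsed by
 * store_hwp_dynamic_boost() via kstrtouint().
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/intel_pstate/hwp_dynamic_boost";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);	/* "1" enables IO-wakeup boosting, "0" disables it */
	fclose(f);
	return 0;
}

Reading the same file back returns the current setting ("0" or "1") via show_hwp_dynamic_boost().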
-rw-r--r--	drivers/cpufreq/Kconfig.arm	2
-rw-r--r--	drivers/cpufreq/acpi-cpufreq.c	4
-rw-r--r--	drivers/cpufreq/cpufreq_governor.c	12
-rw-r--r--	drivers/cpufreq/imx6q-cpufreq.c	29
-rw-r--r--	drivers/cpufreq/intel_pstate.c	179
-rw-r--r--	drivers/cpufreq/ti-cpufreq.c	7
6 files changed, 209 insertions, 24 deletions
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index c7ce928fbf1f..52f5f1a2040c 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -125,7 +125,7 @@ config ARM_OMAP2PLUS_CPUFREQ
 	default ARCH_OMAP2PLUS
 
 config ARM_QCOM_CPUFREQ_KRYO
-	bool "Qualcomm Kryo based CPUFreq"
+	tristate "Qualcomm Kryo based CPUFreq"
 	depends on ARM64
 	depends on QCOM_QFPROM
 	depends on QCOM_SMEM
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 9449657d72f0..32ba4bc972e7 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -465,8 +465,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	return result;
 }
 
-unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
-				      unsigned int target_freq)
+static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
+					     unsigned int target_freq)
 {
 	struct acpi_cpufreq_data *data = policy->driver_data;
 	struct acpi_processor_performance *perf;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 871bf9cf55cf..1d50e97d49f1 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -165,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 			 * calls, so the previous load value can be used then.
 			 */
 			load = j_cdbs->prev_load;
-		} else if (unlikely(time_elapsed > 2 * sampling_rate &&
+		} else if (unlikely((int)idle_time > 2 * sampling_rate &&
 				    j_cdbs->prev_load)) {
 			/*
 			 * If the CPU had gone completely idle and a task has
@@ -185,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 			 * clear prev_load to guarantee that the load will be
 			 * computed again next time.
 			 *
-			 * Detecting this situation is easy: the governor's
-			 * utilization update handler would not have run during
-			 * CPU-idle periods. Hence, an unusually large
-			 * 'time_elapsed' (as compared to the sampling rate)
+			 * Detecting this situation is easy: an unusually large
+			 * 'idle_time' (as compared to the sampling rate)
 			 * indicates this scenario.
 			 */
 			load = j_cdbs->prev_load;
@@ -217,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 			j_cdbs->prev_load = load;
 		}
 
-		if (time_elapsed > 2 * sampling_rate) {
-			unsigned int periods = time_elapsed / sampling_rate;
+		if (unlikely((int)idle_time > 2 * sampling_rate)) {
+			unsigned int periods = idle_time / sampling_rate;
 
 			if (periods < idle_periods)
 				idle_periods = periods;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 83cf631fc9bc..f094687cae52 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -266,6 +266,8 @@ put_node:
 }
 
 #define OCOTP_CFG3_6UL_SPEED_696MHZ	0x2
+#define OCOTP_CFG3_6ULL_SPEED_792MHZ	0x2
+#define OCOTP_CFG3_6ULL_SPEED_900MHZ	0x3
 
 static void imx6ul_opp_check_speed_grading(struct device *dev)
 {
@@ -287,16 +289,30 @@ static void imx6ul_opp_check_speed_grading(struct device *dev)
 	 * Speed GRADING[1:0] defines the max speed of ARM:
 	 *	2b'00: Reserved;
 	 *	2b'01: 528000000Hz;
-	 *	2b'10: 696000000Hz;
-	 *	2b'11: Reserved;
+	 *	2b'10: 696000000Hz on i.MX6UL, 792000000Hz on i.MX6ULL;
+	 *	2b'11: 900000000Hz on i.MX6ULL only;
 	 * We need to set the max speed of ARM according to fuse map.
 	 */
 	val = readl_relaxed(base + OCOTP_CFG3);
 	val >>= OCOTP_CFG3_SPEED_SHIFT;
 	val &= 0x3;
-	if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
-		if (dev_pm_opp_disable(dev, 696000000))
-			dev_warn(dev, "failed to disable 696MHz OPP\n");
+
+	if (of_machine_is_compatible("fsl,imx6ul")) {
+		if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
+			if (dev_pm_opp_disable(dev, 696000000))
+				dev_warn(dev, "failed to disable 696MHz OPP\n");
+	}
+
+	if (of_machine_is_compatible("fsl,imx6ull")) {
+		if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
+			if (dev_pm_opp_disable(dev, 792000000))
+				dev_warn(dev, "failed to disable 792MHz OPP\n");
+
+		if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
+			if (dev_pm_opp_disable(dev, 900000000))
+				dev_warn(dev, "failed to disable 900MHz OPP\n");
+	}
+
 	iounmap(base);
 put_node:
 	of_node_put(np);
@@ -356,7 +372,8 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
 		goto put_reg;
 	}
 
-	if (of_machine_is_compatible("fsl,imx6ul"))
+	if (of_machine_is_compatible("fsl,imx6ul") ||
+	    of_machine_is_compatible("fsl,imx6ull"))
 		imx6ul_opp_check_speed_grading(cpu_dev);
 	else
 		imx6q_opp_check_speed_grading(cpu_dev);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 08960a55eb27..352d5b2d5b58 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -221,6 +221,11 @@ struct global_params {
  *			preference/bias
  * @epp_saved:		Saved EPP/EPB during system suspend or CPU offline
  *			operation
+ * @hwp_req_cached:	Cached value of the last HWP Request MSR
+ * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
+ * @last_io_update:	Last time when IO wake flag was set
+ * @sched_flags:	Store scheduler flags for possible cross CPU update
+ * @hwp_boost_min:	Last HWP boosted min performance
  *
  * This structure stores per CPU instance data for all CPUs.
  */
@@ -253,6 +258,11 @@ struct cpudata {
 	s16 epp_policy;
 	s16 epp_default;
 	s16 epp_saved;
+	u64 hwp_req_cached;
+	u64 hwp_cap_cached;
+	u64 last_io_update;
+	unsigned int sched_flags;
+	u32 hwp_boost_min;
 };
 
 static struct cpudata **all_cpu_data;
@@ -285,6 +295,7 @@ static struct pstate_funcs pstate_funcs __read_mostly;
 
 static int hwp_active __read_mostly;
 static bool per_cpu_limits __read_mostly;
+static bool hwp_boost __read_mostly;
 
 static struct cpufreq_driver *intel_pstate_driver __read_mostly;
 
@@ -689,6 +700,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
 	u64 cap;
 
 	rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+	WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
 	if (global.no_turbo)
 		*current_max = HWP_GUARANTEED_PERF(cap);
 	else
@@ -763,6 +775,7 @@ update_epp:
 		intel_pstate_set_epb(cpu, epp);
 	}
 skip_epp:
+	WRITE_ONCE(cpu_data->hwp_req_cached, value);
 	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
 }
 
@@ -1020,6 +1033,30 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 	return count;
 }
 
+static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
+				struct attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", hwp_boost);
+}
+
+static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+				       const char *buf, size_t count)
+{
+	unsigned int input;
+	int ret;
+
+	ret = kstrtouint(buf, 10, &input);
+	if (ret)
+		return ret;
+
+	mutex_lock(&intel_pstate_driver_lock);
+	hwp_boost = !!input;
+	intel_pstate_update_policies();
+	mutex_unlock(&intel_pstate_driver_lock);
+
+	return count;
+}
+
 show_one(max_perf_pct, max_perf_pct);
 show_one(min_perf_pct, min_perf_pct);
 
@@ -1029,6 +1066,7 @@ define_one_global_rw(max_perf_pct);
 define_one_global_rw(min_perf_pct);
 define_one_global_ro(turbo_pct);
 define_one_global_ro(num_pstates);
+define_one_global_rw(hwp_dynamic_boost);
 
 static struct attribute *intel_pstate_attributes[] = {
 	&status.attr,
@@ -1069,6 +1107,11 @@ static void __init intel_pstate_sysfs_expose_params(void)
 	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
 	WARN_ON(rc);
 
+	if (hwp_active) {
+		rc = sysfs_create_file(intel_pstate_kobject,
+				       &hwp_dynamic_boost.attr);
+		WARN_ON(rc);
+	}
 }
 /************************** sysfs end ************************/
 
@@ -1381,6 +1424,116 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 	intel_pstate_set_min_pstate(cpu);
 }
 
+/*
+ * Long hold time will keep high perf limits for long time,
+ * which negatively impacts perf/watt for some workloads,
+ * like specpower. 3ms is based on experiments on some
+ * workloads.
+ */
+static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
+
+static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
+{
+	u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
+	u32 max_limit = (hwp_req & 0xff00) >> 8;
+	u32 min_limit = (hwp_req & 0xff);
+	u32 boost_level1;
+
+	/*
+	 * Cases to consider (User changes via sysfs or boot time):
+	 * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
+	 *	No boost, return.
+	 * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
+	 *	Should result in one level boost only for P0.
+	 * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
+	 *	Should result in two level boost:
+	 *		(min + p1)/2 and P1.
+	 * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
+	 *	Should result in three level boost:
+	 *		(min + p1)/2, P1 and P0.
+	 */
+
+	/* If max and min are equal or already at max, nothing to boost */
+	if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
+		return;
+
+	if (!cpu->hwp_boost_min)
+		cpu->hwp_boost_min = min_limit;
+
+	/* level at halfway mark between min and guaranteed */
+	boost_level1 = (HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) + min_limit) >> 1;
+
+	if (cpu->hwp_boost_min < boost_level1)
+		cpu->hwp_boost_min = boost_level1;
+	else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
+		cpu->hwp_boost_min = HWP_GUARANTEED_PERF(cpu->hwp_cap_cached);
+	else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(cpu->hwp_cap_cached) &&
+		 max_limit != HWP_GUARANTEED_PERF(cpu->hwp_cap_cached))
+		cpu->hwp_boost_min = max_limit;
+	else
+		return;
+
+	hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
+	wrmsrl(MSR_HWP_REQUEST, hwp_req);
+	cpu->last_update = cpu->sample.time;
+}
+
+static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
+{
+	if (cpu->hwp_boost_min) {
+		bool expired;
+
+		/* Check if we are idle for hold time to boost down */
+		expired = time_after64(cpu->sample.time, cpu->last_update +
+				       hwp_boost_hold_time_ns);
+		if (expired) {
+			wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
+			cpu->hwp_boost_min = 0;
+		}
+	}
+	cpu->last_update = cpu->sample.time;
+}
+
+static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
+						      u64 time)
+{
+	cpu->sample.time = time;
+
+	if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
+		bool do_io = false;
+
+		cpu->sched_flags = 0;
+		/*
+		 * Set iowait_boost flag and update time. Since IO WAIT flag
+		 * is set all the time, we can't just conclude that there is
+		 * some IO bound activity scheduled on this CPU with just
+		 * one occurrence. If we receive at least two in two
+		 * consecutive ticks, then we treat it as a boost candidate.
+		 */
+		if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
+			do_io = true;
+
+		cpu->last_io_update = time;
+
+		if (do_io)
+			intel_pstate_hwp_boost_up(cpu);
+
+	} else {
+		intel_pstate_hwp_boost_down(cpu);
+	}
+}
+
+static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
+						u64 time, unsigned int flags)
+{
+	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
+
+	cpu->sched_flags |= flags;
+
+	if (smp_processor_id() == cpu->cpu)
+		intel_pstate_update_util_hwp_local(cpu, time);
+}
+
 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
 {
 	struct sample *sample = &cpu->sample;
@@ -1641,6 +1794,12 @@ static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
 	{}
 };
 
+static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
+	ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
+	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, core_funcs),
+	{}
+};
+
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
 	struct cpudata *cpu;
@@ -1671,6 +1830,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 		intel_pstate_disable_ee(cpunum);
 
 		intel_pstate_hwp_enable(cpu);
+
+		id = x86_match_cpu(intel_pstate_hwp_boost_ids);
+		if (id)
+			hwp_boost = true;
 	}
 
 	intel_pstate_get_cpu_pstates(cpu);
@@ -1684,7 +1847,7 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
 {
 	struct cpudata *cpu = all_cpu_data[cpu_num];
 
-	if (hwp_active)
+	if (hwp_active && !hwp_boost)
 		return;
 
 	if (cpu->update_util_set)
@@ -1693,7 +1856,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
 	/* Prevent intel_pstate_update_util() from using stale data. */
 	cpu->sample.time = 0;
 	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
-				     intel_pstate_update_util);
+				     (hwp_active ?
+				      intel_pstate_update_util_hwp :
+				      intel_pstate_update_util));
 	cpu->update_util_set = true;
 }
 
@@ -1805,8 +1970,16 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 		intel_pstate_set_update_util_hook(policy->cpu);
 	}
 
-	if (hwp_active)
+	if (hwp_active) {
+		/*
+		 * If hwp_boost was active before and has been turned
+		 * off dynamically, we need to clear the update util
+		 * hook here.
+		 */
+		if (!hwp_boost)
+			intel_pstate_clear_update_util_hook(policy->cpu);
 		intel_pstate_hwp_set(policy->cpu);
+	}
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 6ba709b6f095..3f0e2a14895a 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -217,7 +217,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
 	if (!match)
 		return -ENODEV;
 
-	opp_data = kzalloc(sizeof(*opp_data), GFP_KERNEL);
+	opp_data = devm_kzalloc(&pdev->dev, sizeof(*opp_data), GFP_KERNEL);
 	if (!opp_data)
 		return -ENOMEM;
 
@@ -226,8 +226,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
 	opp_data->cpu_dev = get_cpu_device(0);
 	if (!opp_data->cpu_dev) {
 		pr_err("%s: Failed to get device for CPU0\n", __func__);
-		ret = ENODEV;
-		goto free_opp_data;
+		return -ENODEV;
 	}
 
 	opp_data->opp_node = dev_pm_opp_of_get_opp_desc_node(opp_data->cpu_dev);
@@ -285,8 +284,6 @@ register_cpufreq_dt:
 
 fail_put_node:
 	of_node_put(opp_data->opp_node);
-free_opp_data:
-	kfree(opp_data);
 
 	return ret;
 }