path: root/drivers/cpufreq
author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2017-03-27 18:17:10 -0400
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2017-03-28 17:12:16 -0400
commit     67dd9bf4416305811d58280dbe108d78ab573d56 (patch)
tree       1e7189f9830f45309cac43aa6d97cbadbe6297f4 /drivers/cpufreq
parent     eabd22c657f1d23c714f536b859a22a0f22ac7f5 (diff)
cpufreq: intel_pstate: Add update_util callback to pstate_funcs
Avoid using extra function pointers during P-state selection by dropping the get_target_pstate member from struct pstate_funcs, adding a new update_util callback to it (to be registered with the CPU scheduler as the utilization update callback in the active mode) and reworking the utilization update callback routines to invoke specific P-state selection functions directly.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
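For context, the core of the change is replacing the indirect get_target_pstate() hook with a per-CPU-model update_util callback that invokes its P-state selection routine directly. The standalone C sketch below illustrates only that dispatch pattern; the names mirror the driver, but the types and return values are simplified, hypothetical stand-ins rather than the kernel's definitions, and in the driver the callback is registered with the scheduler via cpufreq_add_update_util_hook(), as the hunks further down show.

/*
 * Illustration only: a userspace stand-in for the callback dispatch
 * pattern introduced by this patch. Each "CPU model" supplies a complete
 * update_util callback, which calls its P-state selection routine
 * directly instead of going through an extra function pointer.
 */
#include <stdint.h>
#include <stdio.h>

struct update_util_data;        /* opaque stand-in for the scheduler hook type */

struct pstate_funcs {
        /* active-mode utilization update callback, chosen per CPU model */
        void (*update_util)(struct update_util_data *data, uint64_t time,
                            unsigned int flags);
};

/* Placeholder selection routines returning made-up P-state numbers. */
static int32_t get_target_pstate_use_performance(void) { return 28; }
static int32_t get_target_pstate_use_cpu_load(void)    { return 16; }

/* PID-style callback (Core/KNL-like models in this sketch) */
static void update_util_pid(struct update_util_data *data, uint64_t time,
                            unsigned int flags)
{
        printf("pid callback -> P-state %d\n",
               (int)get_target_pstate_use_performance());
}

/* Load-based callback (Atom/BXT-like models in this sketch) */
static void update_util_load(struct update_util_data *data, uint64_t time,
                             unsigned int flags)
{
        printf("load callback -> P-state %d\n",
               (int)get_target_pstate_use_cpu_load());
}

int main(void)
{
        /* Chosen once per CPU model; invoked with no further dispatch. */
        struct pstate_funcs funcs = { .update_util = update_util_pid };

        funcs.update_util(NULL, 0, 0);

        funcs.update_util = update_util_load;   /* e.g. an Atom-like model */
        funcs.update_util(NULL, 0, 0);
        return 0;
}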
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 81
1 file changed, 43 insertions(+), 38 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ca7bc19bf10b..68ede1006b07 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -311,7 +311,7 @@ struct pstate_adjust_policy {
  * @get_scaling:       Callback to get frequency scaling factor
  * @get_val:           Callback to convert P state to actual MSR write value
  * @get_vid:           Callback to get VID data for Atom platforms
- * @get_target_pstate: Callback to a function to calculate next P state to use
+ * @update_util:        Active mode utilization update callback.
  *
  * Core and Atom CPU models have different way to get P State limits. This
  * structure is used to store those callbacks.
@@ -324,7 +324,8 @@ struct pstate_funcs {
         int (*get_scaling)(void);
         u64 (*get_val)(struct cpudata*, int pstate);
         void (*get_vid)(struct cpudata *);
-        int32_t (*get_target_pstate)(struct cpudata *);
+        void (*update_util)(struct update_util_data *data, u64 time,
+                            unsigned int flags);
 };
 
 /**
@@ -335,9 +336,6 @@ struct cpu_defaults {
         struct pstate_funcs funcs;
 };
 
-static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
-static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);
-
 static struct pstate_funcs pstate_funcs __read_mostly;
 static struct pstate_adjust_policy pid_params __read_mostly = {
         .sample_rate_ms = 10,
@@ -1510,6 +1508,11 @@ static int knl_get_turbo_pstate(void)
         return ret;
 }
 
+static void intel_pstate_update_util_pid(struct update_util_data *data,
+                                         u64 time, unsigned int flags);
+static void intel_pstate_update_util(struct update_util_data *data, u64 time,
+                                     unsigned int flags);
+
 static struct cpu_defaults core_params = {
         .funcs = {
                 .get_max = core_get_max_pstate,
@@ -1518,7 +1521,7 @@ static struct cpu_defaults core_params = {
                 .get_turbo = core_get_turbo_pstate,
                 .get_scaling = core_get_scaling,
                 .get_val = core_get_val,
-                .get_target_pstate = get_target_pstate_use_performance,
+                .update_util = intel_pstate_update_util_pid,
         },
 };
 
@@ -1531,7 +1534,7 @@ static const struct cpu_defaults silvermont_params = {
                 .get_val = atom_get_val,
                 .get_scaling = silvermont_get_scaling,
                 .get_vid = atom_get_vid,
-                .get_target_pstate = get_target_pstate_use_cpu_load,
+                .update_util = intel_pstate_update_util,
         },
 };
 
@@ -1544,7 +1547,7 @@ static const struct cpu_defaults airmont_params = {
                 .get_val = atom_get_val,
                 .get_scaling = airmont_get_scaling,
                 .get_vid = atom_get_vid,
-                .get_target_pstate = get_target_pstate_use_cpu_load,
+                .update_util = intel_pstate_update_util,
         },
 };
 
@@ -1556,7 +1559,7 @@ static const struct cpu_defaults knl_params = {
                 .get_turbo = knl_get_turbo_pstate,
                 .get_scaling = core_get_scaling,
                 .get_val = core_get_val,
-                .get_target_pstate = get_target_pstate_use_performance,
+                .update_util = intel_pstate_update_util_pid,
         },
 };
 
@@ -1568,7 +1571,7 @@ static const struct cpu_defaults bxt_params = {
                 .get_turbo = core_get_turbo_pstate,
                 .get_scaling = core_get_scaling,
                 .get_val = core_get_val,
-                .get_target_pstate = get_target_pstate_use_cpu_load,
+                .update_util = intel_pstate_update_util,
         },
 };
 
@@ -1704,6 +1707,9 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
         int32_t busy_frac, boost;
         int target, avg_pstate;
 
+        if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
+                return cpu->pstate.turbo_pstate;
+
         busy_frac = div_fp(sample->mperf, sample->tsc);
 
         boost = cpu->iowait_boost;
@@ -1740,6 +1746,9 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
         int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
         u64 duration_ns;
 
+        if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE)
+                return cpu->pstate.turbo_pstate;
+
         /*
          * perf_scaled is the ratio of the average P-state during the last
          * sampling period to the P-state requested last time (in percent).
@@ -1790,16 +1799,11 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
         wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
 }
 
-static void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
+static void intel_pstate_adjust_pstate(struct cpudata *cpu, int target_pstate)
 {
-        int from, target_pstate;
+        int from = cpu->pstate.current_pstate;
         struct sample *sample;
 
-        from = cpu->pstate.current_pstate;
-
-        target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
-                cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
-
         update_turbo_state();
 
         target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
@@ -1837,8 +1841,12 @@ static void intel_pstate_update_util_pid(struct update_util_data *data,
         if ((s64)delta_ns < pid_params.sample_rate_ns)
                 return;
 
-        if (intel_pstate_sample(cpu, time))
-                intel_pstate_adjust_busy_pstate(cpu);
+        if (intel_pstate_sample(cpu, time)) {
+                int target_pstate;
+
+                target_pstate = get_target_pstate_use_performance(cpu);
+                intel_pstate_adjust_pstate(cpu, target_pstate);
+        }
 }
 
 static void intel_pstate_update_util(struct update_util_data *data, u64 time,
@@ -1860,13 +1868,13 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
         if ((s64)delta_ns < INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL)
                 return;
 
-        if (intel_pstate_sample(cpu, time))
-                intel_pstate_adjust_busy_pstate(cpu);
-}
+        if (intel_pstate_sample(cpu, time)) {
+                int target_pstate;
 
-/* Utilization update callback to register in the active mode. */
-static void (*update_util_cb)(struct update_util_data *data, u64 time,
-                              unsigned int flags) = intel_pstate_update_util;
+                target_pstate = get_target_pstate_use_cpu_load(cpu);
+                intel_pstate_adjust_pstate(cpu, target_pstate);
+        }
+}
 
 #define ICPU(model, policy) \
         { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
@@ -1938,7 +1946,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
                         intel_pstate_disable_ee(cpunum);
 
                 intel_pstate_hwp_enable(cpu);
-        } else if (pstate_funcs.get_target_pstate == get_target_pstate_use_performance) {
+        } else if (pstate_funcs.update_util == intel_pstate_update_util_pid) {
                 intel_pstate_pid_reset(cpu);
         }
 
@@ -1965,7 +1973,8 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
 
         /* Prevent intel_pstate_update_util() from using stale data. */
         cpu->sample.time = 0;
-        cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, update_util_cb);
+        cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
+                                     pstate_funcs.update_util);
         cpu->update_util_set = true;
 }
 
@@ -2318,7 +2327,7 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
         global.min_perf_pct = min_perf_pct_min();
 
         if (intel_pstate_driver == &intel_pstate && !hwp_active &&
-            pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
+            pstate_funcs.update_util == intel_pstate_update_util_pid)
                 intel_pstate_debug_expose_params();
 
         return 0;
@@ -2329,8 +2338,8 @@ static int intel_pstate_unregister_driver(void)
         if (hwp_active)
                 return -EBUSY;
 
-        if (intel_pstate_driver == &intel_pstate && !hwp_active &&
-            pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
+        if (intel_pstate_driver == &intel_pstate &&
+            pstate_funcs.update_util == intel_pstate_update_util_pid)
                 intel_pstate_debug_hide_params();
 
         cpufreq_unregister_driver(intel_pstate_driver);
@@ -2409,8 +2418,7 @@ static void intel_pstate_use_acpi_profile(void)
         case PM_APPLIANCE_PC:
         case PM_DESKTOP:
         case PM_WORKSTATION:
-                pstate_funcs.get_target_pstate =
-                                get_target_pstate_use_cpu_load;
+                pstate_funcs.update_util = intel_pstate_update_util;
         }
 }
 #else
@@ -2428,12 +2436,9 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
         pstate_funcs.get_scaling = funcs->get_scaling;
         pstate_funcs.get_val = funcs->get_val;
         pstate_funcs.get_vid = funcs->get_vid;
-        pstate_funcs.get_target_pstate = funcs->get_target_pstate;
+        pstate_funcs.update_util = funcs->update_util;
 
         intel_pstate_use_acpi_profile();
-
-        if (pstate_funcs.get_target_pstate == get_target_pstate_use_performance)
-                update_util_cb = intel_pstate_update_util_pid;
 }
 
 #ifdef CONFIG_ACPI
@@ -2578,11 +2583,11 @@ static int __init intel_pstate_init(void)
         if (x86_match_cpu(hwp_support_ids)) {
                 copy_cpu_funcs(&core_params.funcs);
                 if (no_hwp) {
-                        update_util_cb = intel_pstate_update_util;
+                        pstate_funcs.update_util = intel_pstate_update_util;
                 } else {
                         hwp_active++;
                         intel_pstate.attr = hwp_cpufreq_attrs;
-                        update_util_cb = intel_pstate_update_util_hwp;
+                        pstate_funcs.update_util = intel_pstate_update_util_hwp;
                         goto hwp_cpu_matched;
                 }
         } else {