-rw-r--r--  drivers/cpufreq/Kconfig.x86    |  1
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 56
2 files changed, 56 insertions(+), 1 deletion(-)
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index adbd1de1cea5..c6d273b43ff9 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -6,6 +6,7 @@ config X86_INTEL_PSTATE
 	bool "Intel P state control"
 	depends on X86
 	select ACPI_PROCESSOR if ACPI
+	select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_ITMT
 	help
 	  This driver provides a P state for Intel core processors.
 	  The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4737520ec823..e8dc42fc0915 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -44,6 +44,7 @@
 
 #ifdef CONFIG_ACPI
 #include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
 #endif
 
 #define FRAC_BITS 8
@@ -379,14 +380,67 @@ static bool intel_pstate_get_ppc_enable_status(void)
 	return acpi_ppc;
 }
 
+#ifdef CONFIG_ACPI_CPPC_LIB
+
+/* The work item is needed to avoid CPU hotplug locking issues */
+static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
+{
+	sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
+
+static void intel_pstate_set_itmt_prio(int cpu)
+{
+	struct cppc_perf_caps cppc_perf;
+	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
+	int ret;
+
+	ret = cppc_get_perf_caps(cpu, &cppc_perf);
+	if (ret)
+		return;
+
+	/*
+	 * The priorities can be set regardless of whether or not
+	 * sched_set_itmt_support(true) has been called and it is valid to
+	 * update them at any time after it has been called.
+	 */
+	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
+
+	if (max_highest_perf <= min_highest_perf) {
+		if (cppc_perf.highest_perf > max_highest_perf)
+			max_highest_perf = cppc_perf.highest_perf;
+
+		if (cppc_perf.highest_perf < min_highest_perf)
+			min_highest_perf = cppc_perf.highest_perf;
+
+		if (max_highest_perf > min_highest_perf) {
+			/*
+			 * This code can be run during CPU online under the
+			 * CPU hotplug locks, so sched_set_itmt_support()
+			 * cannot be called from here. Queue up a work item
+			 * to invoke it.
+			 */
+			schedule_work(&sched_itmt_work);
+		}
+	}
+}
+#else
+static void intel_pstate_set_itmt_prio(int cpu)
+{
+}
+#endif
+
 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;
 	int ret;
 	int i;
 
-	if (hwp_active)
+	if (hwp_active) {
+		intel_pstate_set_itmt_prio(policy->cpu);
 		return;
+	}
 
 	if (!intel_pstate_get_ppc_enable_status())
 		return;
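
Note on the logic above: the patch enables ITMT scheduling support only after CPUs with differing CPPC highest_perf values have been observed, and it defers the actual sched_set_itmt_support() call to a work item because intel_pstate_set_itmt_prio() may run under the CPU hotplug locks. The standalone C sketch below (not part of the commit; the perf values are hypothetical) reproduces just the max/min tracking that drives that decision:

/*
 * Illustrative userspace sketch, not kernel code: mirrors the
 * max/min highest_perf tracking in intel_pstate_set_itmt_prio().
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t max_highest_perf;               /* starts at 0 */
static uint32_t min_highest_perf = UINT32_MAX;
static int itmt_enabled;

static void account_cpu_highest_perf(uint32_t highest_perf)
{
	/* Once asymmetry has been seen, there is nothing more to learn. */
	if (max_highest_perf > min_highest_perf)
		return;

	if (highest_perf > max_highest_perf)
		max_highest_perf = highest_perf;
	if (highest_perf < min_highest_perf)
		min_highest_perf = highest_perf;

	/* Enable ITMT only when CPUs report different highest_perf values. */
	if (max_highest_perf > min_highest_perf)
		itmt_enabled = 1;
}

int main(void)
{
	/* Hypothetical CPPC highest_perf values for four CPUs. */
	const uint32_t perf[] = { 38, 38, 42, 38 };

	for (unsigned int i = 0; i < sizeof(perf) / sizeof(perf[0]); i++) {
		account_cpu_highest_perf(perf[i]);
		printf("cpu%u: highest_perf=%" PRIu32 " itmt=%s\n",
		       i, perf[i], itmt_enabled ? "enabled" : "off");
	}
	return 0;
}

In the driver itself the equivalent state lives in the static locals of intel_pstate_set_itmt_prio(), and the "enable" step is schedule_work(&sched_itmt_work) rather than a flag, so that sched_set_itmt_support() runs outside the hotplug-locked context.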