author	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-04 14:13:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-04 14:13:36 -0400
commit	8d91530c5fd7f0b1e8c4ddfea2905e55a178569b (patch)
tree	f211c693c00015d9f5d015dc162fffa676629d1c /drivers
parent	c145307a110c14d09d5d92ff3c49dc0940e44b80 (diff)
parent	9d1f44ee206a23b975d7d7c6f759efb25e0e61ac (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] Remove pointless printk from p4-clockmod.
  [CPUFREQ] Fix section mismatch for powernow_cpu_init in powernow-k7.c
  [CPUFREQ] Fix section mismatch for longhaul_cpu_init.
  [CPUFREQ] Fix section mismatch for longrun_cpu_init.
  [CPUFREQ] powernow-k8: Fix misleading variable naming
  [CPUFREQ] Convert pci_table entries to PCI_VDEVICE (if PCI_ANY_ID is used)
  [CPUFREQ] arch/x86/kernel/cpu/cpufreq: use for_each_pci_dev()
  [CPUFREQ] fix brace coding style issue.
  [CPUFREQ] x86 cpufreq: Make trace_power_frequency cpufreq driver independent
  [CPUFREQ] acpi-cpufreq: Fix CPU_ANY CPUFREQ_{PRE,POST}CHANGE notification
  [CPUFREQ] ondemand: don't synchronize sample rate unless multiple cpus present
  [CPUFREQ] unexport (un)lock_policy_rwsem* functions
  [CPUFREQ] ondemand: Refactor frequency increase code
  [CPUFREQ] powernow-k8: On load failure, remind the user to enable support in BIOS setup
  [CPUFREQ] powernow-k8: Limit Pstate transition latency check
  [CPUFREQ] Fix PCC driver error path
  [CPUFREQ] fix double freeing in error path of pcc-cpufreq
  [CPUFREQ] pcc driver should check for pcch method before calling _OSC
  [CPUFREQ] fix memory leak in cpufreq_add_dev
  [CPUFREQ] revert "[CPUFREQ] remove rwsem lock from CPUFREQ_GOV_STOP call (second call site)"

Manually fix up non-data merge conflict introduced by new calling conventions for trace_power_start() in commit 6f4f2723d085 ("x86 cpufreq: Make trace_power_frequency cpufreq driver independent"), which didn't update the intel_idle native hardware cpuidle driver.
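To make the fixed-up conflict concrete, here is a minimal before/after sketch of the tracepoint call sites involved, pieced together from the hunks below; the surrounding variable names (eax, cpu, freqs) belong to the drivers being patched and are shown only for illustration, not as a complete example:

	/* before: call sites as they appear on the removed side of the hunks below */
	trace_power_start(POWER_CSTATE, (eax >> 4) + 1);	/* intel_idle */
	trace_power_end(0);					/* cpuidle */

	/* after: callers pass the CPU id explicitly, and the cpufreq core emits
	 * its own frequency event in the POSTCHANGE notifier */
	trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
	trace_power_end(smp_processor_id());
	trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);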
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/cpufreq/cpufreq.c	18
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	33
-rw-r--r--	drivers/cpuidle/cpuidle.c	2
-rwxr-xr-x	drivers/idle/intel_idle.c	2
4 files changed, 29 insertions, 26 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 938b74ea9ffb..199dcb9f0b83 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -29,6 +29,8 @@
 #include <linux/completion.h>
 #include <linux/mutex.h>
 
+#include <trace/events/power.h>
+
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
 						"cpufreq-core", msg)
 
@@ -68,7 +70,7 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu)					\
-int lock_policy_rwsem_##mode						\
+static int lock_policy_rwsem_##mode					\
 (int cpu)								\
 {									\
 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
@@ -83,26 +85,22 @@ int lock_policy_rwsem_##mode \
 }
 
 lock_policy_rwsem(read, cpu);
-EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
 
 lock_policy_rwsem(write, cpu);
-EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
 
-void unlock_policy_rwsem_read(int cpu)
+static void unlock_policy_rwsem_read(int cpu)
 {
 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
 	BUG_ON(policy_cpu == -1);
 	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
 }
-EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
 
-void unlock_policy_rwsem_write(int cpu)
+static void unlock_policy_rwsem_write(int cpu)
 {
 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
 	BUG_ON(policy_cpu == -1);
 	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
 }
-EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
 
 
 /* internal prototypes */
@@ -354,6 +352,9 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 
 	case CPUFREQ_POSTCHANGE:
 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
+		dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
+			(unsigned long)freqs->cpu);
+		trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 				CPUFREQ_POSTCHANGE, freqs);
 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
@@ -1875,8 +1876,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata cpufreq_cpu_notifier =
-{
+static struct notifier_block __refdata cpufreq_cpu_notifier = {
 	.notifier_call = cpufreq_cpu_callback,
 };
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e1314212d8d4..7b5093664e49 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -459,6 +459,17 @@ static struct attribute_group dbs_attr_group_old = {
 
 /************************** sysfs end ************************/
 
+static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+{
+	if (dbs_tuners_ins.powersave_bias)
+		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
+	else if (p->cur == p->max)
+		return;
+
+	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
+			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+}
+
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
 	unsigned int max_load_freq;
@@ -551,19 +562,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 	/* Check for frequency increase */
 	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
-		/* if we are already at full speed then break out early */
-		if (!dbs_tuners_ins.powersave_bias) {
-			if (policy->cur == policy->max)
-				return;
-
-			__cpufreq_driver_target(policy, policy->max,
-					CPUFREQ_RELATION_H);
-		} else {
-			int freq = powersave_bias_target(policy, policy->max,
-					CPUFREQ_RELATION_H);
-			__cpufreq_driver_target(policy, freq,
-				CPUFREQ_RELATION_L);
-		}
+		dbs_freq_increase(policy, policy->max);
 		return;
 	}
 
@@ -610,7 +609,9 @@ static void do_dbs_timer(struct work_struct *work)
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
-	delay -= jiffies % delay;
+	if (num_online_cpus() > 1)
+		delay -= jiffies % delay;
+
 	mutex_lock(&dbs_info->timer_mutex);
 
 	/* Common NORMAL_SAMPLE setup */
@@ -635,7 +636,9 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 {
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-	delay -= jiffies % delay;
+
+	if (num_online_cpus() > 1)
+		delay -= jiffies % delay;
 
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
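The two ondemand hunks above skip the jiffy alignment when only one CPU is online. A stand-alone sketch of the arithmetic (user-space C with made-up numbers, not part of the patch) shows what the alignment buys when several CPUs run the same timer:

	#include <stdio.h>

	int main(void)
	{
		unsigned long jiffies = 1003;	/* hypothetical current tick count */
		unsigned long delay = 10;	/* hypothetical sampling interval in jiffies */

		/* Rounding the delay down to the next multiple of 'delay' makes every
		 * CPU that does the same computation fire on the same jiffy, which is
		 * only worth the shortened interval when more than one CPU is online. */
		unsigned long aligned = delay - (jiffies % delay);

		printf("unaligned: fires at jiffy %lu\n", jiffies + delay);	/* 1013 */
		printf("aligned:   fires at jiffy %lu\n", jiffies + aligned);	/* 1010 */
		return 0;
	}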
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 199488576a05..dbefe15bd582 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -95,7 +95,7 @@ static void cpuidle_idle_call(void)
 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
 		cpuidle_curr_governor->reflect(dev);
-	trace_power_end(0);
+	trace_power_end(smp_processor_id());
 }
 
 /**
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 54f0fb4cd5d2..03d202b1ff27 100755
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -231,7 +231,7 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
 
 	stop_critical_timings();
 #ifndef MODULE
-	trace_power_start(POWER_CSTATE, (eax >> 4) + 1);
+	trace_power_start(POWER_CSTATE, (eax >> 4) + 1, cpu);
 #endif
 	if (!need_resched()) {
 