author     Dave Jones <davej@redhat.com>   2006-12-12 17:41:41 -0500
committer  Dave Jones <davej@redhat.com>   2006-12-12 17:41:41 -0500
commit     c4366889dda8110247be59ca41fddb82951a8c26 (patch)
tree       705c1a996bed8fd48ce94ff33ec9fd00f9b94875 /drivers/cpufreq
parent     db2fb9db5735cc532fd4fc55e94b9a3c3750378e (diff)
parent     e1036502e5263851259d147771226161e5ccc85a (diff)

Merge ../linus

Conflicts:

	drivers/cpufreq/cpufreq.c
Diffstat (limited to 'drivers/cpufreq')
 drivers/cpufreq/Kconfig                 |  1
 drivers/cpufreq/cpufreq.c               | 17
 drivers/cpufreq/cpufreq_conservative.c  |  7
 drivers/cpufreq/cpufreq_ondemand.c      | 28
 4 files changed, 29 insertions(+), 24 deletions(-)
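This merge pulls the cpufreq code forward onto the reworked workqueue API: work callbacks no longer receive a void *data argument, they receive the work_struct itself and recover the enclosing object with container_of(), and periodic work moves to struct delayed_work. The snippet below is a minimal userspace sketch of that callback pattern only; handle_update_sketch, cpufreq_policy_sketch and the local container_of macro are illustrative stand-ins, not the kernel definitions.

/* Minimal userspace sketch (not kernel code) of the container_of pattern
 * the reworked work callbacks rely on: the handler gets only the embedded
 * work item and walks back to the object that contains it. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {                    /* stand-in for the kernel type */
        void (*func)(struct work_struct *work);
};

struct cpufreq_policy_sketch {          /* hypothetical stand-in for struct cpufreq_policy */
        unsigned int cpu;
        struct work_struct update;      /* work item embedded in the policy */
};

static void handle_update_sketch(struct work_struct *work)
{
        /* No void *data any more: recover the containing policy from the
         * work member, then read the CPU number out of it. */
        struct cpufreq_policy_sketch *policy =
                container_of(work, struct cpufreq_policy_sketch, update);
        printf("handle_update for cpu %u called\n", policy->cpu);
}

int main(void)
{
        struct cpufreq_policy_sketch policy = { .cpu = 0 };
        policy.update.func = handle_update_sketch;
        policy.update.func(&policy.update);     /* what the workqueue would do */
        return 0;
}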
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2cc71b66231e..491779af8d55 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -107,6 +107,7 @@ config CPU_FREQ_GOV_USERSPACE
 
 config CPU_FREQ_GOV_ONDEMAND
 	tristate "'ondemand' cpufreq policy governor"
+	select CPU_FREQ_TABLE
 	help
 	  'ondemand' - This driver adds a dynamic cpufreq policy governor.
 	  The governor does a periodic polling and
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0c18ac2fe7c2..9fb2edf36611 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -42,9 +42,8 @@ static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
 static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
 /* internal prototypes */
-static int __cpufreq_governor(struct cpufreq_policy *policy,
-		unsigned int event);
-static void handle_update(void *data);
+static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
+static void handle_update(struct work_struct *work);
 
 /**
  * Two notifier lists: the "policy" list is involved in the
@@ -61,7 +60,7 @@ static int __init init_cpufreq_transition_notifier_list(void)
 	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
 	return 0;
 }
-core_initcall(init_cpufreq_transition_notifier_list);
+pure_initcall(init_cpufreq_transition_notifier_list);
 
 static LIST_HEAD(cpufreq_governor_list);
 static DEFINE_MUTEX (cpufreq_governor_mutex);
@@ -695,7 +694,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	mutex_init(&policy->lock);
 	mutex_lock(&policy->lock);
 	init_completion(&policy->kobj_unregister);
-	INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
+	INIT_WORK(&policy->update, handle_update);
 
 	/* call driver. From then on the cpufreq must be able
 	 * to accept all calls to ->verify and ->setpolicy for this CPU
@@ -925,9 +924,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 }
 
 
-static void handle_update(void *data)
+static void handle_update(struct work_struct *work)
 {
-	unsigned int cpu = (unsigned int)(long)data;
+	struct cpufreq_policy *policy =
+		container_of(work, struct cpufreq_policy, update);
+	unsigned int cpu = policy->cpu;
 	dprintk("handle_update for cpu %u called\n", cpu);
 	cpufreq_update_policy(cpu);
 }
@@ -1599,7 +1600,6 @@ int cpufreq_update_policy(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int cpufreq_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
@@ -1639,7 +1639,6 @@ static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
 {
 	.notifier_call = cpufreq_cpu_callback,
 };
-#endif	/* CONFIG_HOTPLUG_CPU */
 
 /*********************************************************************
  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 29905b4bf8c8..eef0270c6f3d 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -61,7 +61,7 @@ static unsigned int def_sampling_rate;
 #define MAX_SAMPLING_DOWN_FACTOR	(10)
 #define TRANSITION_LATENCY_LIMIT	(10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
 	struct cpufreq_policy *cur_policy;
@@ -84,7 +84,7 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
  * is recursive for the same process. -Venki
  */
 static DEFINE_MUTEX (dbs_mutex);
-static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
+static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
 struct dbs_tuners {
 	unsigned int sampling_rate;
@@ -427,7 +427,7 @@ static void dbs_check_cpu(int cpu)
 	}
 }
 
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 {
 	int i;
 	lock_cpu_hotplug();
@@ -442,7 +442,6 @@ static void do_dbs_timer(void *data)
 
 static inline void dbs_timer_init(void)
 {
-	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 048ec8b1f406..f697449327c6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -49,13 +49,17 @@ static unsigned int def_sampling_rate;
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
 #define TRANSITION_LATENCY_LIMIT	(10 * 1000)
 
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
+
+/* Sampling types */
+enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_idle;
 	cputime64_t prev_cpu_wall;
 	struct cpufreq_policy *cur_policy;
-	struct work_struct work;
+	struct delayed_work work;
+	enum dbs_sample sample_type;
 	unsigned int enable;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
@@ -417,30 +421,31 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	}
 }
 
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
 {
 	unsigned int cpu = smp_processor_id();
 	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	enum dbs_sample sample_type = dbs_info->sample_type;
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+	/* Permit rescheduling of this work item */
+	work_release(work);
+
 	delay -= jiffies % delay;
 
 	if (!dbs_info->enable)
 		return;
 	/* Common NORMAL_SAMPLE setup */
-	INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
+	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	if (!dbs_tuners_ins.powersave_bias ||
-	    (unsigned long) data == DBS_NORMAL_SAMPLE) {
+	    sample_type == DBS_NORMAL_SAMPLE) {
 		lock_cpu_hotplug();
 		dbs_check_cpu(dbs_info);
 		unlock_cpu_hotplug();
 		if (dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
-			INIT_WORK(&dbs_info->work, do_dbs_timer,
-				  (void *)DBS_SUB_SAMPLE);
+			dbs_info->sample_type = DBS_SUB_SAMPLE;
 			delay = dbs_info->freq_hi_jiffies;
 		}
 	} else {
@@ -459,7 +464,8 @@ static inline void dbs_timer_init(unsigned int cpu)
 	delay -= jiffies % delay;
 
 	ondemand_powersave_bias_init();
-	INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
+	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 }
 
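A second effect of the conversion, visible in the ondemand hunks above, is that the sampling type that used to be smuggled through the callback's data pointer now lives in the per-CPU cpu_dbs_info_s and is rewritten by the handler itself before the delayed work is re-queued. The sketch below is a hedged userspace illustration of just that state handover; do_dbs_timer_sketch and dbs_info_sketch are hypothetical stand-ins, and the powersave_bias check and the real re-queueing are deliberately omitted.

/* Userspace sketch (assumed names, not kernel code) of the sample-type
 * state machine: read the type the previous run left behind, reset to
 * NORMAL, and arm a SUB_SAMPLE pass when a low-frequency phase is due. */
#include <stdio.h>

enum dbs_sample { DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE };

struct dbs_info_sketch {
	enum dbs_sample sample_type;  /* replaces the old (void *)data argument */
	int freq_lo;                  /* nonzero => a SUB_SAMPLE follow-up is needed */
};

static void do_dbs_timer_sketch(struct dbs_info_sketch *info)
{
	enum dbs_sample sample_type = info->sample_type;

	/* Common NORMAL_SAMPLE setup, as in the real handler */
	info->sample_type = DBS_NORMAL_SAMPLE;
	if (sample_type == DBS_NORMAL_SAMPLE) {
		printf("normal sample\n");
		if (info->freq_lo)
			/* next tick should run the intermediate low-freq pass */
			info->sample_type = DBS_SUB_SAMPLE;
	} else {
		printf("sub sample at freq_lo\n");
	}
}

int main(void)
{
	struct dbs_info_sketch info = { DBS_NORMAL_SAMPLE, 1 };
	do_dbs_timer_sketch(&info);   /* first tick: normal sample, arms SUB_SAMPLE */
	do_dbs_timer_sketch(&info);   /* second tick: sub sample */
	return 0;
}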