-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 260
-rw-r--r--  include/asm-generic/cputime.h      |   2
-rw-r--r--  include/linux/workqueue.h          |   2
-rw-r--r--  kernel/workqueue.c                 |  57
4 files changed, 127 insertions, 194 deletions
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 693e540481b4..87299924e735 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -12,22 +12,11 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ctype.h>
 #include <linux/cpufreq.h>
-#include <linux/sysctl.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/sysfs.h>
 #include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/kmod.h>
-#include <linux/workqueue.h>
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
-#include <linux/percpu.h>
 #include <linux/mutex.h>
 
 /*
@@ -56,16 +45,15 @@ static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
 #define MAX_SAMPLING_RATE (500 * def_sampling_rate)
 #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
-#define DEF_SAMPLING_DOWN_FACTOR (1)
-#define MAX_SAMPLING_DOWN_FACTOR (10)
 #define TRANSITION_LATENCY_LIMIT (10 * 1000)
 
 static void do_dbs_timer(void *data);
 
 struct cpu_dbs_info_s {
+	cputime64_t prev_cpu_idle;
+	cputime64_t prev_cpu_wall;
 	struct cpufreq_policy *cur_policy;
-	unsigned int prev_cpu_idle_up;
-	unsigned int prev_cpu_idle_down;
+	struct work_struct work;
 	unsigned int enable;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
@@ -80,31 +68,32 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
  * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
  * is recursive for the same process. -Venki
  */
-static DEFINE_MUTEX (dbs_mutex);
+static DEFINE_MUTEX(dbs_mutex);
-static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
-static struct workqueue_struct *dbs_workq;
+static struct workqueue_struct *kondemand_wq;
 
 struct dbs_tuners {
 	unsigned int sampling_rate;
-	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
 	unsigned int ignore_nice;
 };
 
 static struct dbs_tuners dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
-	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
 	.ignore_nice = 0,
 };
 
-static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 {
-	return kstat_cpu(cpu).cpustat.idle +
-		kstat_cpu(cpu).cpustat.iowait +
-		( dbs_tuners_ins.ignore_nice ?
-			kstat_cpu(cpu).cpustat.nice :
-			0);
+	cputime64_t retval;
+
+	retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
+			kstat_cpu(cpu).cpustat.iowait);
+
+	if (dbs_tuners_ins.ignore_nice)
+		retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+
+	return retval;
 }
 
 /************************** sysfs interface ************************/
@@ -133,35 +122,15 @@ static ssize_t show_##file_name \
 	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
 }
 show_one(sampling_rate, sampling_rate);
-show_one(sampling_down_factor, sampling_down_factor);
 show_one(up_threshold, up_threshold);
 show_one(ignore_nice_load, ignore_nice);
 
-static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
-		const char *buf, size_t count)
-{
-	unsigned int input;
-	int ret;
-	ret = sscanf (buf, "%u", &input);
-	if (ret != 1 )
-		return -EINVAL;
-
-	if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
-		return -EINVAL;
-
-	mutex_lock(&dbs_mutex);
-	dbs_tuners_ins.sampling_down_factor = input;
-	mutex_unlock(&dbs_mutex);
-
-	return count;
-}
-
 static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
@@ -180,7 +149,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 {
 	unsigned int input;
 	int ret;
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
@@ -203,7 +172,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 
 	unsigned int j;
 
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 	if ( ret != 1 )
 		return -EINVAL;
 
@@ -217,12 +186,12 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	}
 	dbs_tuners_ins.ignore_nice = input;
 
-	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
+	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
-		struct cpu_dbs_info_s *j_dbs_info;
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
-		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
+		struct cpu_dbs_info_s *dbs_info;
+		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
+		dbs_info->prev_cpu_wall = get_jiffies_64();
 	}
 	mutex_unlock(&dbs_mutex);
 
@@ -234,7 +203,6 @@ static struct freq_attr _name = \
 __ATTR(_name, 0644, show_##_name, store_##_name)
 
 define_one_rw(sampling_rate);
-define_one_rw(sampling_down_factor);
 define_one_rw(up_threshold);
 define_one_rw(ignore_nice_load);
 
@@ -242,7 +210,6 @@ static struct attribute * dbs_attributes[] = {
 	&sampling_rate_max.attr,
 	&sampling_rate_min.attr,
 	&sampling_rate.attr,
-	&sampling_down_factor.attr,
 	&up_threshold.attr,
 	&ignore_nice_load.attr,
 	NULL
@@ -255,26 +222,27 @@ static struct attribute_group dbs_attr_group = {
 
 /************************** sysfs end ************************/
 
-static void dbs_check_cpu(int cpu)
+static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
-	unsigned int idle_ticks, up_idle_ticks, total_ticks;
-	unsigned int freq_next;
-	unsigned int freq_down_sampling_rate;
-	static int down_skip[NR_CPUS];
-	struct cpu_dbs_info_s *this_dbs_info;
+	unsigned int idle_ticks, total_ticks;
+	unsigned int load;
+	cputime64_t cur_jiffies;
 
 	struct cpufreq_policy *policy;
 	unsigned int j;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	if (!this_dbs_info->enable)
 		return;
 
 	policy = this_dbs_info->cur_policy;
+	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
+			this_dbs_info->prev_cpu_wall);
+	this_dbs_info->prev_cpu_wall = cur_jiffies;
 	/*
 	 * Every sampling_rate, we check, if current idle time is less
 	 * than 20% (default), then we try to increase frequency
-	 * Every sampling_rate*sampling_down_factor, we look for a the lowest
+	 * Every sampling_rate, we look for a the lowest
 	 * frequency which can sustain the load while keeping idle time over
 	 * 30%. If such a frequency exist, we try to decrease to this frequency.
 	 *
@@ -283,36 +251,26 @@ static void dbs_check_cpu(int cpu)
 	 * 5% (default) of current frequency
 	 */
 
-	/* Check for frequency increase */
+	/* Get Idle Time */
 	idle_ticks = UINT_MAX;
 	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks, total_idle_ticks;
+		cputime64_t total_idle_ticks;
+		unsigned int tmp_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
 
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
 		total_idle_ticks = get_cpu_idle_time(j);
-		tmp_idle_ticks = total_idle_ticks -
-			j_dbs_info->prev_cpu_idle_up;
-		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+		tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
+				j_dbs_info->prev_cpu_idle);
+		j_dbs_info->prev_cpu_idle = total_idle_ticks;
 
 		if (tmp_idle_ticks < idle_ticks)
 			idle_ticks = tmp_idle_ticks;
 	}
+	load = (100 * (total_ticks - idle_ticks)) / total_ticks;
 
-	/* Scale idle ticks by 100 and compare with up and down ticks */
-	idle_ticks *= 100;
-	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
-
-	if (idle_ticks < up_idle_ticks) {
-		down_skip[cpu] = 0;
-		for_each_cpu_mask(j, policy->cpus) {
-			struct cpu_dbs_info_s *j_dbs_info;
-
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
-			j_dbs_info->prev_cpu_idle_down =
-				j_dbs_info->prev_cpu_idle_up;
-		}
+	/* Check for frequency increase */
+	if (load > dbs_tuners_ins.up_threshold) {
 		/* if we are already at full speed then break out early */
 		if (policy->cur == policy->max)
 			return;
@@ -323,83 +281,49 @@ static void dbs_check_cpu(int cpu)
 	}
 
 	/* Check for frequency decrease */
-	down_skip[cpu]++;
-	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
-		return;
-
-	idle_ticks = UINT_MAX;
-	for_each_cpu_mask(j, policy->cpus) {
-		unsigned int tmp_idle_ticks, total_idle_ticks;
-		struct cpu_dbs_info_s *j_dbs_info;
-
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		/* Check for frequency decrease */
-		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
-		tmp_idle_ticks = total_idle_ticks -
-			j_dbs_info->prev_cpu_idle_down;
-		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
-		if (tmp_idle_ticks < idle_ticks)
-			idle_ticks = tmp_idle_ticks;
-	}
-
-	down_skip[cpu] = 0;
 	/* if we cannot reduce the frequency anymore, break out early */
 	if (policy->cur == policy->min)
 		return;
 
-	/* Compute how many ticks there are between two measurements */
-	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
-		dbs_tuners_ins.sampling_down_factor;
-	total_ticks = usecs_to_jiffies(freq_down_sampling_rate);
-
 	/*
 	 * The optimal frequency is the frequency that is the lowest that
 	 * can support the current CPU usage without triggering the up
 	 * policy. To be safe, we focus 10 points under the threshold.
 	 */
-	freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
-	freq_next = (freq_next * policy->cur) /
+	if (load < (dbs_tuners_ins.up_threshold - 10)) {
+		unsigned int freq_next;
+		freq_next = (policy->cur * load) /
 			(dbs_tuners_ins.up_threshold - 10);
 
-	if (freq_next < policy->min)
-		freq_next = policy->min;
-
-	if (freq_next <= ((policy->cur * 95) / 100))
 		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
+	}
 }
 
 static void do_dbs_timer(void *data)
 {
-	int i;
-	lock_cpu_hotplug();
-	mutex_lock(&dbs_mutex);
-	for_each_online_cpu(i)
-		dbs_check_cpu(i);
-	queue_delayed_work(dbs_workq, &dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	mutex_unlock(&dbs_mutex);
-	unlock_cpu_hotplug();
+	unsigned int cpu = smp_processor_id();
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+	dbs_check_cpu(dbs_info);
+	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
+			      usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 }
 
-static inline void dbs_timer_init(void)
+static inline void dbs_timer_init(unsigned int cpu)
 {
-	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
-	if (!dbs_workq)
-		dbs_workq = create_singlethread_workqueue("ondemand");
-	if (!dbs_workq) {
-		printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n");
-		return;
-	}
-	queue_delayed_work(dbs_workq, &dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+	INIT_WORK(&dbs_info->work, do_dbs_timer, 0);
+	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
+			      usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
 }
 
-static inline void dbs_timer_exit(void)
+static inline void dbs_timer_exit(unsigned int cpu)
 {
-	if (dbs_workq)
-		cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+	cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -413,8 +337,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
-		if ((!cpu_online(cpu)) ||
-				(!policy->cur))
+		if ((!cpu_online(cpu)) || (!policy->cur))
 			return -EINVAL;
 
 		if (policy->cpuinfo.transition_latency >
@@ -427,18 +350,26 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			break;
 
 		mutex_lock(&dbs_mutex);
+		dbs_enable++;
+		if (dbs_enable == 1) {
+			kondemand_wq = create_workqueue("kondemand");
+			if (!kondemand_wq) {
+				printk(KERN_ERR "Creation of kondemand failed\n");
+				dbs_enable--;
+				mutex_unlock(&dbs_mutex);
+				return -ENOSPC;
+			}
+		}
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
-			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
-			j_dbs_info->prev_cpu_idle_down
-				= j_dbs_info->prev_cpu_idle_up;
+			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
+			j_dbs_info->prev_cpu_wall = get_jiffies_64();
 		}
 		this_dbs_info->enable = 1;
 		sysfs_create_group(&policy->kobj, &dbs_attr_group);
-		dbs_enable++;
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
@@ -457,23 +388,20 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			def_sampling_rate = MIN_STAT_SAMPLING_RATE;
 
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
-			dbs_timer_init();
 		}
+		dbs_timer_init(policy->cpu);
 
 		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_STOP:
 		mutex_lock(&dbs_mutex);
+		dbs_timer_exit(policy->cpu);
 		this_dbs_info->enable = 0;
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
-		/*
-		 * Stop the timerschedule work, when this governor
-		 * is used for first time
-		 */
 		if (dbs_enable == 0)
-			dbs_timer_exit();
+			destroy_workqueue(kondemand_wq);
 
 		mutex_unlock(&dbs_mutex);
 
@@ -483,13 +411,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
-			__cpufreq_driver_target(
-					this_dbs_info->cur_policy,
-					policy->max, CPUFREQ_RELATION_H);
+			__cpufreq_driver_target(this_dbs_info->cur_policy,
+						policy->max,
+						CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
-			__cpufreq_driver_target(
-					this_dbs_info->cur_policy,
-					policy->min, CPUFREQ_RELATION_L);
+			__cpufreq_driver_target(this_dbs_info->cur_policy,
+						policy->min,
+						CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
 		unlock_cpu_hotplug();
 		break;
@@ -498,9 +426,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 }
 
 static struct cpufreq_governor cpufreq_gov_dbs = {
-	.name		= "ondemand",
-	.governor	= cpufreq_governor_dbs,
-	.owner		= THIS_MODULE,
+	.name = "ondemand",
+	.governor = cpufreq_governor_dbs,
+	.owner = THIS_MODULE,
 };
 
 static int __init cpufreq_gov_dbs_init(void)
@@ -510,21 +438,15 @@ static int __init cpufreq_gov_dbs_init(void)
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
-	/* Make sure that the scheduled work is indeed not running.
-	   Assumes the timer has been cancelled first. */
-	if (dbs_workq) {
-		flush_workqueue(dbs_workq);
-		destroy_workqueue(dbs_workq);
-	}
-
 	cpufreq_unregister_governor(&cpufreq_gov_dbs);
 }
 
 
-MODULE_AUTHOR ("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
-MODULE_DESCRIPTION ("'cpufreq_ondemand' - A dynamic cpufreq governor for "
-		"Low Latency Frequency Transition capable processors");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
+MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
+MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
+                   "Low Latency Frequency Transition capable processors");
+MODULE_LICENSE("GPL");
 
 module_init(cpufreq_gov_dbs_init);
 module_exit(cpufreq_gov_dbs_exit);
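
Note on the rewritten sampling math above: the governor now keeps one wall-clock timestamp and one idle-time counter per CPU as cputime64_t and reduces each sampling window to a single 0-100 load figure. The sketch below distills that arithmetic; it is illustrative only (the standalone helper name and its parameters are not part of the patch), and the cputime64_* wrappers it refers to are the plain 64-bit macros added to asm-generic/cputime.h further down.

/* Illustrative only -- distilled from the dbs_check_cpu() rewrite above.
 * wall_delta and idle_delta stand in for the per-sample deltas the governor
 * obtains via cputime64_sub(); on asm-generic both are plain 64-bit counts. */
static unsigned int ondemand_load_estimate(unsigned long long wall_delta,
					   unsigned long long idle_delta)
{
	unsigned int total_ticks = (unsigned int) wall_delta;
	unsigned int idle_ticks = (unsigned int) idle_delta;

	if (!total_ticks || idle_ticks > total_ticks)
		return 0;	/* degenerate sampling window */

	/* share of the window spent non-idle, 0..100 */
	return (100 * (total_ticks - idle_ticks)) / total_ticks;
}

With that load figure, the new code jumps straight to policy->max when load exceeds up_threshold, and once load drops below up_threshold - 10 it requests freq_next = policy->cur * load / (up_threshold - 10), i.e. the lowest frequency expected to keep the load just under the threshold.
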
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 6f178563e336..09204e40d663 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -24,7 +24,9 @@ typedef u64 cputime64_t;
 
 #define cputime64_zero (0ULL)
 #define cputime64_add(__a, __b)		((__a) + (__b))
+#define cputime64_sub(__a, __b)		((__a) - (__b))
 #define cputime64_to_jiffies64(__ct)	(__ct)
+#define jiffies64_to_cputime64(__jif)	(__jif)
 #define cputime_to_cputime64(__ct)	((u64) __ct)
 
 
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 957c21c16d62..9bca3539a1e5 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -63,6 +63,8 @@ extern void destroy_workqueue(struct workqueue_struct *wq);
 
 extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
 extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+		struct work_struct *work, unsigned long delay);
 extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 90d2c6001659..eebb1d839235 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -114,6 +114,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 	put_cpu();
 	return ret;
 }
+EXPORT_SYMBOL_GPL(queue_work);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
@@ -147,6 +148,29 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(queue_delayed_work);
+
+int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+			struct work_struct *work, unsigned long delay)
+{
+	int ret = 0;
+	struct timer_list *timer = &work->timer;
+
+	if (!test_and_set_bit(0, &work->pending)) {
+		BUG_ON(timer_pending(timer));
+		BUG_ON(!list_empty(&work->entry));
+
+		/* This stores wq for the moment, for the timer_fn */
+		work->wq_data = wq;
+		timer->expires = jiffies + delay;
+		timer->data = (unsigned long)work;
+		timer->function = delayed_work_timer_fn;
+		add_timer_on(timer, cpu);
+		ret = 1;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
@@ -281,6 +305,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 		unlock_cpu_hotplug();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_workqueue);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 						   int cpu)
@@ -358,6 +383,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 	return wq;
 }
+EXPORT_SYMBOL_GPL(__create_workqueue);
 
 static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
 {
@@ -395,6 +421,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
 }
+EXPORT_SYMBOL_GPL(destroy_workqueue);
 
 static struct workqueue_struct *keventd_wq;
 
@@ -402,31 +429,20 @@ int fastcall schedule_work(struct work_struct *work)
 {
 	return queue_work(keventd_wq, work);
 }
+EXPORT_SYMBOL(schedule_work);
 
 int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
 {
 	return queue_delayed_work(keventd_wq, work, delay);
 }
+EXPORT_SYMBOL(schedule_delayed_work);
 
 int schedule_delayed_work_on(int cpu,
 			struct work_struct *work, unsigned long delay)
 {
-	int ret = 0;
-	struct timer_list *timer = &work->timer;
-
-	if (!test_and_set_bit(0, &work->pending)) {
-		BUG_ON(timer_pending(timer));
-		BUG_ON(!list_empty(&work->entry));
-		/* This stores keventd_wq for the moment, for the timer_fn */
-		work->wq_data = keventd_wq;
-		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
-		timer->function = delayed_work_timer_fn;
-		add_timer_on(timer, cpu);
-		ret = 1;
-	}
-	return ret;
+	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
 }
+EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
  * schedule_on_each_cpu - call a function on each online CPU from keventd
@@ -463,6 +479,7 @@ void flush_scheduled_work(void)
 {
 	flush_workqueue(keventd_wq);
 }
+EXPORT_SYMBOL(flush_scheduled_work);
 
 /**
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
@@ -619,13 +636,3 @@ void init_workqueues(void)
 	BUG_ON(!keventd_wq);
 }
 
-EXPORT_SYMBOL_GPL(__create_workqueue);
-EXPORT_SYMBOL_GPL(queue_work);
-EXPORT_SYMBOL_GPL(queue_delayed_work);
-EXPORT_SYMBOL_GPL(flush_workqueue);
-EXPORT_SYMBOL_GPL(destroy_workqueue);
-
-EXPORT_SYMBOL(schedule_work);
-EXPORT_SYMBOL(schedule_delayed_work);
-EXPORT_SYMBOL(schedule_delayed_work_on);
-EXPORT_SYMBOL(flush_scheduled_work);
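
Usage note: the pattern the ondemand rework relies on -- and the reason queue_delayed_work_on() is now a first-class, exported API -- is a per-CPU delayed work item that re-queues itself on the CPU it runs on. A minimal sketch against this 2.6.x workqueue interface follows; the my_wq/my_work names, the work body, and the 10-jiffy delay are placeholders, not part of the patch.

#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static struct workqueue_struct *my_wq;	/* e.g. from create_workqueue("my_wq") */
static DEFINE_PER_CPU(struct work_struct, my_work);

/* Runs on the CPU it was queued on, then re-arms itself there. */
static void my_work_fn(void *data)
{
	unsigned int cpu = smp_processor_id();

	/* ... per-CPU sampling or housekeeping goes here ... */

	queue_delayed_work_on(cpu, my_wq, &per_cpu(my_work, cpu), 10);
}

static void my_work_start(unsigned int cpu)
{
	INIT_WORK(&per_cpu(my_work, cpu), my_work_fn, NULL);
	queue_delayed_work_on(cpu, my_wq, &per_cpu(my_work, cpu), 10);
}

static void my_work_stop(unsigned int cpu)
{
	/* Blocks until the work item neither runs nor re-queues itself. */
	cancel_rearming_delayed_workqueue(my_wq, &per_cpu(my_work, cpu));
}

With the new helper in place, schedule_delayed_work_on() simply forwards to queue_delayed_work_on() with keventd_wq, so drivers that want their own worker thread (as kondemand does) and those content with keventd now share one queueing path.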