Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq.c               | 85
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  |  6
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c         | 42
3 files changed, 108 insertions, 25 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index cfe1d0a2262d..25acf478c9e8 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -4,6 +4,9 @@
  *  Copyright (C) 2001 Russell King
  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
  *
+ *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
+ *	       Added handling for CPU hotplug
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -36,13 +39,6 @@ static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
 static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
 
-/* we keep a copy of all ->add'ed CPU's struct sys_device here;
- * as it is only accessed in ->add and ->remove, no lock or reference
- * count is necessary.
- */
-static struct sys_device *cpu_sys_devices[NR_CPUS];
-
-
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
 static void handle_update(void *data);
@@ -574,6 +570,9 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	unsigned long flags;
 	unsigned int j;
 
+	if (cpu_is_offline(cpu))
+		return 0;
+
 	cpufreq_debug_disable_ratelimit();
 	dprintk("adding CPU %u\n", cpu);
 
@@ -582,7 +581,6 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	 * CPU because it is in the same boat. */
 	policy = cpufreq_cpu_get(cpu);
 	if (unlikely(policy)) {
-		cpu_sys_devices[cpu] = sys_dev;
 		dprintk("CPU already managed, adding link\n");
 		sysfs_create_link(&sys_dev->kobj, &policy->kobj, "cpufreq");
 		cpufreq_debug_enable_ratelimit();
@@ -656,7 +654,6 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	}
 
 	module_put(cpufreq_driver->owner);
-	cpu_sys_devices[cpu] = sys_dev;
 	dprintk("initialization complete\n");
 	cpufreq_debug_enable_ratelimit();
 
@@ -681,7 +678,7 @@ err_out:
 
 nomem_out:
 	module_put(cpufreq_driver->owner);
- module_out:
+module_out:
 	cpufreq_debug_enable_ratelimit();
 	return ret;
 }
@@ -697,6 +694,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	unsigned int cpu = sys_dev->id;
 	unsigned long flags;
 	struct cpufreq_policy *data;
+	struct sys_device *cpu_sys_dev;
 #ifdef CONFIG_SMP
 	unsigned int j;
 #endif
@@ -709,7 +707,6 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 
 	if (!data) {
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		cpu_sys_devices[cpu] = NULL;
 		cpufreq_debug_enable_ratelimit();
 		return -EINVAL;
 	}
@@ -724,14 +721,12 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 		dprintk("removing link\n");
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
-		cpu_sys_devices[cpu] = NULL;
 		cpufreq_cpu_put(data);
 		cpufreq_debug_enable_ratelimit();
 		return 0;
 	}
 #endif
 
-	cpu_sys_devices[cpu] = NULL;
 
 	if (!kobject_get(&data->kobj)) {
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -760,7 +755,8 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 			if (j == cpu)
 				continue;
 			dprintk("removing link for cpu %u\n", j);
-			sysfs_remove_link(&cpu_sys_devices[j]->kobj, "cpufreq");
+			cpu_sys_dev = get_cpu_sysdev(j);
+			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
 			cpufreq_cpu_put(data);
 		}
 	}
@@ -771,7 +767,6 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	down(&data->lock);
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
-	cpufreq_driver->target = NULL;
 	up(&data->lock);
 
 	kobject_unregister(&data->kobj);
@@ -1118,17 +1113,30 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int relation)
 {
 	int retval = -EINVAL;
-	lock_cpu_hotplug();
+
+	/*
+	 * Converted the lock_cpu_hotplug to preempt_disable()
+	 * and preempt_enable(). This is a bit kludgy and relies on how cpu
+	 * hotplug works. All we need is a guarantee that cpu hotplug won't make
+	 * progress on any cpu. Once we do preempt_disable(), this would ensure
+	 * that hotplug threads don't get onto this cpu, thereby delaying
+	 * the cpu remove process.
+	 *
+	 * We removed the lock_cpu_hotplug since we need to call this function
+	 * via cpu hotplug callbacks, which result in locking the cpu hotplug
+	 * thread itself. Agree this is not very clean, cpufreq community
+	 * could improve this if required. - Ashok Raj <ashok.raj@intel.com>
+	 */
+	preempt_disable();
 	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
 		target_freq, relation);
 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
-	unlock_cpu_hotplug();
+	preempt_enable();
 	return retval;
 }
 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
 
-
 int cpufreq_driver_target(struct cpufreq_policy *policy,
 			  unsigned int target_freq,
 			  unsigned int relation)
@@ -1415,6 +1423,45 @@ int cpufreq_update_policy(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
+static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	struct cpufreq_policy *policy;
+	struct sys_device *sys_dev;
+
+	sys_dev = get_cpu_sysdev(cpu);
+
+	if (sys_dev) {
+		switch (action) {
+		case CPU_ONLINE:
+			cpufreq_add_dev(sys_dev);
+			break;
+		case CPU_DOWN_PREPARE:
+			/*
+			 * We attempt to put this cpu in lowest frequency
+			 * possible before going down. This will permit
+			 * hardware-managed P-State to switch other related
+			 * threads to min or higher speeds if possible.
+			 */
+			policy = cpufreq_cpu_data[cpu];
+			if (policy) {
+				cpufreq_driver_target(policy, policy->min,
+					CPUFREQ_RELATION_H);
+			}
+			break;
+		case CPU_DEAD:
+			cpufreq_remove_dev(sys_dev);
+			break;
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_cpu_notifier =
+{
+	.notifier_call = cpufreq_cpu_callback,
+};
 
 /*********************************************************************
  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
@@ -1475,6 +1522,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	}
 
 	if (!ret) {
+		register_cpu_notifier(&cpufreq_cpu_notifier);
 		dprintk("driver %s up and running\n", driver_data->name);
 		cpufreq_debug_enable_ratelimit();
 	}
@@ -1506,6 +1554,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 	dprintk("unregistering driver %s\n", driver->name);
 
 	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
+	unregister_cpu_notifier(&cpufreq_cpu_notifier);
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
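
The cpufreq.c changes above hang sysfs setup and teardown off the CPU hotplug notifier chain. For reference, a minimal, self-contained sketch of that notifier pattern follows; the example_* names and the placeholder case bodies are illustrative, not part of the patch:

	#include <linux/kernel.h>	/* printk() */
	#include <linux/cpu.h>		/* register_cpu_notifier(), CPU_* actions */
	#include <linux/notifier.h>	/* struct notifier_block, NOTIFY_OK */

	/* Hypothetical callback mirroring the CPU_ONLINE / CPU_DOWN_PREPARE /
	 * CPU_DEAD handling added in cpufreq_cpu_callback() above. */
	static int example_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action) {
		case CPU_ONLINE:
			/* bring up per-CPU state, e.g. create sysfs entries */
			printk(KERN_INFO "cpu %u came online\n", cpu);
			break;
		case CPU_DOWN_PREPARE:
			/* last chance to act while the CPU is still online */
			break;
		case CPU_DEAD:
			/* tear down per-CPU state */
			printk(KERN_INFO "cpu %u is gone\n", cpu);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block example_cpu_notifier = {
		.notifier_call = example_cpu_callback,
	};

The patch registers its real notifier from cpufreq_register_driver() and removes it in cpufreq_unregister_driver(); this sketch would be hooked up the same way with register_cpu_notifier(&example_cpu_notifier) and unregister_cpu_notifier().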
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e1df376e709e..2ed5c4363b53 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -315,9 +315,9 @@ static void dbs_check_cpu(int cpu)
 	policy = this_dbs_info->cur_policy;
 
 	if ( init_flag == 0 ) {
-		for ( /* NULL */; init_flag < NR_CPUS; init_flag++ ) {
-			dbs_info = &per_cpu(cpu_dbs_info, init_flag);
-			requested_freq[cpu] = dbs_info->cur_policy->cur;
+		for_each_online_cpu(j) {
+			dbs_info = &per_cpu(cpu_dbs_info, j);
+			requested_freq[j] = dbs_info->cur_policy->cur;
 		}
 		init_flag = 1;
 	}
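
The cpufreq_conservative.c hunk replaces a hand-rolled 0..NR_CPUS loop (which also reused init_flag as the loop index and wrote every value into requested_freq[cpu]) with for_each_online_cpu(). A minimal sketch of the resulting pattern, with an illustrative per-CPU structure standing in for cpu_dbs_info:

	#include <linux/cpumask.h>	/* for_each_online_cpu() */
	#include <linux/percpu.h>	/* DEFINE_PER_CPU(), per_cpu() */
	#include <linux/threads.h>	/* NR_CPUS */

	/* Illustrative stand-in for the governor's per-CPU cpu_dbs_info. */
	struct example_dbs_info {
		unsigned int cur_freq;
	};
	static DEFINE_PER_CPU(struct example_dbs_info, example_dbs_info);
	static unsigned int requested_freq[NR_CPUS];

	static void snapshot_requested_freqs(void)
	{
		unsigned int j;

		/* Only online CPUs have initialised per-CPU governor state;
		 * indexing 0..NR_CPUS-1 would also touch offline CPUs. */
		for_each_online_cpu(j)
			requested_freq[j] = per_cpu(example_dbs_info, j).cur_freq;
	}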
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 7ddf714c4d43..0bddb8e694d9 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -19,6 +19,7 @@
 #include <linux/percpu.h>
 #include <linux/kobject.h>
 #include <linux/spinlock.h>
+#include <linux/notifier.h>
 #include <asm/cputime.h>
 
 static spinlock_t cpufreq_stats_lock;
@@ -302,6 +303,27 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
 	return 0;
 }
 
+static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+		cpufreq_update_policy(cpu);
+		break;
+	case CPU_DEAD:
+		cpufreq_stats_free_table(cpu);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_stat_cpu_notifier =
+{
+	.notifier_call = cpufreq_stat_cpu_callback,
+};
+
 static struct notifier_block notifier_policy_block = {
 	.notifier_call = cpufreq_stat_notifier_policy
 };
@@ -315,6 +337,7 @@ __init cpufreq_stats_init(void)
 {
 	int ret;
 	unsigned int cpu;
+
 	spin_lock_init(&cpufreq_stats_lock);
 	if ((ret = cpufreq_register_notifier(&notifier_policy_block,
 				CPUFREQ_POLICY_NOTIFIER)))
@@ -327,20 +350,31 @@ __init cpufreq_stats_init(void)
 		return ret;
 	}
 
-	for_each_cpu(cpu)
-		cpufreq_update_policy(cpu);
+	register_cpu_notifier(&cpufreq_stat_cpu_notifier);
+	lock_cpu_hotplug();
+	for_each_online_cpu(cpu) {
+		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
+			(void *)(long)cpu);
+	}
+	unlock_cpu_hotplug();
 	return 0;
 }
 static void
 __exit cpufreq_stats_exit(void)
 {
 	unsigned int cpu;
+
 	cpufreq_unregister_notifier(&notifier_policy_block,
 			CPUFREQ_POLICY_NOTIFIER);
 	cpufreq_unregister_notifier(&notifier_trans_block,
 			CPUFREQ_TRANSITION_NOTIFIER);
-	for_each_cpu(cpu)
-		cpufreq_stats_free_table(cpu);
+	unregister_cpu_notifier(&cpufreq_stat_cpu_notifier);
+	lock_cpu_hotplug();
+	for_each_online_cpu(cpu) {
+		cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD,
+			(void *)(long)cpu);
+	}
+	unlock_cpu_hotplug();
 }
 
 MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
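
The cpufreq_stats init/exit changes follow a common hotplug idiom: register the notifier first, then replay CPU_ONLINE by hand for CPUs that are already up (the exit path does the same with CPU_DEAD), holding the hotplug lock so the online mask cannot change during the walk. A hedged sketch of just that pattern, with a hypothetical my_* callback standing in for cpufreq_stat_cpu_callback:

	#include <linux/init.h>
	#include <linux/cpu.h>		/* register_cpu_notifier(), lock_cpu_hotplug() */
	#include <linux/cpumask.h>	/* for_each_online_cpu() */
	#include <linux/notifier.h>	/* struct notifier_block, NOTIFY_OK */

	static int my_cpu_callback(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
	{
		/* per-CPU setup or teardown would go here, keyed on 'action' */
		return NOTIFY_OK;
	}

	static struct notifier_block my_cpu_notifier = {
		.notifier_call = my_cpu_callback,
	};

	static int __init my_init(void)
	{
		unsigned int cpu;

		register_cpu_notifier(&my_cpu_notifier);

		/* CPUs that were already online never generate CPU_ONLINE, so
		 * the event is replayed manually; lock_cpu_hotplug() keeps the
		 * online mask stable across the loop. */
		lock_cpu_hotplug();
		for_each_online_cpu(cpu)
			my_cpu_callback(&my_cpu_notifier, CPU_ONLINE,
				(void *)(long)cpu);
		unlock_cpu_hotplug();
		return 0;
	}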