author    Dave Jones <davej@redhat.com>    2006-03-05 03:37:23 -0500
committer Dave Jones <davej@redhat.com>    2006-03-05 03:37:23 -0500
commit    8ff69732d484ea9ccbf242cc49b4fe9538e64c71 (patch)
tree      5b65e26af1cbf2f5c54ca9b3b35755ddeb2f40ee /drivers/cpufreq
parent    2a1c1c877ecb446dbdf1715248e151db8719a87b (diff)
[CPUFREQ] Fix handling for CPU hotplug
This patch adds proper logic to the cpufreq driver to handle CPU hotplug. When CPUs go on/offline, the affected-CPUs data, cpufreq_policy->cpus, is not updated properly, leaving the sysfs directories and symlinks in an incorrect state after a few CPU on/offline cycles.

Signed-off-by: Jacob Shin <jacob.shin@amd.com>
Signed-off-by: Dave Jones <davej@redhat.com>
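For readers less familiar with the bookkeeping involved, below is a minimal, hypothetical userspace sketch (plain C, not kernel code; the names policy, cpu_policy, cpu_add and cpu_remove are illustrative only) of the invariant this patch restores: a hot-added CPU should link to the policy already owned by a sibling instead of re-initializing one, and a hot-removed CPU must be cleared from the shared cpus mask so later handling sees the correct state.

/* Illustrative model only -- not the kernel's cpufreq API. */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct policy {
        unsigned int owner_cpu;   /* CPU that first created the policy        */
        unsigned long cpus_mask;  /* bit i set => online CPU i shares policy  */
};

static struct policy *cpu_policy[NR_CPUS];  /* models per-CPU policy pointers */

/* CPU comes online: link to a sibling's existing policy if there is one. */
static void cpu_add(unsigned int cpu, unsigned long siblings)
{
        for (unsigned int j = 0; j < NR_CPUS; j++) {
                if (j != cpu && (siblings & (1UL << j)) && cpu_policy[j]) {
                        cpu_policy[j]->cpus_mask |= 1UL << cpu;
                        cpu_policy[cpu] = cpu_policy[j];  /* link, do not re-init */
                        printf("cpu%u: linked to policy of cpu%u\n",
                               cpu, cpu_policy[j]->owner_cpu);
                        return;
                }
        }
        struct policy *p = calloc(1, sizeof(*p));
        p->owner_cpu = cpu;
        p->cpus_mask = 1UL << cpu;
        cpu_policy[cpu] = p;
        printf("cpu%u: created new policy\n", cpu);
}

/* CPU goes offline: drop it from the shared mask (the step the bug skipped). */
static void cpu_remove(unsigned int cpu)
{
        struct policy *p = cpu_policy[cpu];
        if (!p)
                return;
        p->cpus_mask &= ~(1UL << cpu);
        cpu_policy[cpu] = NULL;
        printf("cpu%u: removed, shared mask now 0x%lx\n", cpu, p->cpus_mask);
        if (!p->cpus_mask)
                free(p);          /* last sharer gone */
}

int main(void)
{
        cpu_add(0, 0x3);    /* CPUs 0 and 1 share one frequency domain   */
        cpu_add(1, 0x3);
        cpu_remove(1);      /* hot-unplug ...                            */
        cpu_add(1, 0x3);    /* ... and re-plug: must link, not duplicate */
        return 0;
}

The real patch does the same at two points: the CONFIG_SMP block added to cpufreq_add_dev() performs the "link to the managing sibling instead of re-initializing" step, and the cpu_clear() added to cpufreq_remove_dev() performs the mask cleanup when a CPU goes away.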
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq.c | 51
1 file changed, 49 insertions(+), 2 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index cb7d6e0db759..aed80e6aec6d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -6,6 +6,8 @@
  *
  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
  *             Added handling for CPU hotplug
+ *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
+ *             Fix handling for CPU hotplug -- affected CPUs
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -573,8 +575,12 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
         struct cpufreq_policy new_policy;
         struct cpufreq_policy *policy;
         struct freq_attr **drv_attr;
+        struct sys_device *cpu_sys_dev;
         unsigned long flags;
         unsigned int j;
+#ifdef CONFIG_SMP
+        struct cpufreq_policy *managed_policy;
+#endif
 
         if (cpu_is_offline(cpu))
                 return 0;
@@ -587,8 +593,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
          * CPU because it is in the same boat. */
         policy = cpufreq_cpu_get(cpu);
         if (unlikely(policy)) {
-                dprintk("CPU already managed, adding link\n");
-                sysfs_create_link(&sys_dev->kobj, &policy->kobj, "cpufreq");
+                cpufreq_cpu_put(policy);
                 cpufreq_debug_enable_ratelimit();
                 return 0;
         }
@@ -623,6 +628,32 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
                 goto err_out;
         }
 
+#ifdef CONFIG_SMP
+        for_each_cpu_mask(j, policy->cpus) {
+                if (cpu == j)
+                        continue;
+
+                /* check for existing affected CPUs. They may not be aware
+                 * of it due to CPU Hotplug.
+                 */
+                managed_policy = cpufreq_cpu_get(j);
+                if (unlikely(managed_policy)) {
+                        spin_lock_irqsave(&cpufreq_driver_lock, flags);
+                        managed_policy->cpus = policy->cpus;
+                        cpufreq_cpu_data[cpu] = managed_policy;
+                        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+                        dprintk("CPU already managed, adding link\n");
+                        sysfs_create_link(&sys_dev->kobj,
+                                          &managed_policy->kobj, "cpufreq");
+
+                        cpufreq_debug_enable_ratelimit();
+                        mutex_unlock(&policy->lock);
+                        ret = 0;
+                        goto err_out_driver_exit; /* call driver->exit() */
+                }
+        }
+#endif
         memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
 
         /* prepare interface data */
@@ -650,6 +681,21 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
         for_each_cpu_mask(j, policy->cpus)
                 cpufreq_cpu_data[j] = policy;
         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+        /* symlink affected CPUs */
+        for_each_cpu_mask(j, policy->cpus) {
+                if (j == cpu)
+                        continue;
+                if (!cpu_online(j))
+                        continue;
+
+                dprintk("CPU already managed, adding link\n");
+                cpufreq_cpu_get(cpu);
+                cpu_sys_dev = get_cpu_sysdev(j);
+                sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
+                                  "cpufreq");
+        }
+
         policy->governor = NULL; /* to assure that the starting sequence is
                                   * run in cpufreq_set_policy */
         mutex_unlock(&policy->lock);
@@ -728,6 +774,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
          */
         if (unlikely(cpu != data->cpu)) {
                 dprintk("removing link\n");
+                cpu_clear(cpu, data->cpus);
                 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
                 sysfs_remove_link(&sys_dev->kobj, "cpufreq");
                 cpufreq_cpu_put(data);