diff options
author | Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | 2007-02-05 19:12:44 -0500 |
---|---|---|
committer | Dave Jones <davej@redhat.com> | 2007-02-10 20:01:47 -0500 |
commit | 5a01f2e8f3ac134e24144d74bb48a60236f7024d (patch) | |
tree | 8d807b81618dc1b4782e0e58a9629a6d0a09fbe3 /drivers/cpufreq | |
parent | c120069779e3e35917c15393cf2847fa79811eb6 (diff) |
[CPUFREQ] Rewrite lock in cpufreq to eliminate cpufreq/hotplug related issues
Yet another attempt to resolve cpufreq and hotplug locking issues.
Patchset has 3 patches:
* Rewrite the lock infrastructure of cpufreq using a per cpu rwsem.
* Minor restructuring of work callback in ondemand driver.
* Use the new cpufreq rwsem infrastructure in ondemand work.
This patch:
Convert policy->lock to rwsem and move it to per_cpu area.
This rwsem will protect against both changing/accessing policy
related parameters and CPU hot plug/unplug.
[malattia@linux.it: fix oops in kref_put()]
Cc: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Mattia Dongili <malattia@linux.it>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Jones <davej@redhat.com>
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r-- | drivers/cpufreq/cpufreq.c | 244 |
1 file changed, 181 insertions(+), 63 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 9bdcdbdcc0ad..f52facc570f5 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -41,8 +41,67 @@ static struct cpufreq_driver *cpufreq_driver; | |||
41 | static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; | 41 | static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; |
42 | static DEFINE_SPINLOCK(cpufreq_driver_lock); | 42 | static DEFINE_SPINLOCK(cpufreq_driver_lock); |
43 | 43 | ||
44 | /* | ||
45 | * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure | ||
46 | * all cpufreq/hotplug/workqueue/etc related lock issues. | ||
47 | * | ||
48 | * The rules for this semaphore: | ||
49 | * - Any routine that wants to read from the policy structure will | ||
50 | * do a down_read on this semaphore. | ||
51 | * - Any routine that will write to the policy structure and/or may take away | ||
52 | * the policy altogether (eg. CPU hotplug), will hold this lock in write | ||
53 | * mode before doing so. | ||
54 | * | ||
55 | * Additional rules: | ||
56 | * - All holders of the lock should check to make sure that the CPU they | ||
57 | * are concerned with are online after they get the lock. | ||
58 | * - Governor routines that can be called in cpufreq hotplug path should not | ||
59 | * take this sem as top level hotplug notifier handler takes this. | ||
60 | */ | ||
61 | static DEFINE_PER_CPU(int, policy_cpu); | ||
62 | static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); | ||
63 | |||
64 | #define lock_policy_rwsem(mode, cpu) \ | ||
65 | int lock_policy_rwsem_##mode \ | ||
66 | (int cpu) \ | ||
67 | { \ | ||
68 | int policy_cpu = per_cpu(policy_cpu, cpu); \ | ||
69 | BUG_ON(policy_cpu == -1); \ | ||
70 | down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ | ||
71 | if (unlikely(!cpu_online(cpu))) { \ | ||
72 | up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ | ||
73 | return -1; \ | ||
74 | } \ | ||
75 | \ | ||
76 | return 0; \ | ||
77 | } | ||
78 | |||
79 | lock_policy_rwsem(read, cpu); | ||
80 | EXPORT_SYMBOL_GPL(lock_policy_rwsem_read); | ||
81 | |||
82 | lock_policy_rwsem(write, cpu); | ||
83 | EXPORT_SYMBOL_GPL(lock_policy_rwsem_write); | ||
84 | |||
85 | void unlock_policy_rwsem_read(int cpu) | ||
86 | { | ||
87 | int policy_cpu = per_cpu(policy_cpu, cpu); | ||
88 | BUG_ON(policy_cpu == -1); | ||
89 | up_read(&per_cpu(cpu_policy_rwsem, policy_cpu)); | ||
90 | } | ||
91 | EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read); | ||
92 | |||
93 | void unlock_policy_rwsem_write(int cpu) | ||
94 | { | ||
95 | int policy_cpu = per_cpu(policy_cpu, cpu); | ||
96 | BUG_ON(policy_cpu == -1); | ||
97 | up_write(&per_cpu(cpu_policy_rwsem, policy_cpu)); | ||
98 | } | ||
99 | EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write); | ||
100 | |||
101 | |||
44 | /* internal prototypes */ | 102 | /* internal prototypes */ |
45 | static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); | 103 | static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); |
104 | static unsigned int __cpufreq_get(unsigned int cpu); | ||
46 | static void handle_update(struct work_struct *work); | 105 | static void handle_update(struct work_struct *work); |
47 | 106 | ||
48 | /** | 107 | /** |
@@ -415,10 +474,8 @@ static ssize_t store_##file_name \ | |||
415 | if (ret != 1) \ | 474 | if (ret != 1) \ |
416 | return -EINVAL; \ | 475 | return -EINVAL; \ |
417 | \ | 476 | \ |
418 | mutex_lock(&policy->lock); \ | ||
419 | ret = __cpufreq_set_policy(policy, &new_policy); \ | 477 | ret = __cpufreq_set_policy(policy, &new_policy); \ |
420 | policy->user_policy.object = policy->object; \ | 478 | policy->user_policy.object = policy->object; \ |
421 | mutex_unlock(&policy->lock); \ | ||
422 | \ | 479 | \ |
423 | return ret ? ret : count; \ | 480 | return ret ? ret : count; \ |
424 | } | 481 | } |
@@ -432,7 +489,7 @@ store_one(scaling_max_freq,max); | |||
432 | static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, | 489 | static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, |
433 | char *buf) | 490 | char *buf) |
434 | { | 491 | { |
435 | unsigned int cur_freq = cpufreq_get(policy->cpu); | 492 | unsigned int cur_freq = __cpufreq_get(policy->cpu); |
436 | if (!cur_freq) | 493 | if (!cur_freq) |
437 | return sprintf(buf, "<unknown>"); | 494 | return sprintf(buf, "<unknown>"); |
438 | return sprintf(buf, "%u\n", cur_freq); | 495 | return sprintf(buf, "%u\n", cur_freq); |
@@ -479,12 +536,10 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy, | |||
479 | 536 | ||
480 | /* Do not use cpufreq_set_policy here or the user_policy.max | 537 | /* Do not use cpufreq_set_policy here or the user_policy.max |
481 | will be wrongly overridden */ | 538 | will be wrongly overridden */ |
482 | mutex_lock(&policy->lock); | ||
483 | ret = __cpufreq_set_policy(policy, &new_policy); | 539 | ret = __cpufreq_set_policy(policy, &new_policy); |
484 | 540 | ||
485 | policy->user_policy.policy = policy->policy; | 541 | policy->user_policy.policy = policy->policy; |
486 | policy->user_policy.governor = policy->governor; | 542 | policy->user_policy.governor = policy->governor; |
487 | mutex_unlock(&policy->lock); | ||
488 | 543 | ||
489 | if (ret) | 544 | if (ret) |
490 | return ret; | 545 | return ret; |
@@ -589,11 +644,17 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf) | |||
589 | policy = cpufreq_cpu_get(policy->cpu); | 644 | policy = cpufreq_cpu_get(policy->cpu); |
590 | if (!policy) | 645 | if (!policy) |
591 | return -EINVAL; | 646 | return -EINVAL; |
647 | |||
648 | if (lock_policy_rwsem_read(policy->cpu) < 0) | ||
649 | return -EINVAL; | ||
650 | |||
592 | if (fattr->show) | 651 | if (fattr->show) |
593 | ret = fattr->show(policy, buf); | 652 | ret = fattr->show(policy, buf); |
594 | else | 653 | else |
595 | ret = -EIO; | 654 | ret = -EIO; |
596 | 655 | ||
656 | unlock_policy_rwsem_read(policy->cpu); | ||
657 | |||
597 | cpufreq_cpu_put(policy); | 658 | cpufreq_cpu_put(policy); |
598 | return ret; | 659 | return ret; |
599 | } | 660 | } |
@@ -607,11 +668,17 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr, | |||
607 | policy = cpufreq_cpu_get(policy->cpu); | 668 | policy = cpufreq_cpu_get(policy->cpu); |
608 | if (!policy) | 669 | if (!policy) |
609 | return -EINVAL; | 670 | return -EINVAL; |
671 | |||
672 | if (lock_policy_rwsem_write(policy->cpu) < 0) | ||
673 | return -EINVAL; | ||
674 | |||
610 | if (fattr->store) | 675 | if (fattr->store) |
611 | ret = fattr->store(policy, buf, count); | 676 | ret = fattr->store(policy, buf, count); |
612 | else | 677 | else |
613 | ret = -EIO; | 678 | ret = -EIO; |
614 | 679 | ||
680 | unlock_policy_rwsem_write(policy->cpu); | ||
681 | |||
615 | cpufreq_cpu_put(policy); | 682 | cpufreq_cpu_put(policy); |
616 | return ret; | 683 | return ret; |
617 | } | 684 | } |
@@ -685,8 +752,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
685 | policy->cpu = cpu; | 752 | policy->cpu = cpu; |
686 | policy->cpus = cpumask_of_cpu(cpu); | 753 | policy->cpus = cpumask_of_cpu(cpu); |
687 | 754 | ||
688 | mutex_init(&policy->lock); | 755 | /* Initially set CPU itself as the policy_cpu */ |
689 | mutex_lock(&policy->lock); | 756 | per_cpu(policy_cpu, cpu) = cpu; |
757 | lock_policy_rwsem_write(cpu); | ||
758 | |||
690 | init_completion(&policy->kobj_unregister); | 759 | init_completion(&policy->kobj_unregister); |
691 | INIT_WORK(&policy->update, handle_update); | 760 | INIT_WORK(&policy->update, handle_update); |
692 | 761 | ||
@@ -696,7 +765,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
696 | ret = cpufreq_driver->init(policy); | 765 | ret = cpufreq_driver->init(policy); |
697 | if (ret) { | 766 | if (ret) { |
698 | dprintk("initialization failed\n"); | 767 | dprintk("initialization failed\n"); |
699 | mutex_unlock(&policy->lock); | 768 | unlock_policy_rwsem_write(cpu); |
700 | goto err_out; | 769 | goto err_out; |
701 | } | 770 | } |
702 | 771 | ||
@@ -710,6 +779,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
710 | */ | 779 | */ |
711 | managed_policy = cpufreq_cpu_get(j); | 780 | managed_policy = cpufreq_cpu_get(j); |
712 | if (unlikely(managed_policy)) { | 781 | if (unlikely(managed_policy)) { |
782 | |||
783 | /* Set proper policy_cpu */ | ||
784 | unlock_policy_rwsem_write(cpu); | ||
785 | per_cpu(policy_cpu, cpu) = managed_policy->cpu; | ||
786 | |||
787 | if (lock_policy_rwsem_write(cpu) < 0) | ||
788 | goto err_out_driver_exit; | ||
789 | |||
713 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 790 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
714 | managed_policy->cpus = policy->cpus; | 791 | managed_policy->cpus = policy->cpus; |
715 | cpufreq_cpu_data[cpu] = managed_policy; | 792 | cpufreq_cpu_data[cpu] = managed_policy; |
@@ -720,13 +797,13 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
720 | &managed_policy->kobj, | 797 | &managed_policy->kobj, |
721 | "cpufreq"); | 798 | "cpufreq"); |
722 | if (ret) { | 799 | if (ret) { |
723 | mutex_unlock(&policy->lock); | 800 | unlock_policy_rwsem_write(cpu); |
724 | goto err_out_driver_exit; | 801 | goto err_out_driver_exit; |
725 | } | 802 | } |
726 | 803 | ||
727 | cpufreq_debug_enable_ratelimit(); | 804 | cpufreq_debug_enable_ratelimit(); |
728 | mutex_unlock(&policy->lock); | ||
729 | ret = 0; | 805 | ret = 0; |
806 | unlock_policy_rwsem_write(cpu); | ||
730 | goto err_out_driver_exit; /* call driver->exit() */ | 807 | goto err_out_driver_exit; /* call driver->exit() */ |
731 | } | 808 | } |
732 | } | 809 | } |
@@ -740,7 +817,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
740 | 817 | ||
741 | ret = kobject_register(&policy->kobj); | 818 | ret = kobject_register(&policy->kobj); |
742 | if (ret) { | 819 | if (ret) { |
743 | mutex_unlock(&policy->lock); | 820 | unlock_policy_rwsem_write(cpu); |
744 | goto err_out_driver_exit; | 821 | goto err_out_driver_exit; |
745 | } | 822 | } |
746 | /* set up files for this cpu device */ | 823 | /* set up files for this cpu device */ |
@@ -755,8 +832,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
755 | sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); | 832 | sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); |
756 | 833 | ||
757 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 834 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
758 | for_each_cpu_mask(j, policy->cpus) | 835 | for_each_cpu_mask(j, policy->cpus) { |
759 | cpufreq_cpu_data[j] = policy; | 836 | cpufreq_cpu_data[j] = policy; |
837 | per_cpu(policy_cpu, j) = policy->cpu; | ||
838 | } | ||
760 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 839 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
761 | 840 | ||
762 | /* symlink affected CPUs */ | 841 | /* symlink affected CPUs */ |
@@ -772,14 +851,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
772 | ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, | 851 | ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, |
773 | "cpufreq"); | 852 | "cpufreq"); |
774 | if (ret) { | 853 | if (ret) { |
775 | mutex_unlock(&policy->lock); | 854 | unlock_policy_rwsem_write(cpu); |
776 | goto err_out_unregister; | 855 | goto err_out_unregister; |
777 | } | 856 | } |
778 | } | 857 | } |
779 | 858 | ||
780 | policy->governor = NULL; /* to assure that the starting sequence is | 859 | policy->governor = NULL; /* to assure that the starting sequence is |
781 | * run in cpufreq_set_policy */ | 860 | * run in cpufreq_set_policy */ |
782 | mutex_unlock(&policy->lock); | 861 | unlock_policy_rwsem_write(cpu); |
783 | 862 | ||
784 | /* set default policy */ | 863 | /* set default policy */ |
785 | ret = cpufreq_set_policy(&new_policy); | 864 | ret = cpufreq_set_policy(&new_policy); |
@@ -820,11 +899,13 @@ module_out: | |||
820 | 899 | ||
821 | 900 | ||
822 | /** | 901 | /** |
823 | * cpufreq_remove_dev - remove a CPU device | 902 | * __cpufreq_remove_dev - remove a CPU device |
824 | * | 903 | * |
825 | * Removes the cpufreq interface for a CPU device. | 904 | * Removes the cpufreq interface for a CPU device. |
905 | * Caller should already have policy_rwsem in write mode for this CPU. | ||
906 | * This routine frees the rwsem before returning. | ||
826 | */ | 907 | */ |
827 | static int cpufreq_remove_dev (struct sys_device * sys_dev) | 908 | static int __cpufreq_remove_dev (struct sys_device * sys_dev) |
828 | { | 909 | { |
829 | unsigned int cpu = sys_dev->id; | 910 | unsigned int cpu = sys_dev->id; |
830 | unsigned long flags; | 911 | unsigned long flags; |
@@ -843,6 +924,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
843 | if (!data) { | 924 | if (!data) { |
844 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 925 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
845 | cpufreq_debug_enable_ratelimit(); | 926 | cpufreq_debug_enable_ratelimit(); |
927 | unlock_policy_rwsem_write(cpu); | ||
846 | return -EINVAL; | 928 | return -EINVAL; |
847 | } | 929 | } |
848 | cpufreq_cpu_data[cpu] = NULL; | 930 | cpufreq_cpu_data[cpu] = NULL; |
@@ -859,6 +941,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
859 | sysfs_remove_link(&sys_dev->kobj, "cpufreq"); | 941 | sysfs_remove_link(&sys_dev->kobj, "cpufreq"); |
860 | cpufreq_cpu_put(data); | 942 | cpufreq_cpu_put(data); |
861 | cpufreq_debug_enable_ratelimit(); | 943 | cpufreq_debug_enable_ratelimit(); |
944 | unlock_policy_rwsem_write(cpu); | ||
862 | return 0; | 945 | return 0; |
863 | } | 946 | } |
864 | #endif | 947 | #endif |
@@ -867,6 +950,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
867 | if (!kobject_get(&data->kobj)) { | 950 | if (!kobject_get(&data->kobj)) { |
868 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 951 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
869 | cpufreq_debug_enable_ratelimit(); | 952 | cpufreq_debug_enable_ratelimit(); |
953 | unlock_policy_rwsem_write(cpu); | ||
870 | return -EFAULT; | 954 | return -EFAULT; |
871 | } | 955 | } |
872 | 956 | ||
@@ -900,10 +984,10 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
900 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 984 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
901 | #endif | 985 | #endif |
902 | 986 | ||
903 | mutex_lock(&data->lock); | ||
904 | if (cpufreq_driver->target) | 987 | if (cpufreq_driver->target) |
905 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); | 988 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); |
906 | mutex_unlock(&data->lock); | 989 | |
990 | unlock_policy_rwsem_write(cpu); | ||
907 | 991 | ||
908 | kobject_unregister(&data->kobj); | 992 | kobject_unregister(&data->kobj); |
909 | 993 | ||
@@ -927,6 +1011,18 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
927 | } | 1011 | } |
928 | 1012 | ||
929 | 1013 | ||
1014 | static int cpufreq_remove_dev (struct sys_device * sys_dev) | ||
1015 | { | ||
1016 | unsigned int cpu = sys_dev->id; | ||
1017 | int retval; | ||
1018 | if (unlikely(lock_policy_rwsem_write(cpu))) | ||
1019 | BUG(); | ||
1020 | |||
1021 | retval = __cpufreq_remove_dev(sys_dev); | ||
1022 | return retval; | ||
1023 | } | ||
1024 | |||
1025 | |||
930 | static void handle_update(struct work_struct *work) | 1026 | static void handle_update(struct work_struct *work) |
931 | { | 1027 | { |
932 | struct cpufreq_policy *policy = | 1028 | struct cpufreq_policy *policy = |
@@ -974,9 +1070,12 @@ unsigned int cpufreq_quick_get(unsigned int cpu) | |||
974 | unsigned int ret_freq = 0; | 1070 | unsigned int ret_freq = 0; |
975 | 1071 | ||
976 | if (policy) { | 1072 | if (policy) { |
977 | mutex_lock(&policy->lock); | 1073 | if (unlikely(lock_policy_rwsem_read(cpu))) |
1074 | return ret_freq; | ||
1075 | |||
978 | ret_freq = policy->cur; | 1076 | ret_freq = policy->cur; |
979 | mutex_unlock(&policy->lock); | 1077 | |
1078 | unlock_policy_rwsem_read(cpu); | ||
980 | cpufreq_cpu_put(policy); | 1079 | cpufreq_cpu_put(policy); |
981 | } | 1080 | } |
982 | 1081 | ||
@@ -985,24 +1084,13 @@ unsigned int cpufreq_quick_get(unsigned int cpu) | |||
985 | EXPORT_SYMBOL(cpufreq_quick_get); | 1084 | EXPORT_SYMBOL(cpufreq_quick_get); |
986 | 1085 | ||
987 | 1086 | ||
988 | /** | 1087 | static unsigned int __cpufreq_get(unsigned int cpu) |
989 | * cpufreq_get - get the current CPU frequency (in kHz) | ||
990 | * @cpu: CPU number | ||
991 | * | ||
992 | * Get the CPU current (static) CPU frequency | ||
993 | */ | ||
994 | unsigned int cpufreq_get(unsigned int cpu) | ||
995 | { | 1088 | { |
996 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | 1089 | struct cpufreq_policy *policy = cpufreq_cpu_data[cpu]; |
997 | unsigned int ret_freq = 0; | 1090 | unsigned int ret_freq = 0; |
998 | 1091 | ||
999 | if (!policy) | ||
1000 | return 0; | ||
1001 | |||
1002 | if (!cpufreq_driver->get) | 1092 | if (!cpufreq_driver->get) |
1003 | goto out; | 1093 | return (ret_freq); |
1004 | |||
1005 | mutex_lock(&policy->lock); | ||
1006 | 1094 | ||
1007 | ret_freq = cpufreq_driver->get(cpu); | 1095 | ret_freq = cpufreq_driver->get(cpu); |
1008 | 1096 | ||
@@ -1016,11 +1104,33 @@ unsigned int cpufreq_get(unsigned int cpu) | |||
1016 | } | 1104 | } |
1017 | } | 1105 | } |
1018 | 1106 | ||
1019 | mutex_unlock(&policy->lock); | 1107 | return (ret_freq); |
1108 | } | ||
1020 | 1109 | ||
1021 | out: | 1110 | /** |
1022 | cpufreq_cpu_put(policy); | 1111 | * cpufreq_get - get the current CPU frequency (in kHz) |
1112 | * @cpu: CPU number | ||
1113 | * | ||
1114 | * Get the CPU current (static) CPU frequency | ||
1115 | */ | ||
1116 | unsigned int cpufreq_get(unsigned int cpu) | ||
1117 | { | ||
1118 | unsigned int ret_freq = 0; | ||
1119 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | ||
1120 | |||
1121 | if (!policy) | ||
1122 | goto out; | ||
1123 | |||
1124 | if (unlikely(lock_policy_rwsem_read(cpu))) | ||
1125 | goto out_policy; | ||
1126 | |||
1127 | ret_freq = __cpufreq_get(cpu); | ||
1128 | |||
1129 | unlock_policy_rwsem_read(cpu); | ||
1023 | 1130 | ||
1131 | out_policy: | ||
1132 | cpufreq_cpu_put(policy); | ||
1133 | out: | ||
1024 | return (ret_freq); | 1134 | return (ret_freq); |
1025 | } | 1135 | } |
1026 | EXPORT_SYMBOL(cpufreq_get); | 1136 | EXPORT_SYMBOL(cpufreq_get); |
@@ -1297,18 +1407,19 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, | |||
1297 | if (!policy) | 1407 | if (!policy) |
1298 | return -EINVAL; | 1408 | return -EINVAL; |
1299 | 1409 | ||
1300 | mutex_lock(&policy->lock); | 1410 | if (unlikely(lock_policy_rwsem_write(policy->cpu))) |
1411 | return -EINVAL; | ||
1301 | 1412 | ||
1302 | ret = __cpufreq_driver_target(policy, target_freq, relation); | 1413 | ret = __cpufreq_driver_target(policy, target_freq, relation); |
1303 | 1414 | ||
1304 | mutex_unlock(&policy->lock); | 1415 | unlock_policy_rwsem_write(policy->cpu); |
1305 | 1416 | ||
1306 | cpufreq_cpu_put(policy); | 1417 | cpufreq_cpu_put(policy); |
1307 | return ret; | 1418 | return ret; |
1308 | } | 1419 | } |
1309 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); | 1420 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); |
1310 | 1421 | ||
1311 | int cpufreq_driver_getavg(struct cpufreq_policy *policy) | 1422 | int __cpufreq_driver_getavg(struct cpufreq_policy *policy) |
1312 | { | 1423 | { |
1313 | int ret = 0; | 1424 | int ret = 0; |
1314 | 1425 | ||
@@ -1316,17 +1427,13 @@ int cpufreq_driver_getavg(struct cpufreq_policy *policy) | |||
1316 | if (!policy) | 1427 | if (!policy) |
1317 | return -EINVAL; | 1428 | return -EINVAL; |
1318 | 1429 | ||
1319 | mutex_lock(&policy->lock); | ||
1320 | |||
1321 | if (cpu_online(policy->cpu) && cpufreq_driver->getavg) | 1430 | if (cpu_online(policy->cpu) && cpufreq_driver->getavg) |
1322 | ret = cpufreq_driver->getavg(policy->cpu); | 1431 | ret = cpufreq_driver->getavg(policy->cpu); |
1323 | 1432 | ||
1324 | mutex_unlock(&policy->lock); | ||
1325 | |||
1326 | cpufreq_cpu_put(policy); | 1433 | cpufreq_cpu_put(policy); |
1327 | return ret; | 1434 | return ret; |
1328 | } | 1435 | } |
1329 | EXPORT_SYMBOL_GPL(cpufreq_driver_getavg); | 1436 | EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg); |
1330 | 1437 | ||
1331 | /* | 1438 | /* |
1332 | * when "event" is CPUFREQ_GOV_LIMITS | 1439 | * when "event" is CPUFREQ_GOV_LIMITS |
@@ -1410,9 +1517,7 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) | |||
1410 | if (!cpu_policy) | 1517 | if (!cpu_policy) |
1411 | return -EINVAL; | 1518 | return -EINVAL; |
1412 | 1519 | ||
1413 | mutex_lock(&cpu_policy->lock); | ||
1414 | memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); | 1520 | memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); |
1415 | mutex_unlock(&cpu_policy->lock); | ||
1416 | 1521 | ||
1417 | cpufreq_cpu_put(cpu_policy); | 1522 | cpufreq_cpu_put(cpu_policy); |
1418 | return 0; | 1523 | return 0; |
@@ -1528,8 +1633,9 @@ int cpufreq_set_policy(struct cpufreq_policy *policy) | |||
1528 | if (!data) | 1633 | if (!data) |
1529 | return -EINVAL; | 1634 | return -EINVAL; |
1530 | 1635 | ||
1531 | /* lock this CPU */ | 1636 | if (unlikely(lock_policy_rwsem_write(policy->cpu))) |
1532 | mutex_lock(&data->lock); | 1637 | return -EINVAL; |
1638 | |||
1533 | 1639 | ||
1534 | ret = __cpufreq_set_policy(data, policy); | 1640 | ret = __cpufreq_set_policy(data, policy); |
1535 | data->user_policy.min = data->min; | 1641 | data->user_policy.min = data->min; |
@@ -1537,7 +1643,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy) | |||
1537 | data->user_policy.policy = data->policy; | 1643 | data->user_policy.policy = data->policy; |
1538 | data->user_policy.governor = data->governor; | 1644 | data->user_policy.governor = data->governor; |
1539 | 1645 | ||
1540 | mutex_unlock(&data->lock); | 1646 | unlock_policy_rwsem_write(policy->cpu); |
1541 | 1647 | ||
1542 | cpufreq_cpu_put(data); | 1648 | cpufreq_cpu_put(data); |
1543 | 1649 | ||
@@ -1562,7 +1668,8 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1562 | if (!data) | 1668 | if (!data) |
1563 | return -ENODEV; | 1669 | return -ENODEV; |
1564 | 1670 | ||
1565 | mutex_lock(&data->lock); | 1671 | if (unlikely(lock_policy_rwsem_write(cpu))) |
1672 | return -EINVAL; | ||
1566 | 1673 | ||
1567 | dprintk("updating policy for CPU %u\n", cpu); | 1674 | dprintk("updating policy for CPU %u\n", cpu); |
1568 | memcpy(&policy, data, sizeof(struct cpufreq_policy)); | 1675 | memcpy(&policy, data, sizeof(struct cpufreq_policy)); |
@@ -1587,7 +1694,8 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1587 | 1694 | ||
1588 | ret = __cpufreq_set_policy(data, &policy); | 1695 | ret = __cpufreq_set_policy(data, &policy); |
1589 | 1696 | ||
1590 | mutex_unlock(&data->lock); | 1697 | unlock_policy_rwsem_write(cpu); |
1698 | |||
1591 | cpufreq_cpu_put(data); | 1699 | cpufreq_cpu_put(data); |
1592 | return ret; | 1700 | return ret; |
1593 | } | 1701 | } |
@@ -1597,31 +1705,28 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, | |||
1597 | unsigned long action, void *hcpu) | 1705 | unsigned long action, void *hcpu) |
1598 | { | 1706 | { |
1599 | unsigned int cpu = (unsigned long)hcpu; | 1707 | unsigned int cpu = (unsigned long)hcpu; |
1600 | struct cpufreq_policy *policy; | ||
1601 | struct sys_device *sys_dev; | 1708 | struct sys_device *sys_dev; |
1709 | struct cpufreq_policy *policy; | ||
1602 | 1710 | ||
1603 | sys_dev = get_cpu_sysdev(cpu); | 1711 | sys_dev = get_cpu_sysdev(cpu); |
1604 | |||
1605 | if (sys_dev) { | 1712 | if (sys_dev) { |
1606 | switch (action) { | 1713 | switch (action) { |
1607 | case CPU_ONLINE: | 1714 | case CPU_ONLINE: |
1608 | cpufreq_add_dev(sys_dev); | 1715 | cpufreq_add_dev(sys_dev); |
1609 | break; | 1716 | break; |
1610 | case CPU_DOWN_PREPARE: | 1717 | case CPU_DOWN_PREPARE: |
1611 | /* | 1718 | if (unlikely(lock_policy_rwsem_write(cpu))) |
1612 | * We attempt to put this cpu in lowest frequency | 1719 | BUG(); |
1613 | * possible before going down. This will permit | 1720 | |
1614 | * hardware-managed P-State to switch other related | ||
1615 | * threads to min or higher speeds if possible. | ||
1616 | */ | ||
1617 | policy = cpufreq_cpu_data[cpu]; | 1721 | policy = cpufreq_cpu_data[cpu]; |
1618 | if (policy) { | 1722 | if (policy) { |
1619 | cpufreq_driver_target(policy, policy->min, | 1723 | __cpufreq_driver_target(policy, policy->min, |
1620 | CPUFREQ_RELATION_H); | 1724 | CPUFREQ_RELATION_H); |
1621 | } | 1725 | } |
1726 | __cpufreq_remove_dev(sys_dev); | ||
1622 | break; | 1727 | break; |
1623 | case CPU_DEAD: | 1728 | case CPU_DOWN_FAILED: |
1624 | cpufreq_remove_dev(sys_dev); | 1729 | cpufreq_add_dev(sys_dev); |
1625 | break; | 1730 | break; |
1626 | } | 1731 | } |
1627 | } | 1732 | } |
@@ -1735,3 +1840,16 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) | |||
1735 | return 0; | 1840 | return 0; |
1736 | } | 1841 | } |
1737 | EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); | 1842 | EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); |
1843 | |||
1844 | static int __init cpufreq_core_init(void) | ||
1845 | { | ||
1846 | int cpu; | ||
1847 | |||
1848 | for_each_possible_cpu(cpu) { | ||
1849 | per_cpu(policy_cpu, cpu) = -1; | ||
1850 | init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); | ||
1851 | } | ||
1852 | return 0; | ||
1853 | } | ||
1854 | |||
1855 | core_initcall(cpufreq_core_init); | ||