Diffstat (limited to 'drivers/acpi/processor_throttling.c')
-rw-r--r--	drivers/acpi/processor_throttling.c	80
1 file changed, 52 insertions(+), 28 deletions(-)
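The commit converts the throttling code from on-stack `cpumask_t` variables to `cpumask_var_t`, which is heap-allocated when CONFIG_CPUMASK_OFFSTACK is enabled, and switches the old `cpu_*`/`cpus_*` helpers to the pointer-based `cpumask_*` API. Below is a minimal sketch of the allocate/operate/free idiom the patch introduces; the function is hypothetical and not part of the patch:

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

/*
 * Hypothetical example of the cpumask_var_t idiom used by this patch:
 * allocate the mask, manipulate it only through cpumask_* accessors,
 * and free it on every exit path.
 */
static int example_walk_online_cpus(void)
{
	cpumask_var_t mask;
	unsigned int cpu;

	/* With CONFIG_CPUMASK_OFFSTACK the storage lives off the stack. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);

	for_each_cpu(cpu, mask)
		pr_info("cpu %u is online\n", cpu);

	free_cpumask_var(mask);
	return 0;
}

The key point is that every return path after a successful alloc_cpumask_var() must reach free_cpumask_var(), which is why the patch adds the extra -ENOMEM checks and the free under the err_ret label.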
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index a0c38c94a8a0..d27838171f4a 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -61,11 +61,14 @@ static int acpi_processor_update_tsd_coord(void)
 	int count, count_target;
 	int retval = 0;
 	unsigned int i, j;
-	cpumask_t covered_cpus;
+	cpumask_var_t covered_cpus;
 	struct acpi_processor *pr, *match_pr;
 	struct acpi_tsd_package *pdomain, *match_pdomain;
 	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
 
+	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+		return -ENOMEM;
+
 	/*
 	 * Now that we have _TSD data from all CPUs, lets setup T-state
 	 * coordination between all CPUs.
@@ -91,19 +94,19 @@ static int acpi_processor_update_tsd_coord(void)
 	if (retval)
 		goto err_ret;
 
-	cpus_clear(covered_cpus);
+	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
 			continue;
 
-		if (cpu_isset(i, covered_cpus))
+		if (cpumask_test_cpu(i, covered_cpus))
 			continue;
 		pthrottling = &pr->throttling;
 
 		pdomain = &(pthrottling->domain_info);
-		cpu_set(i, pthrottling->shared_cpu_map);
-		cpu_set(i, covered_cpus);
+		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
+		cpumask_set_cpu(i, covered_cpus);
 		/*
 		 * If the number of processor in the TSD domain is 1, it is
 		 * unnecessary to parse the coordination for this CPU.
@@ -144,8 +147,8 @@ static int acpi_processor_update_tsd_coord(void)
 				goto err_ret;
 			}
 
-			cpu_set(j, covered_cpus);
-			cpu_set(j, pthrottling->shared_cpu_map);
+			cpumask_set_cpu(j, covered_cpus);
+			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
 			count++;
 		}
 		for_each_possible_cpu(j) {
@@ -165,12 +168,14 @@ static int acpi_processor_update_tsd_coord(void)
 			 * If some CPUS have the same domain, they
 			 * will have the same shared_cpu_map.
 			 */
-			match_pthrottling->shared_cpu_map =
-				pthrottling->shared_cpu_map;
+			cpumask_copy(match_pthrottling->shared_cpu_map,
+				     pthrottling->shared_cpu_map);
 		}
 	}
 
 err_ret:
+	free_cpumask_var(covered_cpus);
+
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
@@ -182,8 +187,8 @@ err_ret:
 		 */
 		if (retval) {
 			pthrottling = &(pr->throttling);
-			cpus_clear(pthrottling->shared_cpu_map);
-			cpu_set(i, pthrottling->shared_cpu_map);
+			cpumask_clear(pthrottling->shared_cpu_map);
+			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
 			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 		}
 	}
@@ -567,7 +572,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
 	pthrottling = &pr->throttling;
 	pthrottling->tsd_valid_flag = 1;
 	pthrottling->shared_type = pdomain->coord_type;
-	cpu_set(pr->id, pthrottling->shared_cpu_map);
+	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
 	/*
 	 * If the coordination type is not defined in ACPI spec,
 	 * the tsd_valid_flag will be clear and coordination type
@@ -826,7 +831,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
-	cpumask_t saved_mask;
+	cpumask_var_t saved_mask;
 	int ret;
 
 	if (!pr)
@@ -834,14 +839,20 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
 
 	if (!pr->flags.throttling)
 		return -ENODEV;
+
+	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+		return -ENOMEM;
+
 	/*
 	 * Migrate task to the cpu pointed by pr.
 	 */
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+	cpumask_copy(saved_mask, &current->cpus_allowed);
+	/* FIXME: use work_on_cpu() */
+	set_cpus_allowed_ptr(current, cpumask_of(pr->id));
 	ret = pr->throttling.acpi_processor_get_throttling(pr);
 	/* restore the previous state */
-	set_cpus_allowed_ptr(current, &saved_mask);
+	set_cpus_allowed_ptr(current, saved_mask);
+	free_cpumask_var(saved_mask);
 
 	return ret;
 }
@@ -986,13 +997,13 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
-	cpumask_t saved_mask;
+	cpumask_var_t saved_mask;
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;
 	struct acpi_processor_throttling *p_throttling;
 	struct throttling_tstate t_state;
-	cpumask_t online_throttling_cpus;
+	cpumask_var_t online_throttling_cpus;
 
 	if (!pr)
 		return -EINVAL;
@@ -1003,17 +1014,25 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
 		return -EINVAL;
 
-	saved_mask = current->cpus_allowed;
+	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
+		free_cpumask_var(saved_mask);
+		return -ENOMEM;
+	}
+
+	cpumask_copy(saved_mask, &current->cpus_allowed);
 	t_state.target_state = state;
 	p_throttling = &(pr->throttling);
-	cpus_and(online_throttling_cpus, cpu_online_map,
+	cpumask_and(online_throttling_cpus, cpu_online_mask,
 			p_throttling->shared_cpu_map);
 	/*
 	 * The throttling notifier will be called for every
 	 * affected cpu in order to get one proper T-state.
 	 * The notifier event is THROTTLING_PRECHANGE.
 	 */
-	for_each_cpu_mask_nr(i, online_throttling_cpus) {
+	for_each_cpu(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
 							&t_state);
@@ -1025,7 +1044,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * it can be called only for the cpu pointed by pr.
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+		/* FIXME: use work_on_cpu() */
+		set_cpus_allowed_ptr(current, cpumask_of(pr->id));
 		ret = p_throttling->acpi_processor_set_throttling(pr,
 				t_state.target_state);
 	} else {
@@ -1034,7 +1054,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 		 * it is necessary to set T-state for every affected
 		 * cpus.
 		 */
-		for_each_cpu_mask_nr(i, online_throttling_cpus) {
+		for_each_cpu(i, online_throttling_cpus) {
 			match_pr = per_cpu(processors, i);
 			/*
 			 * If the pointer is invalid, we will report the
@@ -1056,7 +1076,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 				continue;
 			}
 			t_state.cpu = i;
-			set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+			/* FIXME: use work_on_cpu() */
+			set_cpus_allowed_ptr(current, cpumask_of(i));
 			ret = match_pr->throttling.
 				acpi_processor_set_throttling(
 				match_pr, t_state.target_state);
@@ -1068,13 +1089,16 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * affected cpu to update the T-states.
 	 * The notifier event is THROTTLING_POSTCHANGE
 	 */
-	for_each_cpu_mask_nr(i, online_throttling_cpus) {
+	for_each_cpu(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
 				&t_state);
 	}
 	/* restore the previous state */
-	set_cpus_allowed_ptr(current, &saved_mask);
+	/* FIXME: use work_on_cpu() */
+	set_cpus_allowed_ptr(current, saved_mask);
+	free_cpumask_var(online_throttling_cpus);
+	free_cpumask_var(saved_mask);
 	return ret;
 }
 
@@ -1120,7 +1144,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 	if (acpi_processor_get_tsd(pr)) {
 		pthrottling = &pr->throttling;
 		pthrottling->tsd_valid_flag = 0;
-		cpu_set(pr->id, pthrottling->shared_cpu_map);
+		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 	}
 
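The FIXME comments added next to the set_cpus_allowed_ptr() calls point at work_on_cpu() as the intended follow-up: instead of migrating the current task to the target CPU and restoring saved_mask afterwards, the per-CPU callback would run on a workqueue bound to that CPU. A rough sketch of that direction follows; the request struct and wrapper are hypothetical and not part of this patch:

#include <linux/workqueue.h>
#include <acpi/processor.h>

/* Hypothetical argument bundle for the work_on_cpu() callback. */
struct throttling_req {
	struct acpi_processor *pr;
	int target_state;
};

/* Runs on req->pr->id, so no saved_mask juggling is needed. */
static long do_set_throttling(void *data)
{
	struct throttling_req *req = data;

	return req->pr->throttling.acpi_processor_set_throttling(req->pr,
						req->target_state);
}

A caller would then do something like `ret = work_on_cpu(pr->id, do_set_throttling, &req);`, which makes the saved_mask copy/restore and its cpumask_var_t allocation unnecessary.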
