author		Lan Tianyu <tianyu.lan@intel.com>	2014-02-26 08:03:05 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-03-07 00:30:09 -0500
commit		44ae49aa88441344fcb35a1b0d44b54aa08a6d3d (patch)
tree		46d3af541bea706ec907dfc9c3fbd12bcd166f19 /drivers/acpi
parent		3cb947fdcff6247559da0b23dc3ed6272cff8372 (diff)
ACPI / processor: Rework processor throttling with work_on_cpu()
commit f3ca4164529b875374c410193bbbac0ee960895f upstream.
acpi_processor_set_throttling() uses set_cpus_allowed_ptr() to make
sure that the (struct acpi_processor)->acpi_processor_set_throttling()
callback will run on the right CPU. However, the function may be
called from a worker thread already bound to a different CPU in which
case that won't work.
Make acpi_processor_set_throttling() use work_on_cpu() as appropriate
instead of abusing set_cpus_allowed_ptr().
Reported-and-tested-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Lan Tianyu <tianyu.lan@intel.com>
[rjw: Changelog]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
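
The fix adopts the kernel's work_on_cpu() helper: the throttling callback is queued as a work item on the target CPU and the caller sleeps until it returns, instead of temporarily rebinding the calling task with set_cpus_allowed_ptr(). Below is a minimal sketch of that pattern; the argument struct and callback names mirror the diff, while the set_tstate_on_cpu() wrapper is only an illustrative name and the usual kernel build context (linux/workqueue.h, the ACPI processor types from acpi/processor.h) is assumed.

#include <acpi/processor.h>	/* struct acpi_processor (assumed include) */
#include <linux/workqueue.h>	/* work_on_cpu() */

/* Argument block handed to the per-CPU callback (same fields as in the patch). */
struct acpi_processor_throttling_arg {
	struct acpi_processor *pr;
	int target_state;
	bool force;
};

/* Runs on the CPU selected by work_on_cpu(); must have the long (*)(void *) type. */
static long acpi_processor_throttling_fn(void *data)
{
	struct acpi_processor_throttling_arg *arg = data;
	struct acpi_processor *pr = arg->pr;

	return pr->throttling.acpi_processor_set_throttling(pr,
			arg->target_state, arg->force);
}

/* Illustrative wrapper: run the callback on pr->id without touching the
 * caller's CPU affinity. */
static int set_tstate_on_cpu(struct acpi_processor *pr, int state, bool force)
{
	struct acpi_processor_throttling_arg arg = {
		.pr		= pr,
		.target_state	= state,
		.force		= force,
	};

	/* Queues the function on pr->id and sleeps until it has run there. */
	return work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
}

Because work_on_cpu() waits for the work item to complete, it may only be called from process context, but the same was true of the set_cpus_allowed_ptr() approach it replaces; the difference is that it still works when the caller is a worker thread already bound to another CPU, which is exactly the failure reported.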
Diffstat (limited to 'drivers/acpi')
-rw-r--r--	drivers/acpi/processor_throttling.c	69
1 file changed, 32 insertions, 37 deletions
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index e7dd2c1fee79..5e47d7bf4745 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -59,6 +59,12 @@ struct throttling_tstate {
 	int target_state;		/* target T-state */
 };
 
+struct acpi_processor_throttling_arg {
+	struct acpi_processor *pr;
+	int target_state;
+	bool force;
+};
+
 #define THROTTLING_PRECHANGE      (1)
 #define THROTTLING_POSTCHANGE     (2)
 
@@ -1063,16 +1069,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 	return 0;
 }
 
+static long acpi_processor_throttling_fn(void *data)
+{
+	struct acpi_processor_throttling_arg *arg = data;
+	struct acpi_processor *pr = arg->pr;
+
+	return pr->throttling.acpi_processor_set_throttling(pr,
+			arg->target_state, arg->force);
+}
+
 int acpi_processor_set_throttling(struct acpi_processor *pr,
 				  int state, bool force)
 {
-	cpumask_var_t saved_mask;
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;
 	struct acpi_processor_throttling *p_throttling;
+	struct acpi_processor_throttling_arg arg;
 	struct throttling_tstate t_state;
-	cpumask_var_t online_throttling_cpus;
 
 	if (!pr)
 		return -EINVAL;
@@ -1083,14 +1097,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
 		return -EINVAL;
 
-	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
-		return -ENOMEM;
-
-	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
-		free_cpumask_var(saved_mask);
-		return -ENOMEM;
-	}
-
 	if (cpu_is_offline(pr->id)) {
 		/*
 		 * the cpu pointed by pr->id is offline. Unnecessary to change
@@ -1099,17 +1105,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 		return -ENODEV;
 	}
 
-	cpumask_copy(saved_mask, &current->cpus_allowed);
 	t_state.target_state = state;
 	p_throttling = &(pr->throttling);
-	cpumask_and(online_throttling_cpus, cpu_online_mask,
-		    p_throttling->shared_cpu_map);
+
 	/*
 	 * The throttling notifier will be called for every
 	 * affected cpu in order to get one proper T-state.
 	 * The notifier event is THROTTLING_PRECHANGE.
 	 */
-	for_each_cpu(i, online_throttling_cpus) {
+	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
 							&t_state);
@@ -1121,21 +1125,18 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 	 * it can be called only for the cpu pointed by pr.
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		/* FIXME: use work_on_cpu() */
-		if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
-			/* Can't migrate to the pr->id CPU. Exit */
-			ret = -ENODEV;
-			goto exit;
-		}
-		ret = p_throttling->acpi_processor_set_throttling(pr,
-						t_state.target_state, force);
+		arg.pr = pr;
+		arg.target_state = state;
+		arg.force = force;
+		ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
 	} else {
 		/*
 		 * When the T-state coordination is SW_ALL or HW_ALL,
 		 * it is necessary to set T-state for every affected
 		 * cpus.
 		 */
-		for_each_cpu(i, online_throttling_cpus) {
+		for_each_cpu_and(i, cpu_online_mask,
+		    p_throttling->shared_cpu_map) {
 			match_pr = per_cpu(processors, i);
 			/*
 			 * If the pointer is invalid, we will report the
@@ -1156,13 +1157,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 					"on CPU %d\n", i));
 				continue;
 			}
-			t_state.cpu = i;
-			/* FIXME: use work_on_cpu() */
-			if (set_cpus_allowed_ptr(current, cpumask_of(i)))
-				continue;
-			ret = match_pr->throttling.
-				acpi_processor_set_throttling(
-				match_pr, t_state.target_state, force);
+
+			arg.pr = match_pr;
+			arg.target_state = state;
+			arg.force = force;
+			ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
+				&arg);
 		}
 	}
 	/*
@@ -1171,17 +1171,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
 	 * affected cpu to update the T-states.
 	 * The notifier event is THROTTLING_POSTCHANGE
 	 */
-	for_each_cpu(i, online_throttling_cpus) {
+	for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
 							&t_state);
 	}
-	/* restore the previous state */
-	/* FIXME: use work_on_cpu() */
-	set_cpus_allowed_ptr(current, saved_mask);
-exit:
-	free_cpumask_var(online_throttling_cpus);
-	free_cpumask_var(saved_mask);
+
 	return ret;
 }
 