Diffstat (limited to 'drivers/acpi/cppc_acpi.c')
-rw-r--r-- | drivers/acpi/cppc_acpi.c | 664 |
1 file changed, 513 insertions(+), 151 deletions(-)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 2e981732805b..d0d0504b7c89 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -40,15 +40,48 @@ | |||
40 | #include <linux/cpufreq.h> | 40 | #include <linux/cpufreq.h> |
41 | #include <linux/delay.h> | 41 | #include <linux/delay.h> |
42 | #include <linux/ktime.h> | 42 | #include <linux/ktime.h> |
43 | #include <linux/rwsem.h> | ||
44 | #include <linux/wait.h> | ||
43 | 45 | ||
44 | #include <acpi/cppc_acpi.h> | 46 | #include <acpi/cppc_acpi.h> |
45 | /* | 47 | |
46 | * Lock to provide mutually exclusive access to the PCC | 48 | struct cppc_pcc_data { |
47 | * channel. e.g. When the remote updates the shared region | 49 | struct mbox_chan *pcc_channel; |
48 | * with new data, the reader needs to be protected from | 50 | void __iomem *pcc_comm_addr; |
49 | * other CPUs activity on the same channel. | 51 | int pcc_subspace_idx; |
50 | */ | 52 | bool pcc_channel_acquired; |
51 | static DEFINE_SPINLOCK(pcc_lock); | 53 | ktime_t deadline; |
54 | unsigned int pcc_mpar, pcc_mrtt, pcc_nominal; | ||
55 | |||
56 | bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */ | ||
57 | bool platform_owns_pcc; /* Ownership of PCC subspace */ | ||
58 | unsigned int pcc_write_cnt; /* Running count of PCC write commands */ | ||
59 | |||
60 | /* | ||
61 | * Lock to provide controlled access to the PCC channel. | ||
62 | * | ||
63 | * For performance-critical use cases (currently cppc_set_perf), | ||
64 | * we need to take the read_lock and check whether the channel belongs | ||
65 | * to the OSPM before reading from or writing to the PCC subspace. | ||
66 | * We need to take the write_lock before transferring channel | ||
67 | * ownership to the platform via a doorbell. | ||
68 | * This allows us to batch a number of CPPC requests if they happen | ||
69 | * to originate at about the same time. | ||
70 | * | ||
71 | * For non-performance-critical use cases (init), | ||
72 | * take the write_lock for all purposes, which gives exclusive access. | ||
73 | */ | ||
74 | struct rw_semaphore pcc_lock; | ||
75 | |||
76 | /* Wait queue for CPUs whose requests were batched */ | ||
77 | wait_queue_head_t pcc_write_wait_q; | ||
78 | }; | ||
79 | |||
80 | /* Structure to represent the single PCC channel */ | ||
81 | static struct cppc_pcc_data pcc_data = { | ||
82 | .pcc_subspace_idx = -1, | ||
83 | .platform_owns_pcc = true, | ||
84 | }; | ||
52 | 85 | ||
53 | /* | 86 | /* |
54 | * The cpc_desc structure contains the ACPI register details | 87 | * The cpc_desc structure contains the ACPI register details |
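The rw_semaphore and wait queue added in the hunk above implement a two-phase batching scheme that cppc_set_perf() relies on later in this patch: many CPUs stage their register writes under the read lock, and a single CPU then rings the doorbell under the write lock. A minimal standalone sketch of that idiom, assuming hypothetical names (batch_lock, pending_write, stage_request, flush_batch) that do not appear in the driver:

#include <linux/rwsem.h>

static DECLARE_RWSEM(batch_lock);	/* plays the role of pcc_data.pcc_lock */
static bool pending_write;		/* mirrors pcc_data.pending_pcc_write_cmd */

/* Phase-I: any number of CPUs may stage their requests concurrently. */
static void stage_request(void)
{
	down_read(&batch_lock);
	pending_write = true;
	/* ... write the desired value into the shared region here ... */
	up_read(&batch_lock);
}

/* Phase-II: exactly one CPU flushes the whole batch to the platform. */
static void flush_batch(void)
{
	if (down_write_trylock(&batch_lock)) {
		if (pending_write) {
			pending_write = false;
			/* ... ring the doorbell once for all staged writes ... */
		}
		up_write(&batch_lock);
	}
	/*
	 * If the trylock fails, some CPU still in Phase-I (or already
	 * holding the write lock) will flush the batch on our behalf.
	 */
}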
@@ -59,18 +92,25 @@ static DEFINE_SPINLOCK(pcc_lock); | |||
59 | */ | 92 | */ |
60 | static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); | 93 | static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); |
61 | 94 | ||
62 | /* This layer handles all the PCC specifics for CPPC. */ | ||
63 | static struct mbox_chan *pcc_channel; | ||
64 | static void __iomem *pcc_comm_addr; | ||
65 | static u64 comm_base_addr; | ||
66 | static int pcc_subspace_idx = -1; | ||
67 | static bool pcc_channel_acquired; | ||
68 | static ktime_t deadline; | ||
69 | static unsigned int pcc_mpar, pcc_mrtt; | ||
70 | |||
71 | /* pcc mapped address + header size + offset within PCC subspace */ | 95 | /* pcc mapped address + header size + offset within PCC subspace */ |
72 | #define GET_PCC_VADDR(offs) (pcc_comm_addr + 0x8 + (offs)) | 96 | #define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs)) |
73 | 97 | ||
98 | /* Check if a CPC register is in PCC */ | ||
99 | #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ | ||
100 | (cpc)->cpc_entry.reg.space_id == \ | ||
101 | ACPI_ADR_SPACE_PLATFORM_COMM) | ||
102 | |||
103 | /* Evaluates to True if reg is a NULL register descriptor */ | ||
104 | #define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \ | ||
105 | (reg)->address == 0 && \ | ||
106 | (reg)->bit_width == 0 && \ | ||
107 | (reg)->bit_offset == 0 && \ | ||
108 | (reg)->access_width == 0) | ||
109 | |||
110 | /* Evaluates to True if an optional cpc field is supported */ | ||
111 | #define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \ | ||
112 | !!(cpc)->cpc_entry.int_value : \ | ||
113 | !IS_NULL_REG(&(cpc)->cpc_entry.reg)) | ||
74 | /* | 114 | /* |
75 | * Arbitrary Retries in case the remote processor is slow to respond | 115 | * Arbitrary Retries in case the remote processor is slow to respond |
76 | * to PCC commands. Keeping it high enough to cover emulators where | 116 | * to PCC commands. Keeping it high enough to cover emulators where |
@@ -78,11 +118,79 @@ static unsigned int pcc_mpar, pcc_mrtt; | |||
78 | */ | 118 | */ |
79 | #define NUM_RETRIES 500 | 119 | #define NUM_RETRIES 500 |
80 | 120 | ||
81 | static int check_pcc_chan(void) | 121 | struct cppc_attr { |
122 | struct attribute attr; | ||
123 | ssize_t (*show)(struct kobject *kobj, | ||
124 | struct attribute *attr, char *buf); | ||
125 | ssize_t (*store)(struct kobject *kobj, | ||
126 | struct attribute *attr, const char *c, ssize_t count); | ||
127 | }; | ||
128 | |||
129 | #define define_one_cppc_ro(_name) \ | ||
130 | static struct cppc_attr _name = \ | ||
131 | __ATTR(_name, 0444, show_##_name, NULL) | ||
132 | |||
133 | #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj) | ||
134 | |||
135 | static ssize_t show_feedback_ctrs(struct kobject *kobj, | ||
136 | struct attribute *attr, char *buf) | ||
137 | { | ||
138 | struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); | ||
139 | struct cppc_perf_fb_ctrs fb_ctrs = {0}; | ||
140 | |||
141 | cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs); | ||
142 | |||
143 | return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n", | ||
144 | fb_ctrs.reference, fb_ctrs.delivered); | ||
145 | } | ||
146 | define_one_cppc_ro(feedback_ctrs); | ||
147 | |||
148 | static ssize_t show_reference_perf(struct kobject *kobj, | ||
149 | struct attribute *attr, char *buf) | ||
150 | { | ||
151 | struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); | ||
152 | struct cppc_perf_fb_ctrs fb_ctrs = {0}; | ||
153 | |||
154 | cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs); | ||
155 | |||
156 | return scnprintf(buf, PAGE_SIZE, "%llu\n", | ||
157 | fb_ctrs.reference_perf); | ||
158 | } | ||
159 | define_one_cppc_ro(reference_perf); | ||
160 | |||
161 | static ssize_t show_wraparound_time(struct kobject *kobj, | ||
162 | struct attribute *attr, char *buf) | ||
163 | { | ||
164 | struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); | ||
165 | struct cppc_perf_fb_ctrs fb_ctrs = {0}; | ||
166 | |||
167 | cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs); | ||
168 | |||
169 | return scnprintf(buf, PAGE_SIZE, "%llu\n", fb_ctrs.ctr_wrap_time); | ||
170 | |||
171 | } | ||
172 | define_one_cppc_ro(wraparound_time); | ||
173 | |||
174 | static struct attribute *cppc_attrs[] = { | ||
175 | &feedback_ctrs.attr, | ||
176 | &reference_perf.attr, | ||
177 | &wraparound_time.attr, | ||
178 | NULL | ||
179 | }; | ||
180 | |||
181 | static struct kobj_type cppc_ktype = { | ||
182 | .sysfs_ops = &kobj_sysfs_ops, | ||
183 | .default_attrs = cppc_attrs, | ||
184 | }; | ||
185 | |||
186 | static int check_pcc_chan(bool chk_err_bit) | ||
82 | { | 187 | { |
83 | int ret = -EIO; | 188 | int ret = -EIO, status = 0; |
84 | struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr; | 189 | struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_data.pcc_comm_addr; |
85 | ktime_t next_deadline = ktime_add(ktime_get(), deadline); | 190 | ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline); |
191 | |||
192 | if (!pcc_data.platform_owns_pcc) | ||
193 | return 0; | ||
86 | 194 | ||
87 | /* Retry in case the remote processor was too slow to catch up. */ | 195 | /* Retry in case the remote processor was too slow to catch up. */ |
88 | while (!ktime_after(ktime_get(), next_deadline)) { | 196 | while (!ktime_after(ktime_get(), next_deadline)) { |
@@ -91,8 +199,11 @@ static int check_pcc_chan(void) | |||
91 | * platform and should have set the command completion bit when | 199 | * platform and should have set the command completion bit when |
92 | * PCC can be used by OSPM | 200 | * PCC can be used by OSPM |
93 | */ | 201 | */ |
94 | if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) { | 202 | status = readw_relaxed(&generic_comm_base->status); |
203 | if (status & PCC_CMD_COMPLETE_MASK) { | ||
95 | ret = 0; | 204 | ret = 0; |
205 | if (chk_err_bit && (status & PCC_ERROR_MASK)) | ||
206 | ret = -EIO; | ||
96 | break; | 207 | break; |
97 | } | 208 | } |
98 | /* | 209 | /* |
@@ -102,14 +213,23 @@ static int check_pcc_chan(void) | |||
102 | udelay(3); | 213 | udelay(3); |
103 | } | 214 | } |
104 | 215 | ||
216 | if (likely(!ret)) | ||
217 | pcc_data.platform_owns_pcc = false; | ||
218 | else | ||
219 | pr_err("PCC check channel failed. Status=%x\n", status); | ||
220 | |||
105 | return ret; | 221 | return ret; |
106 | } | 222 | } |
107 | 223 | ||
224 | /* | ||
225 | * This function transfers ownership of the PCC channel to the platform, | ||
226 | * so it must be called while holding the write_lock (pcc_lock). | ||
227 | */ | ||
108 | static int send_pcc_cmd(u16 cmd) | 228 | static int send_pcc_cmd(u16 cmd) |
109 | { | 229 | { |
110 | int ret = -EIO; | 230 | int ret = -EIO, i; |
111 | struct acpi_pcct_shared_memory *generic_comm_base = | 231 | struct acpi_pcct_shared_memory *generic_comm_base = |
112 | (struct acpi_pcct_shared_memory *) pcc_comm_addr; | 232 | (struct acpi_pcct_shared_memory *) pcc_data.pcc_comm_addr; |
113 | static ktime_t last_cmd_cmpl_time, last_mpar_reset; | 233 | static ktime_t last_cmd_cmpl_time, last_mpar_reset; |
114 | static int mpar_count; | 234 | static int mpar_count; |
115 | unsigned int time_delta; | 235 | unsigned int time_delta; |
@@ -119,20 +239,29 @@ static int send_pcc_cmd(u16 cmd) | |||
119 | * the channel before writing to PCC space | 239 | * the channel before writing to PCC space |
120 | */ | 240 | */ |
121 | if (cmd == CMD_READ) { | 241 | if (cmd == CMD_READ) { |
122 | ret = check_pcc_chan(); | 242 | /* |
243 | * If there are pending cpc_writes, then we stole the channel | ||
244 | * before write completion, so first send a WRITE command to | ||
245 | * platform | ||
246 | */ | ||
247 | if (pcc_data.pending_pcc_write_cmd) | ||
248 | send_pcc_cmd(CMD_WRITE); | ||
249 | |||
250 | ret = check_pcc_chan(false); | ||
123 | if (ret) | 251 | if (ret) |
124 | return ret; | 252 | goto end; |
125 | } | 253 | } else /* CMD_WRITE */ |
254 | pcc_data.pending_pcc_write_cmd = FALSE; | ||
126 | 255 | ||
127 | /* | 256 | /* |
128 | * Handle the Minimum Request Turnaround Time(MRTT) | 257 | * Handle the Minimum Request Turnaround Time(MRTT) |
129 | * "The minimum amount of time that OSPM must wait after the completion | 258 | * "The minimum amount of time that OSPM must wait after the completion |
130 | * of a command before issuing the next command, in microseconds" | 259 | * of a command before issuing the next command, in microseconds" |
131 | */ | 260 | */ |
132 | if (pcc_mrtt) { | 261 | if (pcc_data.pcc_mrtt) { |
133 | time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time); | 262 | time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time); |
134 | if (pcc_mrtt > time_delta) | 263 | if (pcc_data.pcc_mrtt > time_delta) |
135 | udelay(pcc_mrtt - time_delta); | 264 | udelay(pcc_data.pcc_mrtt - time_delta); |
136 | } | 265 | } |
137 | 266 | ||
138 | /* | 267 | /* |
@@ -146,15 +275,16 @@ static int send_pcc_cmd(u16 cmd) | |||
146 | * not send the request to the platform after hitting the MPAR limit in | 275 | * not send the request to the platform after hitting the MPAR limit in |
147 | * any 60s window | 276 | * any 60s window |
148 | */ | 277 | */ |
149 | if (pcc_mpar) { | 278 | if (pcc_data.pcc_mpar) { |
150 | if (mpar_count == 0) { | 279 | if (mpar_count == 0) { |
151 | time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset); | 280 | time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset); |
152 | if (time_delta < 60 * MSEC_PER_SEC) { | 281 | if (time_delta < 60 * MSEC_PER_SEC) { |
153 | pr_debug("PCC cmd not sent due to MPAR limit"); | 282 | pr_debug("PCC cmd not sent due to MPAR limit"); |
154 | return -EIO; | 283 | ret = -EIO; |
284 | goto end; | ||
155 | } | 285 | } |
156 | last_mpar_reset = ktime_get(); | 286 | last_mpar_reset = ktime_get(); |
157 | mpar_count = pcc_mpar; | 287 | mpar_count = pcc_data.pcc_mpar; |
158 | } | 288 | } |
159 | mpar_count--; | 289 | mpar_count--; |
160 | } | 290 | } |
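A worked example of the two throttles handled above, with illustrative numbers rather than values from any real PCCT: if pcc_mrtt is 60, a command issued 20 us after the previous completion is delayed by udelay(40) before the doorbell is rung; if pcc_mpar is 600, at most 600 commands are accepted per 60 s window, and once that budget (mpar_count) is used up further requests fail with -EIO until last_mpar_reset is more than 60 * MSEC_PER_SEC in the past.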
@@ -165,33 +295,43 @@ static int send_pcc_cmd(u16 cmd) | |||
165 | /* Flip CMD COMPLETE bit */ | 295 | /* Flip CMD COMPLETE bit */ |
166 | writew_relaxed(0, &generic_comm_base->status); | 296 | writew_relaxed(0, &generic_comm_base->status); |
167 | 297 | ||
298 | pcc_data.platform_owns_pcc = true; | ||
299 | |||
168 | /* Ring doorbell */ | 300 | /* Ring doorbell */ |
169 | ret = mbox_send_message(pcc_channel, &cmd); | 301 | ret = mbox_send_message(pcc_data.pcc_channel, &cmd); |
170 | if (ret < 0) { | 302 | if (ret < 0) { |
171 | pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n", | 303 | pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n", |
172 | cmd, ret); | 304 | cmd, ret); |
173 | return ret; | 305 | goto end; |
174 | } | 306 | } |
175 | 307 | ||
176 | /* | 308 | /* wait for completion and check for the PCC error bit */ |
177 | * For READs we need to ensure the cmd completed to ensure | 309 | ret = check_pcc_chan(true); |
178 | * the ensuing read()s can proceed. For WRITEs we dont care | 310 | |
179 | * because the actual write()s are done before coming here | 311 | if (pcc_data.pcc_mrtt) |
180 | * and the next READ or WRITE will check if the channel | 312 | last_cmd_cmpl_time = ktime_get(); |
181 | * is busy/free at the entry of this call. | 313 | |
182 | * | 314 | if (pcc_data.pcc_channel->mbox->txdone_irq) |
183 | * If Minimum Request Turnaround Time is non-zero, we need | 315 | mbox_chan_txdone(pcc_data.pcc_channel, ret); |
184 | * to record the completion time of both READ and WRITE | 316 | else |
185 | * command for proper handling of MRTT, so we need to check | 317 | mbox_client_txdone(pcc_data.pcc_channel, ret); |
186 | * for pcc_mrtt in addition to CMD_READ | 318 | |
187 | */ | 319 | end: |
188 | if (cmd == CMD_READ || pcc_mrtt) { | 320 | if (cmd == CMD_WRITE) { |
189 | ret = check_pcc_chan(); | 321 | if (unlikely(ret)) { |
190 | if (pcc_mrtt) | 322 | for_each_possible_cpu(i) { |
191 | last_cmd_cmpl_time = ktime_get(); | 323 | struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i); |
324 | if (!desc) | ||
325 | continue; | ||
326 | |||
327 | if (desc->write_cmd_id == pcc_data.pcc_write_cnt) | ||
328 | desc->write_cmd_status = ret; | ||
329 | } | ||
330 | } | ||
331 | pcc_data.pcc_write_cnt++; | ||
332 | wake_up_all(&pcc_data.pcc_write_wait_q); | ||
192 | } | 333 | } |
193 | 334 | ||
194 | mbox_client_txdone(pcc_channel, ret); | ||
195 | return ret; | 335 | return ret; |
196 | } | 336 | } |
197 | 337 | ||
@@ -272,13 +412,13 @@ end: | |||
272 | * | 412 | * |
273 | * Return: 0 for success or negative value for err. | 413 | * Return: 0 for success or negative value for err. |
274 | */ | 414 | */ |
275 | int acpi_get_psd_map(struct cpudata **all_cpu_data) | 415 | int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) |
276 | { | 416 | { |
277 | int count_target; | 417 | int count_target; |
278 | int retval = 0; | 418 | int retval = 0; |
279 | unsigned int i, j; | 419 | unsigned int i, j; |
280 | cpumask_var_t covered_cpus; | 420 | cpumask_var_t covered_cpus; |
281 | struct cpudata *pr, *match_pr; | 421 | struct cppc_cpudata *pr, *match_pr; |
282 | struct acpi_psd_package *pdomain; | 422 | struct acpi_psd_package *pdomain; |
283 | struct acpi_psd_package *match_pdomain; | 423 | struct acpi_psd_package *match_pdomain; |
284 | struct cpc_desc *cpc_ptr, *match_cpc_ptr; | 424 | struct cpc_desc *cpc_ptr, *match_cpc_ptr; |
@@ -394,14 +534,13 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map); | |||
394 | static int register_pcc_channel(int pcc_subspace_idx) | 534 | static int register_pcc_channel(int pcc_subspace_idx) |
395 | { | 535 | { |
396 | struct acpi_pcct_hw_reduced *cppc_ss; | 536 | struct acpi_pcct_hw_reduced *cppc_ss; |
397 | unsigned int len; | ||
398 | u64 usecs_lat; | 537 | u64 usecs_lat; |
399 | 538 | ||
400 | if (pcc_subspace_idx >= 0) { | 539 | if (pcc_subspace_idx >= 0) { |
401 | pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl, | 540 | pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl, |
402 | pcc_subspace_idx); | 541 | pcc_subspace_idx); |
403 | 542 | ||
404 | if (IS_ERR(pcc_channel)) { | 543 | if (IS_ERR(pcc_data.pcc_channel)) { |
405 | pr_err("Failed to find PCC communication channel\n"); | 544 | pr_err("Failed to find PCC communication channel\n"); |
406 | return -ENODEV; | 545 | return -ENODEV; |
407 | } | 546 | } |
@@ -412,7 +551,7 @@ static int register_pcc_channel(int pcc_subspace_idx) | |||
412 | * PCC channels) and stored pointers to the | 551 | * PCC channels) and stored pointers to the |
413 | * subspace communication region in con_priv. | 552 | * subspace communication region in con_priv. |
414 | */ | 553 | */ |
415 | cppc_ss = pcc_channel->con_priv; | 554 | cppc_ss = (pcc_data.pcc_channel)->con_priv; |
416 | 555 | ||
417 | if (!cppc_ss) { | 556 | if (!cppc_ss) { |
418 | pr_err("No PCC subspace found for CPPC\n"); | 557 | pr_err("No PCC subspace found for CPPC\n"); |
@@ -420,35 +559,42 @@ static int register_pcc_channel(int pcc_subspace_idx) | |||
420 | } | 559 | } |
421 | 560 | ||
422 | /* | 561 | /* |
423 | * This is the shared communication region | ||
424 | * for the OS and Platform to communicate over. | ||
425 | */ | ||
426 | comm_base_addr = cppc_ss->base_address; | ||
427 | len = cppc_ss->length; | ||
428 | |||
429 | /* | ||
430 | * cppc_ss->latency is just a Nominal value. In reality | 562 | * cppc_ss->latency is just a Nominal value. In reality |
431 | * the remote processor could be much slower to reply. | 563 | * the remote processor could be much slower to reply. |
432 | * So add an arbitrary amount of wait on top of Nominal. | 564 | * So add an arbitrary amount of wait on top of Nominal. |
433 | */ | 565 | */ |
434 | usecs_lat = NUM_RETRIES * cppc_ss->latency; | 566 | usecs_lat = NUM_RETRIES * cppc_ss->latency; |
435 | deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC); | 567 | pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC); |
436 | pcc_mrtt = cppc_ss->min_turnaround_time; | 568 | pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time; |
437 | pcc_mpar = cppc_ss->max_access_rate; | 569 | pcc_data.pcc_mpar = cppc_ss->max_access_rate; |
570 | pcc_data.pcc_nominal = cppc_ss->latency; | ||
438 | 571 | ||
439 | pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len); | 572 | pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length); |
440 | if (!pcc_comm_addr) { | 573 | if (!pcc_data.pcc_comm_addr) { |
441 | pr_err("Failed to ioremap PCC comm region mem\n"); | 574 | pr_err("Failed to ioremap PCC comm region mem\n"); |
442 | return -ENOMEM; | 575 | return -ENOMEM; |
443 | } | 576 | } |
444 | 577 | ||
445 | /* Set flag so that we dont come here for each CPU. */ | 578 | /* Set flag so that we dont come here for each CPU. */ |
446 | pcc_channel_acquired = true; | 579 | pcc_data.pcc_channel_acquired = true; |
447 | } | 580 | } |
448 | 581 | ||
449 | return 0; | 582 | return 0; |
450 | } | 583 | } |
451 | 584 | ||
585 | /** | ||
586 | * cpc_ffh_supported() - check if FFH reading supported | ||
587 | * | ||
588 | * Check if the architecture has support for functional fixed hardware | ||
589 | * read/write capability. | ||
590 | * | ||
591 | * Return: true for supported, false for not supported | ||
592 | */ | ||
593 | bool __weak cpc_ffh_supported(void) | ||
594 | { | ||
595 | return false; | ||
596 | } | ||
597 | |||
452 | /* | 598 | /* |
453 | * An example CPC table looks like the following. | 599 | * An example CPC table looks like the following. |
454 | * | 600 | * |
@@ -507,6 +653,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) | |||
507 | union acpi_object *out_obj, *cpc_obj; | 653 | union acpi_object *out_obj, *cpc_obj; |
508 | struct cpc_desc *cpc_ptr; | 654 | struct cpc_desc *cpc_ptr; |
509 | struct cpc_reg *gas_t; | 655 | struct cpc_reg *gas_t; |
656 | struct device *cpu_dev; | ||
510 | acpi_handle handle = pr->handle; | 657 | acpi_handle handle = pr->handle; |
511 | unsigned int num_ent, i, cpc_rev; | 658 | unsigned int num_ent, i, cpc_rev; |
512 | acpi_status status; | 659 | acpi_status status; |
@@ -545,6 +692,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) | |||
545 | goto out_free; | 692 | goto out_free; |
546 | } | 693 | } |
547 | 694 | ||
695 | cpc_ptr->num_entries = num_ent; | ||
696 | |||
548 | /* Second entry should be revision. */ | 697 | /* Second entry should be revision. */ |
549 | cpc_obj = &out_obj->package.elements[1]; | 698 | cpc_obj = &out_obj->package.elements[1]; |
550 | if (cpc_obj->type == ACPI_TYPE_INTEGER) { | 699 | if (cpc_obj->type == ACPI_TYPE_INTEGER) { |
@@ -579,16 +728,27 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) | |||
579 | * so extract it only once. | 728 | * so extract it only once. |
580 | */ | 729 | */ |
581 | if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { | 730 | if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { |
582 | if (pcc_subspace_idx < 0) | 731 | if (pcc_data.pcc_subspace_idx < 0) |
583 | pcc_subspace_idx = gas_t->access_width; | 732 | pcc_data.pcc_subspace_idx = gas_t->access_width; |
584 | else if (pcc_subspace_idx != gas_t->access_width) { | 733 | else if (pcc_data.pcc_subspace_idx != gas_t->access_width) { |
585 | pr_debug("Mismatched PCC ids.\n"); | 734 | pr_debug("Mismatched PCC ids.\n"); |
586 | goto out_free; | 735 | goto out_free; |
587 | } | 736 | } |
588 | } else if (gas_t->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) { | 737 | } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { |
589 | /* Support only PCC and SYS MEM type regs */ | 738 | if (gas_t->address) { |
590 | pr_debug("Unsupported register type: %d\n", gas_t->space_id); | 739 | void __iomem *addr; |
591 | goto out_free; | 740 | |
741 | addr = ioremap(gas_t->address, gas_t->bit_width/8); | ||
742 | if (!addr) | ||
743 | goto out_free; | ||
744 | cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr; | ||
745 | } | ||
746 | } else { | ||
747 | if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) { | ||
748 | /* Support only PCC, SYS MEM and FFH type regs */ | ||
749 | pr_debug("Unsupported register type: %d\n", gas_t->space_id); | ||
750 | goto out_free; | ||
751 | } | ||
592 | } | 752 | } |
593 | 753 | ||
594 | cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER; | 754 | cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER; |
@@ -607,10 +767,13 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) | |||
607 | goto out_free; | 767 | goto out_free; |
608 | 768 | ||
609 | /* Register PCC channel once for all CPUs. */ | 769 | /* Register PCC channel once for all CPUs. */ |
610 | if (!pcc_channel_acquired) { | 770 | if (!pcc_data.pcc_channel_acquired) { |
611 | ret = register_pcc_channel(pcc_subspace_idx); | 771 | ret = register_pcc_channel(pcc_data.pcc_subspace_idx); |
612 | if (ret) | 772 | if (ret) |
613 | goto out_free; | 773 | goto out_free; |
774 | |||
775 | init_rwsem(&pcc_data.pcc_lock); | ||
776 | init_waitqueue_head(&pcc_data.pcc_write_wait_q); | ||
614 | } | 777 | } |
615 | 778 | ||
616 | /* Plug PSD data into this CPUs CPC descriptor. */ | 779 | /* Plug PSD data into this CPUs CPC descriptor. */ |
@@ -619,10 +782,27 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) | |||
619 | /* Everything looks okay */ | 782 | /* Everything looks okay */ |
620 | pr_debug("Parsed CPC struct for CPU: %d\n", pr->id); | 783 | pr_debug("Parsed CPC struct for CPU: %d\n", pr->id); |
621 | 784 | ||
785 | /* Add a per-logical-CPU node for reading its feedback counters. */ | ||
786 | cpu_dev = get_cpu_device(pr->id); | ||
787 | if (!cpu_dev) | ||
788 | goto out_free; | ||
789 | |||
790 | ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj, | ||
791 | "acpi_cppc"); | ||
792 | if (ret) | ||
793 | goto out_free; | ||
794 | |||
622 | kfree(output.pointer); | 795 | kfree(output.pointer); |
623 | return 0; | 796 | return 0; |
624 | 797 | ||
625 | out_free: | 798 | out_free: |
799 | /* Free all the mapped sys mem areas for this CPU */ | ||
800 | for (i = 2; i < cpc_ptr->num_entries; i++) { | ||
801 | void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; | ||
802 | |||
803 | if (addr) | ||
804 | iounmap(addr); | ||
805 | } | ||
626 | kfree(cpc_ptr); | 806 | kfree(cpc_ptr); |
627 | 807 | ||
628 | out_buf_free: | 808 | out_buf_free: |
@@ -640,26 +820,82 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe); | |||
640 | void acpi_cppc_processor_exit(struct acpi_processor *pr) | 820 | void acpi_cppc_processor_exit(struct acpi_processor *pr) |
641 | { | 821 | { |
642 | struct cpc_desc *cpc_ptr; | 822 | struct cpc_desc *cpc_ptr; |
823 | unsigned int i; | ||
824 | void __iomem *addr; | ||
825 | |||
643 | cpc_ptr = per_cpu(cpc_desc_ptr, pr->id); | 826 | cpc_ptr = per_cpu(cpc_desc_ptr, pr->id); |
827 | |||
828 | /* Free all the mapped sys mem areas for this CPU */ | ||
829 | for (i = 2; i < cpc_ptr->num_entries; i++) { | ||
830 | addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; | ||
831 | if (addr) | ||
832 | iounmap(addr); | ||
833 | } | ||
834 | |||
835 | kobject_put(&cpc_ptr->kobj); | ||
644 | kfree(cpc_ptr); | 836 | kfree(cpc_ptr); |
645 | } | 837 | } |
646 | EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit); | 838 | EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit); |
647 | 839 | ||
840 | /** | ||
841 | * cpc_read_ffh() - Read FFH register | ||
842 | * @cpunum: cpu number to read | ||
843 | * @reg: cppc register information | ||
844 | * @val: placeholder for return value | ||
845 | * | ||
846 | * Read bit_width bits from a specified address and bit_offset | ||
847 | * | ||
848 | * Return: 0 on success, or a negative error code on failure | ||
849 | */ | ||
850 | int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val) | ||
851 | { | ||
852 | return -ENOTSUPP; | ||
853 | } | ||
854 | |||
855 | /** | ||
856 | * cpc_write_ffh() - Write FFH register | ||
857 | * @cpunum: cpu number to write | ||
858 | * @reg: cppc register information | ||
859 | * @val: value to write | ||
860 | * | ||
861 | * Write value of bit_width bits to a specified address and bit_offset | ||
862 | * | ||
863 | * Return: 0 on success, or a negative error code on failure | ||
864 | */ | ||
865 | int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) | ||
866 | { | ||
867 | return -ENOTSUPP; | ||
868 | } | ||
869 | |||
648 | /* | 870 | /* |
649 | * Since cpc_read and cpc_write are called while holding pcc_lock, it should be | 871 | * Since cpc_read and cpc_write are called while holding pcc_lock, it should be |
650 | * as fast as possible. We have already mapped the PCC subspace during init, so | 872 | * as fast as possible. We have already mapped the PCC subspace during init, so |
651 | * we can directly write to it. | 873 | * we can directly write to it. |
652 | */ | 874 | */ |
653 | 875 | ||
654 | static int cpc_read(struct cpc_reg *reg, u64 *val) | 876 | static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) |
655 | { | 877 | { |
656 | int ret_val = 0; | 878 | int ret_val = 0; |
879 | void __iomem *vaddr = 0; | ||
880 | struct cpc_reg *reg = ®_res->cpc_entry.reg; | ||
881 | |||
882 | if (reg_res->type == ACPI_TYPE_INTEGER) { | ||
883 | *val = reg_res->cpc_entry.int_value; | ||
884 | return ret_val; | ||
885 | } | ||
657 | 886 | ||
658 | *val = 0; | 887 | *val = 0; |
659 | if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { | 888 | if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) |
660 | void __iomem *vaddr = GET_PCC_VADDR(reg->address); | 889 | vaddr = GET_PCC_VADDR(reg->address); |
890 | else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) | ||
891 | vaddr = reg_res->sys_mem_vaddr; | ||
892 | else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) | ||
893 | return cpc_read_ffh(cpu, reg, val); | ||
894 | else | ||
895 | return acpi_os_read_memory((acpi_physical_address)reg->address, | ||
896 | val, reg->bit_width); | ||
661 | 897 | ||
662 | switch (reg->bit_width) { | 898 | switch (reg->bit_width) { |
663 | case 8: | 899 | case 8: |
664 | *val = readb_relaxed(vaddr); | 900 | *val = readb_relaxed(vaddr); |
665 | break; | 901 | break; |
@@ -674,23 +910,30 @@ static int cpc_read(struct cpc_reg *reg, u64 *val) | |||
674 | break; | 910 | break; |
675 | default: | 911 | default: |
676 | pr_debug("Error: Cannot read %u bit width from PCC\n", | 912 | pr_debug("Error: Cannot read %u bit width from PCC\n", |
677 | reg->bit_width); | 913 | reg->bit_width); |
678 | ret_val = -EFAULT; | 914 | ret_val = -EFAULT; |
679 | } | 915 | } |
680 | } else | 916 | |
681 | ret_val = acpi_os_read_memory((acpi_physical_address)reg->address, | ||
682 | val, reg->bit_width); | ||
683 | return ret_val; | 917 | return ret_val; |
684 | } | 918 | } |
685 | 919 | ||
686 | static int cpc_write(struct cpc_reg *reg, u64 val) | 920 | static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) |
687 | { | 921 | { |
688 | int ret_val = 0; | 922 | int ret_val = 0; |
923 | void __iomem *vaddr = 0; | ||
924 | struct cpc_reg *reg = ®_res->cpc_entry.reg; | ||
925 | |||
926 | if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) | ||
927 | vaddr = GET_PCC_VADDR(reg->address); | ||
928 | else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) | ||
929 | vaddr = reg_res->sys_mem_vaddr; | ||
930 | else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) | ||
931 | return cpc_write_ffh(cpu, reg, val); | ||
932 | else | ||
933 | return acpi_os_write_memory((acpi_physical_address)reg->address, | ||
934 | val, reg->bit_width); | ||
689 | 935 | ||
690 | if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { | 936 | switch (reg->bit_width) { |
691 | void __iomem *vaddr = GET_PCC_VADDR(reg->address); | ||
692 | |||
693 | switch (reg->bit_width) { | ||
694 | case 8: | 937 | case 8: |
695 | writeb_relaxed(val, vaddr); | 938 | writeb_relaxed(val, vaddr); |
696 | break; | 939 | break; |
@@ -705,13 +948,11 @@ static int cpc_write(struct cpc_reg *reg, u64 val) | |||
705 | break; | 948 | break; |
706 | default: | 949 | default: |
707 | pr_debug("Error: Cannot write %u bit width to PCC\n", | 950 | pr_debug("Error: Cannot write %u bit width to PCC\n", |
708 | reg->bit_width); | 951 | reg->bit_width); |
709 | ret_val = -EFAULT; | 952 | ret_val = -EFAULT; |
710 | break; | 953 | break; |
711 | } | 954 | } |
712 | } else | 955 | |
713 | ret_val = acpi_os_write_memory((acpi_physical_address)reg->address, | ||
714 | val, reg->bit_width); | ||
715 | return ret_val; | 956 | return ret_val; |
716 | } | 957 | } |
717 | 958 | ||
@@ -727,8 +968,8 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) | |||
727 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); | 968 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); |
728 | struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf, | 969 | struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf, |
729 | *nom_perf; | 970 | *nom_perf; |
730 | u64 high, low, ref, nom; | 971 | u64 high, low, nom; |
731 | int ret = 0; | 972 | int ret = 0, regs_in_pcc = 0; |
732 | 973 | ||
733 | if (!cpc_desc) { | 974 | if (!cpc_desc) { |
734 | pr_debug("No CPC descriptor for CPU:%d\n", cpunum); | 975 | pr_debug("No CPC descriptor for CPU:%d\n", cpunum); |
@@ -740,13 +981,11 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) | |||
740 | ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF]; | 981 | ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF]; |
741 | nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF]; | 982 | nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF]; |
742 | 983 | ||
743 | spin_lock(&pcc_lock); | ||
744 | |||
745 | /* Are any of the regs PCC ?*/ | 984 | /* Are any of the regs PCC ?*/ |
746 | if ((highest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) || | 985 | if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) || |
747 | (lowest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) || | 986 | CPC_IN_PCC(ref_perf) || CPC_IN_PCC(nom_perf)) { |
748 | (ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) || | 987 | regs_in_pcc = 1; |
749 | (nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) { | 988 | down_write(&pcc_data.pcc_lock); |
750 | /* Ring doorbell once to update PCC subspace */ | 989 | /* Ring doorbell once to update PCC subspace */ |
751 | if (send_pcc_cmd(CMD_READ) < 0) { | 990 | if (send_pcc_cmd(CMD_READ) < 0) { |
752 | ret = -EIO; | 991 | ret = -EIO; |
@@ -754,26 +993,21 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) | |||
754 | } | 993 | } |
755 | } | 994 | } |
756 | 995 | ||
757 | cpc_read(&highest_reg->cpc_entry.reg, &high); | 996 | cpc_read(cpunum, highest_reg, &high); |
758 | perf_caps->highest_perf = high; | 997 | perf_caps->highest_perf = high; |
759 | 998 | ||
760 | cpc_read(&lowest_reg->cpc_entry.reg, &low); | 999 | cpc_read(cpunum, lowest_reg, &low); |
761 | perf_caps->lowest_perf = low; | 1000 | perf_caps->lowest_perf = low; |
762 | 1001 | ||
763 | cpc_read(&ref_perf->cpc_entry.reg, &ref); | 1002 | cpc_read(cpunum, nom_perf, &nom); |
764 | perf_caps->reference_perf = ref; | ||
765 | |||
766 | cpc_read(&nom_perf->cpc_entry.reg, &nom); | ||
767 | perf_caps->nominal_perf = nom; | 1003 | perf_caps->nominal_perf = nom; |
768 | 1004 | ||
769 | if (!ref) | ||
770 | perf_caps->reference_perf = perf_caps->nominal_perf; | ||
771 | |||
772 | if (!high || !low || !nom) | 1005 | if (!high || !low || !nom) |
773 | ret = -EFAULT; | 1006 | ret = -EFAULT; |
774 | 1007 | ||
775 | out_err: | 1008 | out_err: |
776 | spin_unlock(&pcc_lock); | 1009 | if (regs_in_pcc) |
1010 | up_write(&pcc_data.pcc_lock); | ||
777 | return ret; | 1011 | return ret; |
778 | } | 1012 | } |
779 | EXPORT_SYMBOL_GPL(cppc_get_perf_caps); | 1013 | EXPORT_SYMBOL_GPL(cppc_get_perf_caps); |
@@ -788,9 +1022,10 @@ EXPORT_SYMBOL_GPL(cppc_get_perf_caps); | |||
788 | int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) | 1022 | int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) |
789 | { | 1023 | { |
790 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); | 1024 | struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); |
791 | struct cpc_register_resource *delivered_reg, *reference_reg; | 1025 | struct cpc_register_resource *delivered_reg, *reference_reg, |
792 | u64 delivered, reference; | 1026 | *ref_perf_reg, *ctr_wrap_reg; |
793 | int ret = 0; | 1027 | u64 delivered, reference, ref_perf, ctr_wrap_time; |
1028 | int ret = 0, regs_in_pcc = 0; | ||
794 | 1029 | ||
795 | if (!cpc_desc) { | 1030 | if (!cpc_desc) { |
796 | pr_debug("No CPC descriptor for CPU:%d\n", cpunum); | 1031 | pr_debug("No CPC descriptor for CPU:%d\n", cpunum); |
@@ -799,12 +1034,21 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) | |||
799 | 1034 | ||
800 | delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR]; | 1035 | delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR]; |
801 | reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR]; | 1036 | reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR]; |
1037 | ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF]; | ||
1038 | ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME]; | ||
802 | 1039 | ||
803 | spin_lock(&pcc_lock); | 1040 | /* |
1041 | * If the reference perf register is not supported, then we should | ||
1042 | * use the nominal perf value. | ||
1043 | */ | ||
1044 | if (!CPC_SUPPORTED(ref_perf_reg)) | ||
1045 | ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; | ||
804 | 1046 | ||
805 | /* Are any of the regs PCC ?*/ | 1047 | /* Are any of the regs PCC ?*/ |
806 | if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) || | 1048 | if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) || |
807 | (reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) { | 1049 | CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) { |
1050 | down_write(&pcc_data.pcc_lock); | ||
1051 | regs_in_pcc = 1; | ||
808 | /* Ring doorbell once to update PCC subspace */ | 1052 | /* Ring doorbell once to update PCC subspace */ |
809 | if (send_pcc_cmd(CMD_READ) < 0) { | 1053 | if (send_pcc_cmd(CMD_READ) < 0) { |
810 | ret = -EIO; | 1054 | ret = -EIO; |
@@ -812,25 +1056,31 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) | |||
812 | } | 1056 | } |
813 | } | 1057 | } |
814 | 1058 | ||
815 | cpc_read(&delivered_reg->cpc_entry.reg, &delivered); | 1059 | cpc_read(cpunum, delivered_reg, &delivered); |
816 | cpc_read(&reference_reg->cpc_entry.reg, &reference); | 1060 | cpc_read(cpunum, reference_reg, &reference); |
1061 | cpc_read(cpunum, ref_perf_reg, &ref_perf); | ||
817 | 1062 | ||
818 | if (!delivered || !reference) { | 1063 | /* |
1064 | * Per spec, if ctr_wrap_time optional register is unsupported, then the | ||
1065 | * performance counters are assumed to never wrap during the lifetime of | ||
1066 | * platform | ||
1067 | */ | ||
1068 | ctr_wrap_time = (u64)(~((u64)0)); | ||
1069 | if (CPC_SUPPORTED(ctr_wrap_reg)) | ||
1070 | cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time); | ||
1071 | |||
1072 | if (!delivered || !reference || !ref_perf) { | ||
819 | ret = -EFAULT; | 1073 | ret = -EFAULT; |
820 | goto out_err; | 1074 | goto out_err; |
821 | } | 1075 | } |
822 | 1076 | ||
823 | perf_fb_ctrs->delivered = delivered; | 1077 | perf_fb_ctrs->delivered = delivered; |
824 | perf_fb_ctrs->reference = reference; | 1078 | perf_fb_ctrs->reference = reference; |
825 | 1079 | perf_fb_ctrs->reference_perf = ref_perf; | |
826 | perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered; | 1080 | perf_fb_ctrs->ctr_wrap_time = ctr_wrap_time; |
827 | perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference; | ||
828 | |||
829 | perf_fb_ctrs->prev_delivered = delivered; | ||
830 | perf_fb_ctrs->prev_reference = reference; | ||
831 | |||
832 | out_err: | 1081 | out_err: |
833 | spin_unlock(&pcc_lock); | 1082 | if (regs_in_pcc) |
1083 | up_write(&pcc_data.pcc_lock); | ||
834 | return ret; | 1084 | return ret; |
835 | } | 1085 | } |
836 | EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); | 1086 | EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); |
@@ -855,30 +1105,142 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) | |||
855 | 1105 | ||
856 | desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; | 1106 | desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; |
857 | 1107 | ||
858 | spin_lock(&pcc_lock); | 1108 | /* |
859 | 1109 | * This is Phase-I where we want to write to CPC registers | |
860 | /* If this is PCC reg, check if channel is free before writing */ | 1110 | * -> We want all CPUs to be able to execute this phase in parallel |
861 | if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { | 1111 | * |
862 | ret = check_pcc_chan(); | 1112 | * Since the read_lock can be acquired by multiple CPUs simultaneously, we |
863 | if (ret) | 1113 | * achieve that goal here. |
864 | goto busy_channel; | 1114 | */ |
1115 | if (CPC_IN_PCC(desired_reg)) { | ||
1116 | down_read(&pcc_data.pcc_lock); /* BEGIN Phase-I */ | ||
1117 | if (pcc_data.platform_owns_pcc) { | ||
1118 | ret = check_pcc_chan(false); | ||
1119 | if (ret) { | ||
1120 | up_read(&pcc_data.pcc_lock); | ||
1121 | return ret; | ||
1122 | } | ||
1123 | } | ||
1124 | /* | ||
1125 | * Update the pending_write to make sure a PCC CMD_READ will not | ||
1126 | * arrive and steal the channel during the switch to write lock | ||
1127 | */ | ||
1128 | pcc_data.pending_pcc_write_cmd = true; | ||
1129 | cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt; | ||
1130 | cpc_desc->write_cmd_status = 0; | ||
865 | } | 1131 | } |
866 | 1132 | ||
867 | /* | 1133 | /* |
868 | * Skip writing MIN/MAX until Linux knows how to come up with | 1134 | * Skip writing MIN/MAX until Linux knows how to come up with |
869 | * useful values. | 1135 | * useful values. |
870 | */ | 1136 | */ |
871 | cpc_write(&desired_reg->cpc_entry.reg, perf_ctrls->desired_perf); | 1137 | cpc_write(cpu, desired_reg, perf_ctrls->desired_perf); |
872 | 1138 | ||
873 | /* Is this a PCC reg ?*/ | 1139 | if (CPC_IN_PCC(desired_reg)) |
874 | if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { | 1140 | up_read(&pcc_data.pcc_lock); /* END Phase-I */ |
875 | /* Ring doorbell so Remote can get our perf request. */ | 1141 | /* |
876 | if (send_pcc_cmd(CMD_WRITE) < 0) | 1142 | * This is Phase-II where we transfer the ownership of PCC to Platform |
877 | ret = -EIO; | 1143 | * |
1144 | * Short Summary: Basically if we think of a group of cppc_set_perf | ||
1145 | * requests that happened in short overlapping interval. The last CPU to | ||
1146 | * come out of Phase-I will enter Phase-II and ring the doorbell. | ||
1147 | * | ||
1148 | * We have the following requirements for Phase-II: | ||
1149 | * 1. We want to execute Phase-II only when there are no CPUs | ||
1150 | * currently executing in Phase-I | ||
1151 | * 2. Once we start Phase-II we want to avoid all other CPUs from | ||
1152 | * entering Phase-I. | ||
1153 | * 3. We want only one CPU among all those who went through Phase-I | ||
1154 | * to run phase-II | ||
1155 | * | ||
1156 | * If write_trylock fails to get the lock and doesn't transfer the | ||
1157 | * PCC ownership to the platform, then one of the following will be TRUE | ||
1158 | * 1. There is at least one CPU in Phase-I which will later execute | ||
1159 | * write_trylock, so the CPUs in Phase-I will be responsible for | ||
1160 | * executing Phase-II. | ||
1161 | * 2. Some other CPU has beaten this CPU to successfully execute the | ||
1162 | * write_trylock and has already acquired the write_lock. We know for a | ||
1163 | * fact it (the other CPU acquiring the write_lock) couldn't have happened | ||
1164 | * before this CPU's Phase-I as we held the read_lock. | ||
1165 | * 3. Some other CPU executing pcc CMD_READ has stolen the | ||
1166 | * down_write, in which case send_pcc_cmd() will check for pending | ||
1167 | * CMD_WRITE commands by checking pending_pcc_write_cmd, so this CPU | ||
1168 | * can be certain that its request will be delivered. | ||
1169 | * In all of these cases, this CPU knows that its request will be | ||
1170 | * delivered by another CPU, so it can simply return. | ||
1171 | * | ||
1172 | * After getting the down_write we still need to check for | ||
1173 | * pending_pcc_write_cmd to take care of the following scenario: | ||
1174 | * The thread running this code could be scheduled out between | ||
1175 | * Phase-I and Phase-II. Before it is scheduled back on, another CPU | ||
1176 | * could have delivered the request to Platform by triggering the | ||
1177 | * doorbell and transferred the ownership of PCC to platform. So this | ||
1178 | * avoids triggering an unnecessary doorbell and more importantly before | ||
1179 | * triggering the doorbell it makes sure that the PCC channel ownership | ||
1180 | * is still with OSPM. | ||
1181 | * pending_pcc_write_cmd can also be cleared by a different CPU, if | ||
1182 | * there was a pcc CMD_READ waiting on down_write and it steals the lock | ||
1183 | * before the pcc CMD_WRITE is completed. send_pcc_cmd() checks for this | ||
1184 | * case during a CMD_READ and, if there are pending writes, it delivers | ||
1185 | * the write command before servicing the read command. | ||
1186 | */ | ||
1187 | if (CPC_IN_PCC(desired_reg)) { | ||
1188 | if (down_write_trylock(&pcc_data.pcc_lock)) { /* BEGIN Phase-II */ | ||
1189 | /* Update only if there are pending write commands */ | ||
1190 | if (pcc_data.pending_pcc_write_cmd) | ||
1191 | send_pcc_cmd(CMD_WRITE); | ||
1192 | up_write(&pcc_data.pcc_lock); /* END Phase-II */ | ||
1193 | } else | ||
1194 | /* Wait until pcc_write_cnt is updated by send_pcc_cmd */ | ||
1195 | wait_event(pcc_data.pcc_write_wait_q, | ||
1196 | cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt); | ||
1197 | |||
1198 | /* send_pcc_cmd updates the status in case of failure */ | ||
1199 | ret = cpc_desc->write_cmd_status; | ||
878 | } | 1200 | } |
879 | busy_channel: | ||
880 | spin_unlock(&pcc_lock); | ||
881 | |||
882 | return ret; | 1201 | return ret; |
883 | } | 1202 | } |
884 | EXPORT_SYMBOL_GPL(cppc_set_perf); | 1203 | EXPORT_SYMBOL_GPL(cppc_set_perf); |
1204 | |||
1205 | /** | ||
1206 | * cppc_get_transition_latency - returns frequency transition latency in ns | ||
1207 | * | ||
1208 | * ACPI CPPC does not explicitly specify how a platform can specify the | ||
1209 | * transition latency for performance change requests. The closest we have | ||
1210 | * is the timing information from the PCCT tables which provides the info | ||
1211 | * on the number and frequency of PCC commands the platform can handle. | ||
1212 | */ | ||
1213 | unsigned int cppc_get_transition_latency(int cpu_num) | ||
1214 | { | ||
1215 | /* | ||
1216 | * Expected transition latency is based on the PCCT timing values | ||
1217 | * Below are definition from ACPI spec: | ||
1218 | * pcc_nominal - Expected latency to process a command, in microseconds | ||
1219 | * pcc_mpar - The maximum number of periodic requests that the subspace | ||
1220 | * channel can support, reported in commands per minute. 0 | ||
1221 | * indicates no limitation. | ||
1222 | * pcc_mrtt - The minimum amount of time that OSPM must wait after the | ||
1223 | * completion of a command before issuing the next command, | ||
1224 | * in microseconds. | ||
1225 | */ | ||
1226 | unsigned int latency_ns = 0; | ||
1227 | struct cpc_desc *cpc_desc; | ||
1228 | struct cpc_register_resource *desired_reg; | ||
1229 | |||
1230 | cpc_desc = per_cpu(cpc_desc_ptr, cpu_num); | ||
1231 | if (!cpc_desc) | ||
1232 | return CPUFREQ_ETERNAL; | ||
1233 | |||
1234 | desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; | ||
1235 | if (!CPC_IN_PCC(desired_reg)) | ||
1236 | return CPUFREQ_ETERNAL; | ||
1237 | |||
1238 | if (pcc_data.pcc_mpar) | ||
1239 | latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar); | ||
1240 | |||
1241 | latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000); | ||
1242 | latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000); | ||
1243 | |||
1244 | return latency_ns; | ||
1245 | } | ||
1246 | EXPORT_SYMBOL_GPL(cppc_get_transition_latency); | ||
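The helpers exported by this patch (cppc_get_perf_caps(), cppc_set_perf(), cppc_get_transition_latency()) are intended to be consumed by a cpufreq driver. A minimal, hypothetical usage sketch; cppc_example_target() is an illustrative name only, and the frequency-to-performance-unit conversion a real driver needs is omitted:

#include <acpi/cppc_acpi.h>
#include <linux/cpufreq.h>

static int cppc_example_target(int cpu, u64 desired_perf)
{
	struct cppc_perf_caps caps;
	struct cppc_perf_ctrls ctrls = {};
	int ret;

	/* Discover the platform's advertised performance range. */
	ret = cppc_get_perf_caps(cpu, &caps);
	if (ret)
		return ret;

	/* Clamp the request into [lowest_perf, highest_perf]. */
	if (desired_perf < caps.lowest_perf)
		desired_perf = caps.lowest_perf;
	else if (desired_perf > caps.highest_perf)
		desired_perf = caps.highest_perf;

	ctrls.desired_perf = desired_perf;

	/* Phase-I/Phase-II PCC batching is handled inside cppc_set_perf(). */
	return cppc_set_perf(cpu, &ctrls);
}

A driver would typically also feed cppc_get_transition_latency(cpu) into policy->cpuinfo.transition_latency so that the cpufreq core knows how quickly performance change requests can be issued.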