Diffstat (limited to 'drivers/acpi/cppc_acpi.c')
-rw-r--r--	drivers/acpi/cppc_acpi.c	| 240
1 file changed, 151 insertions(+), 89 deletions(-)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index e5b47f032d9a..21c28433c590 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -48,7 +48,6 @@
 struct cppc_pcc_data {
 	struct mbox_chan *pcc_channel;
 	void __iomem *pcc_comm_addr;
-	int pcc_subspace_idx;
 	bool pcc_channel_acquired;
 	ktime_t deadline;
 	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
@@ -75,13 +74,16 @@ struct cppc_pcc_data {
 
 	/* Wait queue for CPUs whose requests were batched */
 	wait_queue_head_t pcc_write_wait_q;
+	ktime_t last_cmd_cmpl_time;
+	ktime_t last_mpar_reset;
+	int mpar_count;
+	int refcount;
 };
 
-/* Structure to represent the single PCC channel */
-static struct cppc_pcc_data pcc_data = {
-	.pcc_subspace_idx = -1,
-	.platform_owns_pcc = true,
-};
+/* Array to represent the PCC channel per subspace id */
+static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
+/* The cpu_pcc_subspace_idx contains per CPU subspace id */
+static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
 
 /*
  * The cpc_desc structure contains the ACPI register details
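[Editorial note] The single static channel structure thus becomes a table indexed by subspace ID, plus a per-CPU record of which subspace serves each CPU. A minimal sketch of the resulting lookup path; cppc_channel_of_cpu() is a hypothetical helper for illustration, not a function added by this patch:

/* Hypothetical helper, illustration only: CPU -> subspace -> channel data. */
static struct cppc_pcc_data *cppc_channel_of_cpu(int cpu)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);

	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return NULL;		/* CPU has no PCC-backed CPC registers */
	return pcc_data[pcc_ss_id];	/* still NULL until pcc_data_alloc() runs */
}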
@@ -93,7 +95,8 @@ static struct cppc_pcc_data pcc_data = {
 static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
 
 /* pcc mapped address + header size + offset within PCC subspace */
-#define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs))
+#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
+						0x8 + (offs))
 
 /* Check if a CPC register is in PCC */
 #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
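[Editorial note] With the reworked macro, a PCC-resident register at, say, offset 0x20 in subspace 1 (illustrative values) resolves as:

void __iomem *vaddr = GET_PCC_VADDR(0x20, 1);
/* == pcc_data[1]->pcc_comm_addr + 0x8 + 0x20; the 0x8 skips the
 * PCC shared-memory region header. */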
@@ -188,13 +191,16 @@ static struct kobj_type cppc_ktype = {
 	.default_attrs = cppc_attrs,
 };
 
-static int check_pcc_chan(bool chk_err_bit)
+static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
 {
 	int ret = -EIO, status = 0;
-	struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_data.pcc_comm_addr;
-	ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline);
+	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
+		pcc_ss_data->pcc_comm_addr;
+	ktime_t next_deadline = ktime_add(ktime_get(),
+					  pcc_ss_data->deadline);
 
-	if (!pcc_data.platform_owns_pcc)
+	if (!pcc_ss_data->platform_owns_pcc)
 		return 0;
 
 	/* Retry in case the remote processor was too slow to catch up. */
@@ -219,7 +225,7 @@ static int check_pcc_chan(bool chk_err_bit)
 	}
 
 	if (likely(!ret))
-		pcc_data.platform_owns_pcc = false;
+		pcc_ss_data->platform_owns_pcc = false;
 	else
 		pr_err("PCC check channel failed. Status=%x\n", status);
 
@@ -230,13 +236,12 @@ static int check_pcc_chan(bool chk_err_bit)
  * This function transfers the ownership of the PCC to the platform
  * So it must be called while holding write_lock(pcc_lock)
  */
-static int send_pcc_cmd(u16 cmd)
+static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
 {
 	int ret = -EIO, i;
+	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 	struct acpi_pcct_shared_memory *generic_comm_base =
-		(struct acpi_pcct_shared_memory *) pcc_data.pcc_comm_addr;
-	static ktime_t last_cmd_cmpl_time, last_mpar_reset;
-	static int mpar_count;
+		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
 	unsigned int time_delta;
 
 	/*
@@ -249,24 +254,25 @@ static int send_pcc_cmd(u16 cmd)
 		 * before write completion, so first send a WRITE command to
 		 * platform
 		 */
-		if (pcc_data.pending_pcc_write_cmd)
-			send_pcc_cmd(CMD_WRITE);
+		if (pcc_ss_data->pending_pcc_write_cmd)
+			send_pcc_cmd(pcc_ss_id, CMD_WRITE);
 
-		ret = check_pcc_chan(false);
+		ret = check_pcc_chan(pcc_ss_id, false);
 		if (ret)
 			goto end;
 	} else /* CMD_WRITE */
-		pcc_data.pending_pcc_write_cmd = FALSE;
+		pcc_ss_data->pending_pcc_write_cmd = FALSE;
 
 	/*
 	 * Handle the Minimum Request Turnaround Time(MRTT)
 	 * "The minimum amount of time that OSPM must wait after the completion
 	 * of a command before issuing the next command, in microseconds"
 	 */
-	if (pcc_data.pcc_mrtt) {
-		time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
-		if (pcc_data.pcc_mrtt > time_delta)
-			udelay(pcc_data.pcc_mrtt - time_delta);
+	if (pcc_ss_data->pcc_mrtt) {
+		time_delta = ktime_us_delta(ktime_get(),
+					    pcc_ss_data->last_cmd_cmpl_time);
+		if (pcc_ss_data->pcc_mrtt > time_delta)
+			udelay(pcc_ss_data->pcc_mrtt - time_delta);
 	}
 
 	/*
@@ -280,18 +286,19 @@ static int send_pcc_cmd(u16 cmd)
 	 * not send the request to the platform after hitting the MPAR limit in
 	 * any 60s window
 	 */
-	if (pcc_data.pcc_mpar) {
-		if (mpar_count == 0) {
-			time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
-			if (time_delta < 60 * MSEC_PER_SEC) {
+	if (pcc_ss_data->pcc_mpar) {
+		if (pcc_ss_data->mpar_count == 0) {
+			time_delta = ktime_ms_delta(ktime_get(),
+						    pcc_ss_data->last_mpar_reset);
+			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
 				pr_debug("PCC cmd not sent due to MPAR limit");
 				ret = -EIO;
 				goto end;
 			}
-			last_mpar_reset = ktime_get();
-			mpar_count = pcc_data.pcc_mpar;
+			pcc_ss_data->last_mpar_reset = ktime_get();
+			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
 		}
-		mpar_count--;
+		pcc_ss_data->mpar_count--;
 	}
 
 	/* Write to the shared comm region. */
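[Editorial note] To make the MPAR accounting concrete, here is the same 60-second budget logic in isolation; mpar_allows_send() is a sketch for illustration, not a function added by the patch:

/* Sketch only: with pcc_mpar == 10, at most 10 commands go out in any
 * 60 s window; last_mpar_reset == 0 skips the check on first use. */
static bool mpar_allows_send(struct cppc_pcc_data *ss)
{
	if (!ss->pcc_mpar)
		return true;	/* platform advertised no MPAR limit */
	if (ss->mpar_count == 0) {
		unsigned int delta_ms = ktime_ms_delta(ktime_get(),
						       ss->last_mpar_reset);
		if (delta_ms < 60 * MSEC_PER_SEC && ss->last_mpar_reset)
			return false;	/* budget for this window exhausted */
		ss->last_mpar_reset = ktime_get();
		ss->mpar_count = ss->pcc_mpar;	/* replenish for new window */
	}
	ss->mpar_count--;
	return true;
}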
@@ -300,10 +307,10 @@ static int send_pcc_cmd(u16 cmd)
 	/* Flip CMD COMPLETE bit */
 	writew_relaxed(0, &generic_comm_base->status);
 
-	pcc_data.platform_owns_pcc = true;
+	pcc_ss_data->platform_owns_pcc = true;
 
 	/* Ring doorbell */
-	ret = mbox_send_message(pcc_data.pcc_channel, &cmd);
+	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
 	if (ret < 0) {
 		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
 				cmd, ret);
@@ -311,15 +318,15 @@ static int send_pcc_cmd(u16 cmd)
 	}
 
 	/* wait for completion and check for PCC error bit */
-	ret = check_pcc_chan(true);
+	ret = check_pcc_chan(pcc_ss_id, true);
 
-	if (pcc_data.pcc_mrtt)
-		last_cmd_cmpl_time = ktime_get();
+	if (pcc_ss_data->pcc_mrtt)
+		pcc_ss_data->last_cmd_cmpl_time = ktime_get();
 
-	if (pcc_data.pcc_channel->mbox->txdone_irq)
-		mbox_chan_txdone(pcc_data.pcc_channel, ret);
+	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
+		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
 	else
-		mbox_client_txdone(pcc_data.pcc_channel, ret);
+		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
 
 end:
 	if (cmd == CMD_WRITE) {
@@ -329,12 +336,12 @@ end:
 				if (!desc)
 					continue;
 
-				if (desc->write_cmd_id == pcc_data.pcc_write_cnt)
+				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
 					desc->write_cmd_status = ret;
 			}
 		}
-		pcc_data.pcc_write_cnt++;
-		wake_up_all(&pcc_data.pcc_write_wait_q);
+		pcc_ss_data->pcc_write_cnt++;
+		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
 	}
 
 	return ret;
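[Editorial note] With the reworked signature, every caller now passes the subspace ID explicitly while holding that subspace's pcc_lock. Condensed from the cppc_get_perf_caps() hunk further down, a typical read sequence looks like:

int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);

down_write(&pcc_data[pcc_ss_id]->pcc_lock);
ret = send_pcc_cmd(pcc_ss_id, CMD_READ);	/* doorbell for this subspace only */
up_write(&pcc_data[pcc_ss_id]->pcc_lock);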
@@ -536,16 +543,16 @@ err_ret:
 }
 EXPORT_SYMBOL_GPL(acpi_get_psd_map);
 
-static int register_pcc_channel(int pcc_subspace_idx)
+static int register_pcc_channel(int pcc_ss_idx)
 {
 	struct acpi_pcct_hw_reduced *cppc_ss;
 	u64 usecs_lat;
 
-	if (pcc_subspace_idx >= 0) {
-		pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl,
-				pcc_subspace_idx);
+	if (pcc_ss_idx >= 0) {
+		pcc_data[pcc_ss_idx]->pcc_channel =
+			pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
 
-		if (IS_ERR(pcc_data.pcc_channel)) {
+		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
 			pr_err("Failed to find PCC communication channel\n");
 			return -ENODEV;
 		}
@@ -556,7 +563,7 @@ static int register_pcc_channel(int pcc_subspace_idx)
 		 * PCC channels) and stored pointers to the
 		 * subspace communication region in con_priv.
 		 */
-		cppc_ss = (pcc_data.pcc_channel)->con_priv;
+		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
 
 		if (!cppc_ss) {
 			pr_err("No PCC subspace found for CPPC\n");
@@ -569,19 +576,20 @@ static int register_pcc_channel(int pcc_subspace_idx)
 		 * So add an arbitrary amount of wait on top of Nominal.
 		 */
 		usecs_lat = NUM_RETRIES * cppc_ss->latency;
-		pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
-		pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time;
-		pcc_data.pcc_mpar = cppc_ss->max_access_rate;
-		pcc_data.pcc_nominal = cppc_ss->latency;
+		pcc_data[pcc_ss_idx]->deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
+		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
+		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
+		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
 
-		pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
-		if (!pcc_data.pcc_comm_addr) {
+		pcc_data[pcc_ss_idx]->pcc_comm_addr =
+			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
+		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
 			pr_err("Failed to ioremap PCC comm region mem\n");
 			return -ENOMEM;
 		}
 
 		/* Set flag so that we don't come here for each CPU. */
-		pcc_data.pcc_channel_acquired = true;
+		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
 	}
 
 	return 0;
@@ -600,6 +608,34 @@ bool __weak cpc_ffh_supported(void)
 	return false;
 }
 
+
+/**
+ * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
+ *
+ * Check and allocate the cppc_pcc_data memory.
+ * In some processor configurations it is possible that the same subspace
+ * is shared between multiple CPUs. This is seen especially in CPUs
+ * with hardware multi-threading support.
+ *
+ * Return: 0 for success, errno for failure
+ */
+int pcc_data_alloc(int pcc_ss_id)
+{
+	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
+		return -EINVAL;
+
+	if (pcc_data[pcc_ss_id]) {
+		pcc_data[pcc_ss_id]->refcount++;
+	} else {
+		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
+					      GFP_KERNEL);
+		if (!pcc_data[pcc_ss_id])
+			return -ENOMEM;
+		pcc_data[pcc_ss_id]->refcount++;
+	}
+
+	return 0;
+}
 /*
  * An example CPC table looks like the following.
  *
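[Editorial note] The refcount pairs one increment per probed CPU against one decrement in acpi_cppc_processor_exit() (see the hunk further down). For two SMT siblings sharing subspace 3, the intended lifecycle is, illustratively:

/*
 * pcc_data_alloc(3) from CPU0's probe -> allocate, refcount = 1
 * pcc_data_alloc(3) from CPU1's probe -> reuse,    refcount = 2
 * acpi_cppc_processor_exit(CPU1)      -> refcount = 1
 * acpi_cppc_processor_exit(CPU0)      -> refcount = 0: free the mbox
 *                                        channel, kfree(pcc_data[3])
 */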
@@ -661,6 +697,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 	struct device *cpu_dev;
 	acpi_handle handle = pr->handle;
 	unsigned int num_ent, i, cpc_rev;
+	int pcc_subspace_id = -1;
 	acpi_status status;
 	int ret = -EFAULT;
 
@@ -733,9 +770,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 		 * so extract it only once.
 		 */
 		if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
-			if (pcc_data.pcc_subspace_idx < 0)
-				pcc_data.pcc_subspace_idx = gas_t->access_width;
-			else if (pcc_data.pcc_subspace_idx != gas_t->access_width) {
+			if (pcc_subspace_id < 0) {
+				pcc_subspace_id = gas_t->access_width;
+				if (pcc_data_alloc(pcc_subspace_id))
+					goto out_free;
+			} else if (pcc_subspace_id != gas_t->access_width) {
 				pr_debug("Mismatched PCC ids.\n");
 				goto out_free;
 			}
@@ -763,6 +802,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 			goto out_free;
 		}
 	}
+	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;
 	/* Store CPU Logical ID */
 	cpc_ptr->cpu_id = pr->id;
 
@@ -771,14 +811,14 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 	if (ret)
 		goto out_free;
 
-	/* Register PCC channel once for all CPUs. */
-	if (!pcc_data.pcc_channel_acquired) {
-		ret = register_pcc_channel(pcc_data.pcc_subspace_idx);
+	/* Register PCC channel once per PCC subspace ID. */
+	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
+		ret = register_pcc_channel(pcc_subspace_id);
 		if (ret)
 			goto out_free;
 
-		init_rwsem(&pcc_data.pcc_lock);
-		init_waitqueue_head(&pcc_data.pcc_write_wait_q);
+		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
+		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
 	}
 
 	/* Everything looks okay */
@@ -831,6 +871,18 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
 	struct cpc_desc *cpc_ptr;
 	unsigned int i;
 	void __iomem *addr;
+	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);
+
+	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
+		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
+			pcc_data[pcc_ss_id]->refcount--;
+			if (!pcc_data[pcc_ss_id]->refcount) {
+				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
+				pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
+				kfree(pcc_data[pcc_ss_id]);
+			}
+		}
+	}
 
 	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
 	if (!cpc_ptr)
@@ -888,6 +940,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
 {
 	int ret_val = 0;
 	void __iomem *vaddr = 0;
+	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
 	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 
 	if (reg_res->type == ACPI_TYPE_INTEGER) {
@@ -897,7 +950,7 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
 
 	*val = 0;
 	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
-		vaddr = GET_PCC_VADDR(reg->address);
+		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
 	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
 		vaddr = reg_res->sys_mem_vaddr;
 	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
@@ -932,10 +985,11 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 {
 	int ret_val = 0;
 	void __iomem *vaddr = 0;
+	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
 	struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 
 	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
-		vaddr = GET_PCC_VADDR(reg->address);
+		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
 	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
 		vaddr = reg_res->sys_mem_vaddr;
 	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
@@ -980,6 +1034,8 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 	struct cpc_register_resource *highest_reg, *lowest_reg,
 		*lowest_non_linear_reg, *nominal_reg;
 	u64 high, low, nom, min_nonlinear;
+	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 	int ret = 0, regs_in_pcc = 0;
 
 	if (!cpc_desc) {
@@ -996,9 +1052,9 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
 		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
 		regs_in_pcc = 1;
-		down_write(&pcc_data.pcc_lock);
+		down_write(&pcc_ss_data->pcc_lock);
 		/* Ring doorbell once to update PCC subspace */
-		if (send_pcc_cmd(CMD_READ) < 0) {
+		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
 			ret = -EIO;
 			goto out_err;
 		}
@@ -1021,7 +1077,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 
 out_err:
 	if (regs_in_pcc)
-		up_write(&pcc_data.pcc_lock);
+		up_write(&pcc_ss_data->pcc_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
@@ -1038,6 +1094,8 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
 	struct cpc_register_resource *delivered_reg, *reference_reg,
 		*ref_perf_reg, *ctr_wrap_reg;
+	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 	u64 delivered, reference, ref_perf, ctr_wrap_time;
 	int ret = 0, regs_in_pcc = 0;
 
@@ -1061,10 +1119,10 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 	/* Are any of the regs PCC ?*/
 	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
 		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
-		down_write(&pcc_data.pcc_lock);
+		down_write(&pcc_ss_data->pcc_lock);
 		regs_in_pcc = 1;
 		/* Ring doorbell once to update PCC subspace */
-		if (send_pcc_cmd(CMD_READ) < 0) {
+		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
 			ret = -EIO;
 			goto out_err;
 		}
@@ -1094,7 +1152,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
 out_err:
 	if (regs_in_pcc)
-		up_write(&pcc_data.pcc_lock);
+		up_write(&pcc_ss_data->pcc_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
@@ -1110,6 +1168,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 {
 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
 	struct cpc_register_resource *desired_reg;
+	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 	int ret = 0;
 
 	if (!cpc_desc) {
@@ -1127,11 +1187,11 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 	 * achieve that goal here
 	 */
 	if (CPC_IN_PCC(desired_reg)) {
-		down_read(&pcc_data.pcc_lock);	/* BEGIN Phase-I */
-		if (pcc_data.platform_owns_pcc) {
-			ret = check_pcc_chan(false);
+		down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
+		if (pcc_ss_data->platform_owns_pcc) {
+			ret = check_pcc_chan(pcc_ss_id, false);
 			if (ret) {
-				up_read(&pcc_data.pcc_lock);
+				up_read(&pcc_ss_data->pcc_lock);
 				return ret;
 			}
 		}
@@ -1139,8 +1199,8 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 		 * Update the pending_write to make sure a PCC CMD_READ will not
 		 * arrive and steal the channel during the switch to write lock
 		 */
-		pcc_data.pending_pcc_write_cmd = true;
-		cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt;
+		pcc_ss_data->pending_pcc_write_cmd = true;
+		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
 		cpc_desc->write_cmd_status = 0;
 	}
 
@@ -1151,7 +1211,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 		cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
 
 	if (CPC_IN_PCC(desired_reg))
-		up_read(&pcc_data.pcc_lock);	/* END Phase-I */
+		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
 	/*
 	 * This is Phase-II where we transfer the ownership of PCC to Platform
 	 *
@@ -1199,15 +1259,15 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 	 * the write command before servicing the read command
 	 */
 	if (CPC_IN_PCC(desired_reg)) {
-		if (down_write_trylock(&pcc_data.pcc_lock)) { /* BEGIN Phase-II */
+		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
 			/* Update only if there are pending write commands */
-			if (pcc_data.pending_pcc_write_cmd)
-				send_pcc_cmd(CMD_WRITE);
-			up_write(&pcc_data.pcc_lock);	/* END Phase-II */
+			if (pcc_ss_data->pending_pcc_write_cmd)
+				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
 		} else
 			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
-			wait_event(pcc_data.pcc_write_wait_q,
-				cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt);
+			wait_event(pcc_ss_data->pcc_write_wait_q,
+				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
 
 		/* send_pcc_cmd updates the status in case of failure */
 		ret = cpc_desc->write_cmd_status;
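[Editorial note] A recap of the two-phase protocol these hunks thread the subspace ID through (summarizing the code above, no new behavior):

/*
 * Phase-I:  pcc_lock is taken for read, so any number of CPUs can batch
 *           desired-perf writes into the subspace's shared region and
 *           tag themselves with the current pcc_write_cnt.
 * Phase-II: one CPU wins down_write_trylock() and rings the doorbell once
 *           with send_pcc_cmd(pcc_ss_id, CMD_WRITE) for the whole batch;
 *           the others sleep on pcc_write_wait_q until pcc_write_cnt
 *           advances, then pick up their write_cmd_status.
 */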
@@ -1240,6 +1300,8 @@ unsigned int cppc_get_transition_latency(int cpu_num)
 	unsigned int latency_ns = 0;
 	struct cpc_desc *cpc_desc;
 	struct cpc_register_resource *desired_reg;
+	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
+	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
 
 	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
 	if (!cpc_desc)
@@ -1249,11 +1311,11 @@ unsigned int cppc_get_transition_latency(int cpu_num)
 	if (!CPC_IN_PCC(desired_reg))
 		return CPUFREQ_ETERNAL;
 
-	if (pcc_data.pcc_mpar)
-		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar);
+	if (pcc_ss_data->pcc_mpar)
+		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
 
-	latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000);
-	latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000);
+	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
+	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
 
 	return latency_ns;
 }
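[Editorial note] A worked example of the arithmetic above, with illustrative platform values pcc_mpar = 100, pcc_nominal = 1000 (us) and pcc_mrtt = 60 (us):

/*
 * MPAR term:    60 * (1000000000 / 100) = 600000000 ns  (600 ms)
 * nominal term: 1000 * 1000             =   1000000 ns    (1 ms)
 * MRTT term:    60 * 1000               =     60000 ns   (60 us)
 *
 * latency_ns = max of the three = 600 ms: the MPAR rate limit dominates.
 */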