diff options
author | Prakash, Prashanth <pprakash@codeaurora.org> | 2016-08-16 16:39:43 -0400 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2016-08-30 19:02:34 -0400 |
commit | 8482ef8c6e684a1bba703c330e0bafe2d1d29ef7 (patch) | |
tree | d69eb767e7f0dce6684bbcec12de24079ec487e0 /drivers/acpi/cppc_acpi.c | |
parent | 158c998ea44ba30ae3d1bde535581c4436417530 (diff) |
ACPI / CPPC: move all PCC related information into pcc_data
There are several global variables in the cppc driver that are related
to the PCC channel used for CPPC. This patch collects all such
information into a single consolidated structure (cppc_pcc_data).
Signed-off-by: Prashanth Prakash <pprakash@codeaurora.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/acpi/cppc_acpi.c')
-rw-r--r-- | drivers/acpi/cppc_acpi.c | 167 |
1 file changed, 87 insertions, 80 deletions
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index f00fac363acd..80c123fbfdcf 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c | |||
@@ -45,30 +45,41 @@ | |||
45 | 45 | ||
46 | #include <acpi/cppc_acpi.h> | 46 | #include <acpi/cppc_acpi.h> |
47 | 47 | ||
48 | /* | 48 | struct cppc_pcc_data { |
49 | * Lock to provide controlled access to the PCC channel. | 49 | struct mbox_chan *pcc_channel; |
50 | * | 50 | void __iomem *pcc_comm_addr; |
51 | * For performance critical usecases(currently cppc_set_perf) | 51 | int pcc_subspace_idx; |
52 | * We need to take read_lock and check if channel belongs to OSPM before | 52 | bool pcc_channel_acquired; |
53 | * reading or writing to PCC subspace | 53 | ktime_t deadline; |
54 | * We need to take write_lock before transferring the channel ownership to | 54 | unsigned int pcc_mpar, pcc_mrtt, pcc_nominal; |
55 | * the platform via a Doorbell | ||
56 | * This allows us to batch a number of CPPC requests if they happen to | ||
57 | * originate in about the same time | ||
58 | * | ||
59 | * For non-performance critical usecases(init) | ||
60 | * Take write_lock for all purposes which gives exclusive access | ||
61 | */ | ||
62 | static DECLARE_RWSEM(pcc_lock); | ||
63 | 55 | ||
64 | /* Indicates if there are any pending/batched PCC write commands */ | 56 | bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */ |
65 | static bool pending_pcc_write_cmd; | 57 | unsigned int pcc_write_cnt; /* Running count of PCC write commands */ |
66 | 58 | ||
67 | /* Wait queue for CPUs whose requests were batched */ | 59 | /* |
68 | static DECLARE_WAIT_QUEUE_HEAD(pcc_write_wait_q); | 60 | * Lock to provide controlled access to the PCC channel. |
61 | * | ||
62 | * For performance critical usecases(currently cppc_set_perf) | ||
63 | * We need to take read_lock and check if channel belongs to OSPM | ||
64 | * before reading or writing to PCC subspace | ||
65 | * We need to take write_lock before transferring the channel | ||
66 | * ownership to the platform via a Doorbell | ||
67 | * This allows us to batch a number of CPPC requests if they happen | ||
68 | * to originate in about the same time | ||
69 | * | ||
70 | * For non-performance critical usecases(init) | ||
71 | * Take write_lock for all purposes which gives exclusive access | ||
72 | */ | ||
73 | struct rw_semaphore pcc_lock; | ||
74 | |||
75 | /* Wait queue for CPUs whose requests were batched */ | ||
76 | wait_queue_head_t pcc_write_wait_q; | ||
77 | }; | ||
69 | 78 | ||
70 | /* Used to identify if a batched request is delivered to platform */ | 79 | /* Structure to represent the single PCC channel */ |
71 | static unsigned int pcc_write_cnt; | 80 | static struct cppc_pcc_data pcc_data = { |
81 | .pcc_subspace_idx = -1, | ||
82 | }; | ||
72 | 83 | ||
73 | /* | 84 | /* |
74 | * The cpc_desc structure contains the ACPI register details | 85 | * The cpc_desc structure contains the ACPI register details |
@@ -79,16 +90,8 @@ static unsigned int pcc_write_cnt; | |||
79 | */ | 90 | */ |
80 | static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); | 91 | static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); |
81 | 92 | ||
82 | /* This layer handles all the PCC specifics for CPPC. */ | ||
83 | static struct mbox_chan *pcc_channel; | ||
84 | static void __iomem *pcc_comm_addr; | ||
85 | static int pcc_subspace_idx = -1; | ||
86 | static bool pcc_channel_acquired; | ||
87 | static ktime_t deadline; | ||
88 | static unsigned int pcc_mpar, pcc_mrtt, pcc_nominal; | ||
89 | |||
90 | /* pcc mapped address + header size + offset within PCC subspace */ | 93 | /* pcc mapped address + header size + offset within PCC subspace */ |
91 | #define GET_PCC_VADDR(offs) (pcc_comm_addr + 0x8 + (offs)) | 94 | #define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs)) |
92 | 95 | ||
93 | /* Check if a CPC regsiter is in PCC */ | 96 | /* Check if a CPC regsiter is in PCC */ |
94 | #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ | 97 | #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ |
@@ -181,8 +184,8 @@ static struct kobj_type cppc_ktype = { | |||
181 | static int check_pcc_chan(void) | 184 | static int check_pcc_chan(void) |
182 | { | 185 | { |
183 | int ret = -EIO; | 186 | int ret = -EIO; |
184 | struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr; | 187 | struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_data.pcc_comm_addr; |
185 | ktime_t next_deadline = ktime_add(ktime_get(), deadline); | 188 | ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline); |
186 | 189 | ||
187 | /* Retry in case the remote processor was too slow to catch up. */ | 190 | /* Retry in case the remote processor was too slow to catch up. */ |
188 | while (!ktime_after(ktime_get(), next_deadline)) { | 191 | while (!ktime_after(ktime_get(), next_deadline)) { |
@@ -213,7 +216,7 @@ static int send_pcc_cmd(u16 cmd) | |||
213 | { | 216 | { |
214 | int ret = -EIO, i; | 217 | int ret = -EIO, i; |
215 | struct acpi_pcct_shared_memory *generic_comm_base = | 218 | struct acpi_pcct_shared_memory *generic_comm_base = |
216 | (struct acpi_pcct_shared_memory *) pcc_comm_addr; | 219 | (struct acpi_pcct_shared_memory *) pcc_data.pcc_comm_addr; |
217 | static ktime_t last_cmd_cmpl_time, last_mpar_reset; | 220 | static ktime_t last_cmd_cmpl_time, last_mpar_reset; |
218 | static int mpar_count; | 221 | static int mpar_count; |
219 | unsigned int time_delta; | 222 | unsigned int time_delta; |
@@ -228,24 +231,24 @@ static int send_pcc_cmd(u16 cmd) | |||
228 | * before write completion, so first send a WRITE command to | 231 | * before write completion, so first send a WRITE command to |
229 | * platform | 232 | * platform |
230 | */ | 233 | */ |
231 | if (pending_pcc_write_cmd) | 234 | if (pcc_data.pending_pcc_write_cmd) |
232 | send_pcc_cmd(CMD_WRITE); | 235 | send_pcc_cmd(CMD_WRITE); |
233 | 236 | ||
234 | ret = check_pcc_chan(); | 237 | ret = check_pcc_chan(); |
235 | if (ret) | 238 | if (ret) |
236 | goto end; | 239 | goto end; |
237 | } else /* CMD_WRITE */ | 240 | } else /* CMD_WRITE */ |
238 | pending_pcc_write_cmd = FALSE; | 241 | pcc_data.pending_pcc_write_cmd = FALSE; |
239 | 242 | ||
240 | /* | 243 | /* |
241 | * Handle the Minimum Request Turnaround Time(MRTT) | 244 | * Handle the Minimum Request Turnaround Time(MRTT) |
242 | * "The minimum amount of time that OSPM must wait after the completion | 245 | * "The minimum amount of time that OSPM must wait after the completion |
243 | * of a command before issuing the next command, in microseconds" | 246 | * of a command before issuing the next command, in microseconds" |
244 | */ | 247 | */ |
245 | if (pcc_mrtt) { | 248 | if (pcc_data.pcc_mrtt) { |
246 | time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time); | 249 | time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time); |
247 | if (pcc_mrtt > time_delta) | 250 | if (pcc_data.pcc_mrtt > time_delta) |
248 | udelay(pcc_mrtt - time_delta); | 251 | udelay(pcc_data.pcc_mrtt - time_delta); |
249 | } | 252 | } |
250 | 253 | ||
251 | /* | 254 | /* |
@@ -259,7 +262,7 @@ static int send_pcc_cmd(u16 cmd) | |||
259 | * not send the request to the platform after hitting the MPAR limit in | 262 | * not send the request to the platform after hitting the MPAR limit in |
260 | * any 60s window | 263 | * any 60s window |
261 | */ | 264 | */ |
262 | if (pcc_mpar) { | 265 | if (pcc_data.pcc_mpar) { |
263 | if (mpar_count == 0) { | 266 | if (mpar_count == 0) { |
264 | time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset); | 267 | time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset); |
265 | if (time_delta < 60 * MSEC_PER_SEC) { | 268 | if (time_delta < 60 * MSEC_PER_SEC) { |
@@ -268,7 +271,7 @@ static int send_pcc_cmd(u16 cmd) | |||
268 | goto end; | 271 | goto end; |
269 | } | 272 | } |
270 | last_mpar_reset = ktime_get(); | 273 | last_mpar_reset = ktime_get(); |
271 | mpar_count = pcc_mpar; | 274 | mpar_count = pcc_data.pcc_mpar; |
272 | } | 275 | } |
273 | mpar_count--; | 276 | mpar_count--; |
274 | } | 277 | } |
@@ -280,7 +283,7 @@ static int send_pcc_cmd(u16 cmd) | |||
280 | writew_relaxed(0, &generic_comm_base->status); | 283 | writew_relaxed(0, &generic_comm_base->status); |
281 | 284 | ||
282 | /* Ring doorbell */ | 285 | /* Ring doorbell */ |
283 | ret = mbox_send_message(pcc_channel, &cmd); | 286 | ret = mbox_send_message(pcc_data.pcc_channel, &cmd); |
284 | if (ret < 0) { | 287 | if (ret < 0) { |
285 | pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n", | 288 | pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n", |
286 | cmd, ret); | 289 | cmd, ret); |
@@ -299,13 +302,13 @@ static int send_pcc_cmd(u16 cmd) | |||
299 | * command for proper handling of MRTT, so we need to check | 302 | * command for proper handling of MRTT, so we need to check |
300 | * for pcc_mrtt in addition to CMD_READ | 303 | * for pcc_mrtt in addition to CMD_READ |
301 | */ | 304 | */ |
302 | if (cmd == CMD_READ || pcc_mrtt) { | 305 | if (cmd == CMD_READ || pcc_data.pcc_mrtt) { |
303 | ret = check_pcc_chan(); | 306 | ret = check_pcc_chan(); |
304 | if (pcc_mrtt) | 307 | if (pcc_data.pcc_mrtt) |
305 | last_cmd_cmpl_time = ktime_get(); | 308 | last_cmd_cmpl_time = ktime_get(); |
306 | } | 309 | } |
307 | 310 | ||
308 | mbox_client_txdone(pcc_channel, ret); | 311 | mbox_client_txdone(pcc_data.pcc_channel, ret); |
309 | 312 | ||
310 | end: | 313 | end: |
311 | if (cmd == CMD_WRITE) { | 314 | if (cmd == CMD_WRITE) { |
@@ -315,12 +318,12 @@ end: | |||
315 | if (!desc) | 318 | if (!desc) |
316 | continue; | 319 | continue; |
317 | 320 | ||
318 | if (desc->write_cmd_id == pcc_write_cnt) | 321 | if (desc->write_cmd_id == pcc_data.pcc_write_cnt) |
319 | desc->write_cmd_status = ret; | 322 | desc->write_cmd_status = ret; |
320 | } | 323 | } |
321 | } | 324 | } |
322 | pcc_write_cnt++; | 325 | pcc_data.pcc_write_cnt++; |
323 | wake_up_all(&pcc_write_wait_q); | 326 | wake_up_all(&pcc_data.pcc_write_wait_q); |
324 | } | 327 | } |
325 | 328 | ||
326 | return ret; | 329 | return ret; |
@@ -528,10 +531,10 @@ static int register_pcc_channel(int pcc_subspace_idx) | |||
528 | u64 usecs_lat; | 531 | u64 usecs_lat; |
529 | 532 | ||
530 | if (pcc_subspace_idx >= 0) { | 533 | if (pcc_subspace_idx >= 0) { |
531 | pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl, | 534 | pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl, |
532 | pcc_subspace_idx); | 535 | pcc_subspace_idx); |
533 | 536 | ||
534 | if (IS_ERR(pcc_channel)) { | 537 | if (IS_ERR(pcc_data.pcc_channel)) { |
535 | pr_err("Failed to find PCC communication channel\n"); | 538 | pr_err("Failed to find PCC communication channel\n"); |
536 | return -ENODEV; | 539 | return -ENODEV; |
537 | } | 540 | } |
@@ -542,7 +545,7 @@ static int register_pcc_channel(int pcc_subspace_idx) | |||
542 | * PCC channels) and stored pointers to the | 545 | * PCC channels) and stored pointers to the |
543 | * subspace communication region in con_priv. | 546 | * subspace communication region in con_priv. |
544 | */ | 547 | */ |
545 | cppc_ss = pcc_channel->con_priv; | 548 | cppc_ss = (pcc_data.pcc_channel)->con_priv; |
546 | 549 | ||
547 | if (!cppc_ss) { | 550 | if (!cppc_ss) { |
548 | pr_err("No PCC subspace found for CPPC\n"); | 551 | pr_err("No PCC subspace found for CPPC\n"); |
@@ -555,19 +558,19 @@ static int register_pcc_channel(int pcc_subspace_idx) | |||
555 | * So add an arbitrary amount of wait on top of Nominal. | 558 | * So add an arbitrary amount of wait on top of Nominal. |
556 | */ | 559 | */ |
557 | usecs_lat = NUM_RETRIES * cppc_ss->latency; | 560 | usecs_lat = NUM_RETRIES * cppc_ss->latency; |
558 | deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC); | 561 | pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC); |
559 | pcc_mrtt = cppc_ss->min_turnaround_time; | 562 | pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time; |
560 | pcc_mpar = cppc_ss->max_access_rate; | 563 | pcc_data.pcc_mpar = cppc_ss->max_access_rate; |
561 | pcc_nominal = cppc_ss->latency; | 564 | pcc_data.pcc_nominal = cppc_ss->latency; |
562 | 565 | ||
563 | pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length); | 566 | pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length); |
564 | if (!pcc_comm_addr) { | 567 | if (!pcc_data.pcc_comm_addr) { |
565 | pr_err("Failed to ioremap PCC comm region mem\n"); | 568 | pr_err("Failed to ioremap PCC comm region mem\n"); |
566 | return -ENOMEM; | 569 | return -ENOMEM; |
567 | } | 570 | } |
568 | 571 | ||
569 | /* Set flag so that we dont come here for each CPU. */ | 572 | /* Set flag so that we dont come here for each CPU. */ |
570 | pcc_channel_acquired = true; | 573 | pcc_data.pcc_channel_acquired = true; |
571 | } | 574 | } |
572 | 575 | ||
573 | return 0; | 576 | return 0; |
@@ -706,9 +709,9 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) | |||
706 | * so extract it only once. | 709 | * so extract it only once. |
707 | */ | 710 | */ |
708 | if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { | 711 | if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { |
709 | if (pcc_subspace_idx < 0) | 712 | if (pcc_data.pcc_subspace_idx < 0) |
710 | pcc_subspace_idx = gas_t->access_width; | 713 | pcc_data.pcc_subspace_idx = gas_t->access_width; |
711 | else if (pcc_subspace_idx != gas_t->access_width) { | 714 | else if (pcc_data.pcc_subspace_idx != gas_t->access_width) { |
712 | pr_debug("Mismatched PCC ids.\n"); | 715 | pr_debug("Mismatched PCC ids.\n"); |
713 | goto out_free; | 716 | goto out_free; |
714 | } | 717 | } |
@@ -743,10 +746,13 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) | |||
743 | goto out_free; | 746 | goto out_free; |
744 | 747 | ||
745 | /* Register PCC channel once for all CPUs. */ | 748 | /* Register PCC channel once for all CPUs. */ |
746 | if (!pcc_channel_acquired) { | 749 | if (!pcc_data.pcc_channel_acquired) { |
747 | ret = register_pcc_channel(pcc_subspace_idx); | 750 | ret = register_pcc_channel(pcc_data.pcc_subspace_idx); |
748 | if (ret) | 751 | if (ret) |
749 | goto out_free; | 752 | goto out_free; |
753 | |||
754 | init_rwsem(&pcc_data.pcc_lock); | ||
755 | init_waitqueue_head(&pcc_data.pcc_write_wait_q); | ||
750 | } | 756 | } |
751 | 757 | ||
752 | /* Plug PSD data into this CPUs CPC descriptor. */ | 758 | /* Plug PSD data into this CPUs CPC descriptor. */ |
@@ -924,7 +930,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) | |||
924 | if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) || | 930 | if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) || |
925 | CPC_IN_PCC(ref_perf) || CPC_IN_PCC(nom_perf)) { | 931 | CPC_IN_PCC(ref_perf) || CPC_IN_PCC(nom_perf)) { |
926 | regs_in_pcc = 1; | 932 | regs_in_pcc = 1; |
927 | down_write(&pcc_lock); | 933 | down_write(&pcc_data.pcc_lock); |
928 | /* Ring doorbell once to update PCC subspace */ | 934 | /* Ring doorbell once to update PCC subspace */ |
929 | if (send_pcc_cmd(CMD_READ) < 0) { | 935 | if (send_pcc_cmd(CMD_READ) < 0) { |
930 | ret = -EIO; | 936 | ret = -EIO; |
@@ -946,7 +952,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) | |||
946 | 952 | ||
947 | out_err: | 953 | out_err: |
948 | if (regs_in_pcc) | 954 | if (regs_in_pcc) |
949 | up_write(&pcc_lock); | 955 | up_write(&pcc_data.pcc_lock); |
950 | return ret; | 956 | return ret; |
951 | } | 957 | } |
952 | EXPORT_SYMBOL_GPL(cppc_get_perf_caps); | 958 | EXPORT_SYMBOL_GPL(cppc_get_perf_caps); |
@@ -986,7 +992,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) | |||
986 | /* Are any of the regs PCC ?*/ | 992 | /* Are any of the regs PCC ?*/ |
987 | if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) || | 993 | if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) || |
988 | CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) { | 994 | CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) { |
989 | down_write(&pcc_lock); | 995 | down_write(&pcc_data.pcc_lock); |
990 | regs_in_pcc = 1; | 996 | regs_in_pcc = 1; |
991 | /* Ring doorbell once to update PCC subspace */ | 997 | /* Ring doorbell once to update PCC subspace */ |
992 | if (send_pcc_cmd(CMD_READ) < 0) { | 998 | if (send_pcc_cmd(CMD_READ) < 0) { |
@@ -1019,7 +1025,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) | |||
1019 | perf_fb_ctrs->ctr_wrap_time = ctr_wrap_time; | 1025 | perf_fb_ctrs->ctr_wrap_time = ctr_wrap_time; |
1020 | out_err: | 1026 | out_err: |
1021 | if (regs_in_pcc) | 1027 | if (regs_in_pcc) |
1022 | up_write(&pcc_lock); | 1028 | up_write(&pcc_data.pcc_lock); |
1023 | return ret; | 1029 | return ret; |
1024 | } | 1030 | } |
1025 | EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); | 1031 | EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); |
@@ -1052,17 +1058,17 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) | |||
1052 | * achieve that goal here | 1058 | * achieve that goal here |
1053 | */ | 1059 | */ |
1054 | if (CPC_IN_PCC(desired_reg)) { | 1060 | if (CPC_IN_PCC(desired_reg)) { |
1055 | down_read(&pcc_lock); /* BEGIN Phase-I */ | 1061 | down_read(&pcc_data.pcc_lock); /* BEGIN Phase-I */ |
1056 | /* | 1062 | /* |
1057 | * If there are pending write commands i.e pending_pcc_write_cmd | 1063 | * If there are pending write commands i.e pending_pcc_write_cmd |
1058 | * is TRUE, then we know OSPM owns the channel as another CPU | 1064 | * is TRUE, then we know OSPM owns the channel as another CPU |
1059 | * has already checked for command completion bit and updated | 1065 | * has already checked for command completion bit and updated |
1060 | * the corresponding CPC registers | 1066 | * the corresponding CPC registers |
1061 | */ | 1067 | */ |
1062 | if (!pending_pcc_write_cmd) { | 1068 | if (!pcc_data.pending_pcc_write_cmd) { |
1063 | ret = check_pcc_chan(); | 1069 | ret = check_pcc_chan(); |
1064 | if (ret) { | 1070 | if (ret) { |
1065 | up_read(&pcc_lock); | 1071 | up_read(&pcc_data.pcc_lock); |
1066 | return ret; | 1072 | return ret; |
1067 | } | 1073 | } |
1068 | /* | 1074 | /* |
@@ -1070,9 +1076,9 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) | |||
1070 | * will not arrive and steal the channel during the | 1076 | * will not arrive and steal the channel during the |
1071 | * transition to write lock | 1077 | * transition to write lock |
1072 | */ | 1078 | */ |
1073 | pending_pcc_write_cmd = TRUE; | 1079 | pcc_data.pending_pcc_write_cmd = TRUE; |
1074 | } | 1080 | } |
1075 | cpc_desc->write_cmd_id = pcc_write_cnt; | 1081 | cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt; |
1076 | cpc_desc->write_cmd_status = 0; | 1082 | cpc_desc->write_cmd_status = 0; |
1077 | } | 1083 | } |
1078 | 1084 | ||
@@ -1083,7 +1089,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) | |||
1083 | cpc_write(desired_reg, perf_ctrls->desired_perf); | 1089 | cpc_write(desired_reg, perf_ctrls->desired_perf); |
1084 | 1090 | ||
1085 | if (CPC_IN_PCC(desired_reg)) | 1091 | if (CPC_IN_PCC(desired_reg)) |
1086 | up_read(&pcc_lock); /* END Phase-I */ | 1092 | up_read(&pcc_data.pcc_lock); /* END Phase-I */ |
1087 | /* | 1093 | /* |
1088 | * This is Phase-II where we transfer the ownership of PCC to Platform | 1094 | * This is Phase-II where we transfer the ownership of PCC to Platform |
1089 | * | 1095 | * |
@@ -1131,15 +1137,15 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) | |||
1131 | * the write command before servicing the read command | 1137 | * the write command before servicing the read command |
1132 | */ | 1138 | */ |
1133 | if (CPC_IN_PCC(desired_reg)) { | 1139 | if (CPC_IN_PCC(desired_reg)) { |
1134 | if (down_write_trylock(&pcc_lock)) { /* BEGIN Phase-II */ | 1140 | if (down_write_trylock(&pcc_data.pcc_lock)) { /* BEGIN Phase-II */ |
1135 | /* Update only if there are pending write commands */ | 1141 | /* Update only if there are pending write commands */ |
1136 | if (pending_pcc_write_cmd) | 1142 | if (pcc_data.pending_pcc_write_cmd) |
1137 | send_pcc_cmd(CMD_WRITE); | 1143 | send_pcc_cmd(CMD_WRITE); |
1138 | up_write(&pcc_lock); /* END Phase-II */ | 1144 | up_write(&pcc_data.pcc_lock); /* END Phase-II */ |
1139 | } else | 1145 | } else |
1140 | /* Wait until pcc_write_cnt is updated by send_pcc_cmd */ | 1146 | /* Wait until pcc_write_cnt is updated by send_pcc_cmd */ |
1141 | wait_event(pcc_write_wait_q, | 1147 | wait_event(pcc_data.pcc_write_wait_q, |
1142 | cpc_desc->write_cmd_id != pcc_write_cnt); | 1148 | cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt); |
1143 | 1149 | ||
1144 | /* send_pcc_cmd updates the status in case of failure */ | 1150 | /* send_pcc_cmd updates the status in case of failure */ |
1145 | ret = cpc_desc->write_cmd_status; | 1151 | ret = cpc_desc->write_cmd_status; |
@@ -1181,10 +1187,11 @@ unsigned int cppc_get_transition_latency(int cpu_num) | |||
1181 | if (!CPC_IN_PCC(desired_reg)) | 1187 | if (!CPC_IN_PCC(desired_reg)) |
1182 | return CPUFREQ_ETERNAL; | 1188 | return CPUFREQ_ETERNAL; |
1183 | 1189 | ||
1184 | if (pcc_mpar) | 1190 | if (pcc_data.pcc_mpar) |
1185 | latency_ns = 60 * (1000 * 1000 * 1000 / pcc_mpar); | 1191 | latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar); |
1186 | 1192 | ||
1187 | latency_ns = max(latency_ns, (pcc_nominal + pcc_mrtt) * 1000); | 1193 | latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000); |
1194 | latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000); | ||
1188 | 1195 | ||
1189 | return latency_ns; | 1196 | return latency_ns; |
1190 | } | 1197 | } |