Diffstat (limited to 'drivers/acpi/cppc_acpi.c')

 -rw-r--r--  drivers/acpi/cppc_acpi.c | 198
 1 file changed, 164 insertions, 34 deletions

diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 93826c7b73ae..5623fca54ca1 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -40,15 +40,35 @@
 #include <linux/cpufreq.h>
 #include <linux/delay.h>
 #include <linux/ktime.h>
+#include <linux/rwsem.h>
+#include <linux/wait.h>
 
 #include <acpi/cppc_acpi.h>
+
 /*
- * Lock to provide mutually exclusive access to the PCC
- * channel. e.g. When the remote updates the shared region
- * with new data, the reader needs to be protected from
- * other CPUs activity on the same channel.
+ * Lock to provide controlled access to the PCC channel.
+ *
+ * For performance-critical use cases (currently cppc_set_perf), we need
+ * to take the read lock and check whether the channel belongs to the
+ * OSPM before reading from or writing to the PCC subspace.
+ * We need to take the write lock before transferring channel ownership
+ * to the platform via a doorbell.
+ * This allows us to batch a number of CPPC requests if they happen to
+ * originate at about the same time.
+ *
+ * For non-performance-critical use cases (init), take the write lock
+ * for all purposes, which gives exclusive access.
  */
-static DEFINE_SPINLOCK(pcc_lock);
+static DECLARE_RWSEM(pcc_lock);
+
+/* Indicates if there are any pending/batched PCC write commands */
+static bool pending_pcc_write_cmd;
+
+/* Wait queue for CPUs whose requests were batched */
+static DECLARE_WAIT_QUEUE_HEAD(pcc_write_wait_q);
+
+/* Used to identify if a batched request is delivered to the platform */
+static unsigned int pcc_write_cnt;
 
 /*
  * The cpc_desc structure contains the ACPI register details
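
Taken together, the hunk above replaces one mutual-exclusion lock with a reader/writer policy: many CPUs may stage requests under the read lock, and the doorbell is rung under the write lock. A minimal userspace analogue of that policy, using a pthread rwlock in place of the kernel rwsem (names mirror the patch, but the helpers are hypothetical):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t pcc_lock = PTHREAD_RWLOCK_INITIALIZER;
    static bool pending_pcc_write_cmd;

    /* Fast path (cppc_set_perf): many CPUs may stage requests concurrently */
    static void stage_request(void)
    {
        pthread_rwlock_rdlock(&pcc_lock);
        /* ... write the desired perf value into the shared PCC region ... */
        pending_pcc_write_cmd = true;   /* benign: every stager writes 'true' */
        pthread_rwlock_unlock(&pcc_lock);
    }

    /* Slow path (init, doorbell): exclusive access before ownership transfer */
    static void ring_doorbell(void)
    {
        pthread_rwlock_wrlock(&pcc_lock);
        if (pending_pcc_write_cmd) {
            /* ... transfer channel ownership to the platform ... */
            pending_pcc_write_cmd = false;
        }
        pthread_rwlock_unlock(&pcc_lock);
    }

    int main(void)
    {
        stage_request();
        ring_doorbell();
        return 0;
    }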
@@ -70,6 +90,11 @@ static unsigned int pcc_mpar, pcc_mrtt;
 /* pcc mapped address + header size + offset within PCC subspace */
 #define GET_PCC_VADDR(offs) (pcc_comm_addr + 0x8 + (offs))
 
+/* Check if a CPC register is in PCC */
+#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
+				(cpc)->cpc_entry.reg.space_id ==	\
+				ACPI_ADR_SPACE_PLATFORM_COMM)
+
 /*
  * Arbitrary Retries in case the remote processor is slow to respond
  * to PCC commands. Keeping it high enough to cover emulators where
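
The new helper folds the repeated space_id comparisons in the callers into one readable predicate. A compile-able illustration with simplified stand-in types (not the real ACPI structures):

    #include <stdio.h>

    /* Simplified stand-ins for the ACPI types the macro touches */
    #define ACPI_TYPE_BUFFER                3
    #define ACPI_ADR_SPACE_PLATFORM_COMM    10

    struct cpc_reg { int space_id; };
    struct cpc_register_resource {
        int type;
        struct { struct cpc_reg reg; } cpc_entry;
    };

    #define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&    \
                 (cpc)->cpc_entry.reg.space_id ==   \
                 ACPI_ADR_SPACE_PLATFORM_COMM)

    int main(void)
    {
        struct cpc_register_resource pcc_reg = {
            .type = ACPI_TYPE_BUFFER,
            .cpc_entry.reg.space_id = ACPI_ADR_SPACE_PLATFORM_COMM,
        };
        struct cpc_register_resource other_reg = { .type = ACPI_TYPE_BUFFER };

        /* Replaces four repeated space_id comparisons in cppc_get_perf_caps */
        printf("pcc_reg in PCC: %d\n", CPC_IN_PCC(&pcc_reg));    /* 1 */
        printf("other_reg in PCC: %d\n", CPC_IN_PCC(&other_reg)); /* 0 */
        return 0;
    }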
@@ -104,9 +129,13 @@ static int check_pcc_chan(void)
 	return ret;
 }
 
+/*
+ * This function transfers the ownership of the PCC channel to the
+ * platform, so it must be called while holding write_lock(pcc_lock).
+ */
 static int send_pcc_cmd(u16 cmd)
 {
-	int ret = -EIO;
+	int ret = -EIO, i;
 	struct acpi_pcct_shared_memory *generic_comm_base =
 		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
 	static ktime_t last_cmd_cmpl_time, last_mpar_reset;
@@ -118,10 +147,19 @@ static int send_pcc_cmd(u16 cmd)
 	 * the channel before writing to PCC space
 	 */
 	if (cmd == CMD_READ) {
+		/*
+		 * If there are pending cpc_writes, then we stole the channel
+		 * before write completion, so first send a WRITE command to
+		 * the platform.
+		 */
+		if (pending_pcc_write_cmd)
+			send_pcc_cmd(CMD_WRITE);
+
 		ret = check_pcc_chan();
 		if (ret)
-			return ret;
-	}
+			goto end;
+	} else /* CMD_WRITE */
+		pending_pcc_write_cmd = FALSE;
 
 	/*
 	 * Handle the Minimum Request Turnaround Time(MRTT)
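
The rule encoded above: a CMD_READ must never overtake a batched CMD_WRITE, so the read path flushes any pending write first. A single-threaded sketch of just that ordering, with hypothetical stand-ins for the kernel pieces:

    #include <stdbool.h>
    #include <stdio.h>

    enum { CMD_READ, CMD_WRITE };

    static bool pending_pcc_write_cmd;

    static int send_pcc_cmd(int cmd)
    {
        if (cmd == CMD_READ) {
            /* A read must not overtake a batched write: flush it first */
            if (pending_pcc_write_cmd)
                send_pcc_cmd(CMD_WRITE);
        } else {
            pending_pcc_write_cmd = false;  /* this call delivers the batch */
        }
        printf("doorbell: %s\n", cmd == CMD_READ ? "READ" : "WRITE");
        return 0;
    }

    int main(void)
    {
        pending_pcc_write_cmd = true;   /* a cppc_set_perf batch is staged */
        send_pcc_cmd(CMD_READ);         /* prints WRITE, then READ */
        return 0;
    }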
@@ -150,7 +188,8 @@
 		time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
 		if (time_delta < 60 * MSEC_PER_SEC) {
 			pr_debug("PCC cmd not sent due to MPAR limit");
-			return -EIO;
+			ret = -EIO;
+			goto end;
 		}
 		last_mpar_reset = ktime_get();
 		mpar_count = pcc_mpar;
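
For context, the surrounding (otherwise unchanged) MPAR logic allows at most pcc_mpar commands in any 60-second window; only the error path is rerouted to the new `end` label. A userspace sketch of such a rate limiter, with hypothetical names and an example limit:

    #include <stdio.h>
    #include <time.h>

    #define MSEC_PER_SEC 1000L

    static unsigned int pcc_mpar = 5;   /* max commands per 60 s (example) */
    static unsigned int mpar_count;

    static long ms_since(const struct timespec *t)
    {
        struct timespec now;
        clock_gettime(CLOCK_MONOTONIC, &now);
        return (now.tv_sec - t->tv_sec) * MSEC_PER_SEC +
               (now.tv_nsec - t->tv_nsec) / 1000000L;
    }

    /* Returns 0 if a command may be sent now, -1 if the MPAR limit applies */
    static int mpar_check(void)
    {
        static struct timespec last_mpar_reset;

        if (pcc_mpar && mpar_count == 0) {
            /* tv_sec == 0 means "never reset yet": treat window as expired */
            if (last_mpar_reset.tv_sec &&
                ms_since(&last_mpar_reset) < 60 * MSEC_PER_SEC)
                return -1;
            clock_gettime(CLOCK_MONOTONIC, &last_mpar_reset);
            mpar_count = pcc_mpar;      /* open a fresh 60 s window */
        }
        if (pcc_mpar)
            mpar_count--;
        return 0;
    }

    int main(void)
    {
        for (int i = 0; i < 7; i++)     /* 5 sent, then 2 throttled */
            printf("cmd %d: %s\n", i, mpar_check() ? "throttled" : "sent");
        return 0;
    }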
@@ -169,7 +208,7 @@
 	if (ret < 0) {
 		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
 				cmd, ret);
-		return ret;
+		goto end;
 	}
 
 	/*
@@ -191,6 +230,23 @@
 	}
 
 	mbox_client_txdone(pcc_channel, ret);
+
+end:
+	if (cmd == CMD_WRITE) {
+		if (unlikely(ret)) {
+			for_each_possible_cpu(i) {
+				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
+				if (!desc)
+					continue;
+
+				if (desc->write_cmd_id == pcc_write_cnt)
+					desc->write_cmd_status = ret;
+			}
+		}
+		pcc_write_cnt++;
+		wake_up_all(&pcc_write_wait_q);
+	}
+
 	return ret;
 }
 
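
The new end-of-command block stamps a failure into every descriptor tagged with the current batch, bumps the generation counter, and wakes all waiters. A pthread model of that completion protocol, with a condition variable standing in for the wait queue (all names and sizes hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    #define NR_CPUS 4

    struct cpc_desc { unsigned int write_cmd_id; int write_cmd_status; };
    static struct cpc_desc cpc_desc_ptr[NR_CPUS];

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t pcc_write_wait_q = PTHREAD_COND_INITIALIZER;
    static unsigned int pcc_write_cnt;

    /* Deliverer: propagate a failure to the whole current batch, then wake */
    static void complete_batch(int ret)
    {
        pthread_mutex_lock(&lock);
        if (ret)
            for (int i = 0; i < NR_CPUS; i++)
                if (cpc_desc_ptr[i].write_cmd_id == pcc_write_cnt)
                    cpc_desc_ptr[i].write_cmd_status = ret;
        pcc_write_cnt++;                            /* close this batch */
        pthread_cond_broadcast(&pcc_write_wait_q);  /* like wake_up_all() */
        pthread_mutex_unlock(&lock);
    }

    /* Waiter: equivalent of wait_event(q, write_cmd_id != pcc_write_cnt) */
    static int wait_for_batch(struct cpc_desc *desc)
    {
        pthread_mutex_lock(&lock);
        while (desc->write_cmd_id == pcc_write_cnt)
            pthread_cond_wait(&pcc_write_wait_q, &lock);
        pthread_mutex_unlock(&lock);
        return desc->write_cmd_status;
    }

    int main(void)
    {
        cpc_desc_ptr[0].write_cmd_id = pcc_write_cnt;  /* join current batch */
        complete_batch(-5 /* -EIO */);
        printf("status: %d\n", wait_for_batch(&cpc_desc_ptr[0]));  /* -5 */
        return 0;
    }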
@@ -776,12 +832,10 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 	nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF];
 
 	/* Are any of the regs PCC ?*/
-	if ((highest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
-		(lowest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
-		(ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
-		(nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
-		spin_lock(&pcc_lock);
+	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
+		CPC_IN_PCC(ref_perf) || CPC_IN_PCC(nom_perf)) {
 		regs_in_pcc = 1;
+		down_write(&pcc_lock);
 		/* Ring doorbell once to update PCC subspace */
 		if (send_pcc_cmd(CMD_READ) < 0) {
 			ret = -EIO;
@@ -809,7 +863,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 
 out_err:
 	if (regs_in_pcc)
-		spin_unlock(&pcc_lock);
+		up_write(&pcc_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
@@ -837,9 +891,8 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
 
 	/* Are any of the regs PCC ?*/
-	if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) ||
-		(reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) {
-		spin_lock(&pcc_lock);
+	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg)) {
+		down_write(&pcc_lock);
 		regs_in_pcc = 1;
 		/* Ring doorbell once to update PCC subspace */
 		if (send_pcc_cmd(CMD_READ) < 0) {
@@ -867,7 +920,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 
 out_err:
 	if (regs_in_pcc)
-		spin_unlock(&pcc_lock);
+		up_write(&pcc_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
@@ -892,12 +945,36 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 
 	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
 
-	/* If this is PCC reg, check if channel is free before writing */
-	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
-		spin_lock(&pcc_lock);
-		ret = check_pcc_chan();
-		if (ret)
-			goto busy_channel;
+	/*
+	 * This is Phase-I where we want to write to CPC registers
+	 * -> We want all CPUs to be able to execute this phase in parallel.
+	 *
+	 * Since a read lock can be acquired by multiple CPUs simultaneously,
+	 * we achieve that goal here.
+	 */
+	if (CPC_IN_PCC(desired_reg)) {
+		down_read(&pcc_lock);	/* BEGIN Phase-I */
+		/*
+		 * If there are pending write commands, i.e. if
+		 * pending_pcc_write_cmd is TRUE, then we know the OSPM owns
+		 * the channel, as another CPU has already checked the command
+		 * completion bit and updated the corresponding CPC registers.
+		 */
+		if (!pending_pcc_write_cmd) {
+			ret = check_pcc_chan();
+			if (ret) {
+				up_read(&pcc_lock);
+				return ret;
+			}
+			/*
+			 * Set the pending write flag to make sure a PCC
+			 * CMD_READ will not arrive and steal the channel
+			 * during the transition to the write lock.
+			 */
+			pending_pcc_write_cmd = TRUE;
+		}
+		cpc_desc->write_cmd_id = pcc_write_cnt;
+		cpc_desc->write_cmd_status = 0;
 	}
 
 	/*
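
Phase-I in isolation: under the read lock, only the first CPU of a batch pays for the channel-ownership poll; later CPUs piggyback on the already-claimed channel. A single-threaded sketch of that fast path (stand-ins are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    static bool pending_pcc_write_cmd;
    static int check_pcc_chan_calls;    /* counts expensive ownership polls */

    static int check_pcc_chan(void) { check_pcc_chan_calls++; return 0; }

    /* Phase-I body of cppc_set_perf, minus the locking */
    static int stage_perf_request(int cpu)
    {
        if (!pending_pcc_write_cmd) {
            /* First CPU of the batch: claim the channel for the OSPM */
            if (check_pcc_chan())
                return -1;
            pending_pcc_write_cmd = true;
        }
        /* ... cpc_write(desired_reg, ...) into the shared region ... */
        printf("cpu%d staged its request\n", cpu);
        return 0;
    }

    int main(void)
    {
        for (int cpu = 0; cpu < 4; cpu++)
            stage_perf_request(cpu);
        /* Only the first CPU paid for the ownership poll */
        printf("check_pcc_chan calls: %d\n", check_pcc_chan_calls);  /* 1 */
        return 0;
    }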
@@ -906,15 +983,68 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 	 */
 	cpc_write(desired_reg, perf_ctrls->desired_perf);
 
-	/* Is this a PCC reg ?*/
-	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
-		/* Ring doorbell so Remote can get our perf request. */
-		if (send_pcc_cmd(CMD_WRITE) < 0)
-			ret = -EIO;
+	if (CPC_IN_PCC(desired_reg))
+		up_read(&pcc_lock);	/* END Phase-I */
+	/*
+	 * This is Phase-II where we transfer the ownership of PCC to Platform.
+	 *
+	 * Short Summary: If we think of a group of cppc_set_perf requests
+	 * that happened in a short overlapping interval, the last CPU to
+	 * come out of Phase-I will enter Phase-II and ring the doorbell.
+	 *
+	 * We have the following requirements for Phase-II:
+	 *     1. We want to execute Phase-II only when there are no CPUs
+	 * currently executing in Phase-I
+	 *     2. Once we start Phase-II we want to avoid all other CPUs from
+	 * entering Phase-I.
+	 *     3. We want only one CPU among all those who went through Phase-I
+	 * to run Phase-II
+	 *
+	 * If down_write_trylock fails to get the lock and doesn't transfer
+	 * the PCC ownership to the platform, then one of the following is TRUE:
+	 *     1. There is at least one CPU in Phase-I which will later execute
+	 * down_write_trylock, so the CPUs in Phase-I will be responsible for
+	 * executing the Phase-II.
+	 *     2. Some other CPU has beaten this CPU to successfully execute
+	 * the down_write_trylock and has already acquired the write_lock. We
+	 * know for a fact it (the other CPU acquiring the write_lock) couldn't
+	 * have happened before this CPU's Phase-I as we held the read_lock.
+	 *     3. Some other CPU executing a PCC CMD_READ has stolen the
+	 * down_write, in which case send_pcc_cmd will check for pending
+	 * CMD_WRITE commands by checking pending_pcc_write_cmd, so this
+	 * CPU can be certain that its request will be delivered.
+	 * So in all cases, this CPU knows that its request will be delivered
+	 * by another CPU and can return.
+	 *
+	 * After getting the down_write we still need to check for
+	 * pending_pcc_write_cmd to take care of the following scenario:
+	 * The thread running this code could be scheduled out between
+	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
+	 * could have delivered the request to the platform by triggering the
+	 * doorbell and transferred the ownership of PCC to the platform. So
+	 * this avoids triggering an unnecessary doorbell and, more
+	 * importantly, before triggering the doorbell it makes sure that the
+	 * PCC channel ownership is still with OSPM.
+	 * pending_pcc_write_cmd can also be cleared by a different CPU if
+	 * there was a PCC CMD_READ waiting on down_write and it steals the
+	 * lock before the PCC CMD_WRITE is completed. send_pcc_cmd checks
+	 * for this case during a CMD_READ and, if there are pending writes,
+	 * it delivers the write command before servicing the read command.
+	 */
+	if (CPC_IN_PCC(desired_reg)) {
+		if (down_write_trylock(&pcc_lock)) {	/* BEGIN Phase-II */
+			/* Update only if there are pending write commands */
+			if (pending_pcc_write_cmd)
+				send_pcc_cmd(CMD_WRITE);
+			up_write(&pcc_lock);		/* END Phase-II */
+		} else
+			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
+			wait_event(pcc_write_wait_q,
+					cpc_desc->write_cmd_id != pcc_write_cnt);
+
+		/* send_pcc_cmd updates the status in case of failure */
+		ret = cpc_desc->write_cmd_status;
 	}
-busy_channel:
-	if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)
-		spin_unlock(&pcc_lock);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cppc_set_perf);
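
Phase-II ties it together: each CPU that staged a request races for the write lock with a trylock; the winner rings the doorbell once for the whole batch, and losers wait for the generation counter to move. A compact pthread model of both phases (rwlock and condition variable stand in for the kernel rwsem and wait queue; all names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_rwlock_t pcc_lock = PTHREAD_RWLOCK_INITIALIZER;
    static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t pcc_write_wait_q = PTHREAD_COND_INITIALIZER;
    static bool pending_pcc_write_cmd;
    static unsigned int pcc_write_cnt;

    static void deliver_batch(void)     /* models send_pcc_cmd(CMD_WRITE) */
    {
        pthread_mutex_lock(&wq_lock);
        pending_pcc_write_cmd = false;
        pcc_write_cnt++;
        pthread_cond_broadcast(&pcc_write_wait_q);
        pthread_mutex_unlock(&wq_lock);
    }

    static void *set_perf(void *arg)
    {
        unsigned int my_cmd_id;

        pthread_rwlock_rdlock(&pcc_lock);           /* BEGIN Phase-I */
        pthread_mutex_lock(&wq_lock);
        pending_pcc_write_cmd = true;
        my_cmd_id = pcc_write_cnt;                  /* join current batch */
        pthread_mutex_unlock(&wq_lock);
        /* ... stage the request in the shared region ... */
        pthread_rwlock_unlock(&pcc_lock);           /* END Phase-I */

        if (pthread_rwlock_trywrlock(&pcc_lock) == 0) { /* BEGIN Phase-II */
            /* The rwlock handoff makes this unlocked read safe here */
            if (pending_pcc_write_cmd)
                deliver_batch();                    /* one doorbell per batch */
            pthread_rwlock_unlock(&pcc_lock);       /* END Phase-II */
        } else {
            /* Someone else will deliver: wait for the batch to close */
            pthread_mutex_lock(&wq_lock);
            while (my_cmd_id == pcc_write_cnt)
                pthread_cond_wait(&pcc_write_wait_q, &wq_lock);
            pthread_mutex_unlock(&wq_lock);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t[4];
        for (int i = 0; i < 4; i++)
            pthread_create(&t[i], NULL, set_perf, NULL);
        for (int i = 0; i < 4; i++)
            pthread_join(t[i], NULL);
        printf("batches delivered: %u\n", pcc_write_cnt);   /* 1..4 */
        return 0;
    }

As in the patch, the trylock loser can return without delivering anything: whoever currently holds the lock (a reader that will trylock later, a competing writer, or a CMD_READ that flushes pending writes) is guaranteed to deliver the staged batch.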