Diffstat (limited to 'drivers/acpi/cppc_acpi.c')
 -rw-r--r--  drivers/acpi/cppc_acpi.c  135
 1 file changed, 117 insertions(+), 18 deletions(-)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6c54a8f16706..f00fac363acd 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -95,6 +95,17 @@ static unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
 		(cpc)->cpc_entry.reg.space_id ==		\
 		ACPI_ADR_SPACE_PLATFORM_COMM)
 
+/* Evaluates to True if reg is a NULL register descriptor */
+#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
+				(reg)->address == 0 &&			\
+				(reg)->bit_width == 0 &&		\
+				(reg)->bit_offset == 0 &&		\
+				(reg)->access_width == 0)
+
+/* Evaluates to True if an optional cpc field is supported */
+#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
+				!!(cpc)->cpc_entry.int_value :		\
+				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
 /*
  * Arbitrary Retries in case the remote processor is slow to respond
  * to PCC commands. Keeping it high enough to cover emulators where
@@ -102,6 +113,71 @@ static unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;
  */
 #define NUM_RETRIES 500
 
+struct cppc_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct kobject *kobj,
+			struct attribute *attr, char *buf);
+	ssize_t (*store)(struct kobject *kobj,
+			struct attribute *attr, const char *c, ssize_t count);
+};
+
+#define define_one_cppc_ro(_name)		\
+static struct cppc_attr _name =			\
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
+
+static ssize_t show_feedback_ctrs(struct kobject *kobj,
+		struct attribute *attr, char *buf)
+{
+	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
+	struct cppc_perf_fb_ctrs fb_ctrs = {0};
+
+	cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
+
+	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
+			fb_ctrs.reference, fb_ctrs.delivered);
+}
+define_one_cppc_ro(feedback_ctrs);
+
+static ssize_t show_reference_perf(struct kobject *kobj,
+		struct attribute *attr, char *buf)
+{
+	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
+	struct cppc_perf_fb_ctrs fb_ctrs = {0};
+
+	cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n",
+			fb_ctrs.reference_perf);
+}
+define_one_cppc_ro(reference_perf);
+
+static ssize_t show_wraparound_time(struct kobject *kobj,
+				struct attribute *attr, char *buf)
+{
+	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
+	struct cppc_perf_fb_ctrs fb_ctrs = {0};
+
+	cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", fb_ctrs.ctr_wrap_time);
+
+}
+define_one_cppc_ro(wraparound_time);
+
+static struct attribute *cppc_attrs[] = {
+	&feedback_ctrs.attr,
+	&reference_perf.attr,
+	&wraparound_time.attr,
+	NULL
+};
+
+static struct kobj_type cppc_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.default_attrs = cppc_attrs,
+};
+
 static int check_pcc_chan(void)
 {
 	int ret = -EIO;
@@ -555,6 +631,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 	union acpi_object *out_obj, *cpc_obj;
 	struct cpc_desc *cpc_ptr;
 	struct cpc_reg *gas_t;
+	struct device *cpu_dev;
 	acpi_handle handle = pr->handle;
 	unsigned int num_ent, i, cpc_rev;
 	acpi_status status;
@@ -678,6 +755,16 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 	/* Everything looks okay */
 	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);
 
+	/* Add per logical CPU nodes for reading its feedback counters. */
+	cpu_dev = get_cpu_device(pr->id);
+	if (!cpu_dev)
+		goto out_free;
+
+	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
+			"acpi_cppc");
+	if (ret)
+		goto out_free;
+
 	kfree(output.pointer);
 	return 0;
 
@@ -708,6 +795,7 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
 	struct cpc_desc *cpc_ptr;
 	unsigned int i;
 	void __iomem *addr;
+
 	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
 
 	/* Free all the mapped sys mem areas for this CPU */
@@ -717,6 +805,7 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
 			iounmap(addr);
 	}
 
+	kobject_put(&cpc_ptr->kobj);
 	kfree(cpc_ptr);
 }
 EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
@@ -818,7 +907,7 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
 	struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf,
 								 *nom_perf;
-	u64 high, low, ref, nom;
+	u64 high, low, nom;
 	int ret = 0, regs_in_pcc = 0;
 
 	if (!cpc_desc) {
@@ -849,15 +938,9 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
 	cpc_read(lowest_reg, &low);
 	perf_caps->lowest_perf = low;
 
-	cpc_read(ref_perf, &ref);
-	perf_caps->reference_perf = ref;
-
 	cpc_read(nom_perf, &nom);
 	perf_caps->nominal_perf = nom;
 
-	if (!ref)
-		perf_caps->reference_perf = perf_caps->nominal_perf;
-
 	if (!high || !low || !nom)
 		ret = -EFAULT;
 
@@ -878,8 +961,9 @@ EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
 int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 {
 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
-	struct cpc_register_resource *delivered_reg, *reference_reg;
-	u64 delivered, reference;
+	struct cpc_register_resource *delivered_reg, *reference_reg,
+		*ref_perf_reg, *ctr_wrap_reg;
+	u64 delivered, reference, ref_perf, ctr_wrap_time;
 	int ret = 0, regs_in_pcc = 0;
 
 	if (!cpc_desc) {
@@ -889,9 +973,19 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 
 	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
 	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
+	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
+	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
+
+	/*
+	 * If the reference perf register is not supported then we should
+	 * use the nominal perf value
+	 */
+	if (!CPC_SUPPORTED(ref_perf_reg))
+		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
 
 	/* Are any of the regs PCC ?*/
-	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg)) {
+	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
+		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
 		down_write(&pcc_lock);
 		regs_in_pcc = 1;
 		/* Ring doorbell once to update PCC subspace */
@@ -903,21 +997,26 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
 
 	cpc_read(delivered_reg, &delivered);
 	cpc_read(reference_reg, &reference);
+	cpc_read(ref_perf_reg, &ref_perf);
+
+	/*
+	 * Per spec, if the ctr_wrap_time optional register is unsupported,
+	 * then the performance counters are assumed to never wrap during
+	 * the lifetime of the platform
+	 */
+	ctr_wrap_time = (u64)(~((u64)0));
+	if (CPC_SUPPORTED(ctr_wrap_reg))
+		cpc_read(ctr_wrap_reg, &ctr_wrap_time);
 
-	if (!delivered || !reference) {
+	if (!delivered || !reference || !ref_perf) {
 		ret = -EFAULT;
 		goto out_err;
 	}
 
 	perf_fb_ctrs->delivered = delivered;
 	perf_fb_ctrs->reference = reference;
-
-	perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered;
-	perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference;
-
-	perf_fb_ctrs->prev_delivered = delivered;
-	perf_fb_ctrs->prev_reference = reference;
-
+	perf_fb_ctrs->reference_perf = ref_perf;
+	perf_fb_ctrs->ctr_wrap_time = ctr_wrap_time;
 out_err:
 	if (regs_in_pcc)
 		up_write(&pcc_lock);
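
Since this patch drops the prev_delivered/prev_reference bookkeeping from cppc_get_perf_ctrs(), consumers of the new sysfs nodes see raw counter values and must compute deltas themselves. A minimal userspace sketch, not part of the patch: the /sys/devices/system/cpu/cpuN/acpi_cppc/ path follows from the kobject added under cpu_dev->kobj above, and the "ref:... del:..." format follows from show_feedback_ctrs().

/* Sample feedback_ctrs twice and report the delivered/reference ratio. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <inttypes.h>

struct fb_sample {
	uint64_t reference;
	uint64_t delivered;
};

/* Parse "ref:<u64> del:<u64>" as printed by show_feedback_ctrs(). */
static int read_fb_ctrs(int cpu, struct fb_sample *s)
{
	char path[128];
	FILE *f;
	int ret;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/acpi_cppc/feedback_ctrs", cpu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	ret = fscanf(f, "ref:%" SCNu64 " del:%" SCNu64,
		     &s->reference, &s->delivered);
	fclose(f);
	return ret == 2 ? 0 : -1;
}

int main(int argc, char **argv)
{
	int cpu = argc > 1 ? atoi(argv[1]) : 0;
	struct fb_sample t0, t1;
	uint64_t dref, ddel;

	if (read_fb_ctrs(cpu, &t0))
		return 1;
	sleep(1);
	if (read_fb_ctrs(cpu, &t1))
		return 1;

	/*
	 * With the in-kernel delta bookkeeping removed by this patch,
	 * the caller owns delta and wraparound handling.
	 */
	dref = t1.reference - t0.reference;
	ddel = t1.delivered - t0.delivered;
	if (!dref)
		return 1;

	/* Delivered-to-reference ratio over the sampling window. */
	printf("cpu%d delivered/reference = %.3f\n", cpu, (double)ddel / dref);
	return 0;
}

The wraparound_time attribute can be read the same way to bound how long the counters may be left unsampled before they wrap; per the spec comment above, an unsupported register means they are assumed never to wrap.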