author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-03-14 09:21:45 -0400
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-03-14 09:21:45 -0400
commit    07cc77e05e82b4eb6d11161c31b6aca5ed146baf (patch)
tree      4f37f2016e549f7dd9360cdd68c5aaba050ec429
parent    e1fb19cbbbf57ee8c10d4fa6d6aadf16c73e75bd (diff)
parent    323ee64aa175a67fbbe744e809777d17e6fb42d7 (diff)
Merge branch 'powercap'
* powercap:
  powercap/rapl: track lead cpu per package
  powercap/rapl: add package reference per domain
  powercap/rapl: reduce ipi calls
  cpumask: export cpumask_any_but
-rw-r--r--  drivers/powercap/intel_rapl.c  220
-rw-r--r--  lib/cpumask.c                    1
2 files changed, 115 insertions, 106 deletions
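The three rapl patches in this merge share one idea: each package keeps a designated "lead" CPU, and MSR updates are done as a single read-modify-write executed on that CPU, instead of a separate cross-CPU read followed by a cross-CPU write. The sketch below condenses that pattern; msrl_update_on_cpu() is a hypothetical wrapper name added for illustration, while struct msrl_action, msrl_update_func(), rdmsrl_safe(), wrmsrl_safe() and smp_call_function_single() match the code in the diff that follows.

/* Condensed sketch of the IPI-reduction pattern introduced below. */
#include <linux/types.h>
#include <linux/smp.h>
#include <asm/msr.h>

struct msrl_action {
        u32 msr_no;
        u64 clear_mask;
        u64 set_mask;
        int err;
};

/* Runs on the target (lead) CPU: read-modify-write one MSR locally. */
static void msrl_update_func(void *info)
{
        struct msrl_action *ma = info;
        u64 val;

        ma->err = rdmsrl_safe(ma->msr_no, &val);
        if (ma->err)
                return;
        val &= ~ma->clear_mask;
        val |= ma->set_mask;
        ma->err = wrmsrl_safe(ma->msr_no, val);
}

/* Caller side: one smp_call_function_single() instead of two *_on_cpu() IPIs. */
static int msrl_update_on_cpu(int cpu, u32 msr_no, u64 clear, u64 set)
{
        struct msrl_action ma = {
                .msr_no = msr_no,
                .clear_mask = clear,
                .set_mask = set,
        };
        int ret = smp_call_function_single(cpu, msrl_update_func, &ma, 1);

        return ret ? ret : ma.err;
}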
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 6c592dc71aee..cdfd01f0adb8 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -133,6 +133,12 @@ struct rapl_domain_data {
         unsigned long timestamp;
 };
 
+struct msrl_action {
+        u32 msr_no;
+        u64 clear_mask;
+        u64 set_mask;
+        int err;
+};
 
 #define DOMAIN_STATE_INACTIVE           BIT(0)
 #define DOMAIN_STATE_POWER_LIMIT_SET    BIT(1)
@@ -149,6 +155,7 @@ struct rapl_power_limit {
 static const char pl1_name[] = "long_term";
 static const char pl2_name[] = "short_term";
 
+struct rapl_package;
 struct rapl_domain {
         const char *name;
         enum rapl_domain_type id;
@@ -159,7 +166,7 @@ struct rapl_domain {
         u64 attr_map; /* track capabilities */
         unsigned int state;
         unsigned int domain_energy_unit;
-        int package_id;
+        struct rapl_package *rp;
 };
 #define power_zone_to_rapl_domain(_zone) \
         container_of(_zone, struct rapl_domain, power_zone)
@@ -184,6 +191,7 @@ struct rapl_package {
          * notify interrupt enable status.
          */
         struct list_head plist;
+        int lead_cpu; /* one active cpu per package for access */
 };
 
 struct rapl_defaults {
@@ -231,10 +239,10 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
 static int rapl_write_data_raw(struct rapl_domain *rd,
                         enum rapl_primitives prim,
                         unsigned long long value);
-static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
+static u64 rapl_unit_xlate(struct rapl_domain *rd,
                         enum unit_type type, u64 value,
                         int to_raw);
-static void package_power_limit_irq_save(int package_id);
+static void package_power_limit_irq_save(struct rapl_package *rp);
 
 static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */
 
@@ -260,20 +268,6 @@ static struct rapl_package *find_package_by_id(int id)
         return NULL;
 }
 
-/* caller to ensure CPU hotplug lock is held */
-static int find_active_cpu_on_package(int package_id)
-{
-        int i;
-
-        for_each_online_cpu(i) {
-                if (topology_physical_package_id(i) == package_id)
-                        return i;
-        }
-        /* all CPUs on this package are offline */
-
-        return -ENODEV;
-}
-
 /* caller must hold cpu hotplug lock */
 static void rapl_cleanup_data(void)
 {
@@ -312,25 +306,19 @@ static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
 {
         struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev);
 
-        *energy = rapl_unit_xlate(rd, 0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
+        *energy = rapl_unit_xlate(rd, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
         return 0;
 }
 
 static int release_zone(struct powercap_zone *power_zone)
 {
         struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
-        struct rapl_package *rp;
+        struct rapl_package *rp = rd->rp;
 
         /* package zone is the last zone of a package, we can free
          * memory here since all children has been unregistered.
          */
         if (rd->id == RAPL_DOMAIN_PACKAGE) {
-                rp = find_package_by_id(rd->package_id);
-                if (!rp) {
-                        dev_warn(&power_zone->dev, "no package id %s\n",
-                                rd->name);
-                        return -ENODEV;
-                }
                 kfree(rd);
                 rp->domains = NULL;
         }
@@ -432,11 +420,7 @@ static int set_power_limit(struct powercap_zone *power_zone, int id,
 
         get_online_cpus();
         rd = power_zone_to_rapl_domain(power_zone);
-        rp = find_package_by_id(rd->package_id);
-        if (!rp) {
-                ret = -ENODEV;
-                goto set_exit;
-        }
+        rp = rd->rp;
 
         if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
                 dev_warn(&power_zone->dev, "%s locked by BIOS, monitoring only\n",
@@ -456,7 +440,7 @@ static int set_power_limit(struct powercap_zone *power_zone, int id,
                 ret = -EINVAL;
         }
         if (!ret)
-                package_power_limit_irq_save(rd->package_id);
+                package_power_limit_irq_save(rp);
 set_exit:
         put_online_cpus();
         return ret;
@@ -655,24 +639,19 @@ static void rapl_init_domains(struct rapl_package *rp)
                         break;
                 }
                 if (mask) {
-                        rd->package_id = rp->id;
+                        rd->rp = rp;
                         rd++;
                 }
         }
 }
 
-static u64 rapl_unit_xlate(struct rapl_domain *rd, int package,
-                        enum unit_type type, u64 value,
-                        int to_raw)
+static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
+                        u64 value, int to_raw)
 {
         u64 units = 1;
-        struct rapl_package *rp;
+        struct rapl_package *rp = rd->rp;
         u64 scale = 1;
 
-        rp = find_package_by_id(package);
-        if (!rp)
-                return value;
-
         switch (type) {
         case POWER_UNIT:
                 units = rp->power_unit;
@@ -769,10 +748,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
         msr = rd->msrs[rp->id];
         if (!msr)
                 return -EINVAL;
-        /* use physical package id to look up active cpus */
-        cpu = find_active_cpu_on_package(rd->package_id);
-        if (cpu < 0)
-                return cpu;
+
+        cpu = rd->rp->lead_cpu;
 
         /* special-case package domain, which uses a different bit*/
         if (prim == FW_LOCK && rd->id == RAPL_DOMAIN_PACKAGE) {
@@ -793,42 +770,66 @@ static int rapl_read_data_raw(struct rapl_domain *rd,
         final = value & rp->mask;
         final = final >> rp->shift;
         if (xlate)
-                *data = rapl_unit_xlate(rd, rd->package_id, rp->unit, final, 0);
+                *data = rapl_unit_xlate(rd, rp->unit, final, 0);
         else
                 *data = final;
 
         return 0;
 }
 
+
+static int msrl_update_safe(u32 msr_no, u64 clear_mask, u64 set_mask)
+{
+        int err;
+        u64 val;
+
+        err = rdmsrl_safe(msr_no, &val);
+        if (err)
+                goto out;
+
+        val &= ~clear_mask;
+        val |= set_mask;
+
+        err = wrmsrl_safe(msr_no, val);
+
+out:
+        return err;
+}
+
+static void msrl_update_func(void *info)
+{
+        struct msrl_action *ma = info;
+
+        ma->err = msrl_update_safe(ma->msr_no, ma->clear_mask, ma->set_mask);
+}
+
 /* Similar use of primitive info in the read counterpart */
 static int rapl_write_data_raw(struct rapl_domain *rd,
                         enum rapl_primitives prim,
                         unsigned long long value)
 {
-        u64 msr_val;
-        u32 msr;
         struct rapl_primitive_info *rp = &rpi[prim];
         int cpu;
+        u64 bits;
+        struct msrl_action ma;
+        int ret;
 
-        cpu = find_active_cpu_on_package(rd->package_id);
-        if (cpu < 0)
-                return cpu;
-        msr = rd->msrs[rp->id];
-        if (rdmsrl_safe_on_cpu(cpu, msr, &msr_val)) {
-                dev_dbg(&rd->power_zone.dev,
-                        "failed to read msr 0x%x on cpu %d\n", msr, cpu);
-                return -EIO;
-        }
-        value = rapl_unit_xlate(rd, rd->package_id, rp->unit, value, 1);
-        msr_val &= ~rp->mask;
-        msr_val |= value << rp->shift;
-        if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
-                dev_dbg(&rd->power_zone.dev,
-                        "failed to write msr 0x%x on cpu %d\n", msr, cpu);
-                return -EIO;
-        }
+        cpu = rd->rp->lead_cpu;
+        bits = rapl_unit_xlate(rd, rp->unit, value, 1);
+        bits |= bits << rp->shift;
+        memset(&ma, 0, sizeof(ma));
 
-        return 0;
+        ma.msr_no = rd->msrs[rp->id];
+        ma.clear_mask = rp->mask;
+        ma.set_mask = bits;
+
+        ret = smp_call_function_single(cpu, msrl_update_func, &ma, 1);
+        if (ret)
+                WARN_ON_ONCE(ret);
+        else
+                ret = ma.err;
+
+        return ret;
 }
 
 /*
@@ -893,6 +894,21 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
         return 0;
 }
 
+static void power_limit_irq_save_cpu(void *info)
+{
+        u32 l, h = 0;
+        struct rapl_package *rp = (struct rapl_package *)info;
+
+        /* save the state of PLN irq mask bit before disabling it */
+        rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+        if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
+                rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
+                rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
+        }
+        l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+        wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
 
 /* REVISIT:
  * When package power limit is set artificially low by RAPL, LVT
@@ -904,61 +920,40 @@ static int rapl_check_unit_atom(struct rapl_package *rp, int cpu)
  * to do by adding an atomic notifier.
  */
 
-static void package_power_limit_irq_save(int package_id)
+static void package_power_limit_irq_save(struct rapl_package *rp)
 {
-        u32 l, h = 0;
-        int cpu;
-        struct rapl_package *rp;
-
-        rp = find_package_by_id(package_id);
-        if (!rp)
-                return;
-
         if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
                 return;
 
-        cpu = find_active_cpu_on_package(package_id);
-        if (cpu < 0)
-                return;
-        /* save the state of PLN irq mask bit before disabling it */
-        rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
-        if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
-                rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
-                rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
-        }
-        l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
-        wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+        smp_call_function_single(rp->lead_cpu, power_limit_irq_save_cpu, rp, 1);
 }
 
-/* restore per package power limit interrupt enable state */
-static void package_power_limit_irq_restore(int package_id)
+static void power_limit_irq_restore_cpu(void *info)
 {
-        u32 l, h;
-        int cpu;
-        struct rapl_package *rp;
+        u32 l, h = 0;
+        struct rapl_package *rp = (struct rapl_package *)info;
 
-        rp = find_package_by_id(package_id);
-        if (!rp)
-                return;
+        rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
 
-        if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
-                return;
+        if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
+                l |= PACKAGE_THERM_INT_PLN_ENABLE;
+        else
+                l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+
+        wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
 
-        cpu = find_active_cpu_on_package(package_id);
-        if (cpu < 0)
+/* restore per package power limit interrupt enable state */
+static void package_power_limit_irq_restore(struct rapl_package *rp)
+{
+        if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
                 return;
 
         /* irq enable state not saved, nothing to restore */
         if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
                 return;
-        rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
-
-        if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
-                l |= PACKAGE_THERM_INT_PLN_ENABLE;
-        else
-                l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
 
-        wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+        smp_call_function_single(rp->lead_cpu, power_limit_irq_restore_cpu, rp, 1);
 }
 
 static void set_floor_freq_default(struct rapl_domain *rd, bool mode)
@@ -1141,7 +1136,7 @@ static int rapl_unregister_powercap(void)
          * hotplug lock held
          */
         list_for_each_entry(rp, &rapl_packages, plist) {
-                package_power_limit_irq_restore(rp->id);
+                package_power_limit_irq_restore(rp);
 
                 for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
                      rd++) {
@@ -1392,7 +1387,8 @@ static int rapl_detect_topology(void)
                 /* add the new package to the list */
                 new_package->id = phy_package_id;
                 new_package->nr_cpus = 1;
-
+                /* use the first active cpu of the package to access */
+                new_package->lead_cpu = i;
                 /* check if the package contains valid domains */
                 if (rapl_detect_domains(new_package, i) ||
                         rapl_defaults->check_unit(new_package, i)) {
@@ -1448,6 +1444,8 @@ static int rapl_add_package(int cpu)
         /* add the new package to the list */
         rp->id = phy_package_id;
         rp->nr_cpus = 1;
+        rp->lead_cpu = cpu;
+
         /* check if the package contains valid domains */
         if (rapl_detect_domains(rp, cpu) ||
                 rapl_defaults->check_unit(rp, cpu)) {
@@ -1480,6 +1478,7 @@ static int rapl_cpu_callback(struct notifier_block *nfb,
         unsigned long cpu = (unsigned long)hcpu;
         int phy_package_id;
         struct rapl_package *rp;
+        int lead_cpu;
 
         phy_package_id = topology_physical_package_id(cpu);
         switch (action) {
@@ -1500,6 +1499,15 @@ static int rapl_cpu_callback(struct notifier_block *nfb,
                         break;
                 if (--rp->nr_cpus == 0)
                         rapl_remove_package(rp);
+                else if (cpu == rp->lead_cpu) {
+                        /* choose another active cpu in the package */
+                        lead_cpu = cpumask_any_but(topology_core_cpumask(cpu), cpu);
+                        if (lead_cpu < nr_cpu_ids)
+                                rp->lead_cpu = lead_cpu;
+                        else /* should never go here */
+                                pr_err("no active cpu available for package %d\n",
+                                        phy_package_id);
+                }
         }
 
         return NOTIFY_OK;
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 5a70f6196f57..81dedaab36cc 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -41,6 +41,7 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
                         break;
         return i;
 }
+EXPORT_SYMBOL(cpumask_any_but);
 
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
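The one-line export in lib/cpumask.c supports the hotplug path in the intel_rapl.c hunk above: when the current lead CPU of a package goes offline, the driver hands the role to another CPU in the same package via cpumask_any_but(), and because intel_rapl can be built as a module the out-of-line helper needs an EXPORT_SYMBOL. A minimal sketch of that failover follows; rapl_pick_new_lead() is a hypothetical helper name and the struct below is a pared-down stand-in for the driver's struct rapl_package, the rest matches the diff.

#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/printk.h>

/* Pared-down stand-in for the driver's struct rapl_package (sketch only). */
struct rapl_package {
        int lead_cpu;           /* one active cpu per package for access */
};

/* Hypothetical helper condensing the CPU_DOWN_PREPARE handling above. */
static void rapl_pick_new_lead(struct rapl_package *rp, unsigned int cpu)
{
        int lead_cpu;

        if (cpu != (unsigned int)rp->lead_cpu)
                return;

        /* any other cpu that shares the package with the departing one */
        lead_cpu = cpumask_any_but(topology_core_cpumask(cpu), cpu);
        if (lead_cpu < nr_cpu_ids)
                rp->lead_cpu = lead_cpu;
        else
                pr_err("no active cpu available for package\n");
}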