author		Len Brown <len.brown@intel.com>	2012-06-03 23:24:00 -0400
committer	Len Brown <len.brown@intel.com>	2012-06-03 23:24:00 -0400
commit		d15cf7c129fa4ec4b44c52521e49ffafb9749029 (patch)
tree		03c323fe9623f7e82400bdfabdd3711a45596a53 /tools
parent		f8f5701bdaf9134b1f90e5044a82c66324d2073f (diff)
tools/power turbostat: fix un-intended affinity of forked program
Linux 3.4 included a modification to turbostat to lower cross-call
overhead by using scheduler affinity:

    15aaa34654831e98dd76f7738b6c7f5d05a66430
    (tools turbostat: reduce measurement overhead due to IPIs)

In the use-case where turbostat forks a child program, that change had
the unintended side-effect of binding the child to the last CPU in the
system.

This change removes the binding before forking the child.

This is a back-port of a fix already included in turbostat v2.

Signed-off-by: Len Brown <len.brown@intel.com>
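To make the failure mode concrete, here is a minimal sketch (illustrative only,
not code from the kernel tree) of how migrating the measuring process from CPU
to CPU leaves a lingering one-CPU affinity mask that a later fork()ed child
inherits. The helper name migrate_to() and the use of _SC_NPROCESSORS_ONLN are
assumptions of the sketch, not part of the patch.

/*
 * Illustrative sketch of the bug mechanism -- not turbostat source.
 * Reading counters CPU-by-CPU with sched_setaffinity() (as turbostat's
 * cpu_migrate() does) leaves the process pinned to the last CPU it
 * visited; a child fork()ed afterwards inherits that one-CPU mask.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>

static int migrate_to(int cpu, long ncpus)
{
	size_t setsize = CPU_ALLOC_SIZE(ncpus);
	cpu_set_t *one = CPU_ALLOC(ncpus);
	int ret;

	if (one == NULL)
		return -1;
	CPU_ZERO_S(setsize, one);
	CPU_SET_S(cpu, setsize, one);
	ret = sched_setaffinity(0, setsize, one);	/* pin to this CPU */
	CPU_FREE(one);
	return ret;
}

int main(int argc, char **argv)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);

	for (long cpu = 0; cpu < ncpus; cpu++)
		migrate_to(cpu, ncpus);	/* per-CPU counter reads happen here */

	/* still pinned to the last CPU: without the fix in this patch,
	 * a child started now runs only on that one CPU */
	if (argc > 1 && fork() == 0)
		execvp(argv[1], &argv[1]);
	return 0;
}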
Diffstat (limited to 'tools')
-rw-r--r--	tools/power/x86/turbostat/turbostat.c	28
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index ab2f682fd44c..d5d6a3dc9bac 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -73,8 +73,8 @@ int backwards_count;
 char *progname;
 
 int num_cpus;
-cpu_set_t *cpu_mask;
-size_t cpu_mask_size;
+cpu_set_t *cpu_present_set, *cpu_mask;
+size_t cpu_present_setsize, cpu_mask_size;
 
 struct counters {
 	unsigned long long tsc;		/* per thread */
@@ -103,6 +103,12 @@ struct timeval tv_even;
 struct timeval tv_odd;
 struct timeval tv_delta;
 
+int mark_cpu_present(int pkg, int core, int cpu)
+{
+	CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
+	return 0;
+}
+
 /*
  * cpu_mask_init(ncpus)
  *
@@ -118,6 +124,18 @@ void cpu_mask_init(int ncpus)
 	}
 	cpu_mask_size = CPU_ALLOC_SIZE(ncpus);
 	CPU_ZERO_S(cpu_mask_size, cpu_mask);
+
+	/*
+	 * Allocate and initialize cpu_present_set
+	 */
+	cpu_present_set = CPU_ALLOC(ncpus);
+	if (cpu_present_set == NULL) {
+		perror("CPU_ALLOC");
+		exit(3);
+	}
+	cpu_present_setsize = CPU_ALLOC_SIZE(ncpus);
+	CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
+	for_all_cpus(mark_cpu_present);
 }
 
 void cpu_mask_uninit()
@@ -125,6 +143,9 @@ void cpu_mask_uninit()
 	CPU_FREE(cpu_mask);
 	cpu_mask = NULL;
 	cpu_mask_size = 0;
+	CPU_FREE(cpu_present_set);
+	cpu_present_set = NULL;
+	cpu_present_setsize = 0;
 }
 
 int cpu_migrate(int cpu)
@@ -1047,6 +1068,9 @@ int fork_it(char **argv)
 	int retval;
 	pid_t child_pid;
 	get_counters(cnt_even);
+
+	/* clear affinity side-effect of get_counters() */
+	sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
 	gettimeofday(&tv_even, (struct timezone *)NULL);
 
 	child_pid = fork();
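For completeness, a hedged sketch of the behaviour the fix restores: after the
affinity mask is widened back to every usable CPU before fork(), the child's
sched_getaffinity() reports more than one CPU. This is an illustrative test,
not part of the patch; enumerating CPUs via _SC_NPROCESSORS_ONLN is an
assumption of the sketch (the patch itself builds cpu_present_set by walking
the topology with for_all_cpus()).

/*
 * Hedged test sketch -- illustrative only, not from the kernel tree.
 * Simulates the measurement side-effect, applies the fix, and checks
 * that a fork()ed child is free to run on any online CPU.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	size_t setsize = CPU_ALLOC_SIZE(ncpus);
	cpu_set_t *set = CPU_ALLOC(ncpus);

	if (set == NULL) {
		perror("CPU_ALLOC");
		return 1;
	}

	/* simulate the measurement side-effect: bind to the last CPU */
	CPU_ZERO_S(setsize, set);
	CPU_SET_S(ncpus - 1, setsize, set);
	sched_setaffinity(0, setsize, set);

	/* the fix: widen the mask back to every online CPU before fork() */
	CPU_ZERO_S(setsize, set);
	for (long i = 0; i < ncpus; i++)
		CPU_SET_S(i, setsize, set);
	sched_setaffinity(0, setsize, set);

	if (fork() == 0) {
		sched_getaffinity(0, setsize, set);
		printf("child may run on %d CPUs\n", CPU_COUNT_S(setsize, set));
		exit(0);
	}
	wait(NULL);
	CPU_FREE(set);
	return 0;
}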