path: root/arch/x86/oprofile
author		Robert Richter <robert.richter@amd.com>	2009-07-09 08:40:04 -0400
committer	Robert Richter <robert.richter@amd.com>	2009-07-20 10:43:20 -0400
commit		6ab82f958a5dca591a6ea17a3ca6f2aca06f4f2f (patch)
tree		49cf59fb2a7341a225911fcebb068d91e6873864 /arch/x86/oprofile
parent		7e7478c6bc0e011d2854b21f190cc3a1dba89905 (diff)
x86/oprofile: Implement multiplexing setup/shutdown functions
This patch implements the nmi_setup_mux() and nmi_shutdown_mux() functions
to set up and shut down multiplexing. The multiplexing code in nmi_int.c
is now much more clearly separated.

Signed-off-by: Robert Richter <robert.richter@amd.com>
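The pairing described above, where every per-CPU multiplex buffer allocated
in nmi_setup_mux() is freed (and its switch index reset) in
nmi_shutdown_mux(), can be sketched outside the kernel. The following is a
minimal user-space sketch, not the kernel code itself: the plain arrays stand
in for the kernel's per_cpu machinery, and NR_CPUS and NUM_VIRT_COUNTERS are
hypothetical stand-in constants.

/*
 * Minimal sketch of the setup/shutdown pairing introduced by this patch.
 * Each buffer allocated in setup has exactly one release point in shutdown.
 */
#include <stdlib.h>

#define NR_CPUS           4	/* stand-in for the machine's CPU count */
#define NUM_VIRT_COUNTERS 32	/* stand-in for model->num_virt_counters */

struct op_msr { unsigned long addr; unsigned long long saved; };

static struct op_msr *multiplex[NR_CPUS];
static int switch_index[NR_CPUS];

static int nmi_setup_mux(void)
{
	int i;
	for (i = 0; i < NR_CPUS; i++) {
		multiplex[i] = malloc(sizeof(struct op_msr) * NUM_VIRT_COUNTERS);
		if (!multiplex[i])
			return 0;	/* caller is expected to run the shutdown path */
	}
	return 1;
}

static void nmi_shutdown_mux(void)
{
	int i;
	for (i = 0; i < NR_CPUS; i++) {
		free(multiplex[i]);	/* free(NULL) is a no-op, so partial setup is safe */
		multiplex[i] = NULL;
		switch_index[i] = 0;
	}
}

int main(void)
{
	int ok = nmi_setup_mux();
	nmi_shutdown_mux();	/* safe even after a partial or failed setup */
	return ok ? 0 : 1;
}

Note the design choice mirrored from the patch: nmi_setup_mux() returns early
on failure without cleaning up after itself, and the caller unconditionally
invokes nmi_shutdown_mux(), which tolerates half-initialized state.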
Diffstat (limited to 'arch/x86/oprofile')
-rw-r--r--	arch/x86/oprofile/nmi_int.c	76
1 file changed, 40 insertions(+), 36 deletions(-)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 02b57b8d0e61..674fa37d1502 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -106,9 +106,35 @@ inline int op_x86_phys_to_virt(int phys)
 	return __get_cpu_var(switch_index) + phys;
 }
 
+static void nmi_shutdown_mux(void)
+{
+	int i;
+	for_each_possible_cpu(i) {
+		kfree(per_cpu(cpu_msrs, i).multiplex);
+		per_cpu(cpu_msrs, i).multiplex = NULL;
+		per_cpu(switch_index, i) = 0;
+	}
+}
+
+static int nmi_setup_mux(void)
+{
+	size_t multiplex_size =
+		sizeof(struct op_msr) * model->num_virt_counters;
+	int i;
+	for_each_possible_cpu(i) {
+		per_cpu(cpu_msrs, i).multiplex =
+			kmalloc(multiplex_size, GFP_KERNEL);
+		if (!per_cpu(cpu_msrs, i).multiplex)
+			return 0;
+	}
+	return 1;
+}
+
 #else
 
 inline int op_x86_phys_to_virt(int phys) { return phys; }
+static inline void nmi_shutdown_mux(void) { }
+static inline int nmi_setup_mux(void) { return 1; }
 
 #endif
 
@@ -120,51 +146,27 @@ static void free_msrs(void)
 		per_cpu(cpu_msrs, i).counters = NULL;
 		kfree(per_cpu(cpu_msrs, i).controls);
 		per_cpu(cpu_msrs, i).controls = NULL;
-
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-		kfree(per_cpu(cpu_msrs, i).multiplex);
-		per_cpu(cpu_msrs, i).multiplex = NULL;
-#endif
 	}
 }
 
 static int allocate_msrs(void)
 {
-	int success = 1;
 	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
 	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-	size_t multiplex_size = sizeof(struct op_msr) * model->num_virt_counters;
-#endif
 
 	int i;
 	for_each_possible_cpu(i) {
 		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
 							GFP_KERNEL);
-		if (!per_cpu(cpu_msrs, i).counters) {
-			success = 0;
-			break;
-		}
+		if (!per_cpu(cpu_msrs, i).counters)
+			return 0;
 		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
 							GFP_KERNEL);
-		if (!per_cpu(cpu_msrs, i).controls) {
-			success = 0;
-			break;
-		}
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-		per_cpu(cpu_msrs, i).multiplex =
-			kmalloc(multiplex_size, GFP_KERNEL);
-		if (!per_cpu(cpu_msrs, i).multiplex) {
-			success = 0;
-			break;
-		}
-#endif
 	}
 
-	if (!success)
-		free_msrs();
-
-	return success;
+	return 1;
 }
 
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
@@ -218,11 +220,15 @@ static int nmi_setup(void)
 	int cpu;
 
 	if (!allocate_msrs())
-		return -ENOMEM;
+		err = -ENOMEM;
+	else if (!nmi_setup_mux())
+		err = -ENOMEM;
+	else
+		err = register_die_notifier(&profile_exceptions_nb);
 
-	err = register_die_notifier(&profile_exceptions_nb);
 	if (err) {
 		free_msrs();
+		nmi_shutdown_mux();
 		return err;
 	}
 
@@ -314,9 +320,6 @@ static void nmi_cpu_shutdown(void *dummy)
 	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
 	apic_write(APIC_LVTERR, v);
 	nmi_cpu_restore_registers(msrs);
-#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
-	per_cpu(switch_index, cpu) = 0;
-#endif
 }
 
 static void nmi_shutdown(void)
@@ -326,6 +329,7 @@ static void nmi_shutdown(void)
 	nmi_enabled = 0;
 	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
 	unregister_die_notifier(&profile_exceptions_nb);
+	nmi_shutdown_mux();
 	msrs = &get_cpu_var(cpu_msrs);
 	model->shutdown(msrs);
 	free_msrs();
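The nmi_setup() change above folds all three failure sources into a single
cleanup block. The following is a compact standalone illustration of that
cascade-then-cleanup shape under assumed names: setup_msrs(), setup_mux(),
register_notifier(), and the teardown helpers are hypothetical stand-ins for
allocate_msrs(), nmi_setup_mux(), register_die_notifier(), free_msrs(), and
nmi_shutdown_mux(), not kernel interfaces.

#include <errno.h>
#include <stdio.h>

static int setup_msrs(void)        { return 1; }	/* 1 on success, like allocate_msrs() */
static int setup_mux(void)         { return 1; }	/* 1 on success, like nmi_setup_mux() */
static int register_notifier(void) { return 0; }	/* 0 on success, negative errno on failure */
static void teardown_msrs(void)    { }
static void teardown_mux(void)     { }

static int setup(void)
{
	int err;

	/* each step runs only if the previous one succeeded */
	if (!setup_msrs())
		err = -ENOMEM;
	else if (!setup_mux())
		err = -ENOMEM;
	else
		err = register_notifier();

	if (err) {
		/*
		 * One cleanup block covers every failure above; as in the
		 * patch, the teardown helpers must tolerate partial
		 * initialization.
		 */
		teardown_msrs();
		teardown_mux();
		return err;
	}
	return 0;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}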