about summary refs log tree commit diff stats
path: root/arch/x86/oprofile/nmi_int.c
diff options
context:
space:
mode:
authorRobert Richter <robert.richter@amd.com>2010-03-23 14:09:51 -0400
committerRobert Richter <robert.richter@amd.com>2010-05-04 05:35:07 -0400
commit8f5a2dd83a1f8e89fdc17eb0f2f07c2e713e635a (patch)
tree6d9aeaa4b18d5b01e67b1391e1784793bfe4136a /arch/x86/oprofile/nmi_int.c
parent81c4a8a6733ad2ff49c0e077b51403367601b3e7 (diff)
oprofile/x86: rework error handler in nmi_setup()
This patch improves the error handler in nmi_setup(). Most parts of the code are moved to allocate_msrs(). In case of an error, allocate_msrs() also frees already allocated memory. nmi_setup() becomes simpler and easier to extend. Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'arch/x86/oprofile/nmi_int.c')
-rw-r--r--arch/x86/oprofile/nmi_int.c33
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 2c505ee71014..c0c21f200faf 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -295,6 +295,7 @@ static void free_msrs(void)
295 kfree(per_cpu(cpu_msrs, i).controls); 295 kfree(per_cpu(cpu_msrs, i).controls);
296 per_cpu(cpu_msrs, i).controls = NULL; 296 per_cpu(cpu_msrs, i).controls = NULL;
297 } 297 }
298 nmi_shutdown_mux();
298} 299}
299 300
300static int allocate_msrs(void) 301static int allocate_msrs(void)
@@ -307,14 +308,21 @@ static int allocate_msrs(void)
307 per_cpu(cpu_msrs, i).counters = kzalloc(counters_size, 308 per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
308 GFP_KERNEL); 309 GFP_KERNEL);
309 if (!per_cpu(cpu_msrs, i).counters) 310 if (!per_cpu(cpu_msrs, i).counters)
310 return 0; 311 goto fail;
311 per_cpu(cpu_msrs, i).controls = kzalloc(controls_size, 312 per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
312 GFP_KERNEL); 313 GFP_KERNEL);
313 if (!per_cpu(cpu_msrs, i).controls) 314 if (!per_cpu(cpu_msrs, i).controls)
314 return 0; 315 goto fail;
315 } 316 }
316 317
318 if (!nmi_setup_mux())
319 goto fail;
320
317 return 1; 321 return 1;
322
323fail:
324 free_msrs();
325 return 0;
318} 326}
319 327
320static void nmi_cpu_setup(void *dummy) 328static void nmi_cpu_setup(void *dummy)
@@ -342,17 +350,7 @@ static int nmi_setup(void)
342 int cpu; 350 int cpu;
343 351
344 if (!allocate_msrs()) 352 if (!allocate_msrs())
345 err = -ENOMEM; 353 return -ENOMEM;
346 else if (!nmi_setup_mux())
347 err = -ENOMEM;
348 else
349 err = register_die_notifier(&profile_exceptions_nb);
350
351 if (err) {
352 free_msrs();
353 nmi_shutdown_mux();
354 return err;
355 }
356 354
357 /* We need to serialize save and setup for HT because the subset 355 /* We need to serialize save and setup for HT because the subset
358 * of msrs are distinct for save and setup operations 356 * of msrs are distinct for save and setup operations
@@ -374,9 +372,17 @@ static int nmi_setup(void)
374 372
375 mux_clone(cpu); 373 mux_clone(cpu);
376 } 374 }
375
376 err = register_die_notifier(&profile_exceptions_nb);
377 if (err)
378 goto fail;
379
377 on_each_cpu(nmi_cpu_setup, NULL, 1); 380 on_each_cpu(nmi_cpu_setup, NULL, 1);
378 nmi_enabled = 1; 381 nmi_enabled = 1;
379 return 0; 382 return 0;
383fail:
384 free_msrs();
385 return err;
380} 386}
381 387
382static void nmi_cpu_restore_registers(struct op_msrs *msrs) 388static void nmi_cpu_restore_registers(struct op_msrs *msrs)
@@ -421,7 +427,6 @@ static void nmi_shutdown(void)
421 nmi_enabled = 0; 427 nmi_enabled = 0;
422 on_each_cpu(nmi_cpu_shutdown, NULL, 1); 428 on_each_cpu(nmi_cpu_shutdown, NULL, 1);
423 unregister_die_notifier(&profile_exceptions_nb); 429 unregister_die_notifier(&profile_exceptions_nb);
424 nmi_shutdown_mux();
425 msrs = &get_cpu_var(cpu_msrs); 430 msrs = &get_cpu_var(cpu_msrs);
426 model->shutdown(msrs); 431 model->shutdown(msrs);
427 free_msrs(); 432 free_msrs();