author     Tony Luck <tony.luck@intel.com>    2005-05-18 19:06:00 -0400
committer  Tony Luck <tony.luck@intel.com>    2005-05-18 19:14:30 -0400
commit     a1ecf7f6e65637ba4470405ad39794710dbf85d4 (patch)
tree       a086e2ba8bf56c398c6d50a47d91af6d977aaf63 /arch/ia64/kernel
parent     6872ec548970e9fb3ccd61013f84f9bb8b30fa9a (diff)
[IA64] alternate perfmon handler
Patch from Charles Spirakis
Some Linux customers want to optimize their applications on the latest
hardware but are not yet willing to upgrade to the latest kernel. This
patch provides a way to plug in an alternate, basic, and GPL'ed PMU
subsystem to help with their monitoring needs or for specialty work. It
can also be used in case of serious unexpected bugs in perfmon. Mutual
exclusion between the two subsystems is guaranteed, hence no conflict
can arise from both subsystems being present.
Acked-by: Stephane Eranian <eranian@hpl.hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
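
As an illustration of the interface added by this patch, here is a minimal sketch of a module that plugs in an alternate handler and removes it on unload. It is not part of the patch: the module and function names are hypothetical, the irqreturn_t handler signature and the <asm/perfmon.h> include are assumptions, and only the ->handler member of pfm_intr_handler_desc_t (the one this diff actually dereferences) is initialized. Note that the new symbols are exported GPL-only, so such a module must carry a GPL license.

/*
 * Hypothetical example module (not part of this patch).
 * Field names, signatures, and header path are assumptions; only the
 * ->handler member and the (irq, arg, regs) call are visible in this diff.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/perfmon.h>

static irqreturn_t my_pmu_intr(int irq, void *arg, struct pt_regs *regs)
{
	/* read and clear PMU overflow state for the custom monitoring tool */
	return IRQ_HANDLED;
}

static pfm_intr_handler_desc_t my_desc = {
	.handler = my_pmu_intr,
};

static int __init my_pmu_init(void)
{
	/* fails (e.g. -EBUSY) if perfmon sessions exist or another
	 * alternate handler is already installed */
	return pfm_install_alt_pmu_interrupt(&my_desc);
}

static void __exit my_pmu_exit(void)
{
	pfm_remove_alt_pmu_interrupt(&my_desc);
}

module_init(my_pmu_init);
module_exit(my_pmu_exit);
MODULE_LICENSE("GPL");	/* required: the new symbols are EXPORT_SYMBOL_GPL */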
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--   arch/ia64/kernel/perfmon.c   175
1 file changed, 160 insertions(+), 15 deletions(-)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 71c101601e3e..ab9682f8b044 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -11,7 +11,7 @@
  * Version Perfmon-2.x is a rewrite of perfmon-1.x
  * by Stephane Eranian, Hewlett Packard Co.
  *
- * Copyright (C) 1999-2003, 2005 Hewlett Packard Co
+ * Copyright (C) 1999-2005 Hewlett Packard Co
  *               Stephane Eranian <eranian@hpl.hp.com>
  *               David Mosberger-Tang <davidm@hpl.hp.com>
  *
@@ -497,6 +497,9 @@ typedef struct {
 static pfm_stats_t      pfm_stats[NR_CPUS];
 static pfm_session_t    pfm_sessions;   /* global sessions information */
 
+static spinlock_t pfm_alt_install_check;
+static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
+
 static struct proc_dir_entry    *perfmon_dir;
 static pfm_uuid_t               pfm_null_uuid = {0,};
 
@@ -606,6 +609,7 @@ DEFINE_PER_CPU(unsigned long, pfm_syst_info);
 DEFINE_PER_CPU(struct task_struct *, pmu_owner);
 DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
 DEFINE_PER_CPU(unsigned long, pmu_activation_number);
+EXPORT_SYMBOL_GPL(per_cpu__pfm_syst_info);
 
 
 /* forward declaration */
@@ -1325,7 +1329,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 error_conflict:
         DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
                 pfm_sessions.pfs_sys_session[cpu]->pid,
-                smp_processor_id()));
+                cpu));
 abort:
         UNLOCK_PFS(flags);
 
@@ -5555,26 +5559,32 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
         int ret;
 
         this_cpu = get_cpu();
-        min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
-        max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
+        if (likely(!pfm_alt_intr_handler)) {
+                min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
+                max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
 
-        start_cycles = ia64_get_itc();
+                start_cycles = ia64_get_itc();
 
-        ret = pfm_do_interrupt_handler(irq, arg, regs);
+                ret = pfm_do_interrupt_handler(irq, arg, regs);
 
-        total_cycles = ia64_get_itc();
+                total_cycles = ia64_get_itc();
 
-        /*
-         * don't measure spurious interrupts
-         */
-        if (likely(ret == 0)) {
-                total_cycles -= start_cycles;
+                /*
+                 * don't measure spurious interrupts
+                 */
+                if (likely(ret == 0)) {
+                        total_cycles -= start_cycles;
 
-                if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
-                if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
+                        if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
+                        if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
 
-                pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
+                        pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
+                }
+        }
+        else {
+                (*pfm_alt_intr_handler->handler)(irq, arg, regs);
         }
+
         put_cpu_no_resched();
         return IRQ_HANDLED;
 }
@@ -6425,6 +6435,141 @@ static struct irqaction perfmon_irqaction = {
         .name = "perfmon"
 };
 
+static void
+pfm_alt_save_pmu_state(void *data)
+{
+        struct pt_regs *regs;
+
+        regs = ia64_task_regs(current);
+
+        DPRINT(("called\n"));
+
+        /*
+         * should not be necessary but
+         * let's take not risk
+         */
+        pfm_clear_psr_up();
+        pfm_clear_psr_pp();
+        ia64_psr(regs)->pp = 0;
+
+        /*
+         * This call is required
+         * May cause a spurious interrupt on some processors
+         */
+        pfm_freeze_pmu();
+
+        ia64_srlz_d();
+}
+
+void
+pfm_alt_restore_pmu_state(void *data)
+{
+        struct pt_regs *regs;
+
+        regs = ia64_task_regs(current);
+
+        DPRINT(("called\n"));
+
+        /*
+         * put PMU back in state expected
+         * by perfmon
+         */
+        pfm_clear_psr_up();
+        pfm_clear_psr_pp();
+        ia64_psr(regs)->pp = 0;
+
+        /*
+         * perfmon runs with PMU unfrozen at all times
+         */
+        pfm_unfreeze_pmu();
+
+        ia64_srlz_d();
+}
+
+int
+pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
+{
+        int ret, i;
+        int reserve_cpu;
+
+        /* some sanity checks */
+        if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
+
+        /* do the easy test first */
+        if (pfm_alt_intr_handler) return -EBUSY;
+
+        /* one at a time in the install or remove, just fail the others */
+        if (!spin_trylock(&pfm_alt_install_check)) {
+                return -EBUSY;
+        }
+
+        /* reserve our session */
+        for_each_online_cpu(reserve_cpu) {
+                ret = pfm_reserve_session(NULL, 1, reserve_cpu);
+                if (ret) goto cleanup_reserve;
+        }
+
+        /* save the current system wide pmu states */
+        ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+        if (ret) {
+                DPRINT(("on_each_cpu() failed: %d\n", ret));
+                goto cleanup_reserve;
+        }
+
+        /* officially change to the alternate interrupt handler */
+        pfm_alt_intr_handler = hdl;
+
+        spin_unlock(&pfm_alt_install_check);
+
+        return 0;
+
+cleanup_reserve:
+        for_each_online_cpu(i) {
+                /* don't unreserve more than we reserved */
+                if (i >= reserve_cpu) break;
+
+                pfm_unreserve_session(NULL, 1, i);
+        }
+
+        spin_unlock(&pfm_alt_install_check);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
+
+int
+pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
+{
+        int i;
+        int ret;
+
+        if (hdl == NULL) return -EINVAL;
+
+        /* cannot remove someone else's handler! */
+        if (pfm_alt_intr_handler != hdl) return -EINVAL;
+
+        /* one at a time in the install or remove, just fail the others */
+        if (!spin_trylock(&pfm_alt_install_check)) {
+                return -EBUSY;
+        }
+
+        pfm_alt_intr_handler = NULL;
+
+        ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+        if (ret) {
+                DPRINT(("on_each_cpu() failed: %d\n", ret));
+        }
+
+        for_each_online_cpu(i) {
+                pfm_unreserve_session(NULL, 1, i);
+        }
+
+        spin_unlock(&pfm_alt_install_check);
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
+
 /*
  * perfmon initialization routine, called from the initcall() table
  */