author		Stephane Eranian <eranian@hpl.hp.com>	2005-04-25 16:08:30 -0400
committer	Tony Luck <tony.luck@intel.com>		2005-04-25 16:08:30 -0400
commit		4944930ab748942e41ea4dc313fcb0946aee3f17 (patch)
tree		5d23e420c0d7f4387237028ca2dca1ec97f59861 /arch/ia64/kernel/perfmon.c
parent		658b32cad9ae087bd34f35a925fd75b76d663d4e (diff)
[IA64] perfmon: make pfm_sysctl a global, and other cleanup
- make pfm_sysctl a global such that it is possible
to enable/disable debug printk in sampling formats
using PFM_DEBUG.
- remove unused pfm_debug_var variable
- fix a bug in pfm_handle_work() where a BUG_ON() could
  be triggered. There is a path where pfm_handle_work()
  can be called with interrupts enabled, i.e., when
  TIF_NEED_RESCHED is set. The fix corrects the masking
  and unmasking of interrupts in pfm_handle_work() such
  that the interrupt mask is restored as it was upon entry.
Signed-off-by: Stephane Eranian <eranian@hpl.hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
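With pfm_sysctl now global and exported, code outside perfmon.c, in particular a sampling-format module, can gate its own debug printk on the same "debug" sysctl that pfm_ctl_table already exposes. The following is a minimal sketch of what such a format-side debug macro could look like; the macro name FMT_DPRINT and the header assumed to carry the pfm_sysctl_t declaration are illustrative assumptions, not part of this patch:

/* hypothetical sampling-format module code, for illustration only */
#include <linux/kernel.h>
#include <linux/smp.h>
#include <asm/perfmon.h>	/* assumed to provide the pfm_sysctl_t declaration */

extern pfm_sysctl_t pfm_sysctl;	/* global, exported by this patch */

#define FMT_DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0)) { \
			printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, \
			       smp_processor_id()); \
			printk a; \
		} \
	} while (0)

Debug output for the core and for formats then toggles through the same sysctl entry ("debug" in pfm_ctl_table), rather than through the now-removed pfm_debug_var.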
Diffstat (limited to 'arch/ia64/kernel/perfmon.c')
-rw-r--r--	arch/ia64/kernel/perfmon.c	59
1 file changed, 28 insertions(+), 31 deletions(-)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 71147be3279c..376fcbc3f8da 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -480,14 +480,6 @@ typedef struct {
 #define PFM_CMD_ARG_MANY	-1 /* cannot be zero */
 
 typedef struct {
-	int	debug;		/* turn on/off debugging via syslog */
-	int	debug_ovfl;	/* turn on/off debug printk in overflow handler */
-	int	fastctxsw;	/* turn on/off fast (unsecure) ctxsw */
-	int	expert_mode;	/* turn on/off value checking */
-	int	debug_pfm_read;
-} pfm_sysctl_t;
-
-typedef struct {
 	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
 	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
 	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
@@ -514,8 +506,8 @@ static LIST_HEAD(pfm_buffer_fmt_list);
 static pmu_config_t	*pmu_conf;
 
 /* sysctl() controls */
-static pfm_sysctl_t pfm_sysctl;
-int pfm_debug_var;
+pfm_sysctl_t pfm_sysctl;
+EXPORT_SYMBOL(pfm_sysctl);
 
 static ctl_table pfm_ctl_table[]={
 	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
@@ -1576,7 +1568,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 		goto abort_locked;
 	}
 
-	DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
+	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
 
 	ret = -EFAULT;
 	if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
@@ -3695,8 +3687,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 	pfm_sysctl.debug = m == 0 ? 0 : 1;
 
-	pfm_debug_var = pfm_sysctl.debug;
-
 	printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
 
 	if (m == 0) {
@@ -4996,13 +4986,21 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 }
 
 static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
-
+/*
+ * pfm_handle_work() can be called with interrupts enabled
+ * (TIF_NEED_RESCHED) or disabled. The down_interruptible
+ * call may sleep, therefore we must re-enable interrupts
+ * to avoid deadlocks. It is safe to do so because this function
+ * is called ONLY when returning to user level (PUStk=1), in which case
+ * there is no risk of kernel stack overflow due to deep
+ * interrupt nesting.
+ */
 void
 pfm_handle_work(void)
 {
 	pfm_context_t *ctx;
 	struct pt_regs *regs;
-	unsigned long flags;
+	unsigned long flags, dummy_flags;
 	unsigned long ovfl_regs;
 	unsigned int reason;
 	int ret;
@@ -5039,18 +5037,15 @@ pfm_handle_work(void)
 	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
 	if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
 
+	/*
+	 * restore interrupt mask to what it was on entry.
+	 * Could be enabled/diasbled.
+	 */
 	UNPROTECT_CTX(ctx, flags);
 
-	/*
-	 * pfm_handle_work() is currently called with interrupts disabled.
-	 * The down_interruptible call may sleep, therefore we
-	 * must re-enable interrupts to avoid deadlocks. It is
-	 * safe to do so because this function is called ONLY
-	 * when returning to user level (PUStk=1), in which case
-	 * there is no risk of kernel stack overflow due to deep
-	 * interrupt nesting.
-	 */
-	BUG_ON(flags & IA64_PSR_I);
+	/*
+	 * force interrupt enable because of down_interruptible()
+	 */
 	local_irq_enable();
 
 	DPRINT(("before block sleeping\n"));
@@ -5064,12 +5059,12 @@ pfm_handle_work(void)
 	DPRINT(("after block sleeping ret=%d\n", ret));
 
 	/*
-	 * disable interrupts to restore state we had upon entering
-	 * this function
+	 * lock context and mask interrupts again
+	 * We save flags into a dummy because we may have
+	 * altered interrupts mask compared to entry in this
+	 * function.
 	 */
-	local_irq_disable();
-
-	PROTECT_CTX(ctx, flags);
+	PROTECT_CTX(ctx, dummy_flags);
 
 	/*
 	 * we need to read the ovfl_regs only after wake-up
@@ -5095,7 +5090,9 @@ skip_blocking:
 	ctx->ctx_ovfl_regs[0] = 0UL;
 
 nothing_to_do:
-
+	/*
+	 * restore flags as they were upon entry
+	 */
 	UNPROTECT_CTX(ctx, flags);
 }
 
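For readers tracing the pfm_handle_work() change, the interrupt-mask discipline the patch ends up with can be summarized as follows. This is a non-literal sketch with most of the body elided; the _sketch name and the explicit ctx argument are illustrative, while PROTECT_CTX/UNPROTECT_CTX are perfmon's existing lock-and-mask-interrupts macros:

/* non-literal sketch of the flag handling in pfm_handle_work() after this patch */
void
pfm_handle_work_sketch(pfm_context_t *ctx)
{
	unsigned long flags, dummy_flags;

	PROTECT_CTX(ctx, flags);	/* lock context, capture the caller's interrupt mask */

	/* ... non-blocking reasons jump straight to the cleanup below ... */

	UNPROTECT_CTX(ctx, flags);	/* restore entry mask: interrupts may be on or off here */
	local_irq_enable();		/* force interrupts on: down_interruptible() may sleep */

	/* ... block via down_interruptible() until a restart is issued ... */

	PROTECT_CTX(ctx, dummy_flags);	/* re-lock; this mask snapshot is deliberately discarded */

	/* ... consume ctx_ovfl_regs, reset the overflowed PMDs ... */

	UNPROTECT_CTX(ctx, flags);	/* exit with the interrupt mask captured on entry */
}

The old code instead did local_irq_disable() followed by PROTECT_CTX(ctx, flags) after sleeping, overwriting the saved entry mask, and asserted BUG_ON(flags & IA64_PSR_I) even though the TIF_NEED_RESCHED path can enter the function with interrupts enabled; both problems are gone in the flow sketched above.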