diff options
Diffstat (limited to 'arch/ia64/kernel/perfmon.c')
-rw-r--r-- | arch/ia64/kernel/perfmon.c | 102 |
1 files changed, 61 insertions, 41 deletions
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 71147be3279c..71c101601e3e 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -480,14 +480,6 @@ typedef struct { | |||
480 | #define PFM_CMD_ARG_MANY -1 /* cannot be zero */ | 480 | #define PFM_CMD_ARG_MANY -1 /* cannot be zero */ |
481 | 481 | ||
482 | typedef struct { | 482 | typedef struct { |
483 | int debug; /* turn on/off debugging via syslog */ | ||
484 | int debug_ovfl; /* turn on/off debug printk in overflow handler */ | ||
485 | int fastctxsw; /* turn on/off fast (unsecure) ctxsw */ | ||
486 | int expert_mode; /* turn on/off value checking */ | ||
487 | int debug_pfm_read; | ||
488 | } pfm_sysctl_t; | ||
489 | |||
490 | typedef struct { | ||
491 | unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */ | 483 | unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */ |
492 | unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */ | 484 | unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */ |
493 | unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */ | 485 | unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */ |
@@ -514,8 +506,8 @@ static LIST_HEAD(pfm_buffer_fmt_list); | |||
514 | static pmu_config_t *pmu_conf; | 506 | static pmu_config_t *pmu_conf; |
515 | 507 | ||
516 | /* sysctl() controls */ | 508 | /* sysctl() controls */ |
517 | static pfm_sysctl_t pfm_sysctl; | 509 | pfm_sysctl_t pfm_sysctl; |
518 | int pfm_debug_var; | 510 | EXPORT_SYMBOL(pfm_sysctl); |
519 | 511 | ||
520 | static ctl_table pfm_ctl_table[]={ | 512 | static ctl_table pfm_ctl_table[]={ |
521 | {1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,}, | 513 | {1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,}, |
@@ -1273,6 +1265,8 @@ out: | |||
1273 | } | 1265 | } |
1274 | EXPORT_SYMBOL(pfm_unregister_buffer_fmt); | 1266 | EXPORT_SYMBOL(pfm_unregister_buffer_fmt); |
1275 | 1267 | ||
1268 | extern void update_pal_halt_status(int); | ||
1269 | |||
1276 | static int | 1270 | static int |
1277 | pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) | 1271 | pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) |
1278 | { | 1272 | { |
@@ -1319,6 +1313,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) | |||
1319 | is_syswide, | 1313 | is_syswide, |
1320 | cpu)); | 1314 | cpu)); |
1321 | 1315 | ||
1316 | /* | ||
1317 | * disable default_idle() to go to PAL_HALT | ||
1318 | */ | ||
1319 | update_pal_halt_status(0); | ||
1320 | |||
1322 | UNLOCK_PFS(flags); | 1321 | UNLOCK_PFS(flags); |
1323 | 1322 | ||
1324 | return 0; | 1323 | return 0; |
@@ -1374,6 +1373,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu) | |||
1374 | is_syswide, | 1373 | is_syswide, |
1375 | cpu)); | 1374 | cpu)); |
1376 | 1375 | ||
1376 | /* | ||
1377 | * if possible, enable default_idle() to go into PAL_HALT | ||
1378 | */ | ||
1379 | if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0) | ||
1380 | update_pal_halt_status(1); | ||
1381 | |||
1377 | UNLOCK_PFS(flags); | 1382 | UNLOCK_PFS(flags); |
1378 | 1383 | ||
1379 | return 0; | 1384 | return 0; |
@@ -1576,7 +1581,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) | |||
1576 | goto abort_locked; | 1581 | goto abort_locked; |
1577 | } | 1582 | } |
1578 | 1583 | ||
1579 | DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type)); | 1584 | DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type)); |
1580 | 1585 | ||
1581 | ret = -EFAULT; | 1586 | ret = -EFAULT; |
1582 | if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t); | 1587 | if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t); |
@@ -3695,8 +3700,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |||
3695 | 3700 | ||
3696 | pfm_sysctl.debug = m == 0 ? 0 : 1; | 3701 | pfm_sysctl.debug = m == 0 ? 0 : 1; |
3697 | 3702 | ||
3698 | pfm_debug_var = pfm_sysctl.debug; | ||
3699 | |||
3700 | printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off"); | 3703 | printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off"); |
3701 | 3704 | ||
3702 | if (m == 0) { | 3705 | if (m == 0) { |
@@ -4212,7 +4215,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |||
4212 | DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", | 4215 | DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", |
4213 | req->load_pid, | 4216 | req->load_pid, |
4214 | ctx->ctx_state)); | 4217 | ctx->ctx_state)); |
4215 | return -EINVAL; | 4218 | return -EBUSY; |
4216 | } | 4219 | } |
4217 | 4220 | ||
4218 | DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); | 4221 | DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); |
@@ -4714,16 +4717,26 @@ recheck: | |||
4714 | if (task == current || ctx->ctx_fl_system) return 0; | 4717 | if (task == current || ctx->ctx_fl_system) return 0; |
4715 | 4718 | ||
4716 | /* | 4719 | /* |
4717 | * if context is UNLOADED we are safe to go | 4720 | * we are monitoring another thread |
4718 | */ | 4721 | */ |
4719 | if (state == PFM_CTX_UNLOADED) return 0; | 4722 | switch(state) { |
4720 | 4723 | case PFM_CTX_UNLOADED: | |
4721 | /* | 4724 | /* |
4722 | * no command can operate on a zombie context | 4725 | * if context is UNLOADED we are safe to go |
4723 | */ | 4726 | */ |
4724 | if (state == PFM_CTX_ZOMBIE) { | 4727 | return 0; |
4725 | DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); | 4728 | case PFM_CTX_ZOMBIE: |
4726 | return -EINVAL; | 4729 | /* |
4730 | * no command can operate on a zombie context | ||
4731 | */ | ||
4732 | DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); | ||
4733 | return -EINVAL; | ||
4734 | case PFM_CTX_MASKED: | ||
4735 | /* | ||
4736 | * PMU state has been saved to software even though | ||
4737 | * the thread may still be running. | ||
4738 | */ | ||
4739 | if (cmd != PFM_UNLOAD_CONTEXT) return 0; | ||
4727 | } | 4740 | } |
4728 | 4741 | ||
4729 | /* | 4742 | /* |
@@ -4996,13 +5009,21 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs) | |||
4996 | } | 5009 | } |
4997 | 5010 | ||
4998 | static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds); | 5011 | static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds); |
4999 | 5012 | /* | |
5013 | * pfm_handle_work() can be called with interrupts enabled | ||
5014 | * (TIF_NEED_RESCHED) or disabled. The down_interruptible | ||
5015 | * call may sleep, therefore we must re-enable interrupts | ||
5016 | * to avoid deadlocks. It is safe to do so because this function | ||
5017 | * is called ONLY when returning to user level (PUStk=1), in which case | ||
5018 | * there is no risk of kernel stack overflow due to deep | ||
5019 | * interrupt nesting. | ||
5020 | */ | ||
5000 | void | 5021 | void |
5001 | pfm_handle_work(void) | 5022 | pfm_handle_work(void) |
5002 | { | 5023 | { |
5003 | pfm_context_t *ctx; | 5024 | pfm_context_t *ctx; |
5004 | struct pt_regs *regs; | 5025 | struct pt_regs *regs; |
5005 | unsigned long flags; | 5026 | unsigned long flags, dummy_flags; |
5006 | unsigned long ovfl_regs; | 5027 | unsigned long ovfl_regs; |
5007 | unsigned int reason; | 5028 | unsigned int reason; |
5008 | int ret; | 5029 | int ret; |
@@ -5039,18 +5060,15 @@ pfm_handle_work(void) | |||
5039 | //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking; | 5060 | //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking; |
5040 | if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking; | 5061 | if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking; |
5041 | 5062 | ||
5063 | /* | ||
5064 | * restore interrupt mask to what it was on entry. | ||
5065 | * Could be enabled/disabled. | ||
5066 | */ | ||
5042 | UNPROTECT_CTX(ctx, flags); | 5067 | UNPROTECT_CTX(ctx, flags); |
5043 | 5068 | ||
5044 | /* | 5069 | /* |
5045 | * pfm_handle_work() is currently called with interrupts disabled. | 5070 | * force interrupt enable because of down_interruptible() |
5046 | * The down_interruptible call may sleep, therefore we | 5071 | */ |
5047 | * must re-enable interrupts to avoid deadlocks. It is | ||
5048 | * safe to do so because this function is called ONLY | ||
5049 | * when returning to user level (PUStk=1), in which case | ||
5050 | * there is no risk of kernel stack overflow due to deep | ||
5051 | * interrupt nesting. | ||
5052 | */ | ||
5053 | BUG_ON(flags & IA64_PSR_I); | ||
5054 | local_irq_enable(); | 5072 | local_irq_enable(); |
5055 | 5073 | ||
5056 | DPRINT(("before block sleeping\n")); | 5074 | DPRINT(("before block sleeping\n")); |
@@ -5064,12 +5082,12 @@ pfm_handle_work(void) | |||
5064 | DPRINT(("after block sleeping ret=%d\n", ret)); | 5082 | DPRINT(("after block sleeping ret=%d\n", ret)); |
5065 | 5083 | ||
5066 | /* | 5084 | /* |
5067 | * disable interrupts to restore state we had upon entering | 5085 | * lock context and mask interrupts again |
5068 | * this function | 5086 | * We save flags into a dummy because we may have |
5087 | * altered interrupts mask compared to entry in this | ||
5088 | * function. | ||
5069 | */ | 5089 | */ |
5070 | local_irq_disable(); | 5090 | PROTECT_CTX(ctx, dummy_flags); |
5071 | |||
5072 | PROTECT_CTX(ctx, flags); | ||
5073 | 5091 | ||
5074 | /* | 5092 | /* |
5075 | * we need to read the ovfl_regs only after wake-up | 5093 | * we need to read the ovfl_regs only after wake-up |
@@ -5095,7 +5113,9 @@ skip_blocking: | |||
5095 | ctx->ctx_ovfl_regs[0] = 0UL; | 5113 | ctx->ctx_ovfl_regs[0] = 0UL; |
5096 | 5114 | ||
5097 | nothing_to_do: | 5115 | nothing_to_do: |
5098 | 5116 | /* | |
5117 | * restore flags as they were upon entry | ||
5118 | */ | ||
5099 | UNPROTECT_CTX(ctx, flags); | 5119 | UNPROTECT_CTX(ctx, flags); |
5100 | } | 5120 | } |
5101 | 5121 | ||