Diffstat (limited to 'arch/ia64/kernel/perfmon.c')
-rw-r--r--  arch/ia64/kernel/perfmon.c | 32
1 file changed, 16 insertions, 16 deletions
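All 16 changes below are the same mechanical substitution: the ia64-specific ia64_task_regs() accessor is replaced by the generic task_pt_regs() helper, with no behavioural change. As a minimal, hypothetical sketch of a caller using the new name (show_psr_pp() is an illustration only, not part of this patch; the include list is an assumption):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/ptrace.h>

/* Hypothetical example: read the saved psr.pp bit of a task's user-level
 * register frame, using the generic accessor introduced by this patch. */
static void show_psr_pp(struct task_struct *task)
{
	/* was: struct pt_regs *regs = ia64_task_regs(task); */
	struct pt_regs *regs = task_pt_regs(task);

	printk(KERN_DEBUG "psr.pp=%d\n", (int)ia64_psr(regs)->pp);
}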
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index c026ac1142a6..bd87cb6b7a81 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1710,7 +1710,7 @@ static void
 pfm_syswide_force_stop(void *info)
 {
 	pfm_context_t *ctx = (pfm_context_t *)info;
-	struct pt_regs *regs = ia64_task_regs(current);
+	struct pt_regs *regs = task_pt_regs(current);
 	struct task_struct *owner;
 	unsigned long flags;
 	int ret;
@@ -1815,7 +1815,7 @@ pfm_flush(struct file *filp)
 	is_system = ctx->ctx_fl_system;
 
 	task = PFM_CTX_TASK(ctx);
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	DPRINT(("ctx_state=%d is_current=%d\n",
 		state,
@@ -1945,7 +1945,7 @@ pfm_close(struct inode *inode, struct file *filp)
 	is_system = ctx->ctx_fl_system;
 
 	task = PFM_CTX_TASK(ctx);
-	regs = ia64_task_regs(task);
+	regs = task_pt_regs(task);
 
 	DPRINT(("ctx_state=%d is_current=%d\n",
 		state,
@@ -4052,7 +4052,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 */
 		ia64_psr(regs)->up = 0;
 	} else {
-		tregs = ia64_task_regs(task);
+		tregs = task_pt_regs(task);
 
 		/*
 		 * stop monitoring at the user level
@@ -4134,7 +4134,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		ia64_psr(regs)->up = 1;
 
 	} else {
-		tregs = ia64_task_regs(ctx->ctx_task);
+		tregs = task_pt_regs(ctx->ctx_task);
 
 		/*
 		 * start monitoring at the kernel level the next
@@ -4404,7 +4404,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		/*
 		 * when not current, task MUST be stopped, so this is safe
 		 */
-		regs = ia64_task_regs(task);
+		regs = task_pt_regs(task);
 
 		/* force a full reload */
 		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
@@ -4530,7 +4530,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	/*
 	 * per-task mode
 	 */
-	tregs = task == current ? regs : ia64_task_regs(task);
+	tregs = task == current ? regs : task_pt_regs(task);
 
 	if (task == current) {
 		/*
@@ -4593,7 +4593,7 @@ pfm_exit_thread(struct task_struct *task)
 {
 	pfm_context_t *ctx;
 	unsigned long flags;
-	struct pt_regs *regs = ia64_task_regs(task);
+	struct pt_regs *regs = task_pt_regs(task);
 	int ret, state;
 	int free_ok = 0;
 
@@ -4926,7 +4926,7 @@ restart_args:
 	if (unlikely(ret)) goto abort_locked;
 
 skip_fd:
-	ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
+	ret = (*func)(ctx, args_k, count, task_pt_regs(current));
 
 	call_made = 1;
 
@@ -5050,7 +5050,7 @@ pfm_handle_work(void)
 
 	pfm_clear_task_notify();
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	/*
 	 * extract reason for being here and clear
@@ -5794,7 +5794,7 @@ pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_c
 	 * on every CPU, so we can rely on the pid to identify the idle task.
 	 */
 	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
-		regs = ia64_task_regs(task);
+		regs = task_pt_regs(task);
 		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
 		return;
 	}
@@ -5877,7 +5877,7 @@ pfm_save_regs(struct task_struct *task)
 	flags = pfm_protect_ctx_ctxsw(ctx);
 
 	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
-		struct pt_regs *regs = ia64_task_regs(task);
+		struct pt_regs *regs = task_pt_regs(task);
 
 		pfm_clear_psr_up();
 
@@ -6077,7 +6077,7 @@ pfm_load_regs (struct task_struct *task)
 	BUG_ON(psr & IA64_PSR_I);
 
 	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
-		struct pt_regs *regs = ia64_task_regs(task);
+		struct pt_regs *regs = task_pt_regs(task);
 
 		BUG_ON(ctx->ctx_smpl_hdr);
 
@@ -6446,7 +6446,7 @@ pfm_alt_save_pmu_state(void *data)
 {
 	struct pt_regs *regs;
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	DPRINT(("called\n"));
 
@@ -6472,7 +6472,7 @@ pfm_alt_restore_pmu_state(void *data)
 {
 	struct pt_regs *regs;
 
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 
 	DPRINT(("called\n"));
 
@@ -6754,7 +6754,7 @@ dump_pmu_state(const char *from)
 	local_irq_save(flags);
 
 	this_cpu = smp_processor_id();
-	regs = ia64_task_regs(current);
+	regs = task_pt_regs(current);
 	info = PFM_CPUINFO_GET();
 	dcr = ia64_getreg(_IA64_REG_CR_DCR);
 