author		Keshavamurthy Anil S <anil.s.keshavamurthy@intel.com>	2006-09-26 15:03:13 -0400
committer	Tony Luck <tony.luck@intel.com>	2006-09-26 15:03:13 -0400
commit		35589a8fa8138244e7f2ef9317c440aa580c9335
tree		c108ecc979e4b95ee5b6d617790212426b80d372	/arch/ia64
parent		dd562c05410e13e878a3ee0deb8ac06db2e132c7
[IA64] Move perfmon tables from thread_struct to pfm_context
This patch renders thread_struct->pmcs[] and thread_struct->pmds[] OBSOLETE. The actual table is moved into the pfm_context structure, which saves space in thread_struct (in turn saving space in task_struct, freeing up more space for kernel stacks).

Signed-off-by: Stephane Eranian <eranian@hpl.hp.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
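For context, the essence of the patch is a plain struct-to-struct move, sketched below in C. This is an illustration, not the literal header diff: it assumes thread_struct previously carried the arrays under the names pmcs[]/pmds[] (its definition lives in include/asm-ia64/processor.h, outside this arch/ia64 diffstat), and that unsigned long is 8 bytes on ia64, so dropping the two 64-entry arrays recovers 2 * 64 * 8 = 1024 bytes per task. That matters on ia64 because task_struct and the kernel stack share one allocation.

	/* Sketch only -- surrounding fields elided.
	 * Before: every task paid for the PMU save areas,
	 * whether or not it ever used perfmon. */
	struct thread_struct {
		/* ... */
		unsigned long pmcs[64];	/* removed by this patch */
		unsigned long pmds[64];	/* removed by this patch */
	};

	/* After: the save areas live in the perfmon context, which is
	 * allocated only for tasks that actually create one. */
	typedef struct pfm_context {
		/* ... */
		unsigned long th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
		unsigned long th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */
		/* ... */
	} pfm_context_t;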
Diffstat (limited to 'arch/ia64')
 -rw-r--r--  arch/ia64/kernel/perfmon.c | 113
 1 file changed, 49 insertions(+), 64 deletions(-)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 84a7e52f56f6..45000d5d0cfa 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -62,6 +62,9 @@
 
 #define PFM_INVALID_ACTIVATION	(~0UL)
 
+#define PFM_NUM_PMC_REGS	64	/* PMC save area for ctxsw */
+#define PFM_NUM_PMD_REGS	64	/* PMD save area for ctxsw */
+
 /*
  * depth of message queue
  */
@@ -296,14 +299,17 @@ typedef struct pfm_context {
 	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */
 	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */
 
-	unsigned long		ctx_pmcs[IA64_NUM_PMC_REGS];	/* saved copies of PMC values */
+	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];	/* saved copies of PMC values */
 
 	unsigned int		ctx_used_ibrs[1];	/* bitmask of used IBR (speedup ctxsw in) */
 	unsigned int		ctx_used_dbrs[1];	/* bitmask of used DBR (speedup ctxsw in) */
 	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
 	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */
 
-	pfm_counter_t		ctx_pmds[IA64_NUM_PMD_REGS];	/* software state for PMDS */
+	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];	/* software state for PMDS */
+
+	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
+	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */
 
 	u64			ctx_saved_psr_up;	/* only contains psr.up value */
 
@@ -867,7 +873,6 @@ static void
 pfm_mask_monitoring(struct task_struct *task)
 {
 	pfm_context_t *ctx = PFM_GET_CTX(task);
-	struct thread_struct *th = &task->thread;
 	unsigned long mask, val, ovfl_mask;
 	int i;
 
@@ -888,7 +893,7 @@ pfm_mask_monitoring(struct task_struct *task)
 	 * So in both cases, the live register contains the owner's
 	 * state. We can ONLY touch the PMU registers and NOT the PSR.
 	 *
-	 * As a consequence to this call, the thread->pmds[] array
+	 * As a consequence to this call, the ctx->th_pmds[] array
 	 * contains stale information which must be ignored
 	 * when context is reloaded AND monitoring is active (see
 	 * pfm_restart).
@@ -923,9 +928,9 @@ pfm_mask_monitoring(struct task_struct *task)
 	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
 	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
 		if ((mask & 0x1) == 0UL) continue;
-		ia64_set_pmc(i, th->pmcs[i] & ~0xfUL);
-		th->pmcs[i] &= ~0xfUL;
-		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i]));
+		ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
+		ctx->th_pmcs[i] &= ~0xfUL;
+		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
 	}
 	/*
 	 * make all of this visible
@@ -942,7 +947,6 @@ static void
 pfm_restore_monitoring(struct task_struct *task)
 {
 	pfm_context_t *ctx = PFM_GET_CTX(task);
-	struct thread_struct *th = &task->thread;
 	unsigned long mask, ovfl_mask;
 	unsigned long psr, val;
 	int i, is_system;
@@ -1008,9 +1012,9 @@ pfm_restore_monitoring(struct task_struct *task)
 	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
 	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
 		if ((mask & 0x1) == 0UL) continue;
-		th->pmcs[i] = ctx->ctx_pmcs[i];
-		ia64_set_pmc(i, th->pmcs[i]);
-		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i]));
+		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
+		ia64_set_pmc(i, ctx->th_pmcs[i]);
+		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i]));
 	}
 	ia64_srlz_d();
 
@@ -1069,7 +1073,6 @@ pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
 static inline void
 pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
 {
-	struct thread_struct *thread = &task->thread;
 	unsigned long ovfl_val = pmu_conf->ovfl_val;
 	unsigned long mask = ctx->ctx_all_pmds[0];
 	unsigned long val;
@@ -1091,11 +1094,11 @@ pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
 			ctx->ctx_pmds[i].val = val & ~ovfl_val;
 			val &= ovfl_val;
 		}
-		thread->pmds[i] = val;
+		ctx->th_pmds[i] = val;
 
 		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
 			i,
-			thread->pmds[i],
+			ctx->th_pmds[i],
 			ctx->ctx_pmds[i].val));
 	}
 }
@@ -1106,7 +1109,6 @@ pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
 static inline void
 pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
 {
-	struct thread_struct *thread = &task->thread;
 	unsigned long mask = ctx->ctx_all_pmcs[0];
 	int i;
 
@@ -1114,8 +1116,8 @@ pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
 
 	for (i=0; mask; i++, mask>>=1) {
 		/* masking 0 with ovfl_val yields 0 */
-		thread->pmcs[i] = ctx->ctx_pmcs[i];
-		DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i]));
+		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
+		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
 	}
 }
 
@@ -2859,7 +2861,6 @@ pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
 static int
 pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 {
-	struct thread_struct *thread = NULL;
 	struct task_struct *task;
 	pfarg_reg_t *req = (pfarg_reg_t *)arg;
 	unsigned long value, pmc_pm;
@@ -2880,7 +2881,6 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	if (state == PFM_CTX_ZOMBIE) return -EINVAL;
 
 	if (is_loaded) {
-		thread = &task->thread;
 		/*
 		 * In system wide and when the context is loaded, access can only happen
 		 * when the caller is running on the CPU being monitored by the session.
@@ -3035,7 +3035,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 *
 		 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
 		 *
-		 * The value in thread->pmcs[] may be modified on overflow, i.e., when
+		 * The value in th_pmcs[] may be modified on overflow, i.e., when
 		 * monitoring needs to be stopped.
 		 */
 		if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
@@ -3049,7 +3049,7 @@ pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		/*
 		 * write thread state
 		 */
-		if (is_system == 0) thread->pmcs[cnum] = value;
+		if (is_system == 0) ctx->th_pmcs[cnum] = value;
 
 		/*
 		 * write hardware register if we can
@@ -3101,7 +3101,6 @@ error:
 static int
 pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 {
-	struct thread_struct *thread = NULL;
 	struct task_struct *task;
 	pfarg_reg_t *req = (pfarg_reg_t *)arg;
 	unsigned long value, hw_value, ovfl_mask;
@@ -3125,7 +3124,6 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * the owner of the local PMU.
 	 */
 	if (likely(is_loaded)) {
-		thread = &task->thread;
 		/*
 		 * In system wide and when the context is loaded, access can only happen
 		 * when the caller is running on the CPU being monitored by the session.
@@ -3233,7 +3231,7 @@ pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		/*
 		 * write thread state
 		 */
-		if (is_system == 0) thread->pmds[cnum] = hw_value;
+		if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
 
 		/*
 		 * write hardware register if we can
@@ -3299,7 +3297,6 @@ abort_mission:
 static int
 pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 {
-	struct thread_struct *thread = NULL;
 	struct task_struct *task;
 	unsigned long val = 0UL, lval, ovfl_mask, sval;
 	pfarg_reg_t *req = (pfarg_reg_t *)arg;
@@ -3323,7 +3320,6 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	if (state == PFM_CTX_ZOMBIE) return -EINVAL;
 
 	if (likely(is_loaded)) {
-		thread = &task->thread;
 		/*
 		 * In system wide and when the context is loaded, access can only happen
 		 * when the caller is running on the CPU being monitored by the session.
@@ -3385,7 +3381,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 			 * if context is zombie, then task does not exist anymore.
 			 * In this case, we use the full value saved in the context (pfm_flush_regs()).
 			 */
-			val = is_loaded ? thread->pmds[cnum] : 0UL;
+			val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
 		}
 		rd_func = pmu_conf->pmd_desc[cnum].read_check;
 
@@ -4354,8 +4350,8 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		pfm_copy_pmds(task, ctx);
 		pfm_copy_pmcs(task, ctx);
 
-		pmcs_source = thread->pmcs;
-		pmds_source = thread->pmds;
+		pmcs_source = ctx->th_pmcs;
+		pmds_source = ctx->th_pmds;
 
 		/*
 		 * always the case for system-wide
@@ -5864,14 +5860,12 @@ void
 pfm_save_regs(struct task_struct *task)
 {
 	pfm_context_t *ctx;
-	struct thread_struct *t;
 	unsigned long flags;
 	u64 psr;
 
 
 	ctx = PFM_GET_CTX(task);
 	if (ctx == NULL) return;
-	t = &task->thread;
 
 	/*
 	 * we always come here with interrupts ALREADY disabled by
@@ -5929,19 +5923,19 @@ pfm_save_regs(struct task_struct *task)
 	 * guarantee we will be schedule at that same
 	 * CPU again.
 	 */
-	pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);
+	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
 
 	/*
 	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
 	 * we will need it on the restore path to check
 	 * for pending overflow.
 	 */
-	t->pmcs[0] = ia64_get_pmc(0);
+	ctx->th_pmcs[0] = ia64_get_pmc(0);
 
 	/*
 	 * unfreeze PMU if had pending overflows
 	 */
-	if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
+	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
 
 	/*
 	 * finally, allow context access.
@@ -5986,7 +5980,6 @@ static void
 pfm_lazy_save_regs (struct task_struct *task)
 {
 	pfm_context_t *ctx;
-	struct thread_struct *t;
 	unsigned long flags;
 
 	{ u64 psr = pfm_get_psr();
@@ -5994,7 +5987,6 @@ pfm_lazy_save_regs (struct task_struct *task)
 	}
 
 	ctx = PFM_GET_CTX(task);
-	t = &task->thread;
 
 	/*
 	 * we need to mask PMU overflow here to
@@ -6019,19 +6011,19 @@ pfm_lazy_save_regs (struct task_struct *task)
 	/*
 	 * save all the pmds we use
 	 */
-	pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);
+	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
 
 	/*
 	 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
 	 * it is needed to check for pended overflow
 	 * on the restore path
 	 */
-	t->pmcs[0] = ia64_get_pmc(0);
+	ctx->th_pmcs[0] = ia64_get_pmc(0);
 
 	/*
 	 * unfreeze PMU if had pending overflows
 	 */
-	if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
+	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
 
 	/*
 	 * now get can unmask PMU interrupts, they will
@@ -6050,7 +6042,6 @@ void
 pfm_load_regs (struct task_struct *task)
 {
 	pfm_context_t *ctx;
-	struct thread_struct *t;
 	unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
 	unsigned long flags;
 	u64 psr, psr_up;
@@ -6061,11 +6052,10 @@ pfm_load_regs (struct task_struct *task)
 
 	BUG_ON(GET_PMU_OWNER());
 
-	t = &task->thread;
 	/*
 	 * possible on unload
 	 */
-	if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) return;
+	if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
 
 	/*
 	 * we always come here with interrupts ALREADY disabled by
@@ -6147,21 +6137,21 @@ pfm_load_regs (struct task_struct *task)
 	 *
 	 * XXX: optimize here
 	 */
-	if (pmd_mask) pfm_restore_pmds(t->pmds, pmd_mask);
-	if (pmc_mask) pfm_restore_pmcs(t->pmcs, pmc_mask);
+	if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
+	if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
 
 	/*
 	 * check for pending overflow at the time the state
 	 * was saved.
 	 */
-	if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
+	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
 		/*
 		 * reload pmc0 with the overflow information
 		 * On McKinley PMU, this will trigger a PMU interrupt
 		 */
-		ia64_set_pmc(0, t->pmcs[0]);
+		ia64_set_pmc(0, ctx->th_pmcs[0]);
 		ia64_srlz_d();
-		t->pmcs[0] = 0UL;
+		ctx->th_pmcs[0] = 0UL;
 
 		/*
 		 * will replay the PMU interrupt
@@ -6214,7 +6204,6 @@ pfm_load_regs (struct task_struct *task)
 void
 pfm_load_regs (struct task_struct *task)
 {
-	struct thread_struct *t;
 	pfm_context_t *ctx;
 	struct task_struct *owner;
 	unsigned long pmd_mask, pmc_mask;
@@ -6223,7 +6212,6 @@ pfm_load_regs (struct task_struct *task)
 
 	owner = GET_PMU_OWNER();
 	ctx = PFM_GET_CTX(task);
-	t = &task->thread;
 	psr = pfm_get_psr();
 
 	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
@@ -6286,22 +6274,22 @@ pfm_load_regs (struct task_struct *task)
 	 */
 	pmc_mask = ctx->ctx_all_pmcs[0];
 
-	pfm_restore_pmds(t->pmds, pmd_mask);
-	pfm_restore_pmcs(t->pmcs, pmc_mask);
+	pfm_restore_pmds(ctx->th_pmds, pmd_mask);
+	pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
 
 	/*
 	 * check for pending overflow at the time the state
 	 * was saved.
 	 */
-	if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
+	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
 		/*
 		 * reload pmc0 with the overflow information
 		 * On McKinley PMU, this will trigger a PMU interrupt
 		 */
-		ia64_set_pmc(0, t->pmcs[0]);
+		ia64_set_pmc(0, ctx->th_pmcs[0]);
 		ia64_srlz_d();
 
-		t->pmcs[0] = 0UL;
+		ctx->th_pmcs[0] = 0UL;
 
 		/*
 		 * will replay the PMU interrupt
@@ -6376,11 +6364,11 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 		 */
 		pfm_unfreeze_pmu();
 	} else {
-		pmc0 = task->thread.pmcs[0];
+		pmc0 = ctx->th_pmcs[0];
 		/*
 		 * clear whatever overflow status bits there were
 		 */
-		task->thread.pmcs[0] = 0;
+		ctx->th_pmcs[0] = 0;
 	}
 	ovfl_val = pmu_conf->ovfl_val;
 	/*
@@ -6401,7 +6389,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 		/*
 		 * can access PMU always true in system wide mode
 		 */
-		val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : task->thread.pmds[i];
+		val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
 
 		if (PMD_IS_COUNTING(i)) {
 			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
@@ -6433,7 +6421,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 
 		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));
 
-		if (is_self) task->thread.pmds[i] = pmd_val;
+		if (is_self) ctx->th_pmds[i] = pmd_val;
 
 		ctx->ctx_pmds[i].val = val;
 	}
@@ -6677,7 +6665,7 @@ pfm_init(void)
 		ffz(pmu_conf->ovfl_val));
 
 	/* sanity check */
-	if (pmu_conf->num_pmds >= IA64_NUM_PMD_REGS || pmu_conf->num_pmcs >= IA64_NUM_PMC_REGS) {
+	if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
 		printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
 		pmu_conf = NULL;
 		return -1;
@@ -6752,7 +6740,6 @@ void
 dump_pmu_state(const char *from)
 {
 	struct task_struct *task;
-	struct thread_struct *t;
 	struct pt_regs *regs;
 	pfm_context_t *ctx;
 	unsigned long psr, dcr, info, flags;
@@ -6797,16 +6784,14 @@ dump_pmu_state(const char *from)
 	ia64_psr(regs)->up = 0;
 	ia64_psr(regs)->pp = 0;
 
-	t = &current->thread;
-
 	for (i=1; PMC_IS_LAST(i) == 0; i++) {
 		if (PMC_IS_IMPL(i) == 0) continue;
-		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]);
+		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
 	}
 
 	for (i=1; PMD_IS_LAST(i) == 0; i++) {
 		if (PMD_IS_IMPL(i) == 0) continue;
-		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]);
+		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
 	}
 
 	if (ctx) {