author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ia64/kernel/perfmon.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ia64/kernel/perfmon.c')
-rw-r--r-- | arch/ia64/kernel/perfmon.c | 6676
1 file changed, 6676 insertions, 0 deletions
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
new file mode 100644
index 000000000000..71147be3279c
--- /dev/null
+++ b/arch/ia64/kernel/perfmon.c
@@ -0,0 +1,6676 @@
1 | /* | ||
2 | * This file implements the perfmon-2 subsystem which is used | ||
3 | * to program the IA-64 Performance Monitoring Unit (PMU). | ||
4 | * | ||
5 | * The initial version of perfmon.c was written by | ||
6 | * Ganesh Venkitachalam, IBM Corp. | ||
7 | * | ||
8 | * Then it was modified for perfmon-1.x by Stephane Eranian and | ||
9 | * David Mosberger, Hewlett Packard Co. | ||
10 | * | ||
11 | * Version Perfmon-2.x is a rewrite of perfmon-1.x | ||
12 | * by Stephane Eranian, Hewlett Packard Co. | ||
13 | * | ||
14 | * Copyright (C) 1999-2003, 2005 Hewlett Packard Co | ||
15 | * Stephane Eranian <eranian@hpl.hp.com> | ||
16 | * David Mosberger-Tang <davidm@hpl.hp.com> | ||
17 | * | ||
18 | * More information about perfmon available at: | ||
19 | * http://www.hpl.hp.com/research/linux/perfmon | ||
20 | */ | ||
21 | |||
22 | #include <linux/config.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/sched.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/smp_lock.h> | ||
28 | #include <linux/proc_fs.h> | ||
29 | #include <linux/seq_file.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/vmalloc.h> | ||
32 | #include <linux/mm.h> | ||
33 | #include <linux/sysctl.h> | ||
34 | #include <linux/list.h> | ||
35 | #include <linux/file.h> | ||
36 | #include <linux/poll.h> | ||
37 | #include <linux/vfs.h> | ||
38 | #include <linux/pagemap.h> | ||
39 | #include <linux/mount.h> | ||
40 | #include <linux/version.h> | ||
41 | #include <linux/bitops.h> | ||
42 | |||
43 | #include <asm/errno.h> | ||
44 | #include <asm/intrinsics.h> | ||
45 | #include <asm/page.h> | ||
46 | #include <asm/perfmon.h> | ||
47 | #include <asm/processor.h> | ||
48 | #include <asm/signal.h> | ||
49 | #include <asm/system.h> | ||
50 | #include <asm/uaccess.h> | ||
51 | #include <asm/delay.h> | ||
52 | |||
53 | #ifdef CONFIG_PERFMON | ||
54 | /* | ||
55 | * perfmon context state | ||
56 | */ | ||
57 | #define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */ | ||
58 | #define PFM_CTX_LOADED 2 /* context is loaded onto a task */ | ||
59 | #define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */ | ||
60 | #define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */ | ||
61 | |||
62 | #define PFM_INVALID_ACTIVATION (~0UL) | ||
63 | |||
64 | /* | ||
65 | * depth of message queue | ||
66 | */ | ||
67 | #define PFM_MAX_MSGS 32 | ||
68 | #define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail) | ||
69 | |||
70 | /* | ||
71 | * type of a PMU register (bitmask). | ||
72 | * bitmask structure: | ||
73 | * bit0 : register implemented | ||
74 | * bit1 : end marker | ||
75 | * bit2-3 : reserved | ||
76 | * bit4 : pmc has pmc.pm | ||
77 | * bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter | ||
78 | * bit6-7 : register type | ||
79 | * bit8-31: reserved | ||
80 | */ | ||
81 | #define PFM_REG_NOTIMPL 0x0 /* not implemented at all */ | ||
82 | #define PFM_REG_IMPL 0x1 /* register implemented */ | ||
83 | #define PFM_REG_END 0x2 /* end marker */ | ||
84 | #define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */ | ||
85 | #define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi+ PMD used as a counter */ | ||
86 | #define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */ | ||
87 | #define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */ | ||
88 | #define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */ | ||
89 | |||
90 | #define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END) | ||
91 | #define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END) | ||
92 | |||
93 | #define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY) | ||
94 | |||
95 | /* i assumed unsigned */ | ||
96 | #define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL)) | ||
97 | #define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL)) | ||
98 | |||
99 | /* XXX: these assume that register i is implemented */ | ||
100 | #define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING) | ||
101 | #define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING) | ||
102 | #define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR) | ||
103 | #define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL) | ||
104 | |||
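The register-type values above are cumulative bitmasks: PFM_REG_COUNTING contains PFM_REG_MONITOR, which in turn contains PFM_REG_IMPL, so the mask-and-compare macros just above test for the full pattern. A standalone sketch of that encoding (illustrative only, not part of perfmon.c):

/* Illustrative sketch only -- mirrors the PFM_REG_* encoding above. */
#include <assert.h>

#define REG_IMPL      0x1
#define REG_MONITOR   (0x1 << 4 | REG_IMPL)     /* 0x11 */
#define REG_COUNTING  (0x2 << 4 | REG_MONITOR)  /* 0x31: counting implies monitor+impl */

int main(void)
{
	unsigned int type = REG_COUNTING;

	/* the "is counting" test requires every bit of REG_COUNTING to be set */
	assert((type & REG_COUNTING) == REG_COUNTING);
	/* a counting register is therefore also a monitor and implemented */
	assert((type & REG_MONITOR) == REG_MONITOR);
	assert(type & REG_IMPL);
	return 0;
}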
105 | #define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value | ||
106 | #define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask | ||
107 | #define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0] | ||
108 | #define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0] | ||
109 | |||
110 | #define PFM_NUM_IBRS IA64_NUM_DBG_REGS | ||
111 | #define PFM_NUM_DBRS IA64_NUM_DBG_REGS | ||
112 | |||
113 | #define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0) | ||
114 | #define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling) | ||
115 | #define PFM_CTX_TASK(h) (h)->ctx_task | ||
116 | |||
117 | #define PMU_PMC_OI 5 /* position of pmc.oi bit */ | ||
118 | |||
119 | /* XXX: does not support more than 64 PMDs */ | ||
120 | #define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask) | ||
121 | #define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL) | ||
122 | |||
123 | #define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask) | ||
124 | |||
125 | #define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64) | ||
126 | #define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64) | ||
127 | #define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1) | ||
128 | #define PFM_CODE_RR 0 /* requesting code range restriction */ | ||
129 | #define PFM_DATA_RR 1 /* requesting data range restriction */ | ||
130 | |||
131 | #define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v) | ||
132 | #define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v) | ||
133 | #define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info) | ||
134 | |||
135 | #define RDEP(x) (1UL<<(x)) | ||
136 | |||
137 | /* | ||
138 | * context protection macros | ||
139 | * in SMP: | ||
140 | * - we need to protect against CPU concurrency (spin_lock) | ||
141 | * - we need to protect against PMU overflow interrupts (local_irq_disable) | ||
142 | * in UP: | ||
143 | * - we need to protect against PMU overflow interrupts (local_irq_disable) | ||
144 | * | ||
145 | * spin_lock_irqsave()/spin_lock_irqrestore(): | ||
146 | * in SMP: local_irq_disable + spin_lock | ||
147 | * in UP : local_irq_disable | ||
148 | * | ||
149 | * spin_lock()/spin_unlock(): | ||
150 | * in UP : removed automatically | ||
151 | * in SMP: protect against context accesses from other CPU. interrupts | ||
152 | * are not masked. This is useful for the PMU interrupt handler | ||
153 | * because we know we will not get PMU concurrency in that code. | ||
154 | */ | ||
155 | #define PROTECT_CTX(c, f) \ | ||
156 | do { \ | ||
157 | DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \ | ||
158 | spin_lock_irqsave(&(c)->ctx_lock, f); \ | ||
159 | DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \ | ||
160 | } while(0) | ||
161 | |||
162 | #define UNPROTECT_CTX(c, f) \ | ||
163 | do { \ | ||
164 | DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \ | ||
165 | spin_unlock_irqrestore(&(c)->ctx_lock, f); \ | ||
166 | } while(0) | ||
167 | |||
168 | #define PROTECT_CTX_NOPRINT(c, f) \ | ||
169 | do { \ | ||
170 | spin_lock_irqsave(&(c)->ctx_lock, f); \ | ||
171 | } while(0) | ||
172 | |||
173 | |||
174 | #define UNPROTECT_CTX_NOPRINT(c, f) \ | ||
175 | do { \ | ||
176 | spin_unlock_irqrestore(&(c)->ctx_lock, f); \ | ||
177 | } while(0) | ||
178 | |||
179 | |||
180 | #define PROTECT_CTX_NOIRQ(c) \ | ||
181 | do { \ | ||
182 | spin_lock(&(c)->ctx_lock); \ | ||
183 | } while(0) | ||
184 | |||
185 | #define UNPROTECT_CTX_NOIRQ(c) \ | ||
186 | do { \ | ||
187 | spin_unlock(&(c)->ctx_lock); \ | ||
188 | } while(0) | ||
189 | |||
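A hypothetical usage sketch of the protection macros above (the function name is invented; real callers appear throughout this file). The flags variable is passed by name and receives the saved interrupt state:

/* Hypothetical usage sketch of the context-protection macros above. */
static void example_touch_context(pfm_context_t *ctx)
{
	unsigned long flags;

	PROTECT_CTX(ctx, flags);     /* spin_lock_irqsave(&ctx->ctx_lock, flags)      */
	/* ... safe to inspect/modify ctx state here; the PMU overflow
	 * interrupt cannot race with us on this CPU ... */
	UNPROTECT_CTX(ctx, flags);   /* spin_unlock_irqrestore(&ctx->ctx_lock, flags) */
}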
190 | |||
191 | #ifdef CONFIG_SMP | ||
192 | |||
193 | #define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number) | ||
194 | #define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++ | ||
195 | #define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION() | ||
196 | |||
197 | #else /* !CONFIG_SMP */ | ||
198 | #define SET_ACTIVATION(t) do {} while(0) | ||
199 | #define GET_ACTIVATION(t) do {} while(0) | ||
200 | #define INC_ACTIVATION(t) do {} while(0) | ||
201 | #endif /* CONFIG_SMP */ | ||
202 | |||
203 | #define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0) | ||
204 | #define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner) | ||
205 | #define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx) | ||
206 | |||
207 | #define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g) | ||
208 | #define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g) | ||
209 | |||
210 | #define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0) | ||
211 | |||
212 | /* | ||
213 | * cmp0 must be the value of pmc0 | ||
214 | */ | ||
215 | #define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL) | ||
216 | |||
217 | #define PFMFS_MAGIC 0xa0b4d889 | ||
218 | |||
219 | /* | ||
220 | * debugging | ||
221 | */ | ||
222 | #define PFM_DEBUGGING 1 | ||
223 | #ifdef PFM_DEBUGGING | ||
224 | #define DPRINT(a) \ | ||
225 | do { \ | ||
226 | if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \ | ||
227 | } while (0) | ||
228 | |||
229 | #define DPRINT_ovfl(a) \ | ||
230 | do { \ | ||
231 | if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \ | ||
232 | } while (0) | ||
233 | #endif | ||
234 | |||
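The double parentheses in DPRINT callers are deliberate: the inner pair packs the whole printk() argument list into the single macro parameter `a`. A hedged usage sketch, with an invented message; output is enabled at run time through the kernel.perfmon.debug sysctl registered later in this file:

/* Hypothetical DPRINT call -- note the double parentheses. */
DPRINT(("ctx=%p state=%d\n", ctx, ctx->ctx_state));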
235 | /* | ||
236 | * 64-bit software counter structure | ||
237 | * | ||
238 | * the next_reset_type is applied to the next call to pfm_reset_regs() | ||
239 | */ | ||
240 | typedef struct { | ||
241 | unsigned long val; /* virtual 64bit counter value */ | ||
242 | unsigned long lval; /* last reset value */ | ||
243 | unsigned long long_reset; /* reset value on sampling overflow */ | ||
244 | unsigned long short_reset; /* reset value on overflow */ | ||
245 | unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */ | ||
246 | unsigned long smpl_pmds[4]; /* which pmds are accessed when the counter overflows */ | ||
247 | unsigned long seed; /* seed for random-number generator */ | ||
248 | unsigned long mask; /* mask for random-number generator */ | ||
249 | unsigned int flags; /* notify/do not notify */ | ||
250 | unsigned long eventid; /* overflow event identifier */ | ||
251 | } pfm_counter_t; | ||
252 | |||
253 | /* | ||
254 | * context flags | ||
255 | */ | ||
256 | typedef struct { | ||
257 | unsigned int block:1; /* when 1, task will block on user notifications */ | ||
258 | unsigned int system:1; /* do system wide monitoring */ | ||
259 | unsigned int using_dbreg:1; /* using range restrictions (debug registers) */ | ||
260 | unsigned int is_sampling:1; /* true if using a custom format */ | ||
261 | unsigned int excl_idle:1; /* exclude idle task in system wide session */ | ||
262 | unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */ | ||
263 | unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */ | ||
264 | unsigned int no_msg:1; /* no message sent on overflow */ | ||
265 | unsigned int can_restart:1; /* allowed to issue a PFM_RESTART */ | ||
266 | unsigned int reserved:22; | ||
267 | } pfm_context_flags_t; | ||
268 | |||
269 | #define PFM_TRAP_REASON_NONE 0x0 /* default value */ | ||
270 | #define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */ | ||
271 | #define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */ | ||
272 | |||
273 | |||
274 | /* | ||
275 | * perfmon context: encapsulates all the state of a monitoring session | ||
276 | */ | ||
277 | |||
278 | typedef struct pfm_context { | ||
279 | spinlock_t ctx_lock; /* context protection */ | ||
280 | |||
281 | pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */ | ||
282 | unsigned int ctx_state; /* state: active/inactive (no bitfield) */ | ||
283 | |||
284 | struct task_struct *ctx_task; /* task to which context is attached */ | ||
285 | |||
286 | unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */ | ||
287 | |||
288 | struct semaphore ctx_restart_sem; /* use for blocking notification mode */ | ||
289 | |||
290 | unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */ | ||
291 | unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */ | ||
292 | unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */ | ||
293 | |||
294 | unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */ | ||
295 | unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */ | ||
296 | unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */ | ||
297 | |||
298 | unsigned long ctx_pmcs[IA64_NUM_PMC_REGS]; /* saved copies of PMC values */ | ||
299 | |||
300 | unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */ | ||
301 | unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */ | ||
302 | unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */ | ||
303 | unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */ | ||
304 | |||
305 | pfm_counter_t ctx_pmds[IA64_NUM_PMD_REGS]; /* software state for PMDS */ | ||
306 | |||
307 | u64 ctx_saved_psr_up; /* only contains psr.up value */ | ||
308 | |||
309 | unsigned long ctx_last_activation; /* context last activation number for last_cpu */ | ||
310 | unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */ | ||
311 | unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */ | ||
312 | |||
313 | int ctx_fd; /* file descriptor used by this context */ | ||
314 | pfm_ovfl_arg_t ctx_ovfl_arg; /* argument to custom buffer format handler */ | ||
315 | |||
316 | pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */ | ||
317 | void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */ | ||
318 | unsigned long ctx_smpl_size; /* size of sampling buffer */ | ||
319 | void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */ | ||
320 | |||
321 | wait_queue_head_t ctx_msgq_wait; | ||
322 | pfm_msg_t ctx_msgq[PFM_MAX_MSGS]; | ||
323 | int ctx_msgq_head; | ||
324 | int ctx_msgq_tail; | ||
325 | struct fasync_struct *ctx_async_queue; | ||
326 | |||
327 | wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */ | ||
328 | } pfm_context_t; | ||
329 | |||
330 | /* | ||
331 | * magic number used to verify that structure is really | ||
332 | * a perfmon context | ||
333 | */ | ||
334 | #define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops) | ||
335 | |||
336 | #define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context) | ||
337 | |||
338 | #ifdef CONFIG_SMP | ||
339 | #define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v) | ||
340 | #define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu | ||
341 | #else | ||
342 | #define SET_LAST_CPU(ctx, v) do {} while(0) | ||
343 | #define GET_LAST_CPU(ctx) do {} while(0) | ||
344 | #endif | ||
345 | |||
346 | |||
347 | #define ctx_fl_block ctx_flags.block | ||
348 | #define ctx_fl_system ctx_flags.system | ||
349 | #define ctx_fl_using_dbreg ctx_flags.using_dbreg | ||
350 | #define ctx_fl_is_sampling ctx_flags.is_sampling | ||
351 | #define ctx_fl_excl_idle ctx_flags.excl_idle | ||
352 | #define ctx_fl_going_zombie ctx_flags.going_zombie | ||
353 | #define ctx_fl_trap_reason ctx_flags.trap_reason | ||
354 | #define ctx_fl_no_msg ctx_flags.no_msg | ||
355 | #define ctx_fl_can_restart ctx_flags.can_restart | ||
356 | |||
357 | #define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0); | ||
358 | #define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking | ||
359 | |||
360 | /* | ||
361 | * global information about all sessions | ||
362 | * mostly used to synchronize between system wide and per-process | ||
363 | */ | ||
364 | typedef struct { | ||
365 | spinlock_t pfs_lock; /* lock the structure */ | ||
366 | |||
367 | unsigned int pfs_task_sessions; /* number of per task sessions */ | ||
368 | unsigned int pfs_sys_sessions; /* number of per system wide sessions */ | ||
369 | unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */ | ||
370 | unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */ | ||
371 | struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */ | ||
372 | } pfm_session_t; | ||
373 | |||
374 | /* | ||
375 | * information about a PMC or PMD. | ||
376 | * dep_pmd[]: a bitmask of dependent PMD registers | ||
377 | * dep_pmc[]: a bitmask of dependent PMC registers | ||
378 | */ | ||
379 | typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs); | ||
380 | typedef struct { | ||
381 | unsigned int type; | ||
382 | int pm_pos; | ||
383 | unsigned long default_value; /* power-on default value */ | ||
384 | unsigned long reserved_mask; /* bitmask of reserved bits */ | ||
385 | pfm_reg_check_t read_check; | ||
386 | pfm_reg_check_t write_check; | ||
387 | unsigned long dep_pmd[4]; | ||
388 | unsigned long dep_pmc[4]; | ||
389 | } pfm_reg_desc_t; | ||
390 | |||
391 | /* assume cnum is a valid monitor */ | ||
392 | #define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1) | ||
393 | |||
394 | /* | ||
395 | * This structure is initialized at boot time and contains | ||
396 | * a description of the PMU main characteristics. | ||
397 | * | ||
398 | * If the probe function is defined, detection is based | ||
399 | * on its return value: | ||
400 | * - 0 means recognized PMU | ||
401 | * - anything else means not supported | ||
402 | * When the probe function is not defined, then the pmu_family field | ||
403 | * is used and it must match the host CPU family such that: | ||
404 | * - cpu->family & config->pmu_family != 0 | ||
405 | */ | ||
406 | typedef struct { | ||
407 | unsigned long ovfl_val; /* overflow value for counters */ | ||
408 | |||
409 | pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */ | ||
410 | pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */ | ||
411 | |||
412 | unsigned int num_pmcs; /* number of PMCS: computed at init time */ | ||
413 | unsigned int num_pmds; /* number of PMDS: computed at init time */ | ||
414 | unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */ | ||
415 | unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */ | ||
416 | |||
417 | char *pmu_name; /* PMU family name */ | ||
418 | unsigned int pmu_family; /* cpuid family pattern used to identify pmu */ | ||
419 | unsigned int flags; /* pmu specific flags */ | ||
420 | unsigned int num_ibrs; /* number of IBRS: computed at init time */ | ||
421 | unsigned int num_dbrs; /* number of DBRS: computed at init time */ | ||
422 | unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */ | ||
423 | int (*probe)(void); /* customized probe routine */ | ||
424 | unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */ | ||
425 | } pmu_config_t; | ||
426 | /* | ||
427 | * PMU specific flags | ||
428 | */ | ||
429 | #define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */ | ||
430 | |||
431 | /* | ||
432 | * debug register related type definitions | ||
433 | */ | ||
434 | typedef struct { | ||
435 | unsigned long ibr_mask:56; | ||
436 | unsigned long ibr_plm:4; | ||
437 | unsigned long ibr_ig:3; | ||
438 | unsigned long ibr_x:1; | ||
439 | } ibr_mask_reg_t; | ||
440 | |||
441 | typedef struct { | ||
442 | unsigned long dbr_mask:56; | ||
443 | unsigned long dbr_plm:4; | ||
444 | unsigned long dbr_ig:2; | ||
445 | unsigned long dbr_w:1; | ||
446 | unsigned long dbr_r:1; | ||
447 | } dbr_mask_reg_t; | ||
448 | |||
449 | typedef union { | ||
450 | unsigned long val; | ||
451 | ibr_mask_reg_t ibr; | ||
452 | dbr_mask_reg_t dbr; | ||
453 | } dbreg_t; | ||
454 | |||
455 | |||
456 | /* | ||
457 | * perfmon command descriptions | ||
458 | */ | ||
459 | typedef struct { | ||
460 | int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); | ||
461 | char *cmd_name; | ||
462 | int cmd_flags; | ||
463 | unsigned int cmd_narg; | ||
464 | size_t cmd_argsize; | ||
465 | int (*cmd_getsize)(void *arg, size_t *sz); | ||
466 | } pfm_cmd_desc_t; | ||
467 | |||
468 | #define PFM_CMD_FD 0x01 /* command requires a file descriptor */ | ||
469 | #define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */ | ||
470 | #define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */ | ||
471 | #define PFM_CMD_STOP 0x08 /* command does not work on zombie context */ | ||
472 | |||
473 | |||
474 | #define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name | ||
475 | #define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ) | ||
476 | #define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW) | ||
477 | #define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD) | ||
478 | #define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP) | ||
479 | |||
480 | #define PFM_CMD_ARG_MANY -1 /* cannot be zero */ | ||
481 | |||
482 | typedef struct { | ||
483 | int debug; /* turn on/off debugging via syslog */ | ||
484 | int debug_ovfl; /* turn on/off debug printk in overflow handler */ | ||
485 | int fastctxsw; /* turn on/off fast (unsecure) ctxsw */ | ||
486 | int expert_mode; /* turn on/off value checking */ | ||
487 | int debug_pfm_read; | ||
488 | } pfm_sysctl_t; | ||
489 | |||
490 | typedef struct { | ||
491 | unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */ | ||
492 | unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */ | ||
493 | unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */ | ||
494 | unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */ | ||
495 | unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */ | ||
496 | unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */ | ||
497 | unsigned long pfm_smpl_handler_calls; | ||
498 | unsigned long pfm_smpl_handler_cycles; | ||
499 | char pad[SMP_CACHE_BYTES] ____cacheline_aligned; | ||
500 | } pfm_stats_t; | ||
501 | |||
502 | /* | ||
503 | * perfmon internal variables | ||
504 | */ | ||
505 | static pfm_stats_t pfm_stats[NR_CPUS]; | ||
506 | static pfm_session_t pfm_sessions; /* global sessions information */ | ||
507 | |||
508 | static struct proc_dir_entry *perfmon_dir; | ||
509 | static pfm_uuid_t pfm_null_uuid = {0,}; | ||
510 | |||
511 | static spinlock_t pfm_buffer_fmt_lock; | ||
512 | static LIST_HEAD(pfm_buffer_fmt_list); | ||
513 | |||
514 | static pmu_config_t *pmu_conf; | ||
515 | |||
516 | /* sysctl() controls */ | ||
517 | static pfm_sysctl_t pfm_sysctl; | ||
518 | int pfm_debug_var; | ||
519 | |||
520 | static ctl_table pfm_ctl_table[]={ | ||
521 | {1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,}, | ||
522 | {2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,}, | ||
523 | {3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,}, | ||
524 | {4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,}, | ||
525 | { 0, }, | ||
526 | }; | ||
527 | static ctl_table pfm_sysctl_dir[] = { | ||
528 | {1, "perfmon", NULL, 0, 0755, pfm_ctl_table, }, | ||
529 | {0,}, | ||
530 | }; | ||
531 | static ctl_table pfm_sysctl_root[] = { | ||
532 | {1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, }, | ||
533 | {0,}, | ||
534 | }; | ||
535 | static struct ctl_table_header *pfm_sysctl_header; | ||
536 | |||
537 | static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); | ||
538 | static int pfm_flush(struct file *filp); | ||
539 | |||
540 | #define pfm_get_cpu_var(v) __ia64_per_cpu_var(v) | ||
541 | #define pfm_get_cpu_data(a,b) per_cpu(a, b) | ||
542 | |||
543 | static inline void | ||
544 | pfm_put_task(struct task_struct *task) | ||
545 | { | ||
546 | if (task != current) put_task_struct(task); | ||
547 | } | ||
548 | |||
549 | static inline void | ||
550 | pfm_set_task_notify(struct task_struct *task) | ||
551 | { | ||
552 | struct thread_info *info; | ||
553 | |||
554 | info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE); | ||
555 | set_bit(TIF_NOTIFY_RESUME, &info->flags); | ||
556 | } | ||
557 | |||
558 | static inline void | ||
559 | pfm_clear_task_notify(void) | ||
560 | { | ||
561 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
562 | } | ||
563 | |||
564 | static inline void | ||
565 | pfm_reserve_page(unsigned long a) | ||
566 | { | ||
567 | SetPageReserved(vmalloc_to_page((void *)a)); | ||
568 | } | ||
569 | static inline void | ||
570 | pfm_unreserve_page(unsigned long a) | ||
571 | { | ||
572 | ClearPageReserved(vmalloc_to_page((void*)a)); | ||
573 | } | ||
574 | |||
575 | static inline unsigned long | ||
576 | pfm_protect_ctx_ctxsw(pfm_context_t *x) | ||
577 | { | ||
578 | spin_lock(&(x)->ctx_lock); | ||
579 | return 0UL; | ||
580 | } | ||
581 | |||
582 | static inline void | ||
583 | pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f) | ||
584 | { | ||
585 | spin_unlock(&(x)->ctx_lock); | ||
586 | } | ||
587 | |||
588 | static inline unsigned int | ||
589 | pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct) | ||
590 | { | ||
591 | return do_munmap(mm, addr, len); | ||
592 | } | ||
593 | |||
594 | static inline unsigned long | ||
595 | pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec) | ||
596 | { | ||
597 | return get_unmapped_area(file, addr, len, pgoff, flags); | ||
598 | } | ||
599 | |||
600 | |||
601 | static struct super_block * | ||
602 | pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) | ||
603 | { | ||
604 | return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC); | ||
605 | } | ||
606 | |||
607 | static struct file_system_type pfm_fs_type = { | ||
608 | .name = "pfmfs", | ||
609 | .get_sb = pfmfs_get_sb, | ||
610 | .kill_sb = kill_anon_super, | ||
611 | }; | ||
612 | |||
613 | DEFINE_PER_CPU(unsigned long, pfm_syst_info); | ||
614 | DEFINE_PER_CPU(struct task_struct *, pmu_owner); | ||
615 | DEFINE_PER_CPU(pfm_context_t *, pmu_ctx); | ||
616 | DEFINE_PER_CPU(unsigned long, pmu_activation_number); | ||
617 | |||
618 | |||
619 | /* forward declaration */ | ||
620 | static struct file_operations pfm_file_ops; | ||
621 | |||
622 | /* | ||
623 | * forward declarations | ||
624 | */ | ||
625 | #ifndef CONFIG_SMP | ||
626 | static void pfm_lazy_save_regs (struct task_struct *ta); | ||
627 | #endif | ||
628 | |||
629 | void dump_pmu_state(const char *); | ||
630 | static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); | ||
631 | |||
632 | #include "perfmon_itanium.h" | ||
633 | #include "perfmon_mckinley.h" | ||
634 | #include "perfmon_generic.h" | ||
635 | |||
636 | static pmu_config_t *pmu_confs[]={ | ||
637 | &pmu_conf_mck, | ||
638 | &pmu_conf_ita, | ||
639 | &pmu_conf_gen, /* must be last */ | ||
640 | NULL | ||
641 | }; | ||
642 | |||
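Per the probe()/pmu_family rule documented with pmu_config_t above, initialization walks this NULL-terminated table until an entry matches. A simplified sketch of that selection (the function name is invented and the real init code later in this file may differ in detail):

/* Sketch of PMU selection over pmu_confs[]; illustrative only. */
static pmu_config_t *example_pick_pmu(unsigned int cpu_family)
{
	pmu_config_t **p;

	for (p = pmu_confs; *p; p++) {
		if ((*p)->probe) {
			if ((*p)->probe() == 0) return *p;   /* 0 means recognized PMU   */
		} else if ((*p)->pmu_family & cpu_family) {
			return *p;                           /* cpuid family pattern hit */
		}
	}
	return NULL;                                         /* no supported PMU found   */
}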
643 | |||
644 | static int pfm_end_notify_user(pfm_context_t *ctx); | ||
645 | |||
646 | static inline void | ||
647 | pfm_clear_psr_pp(void) | ||
648 | { | ||
649 | ia64_rsm(IA64_PSR_PP); | ||
650 | ia64_srlz_i(); | ||
651 | } | ||
652 | |||
653 | static inline void | ||
654 | pfm_set_psr_pp(void) | ||
655 | { | ||
656 | ia64_ssm(IA64_PSR_PP); | ||
657 | ia64_srlz_i(); | ||
658 | } | ||
659 | |||
660 | static inline void | ||
661 | pfm_clear_psr_up(void) | ||
662 | { | ||
663 | ia64_rsm(IA64_PSR_UP); | ||
664 | ia64_srlz_i(); | ||
665 | } | ||
666 | |||
667 | static inline void | ||
668 | pfm_set_psr_up(void) | ||
669 | { | ||
670 | ia64_ssm(IA64_PSR_UP); | ||
671 | ia64_srlz_i(); | ||
672 | } | ||
673 | |||
674 | static inline unsigned long | ||
675 | pfm_get_psr(void) | ||
676 | { | ||
677 | unsigned long tmp; | ||
678 | tmp = ia64_getreg(_IA64_REG_PSR); | ||
679 | ia64_srlz_i(); | ||
680 | return tmp; | ||
681 | } | ||
682 | |||
683 | static inline void | ||
684 | pfm_set_psr_l(unsigned long val) | ||
685 | { | ||
686 | ia64_setreg(_IA64_REG_PSR_L, val); | ||
687 | ia64_srlz_i(); | ||
688 | } | ||
689 | |||
690 | static inline void | ||
691 | pfm_freeze_pmu(void) | ||
692 | { | ||
693 | ia64_set_pmc(0,1UL); | ||
694 | ia64_srlz_d(); | ||
695 | } | ||
696 | |||
697 | static inline void | ||
698 | pfm_unfreeze_pmu(void) | ||
699 | { | ||
700 | ia64_set_pmc(0,0UL); | ||
701 | ia64_srlz_d(); | ||
702 | } | ||
703 | |||
704 | static inline void | ||
705 | pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs) | ||
706 | { | ||
707 | int i; | ||
708 | |||
709 | for (i=0; i < nibrs; i++) { | ||
710 | ia64_set_ibr(i, ibrs[i]); | ||
711 | ia64_dv_serialize_instruction(); | ||
712 | } | ||
713 | ia64_srlz_i(); | ||
714 | } | ||
715 | |||
716 | static inline void | ||
717 | pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs) | ||
718 | { | ||
719 | int i; | ||
720 | |||
721 | for (i=0; i < ndbrs; i++) { | ||
722 | ia64_set_dbr(i, dbrs[i]); | ||
723 | ia64_dv_serialize_data(); | ||
724 | } | ||
725 | ia64_srlz_d(); | ||
726 | } | ||
727 | |||
728 | /* | ||
729 | * PMD[i] must be a counter. no check is made | ||
730 | */ | ||
731 | static inline unsigned long | ||
732 | pfm_read_soft_counter(pfm_context_t *ctx, int i) | ||
733 | { | ||
734 | return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val); | ||
735 | } | ||
736 | |||
737 | /* | ||
738 | * PMD[i] must be a counter. no check is made | ||
739 | */ | ||
740 | static inline void | ||
741 | pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val) | ||
742 | { | ||
743 | unsigned long ovfl_val = pmu_conf->ovfl_val; | ||
744 | |||
745 | ctx->ctx_pmds[i].val = val & ~ovfl_val; | ||
746 | /* | ||
747 | * writing to the unimplemented part is ignored, so we do not need to | ||
748 | * mask off top part | ||
749 | */ | ||
750 | ia64_set_pmd(i, val & ovfl_val); | ||
751 | } | ||
752 | |||
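The two helpers above implement the 64-bit software counter: only the hardware-implemented low bits (ovfl_val) live in the PMD register, while the remainder accumulates in ctx_pmds[i].val. A small standalone sketch of the split/recombine arithmetic, assuming a 47-bit counter width (true of some Itanium 2 PMUs):

/* Illustrative only: assumes ovfl_val == (1UL << 47) - 1. */
static unsigned long example_split_and_recombine(unsigned long value)
{
	unsigned long ovfl_val = (1UL << 47) - 1;
	unsigned long soft = value & ~ovfl_val;   /* kept in ctx->ctx_pmds[i].val */
	unsigned long hw   = value &  ovfl_val;   /* written to the hardware PMD  */

	return soft + hw;                         /* pfm_read_soft_counter(): == value */
}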
753 | static pfm_msg_t * | ||
754 | pfm_get_new_msg(pfm_context_t *ctx) | ||
755 | { | ||
756 | int idx, next; | ||
757 | |||
758 | next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS; | ||
759 | |||
760 | DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); | ||
761 | if (next == ctx->ctx_msgq_head) return NULL; | ||
762 | |||
763 | idx = ctx->ctx_msgq_tail; | ||
764 | ctx->ctx_msgq_tail = next; | ||
765 | |||
766 | DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx)); | ||
767 | |||
768 | return ctx->ctx_msgq+idx; | ||
769 | } | ||
770 | |||
771 | static pfm_msg_t * | ||
772 | pfm_get_next_msg(pfm_context_t *ctx) | ||
773 | { | ||
774 | pfm_msg_t *msg; | ||
775 | |||
776 | DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); | ||
777 | |||
778 | if (PFM_CTXQ_EMPTY(ctx)) return NULL; | ||
779 | |||
780 | /* | ||
781 | * get oldest message | ||
782 | */ | ||
783 | msg = ctx->ctx_msgq+ctx->ctx_msgq_head; | ||
784 | |||
785 | /* | ||
786 | * and move forward | ||
787 | */ | ||
788 | ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS; | ||
789 | |||
790 | DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type)); | ||
791 | |||
792 | return msg; | ||
793 | } | ||
794 | |||
795 | static void | ||
796 | pfm_reset_msgq(pfm_context_t *ctx) | ||
797 | { | ||
798 | ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0; | ||
799 | DPRINT(("ctx=%p msgq reset\n", ctx)); | ||
800 | } | ||
801 | |||
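ctx_msgq is a fixed head/tail ring of PFM_MAX_MSGS slots; because a full ring is detected by `next == head`, at most PFM_MAX_MSGS-1 messages can be queued at once. A standalone sketch of the same index arithmetic (not code from this file):

/* Standalone sketch of the ring used by pfm_get_new_msg()/pfm_get_next_msg(). */
#define RING_SIZE 32                        /* mirrors PFM_MAX_MSGS */

struct ring { int head, tail; };

static int ring_empty(struct ring *r) { return r->head == r->tail; }

static int ring_push(struct ring *r)        /* returns slot index, or -1 if full */
{
	int next = (r->tail + 1) % RING_SIZE;
	int idx;

	if (next == r->head) return -1;      /* full: one slot always kept free */
	idx = r->tail;                       /* caller fills this slot          */
	r->tail = next;
	return idx;
}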
802 | static void * | ||
803 | pfm_rvmalloc(unsigned long size) | ||
804 | { | ||
805 | void *mem; | ||
806 | unsigned long addr; | ||
807 | |||
808 | size = PAGE_ALIGN(size); | ||
809 | mem = vmalloc(size); | ||
810 | if (mem) { | ||
811 | //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem); | ||
812 | memset(mem, 0, size); | ||
813 | addr = (unsigned long)mem; | ||
814 | while (size > 0) { | ||
815 | pfm_reserve_page(addr); | ||
816 | addr+=PAGE_SIZE; | ||
817 | size-=PAGE_SIZE; | ||
818 | } | ||
819 | } | ||
820 | return mem; | ||
821 | } | ||
822 | |||
823 | static void | ||
824 | pfm_rvfree(void *mem, unsigned long size) | ||
825 | { | ||
826 | unsigned long addr; | ||
827 | |||
828 | if (mem) { | ||
829 | DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size)); | ||
830 | addr = (unsigned long) mem; | ||
831 | while ((long) size > 0) { | ||
832 | pfm_unreserve_page(addr); | ||
833 | addr+=PAGE_SIZE; | ||
834 | size-=PAGE_SIZE; | ||
835 | } | ||
836 | vfree(mem); | ||
837 | } | ||
838 | return; | ||
839 | } | ||
840 | |||
841 | static pfm_context_t * | ||
842 | pfm_context_alloc(void) | ||
843 | { | ||
844 | pfm_context_t *ctx; | ||
845 | |||
846 | /* | ||
847 | * allocate context descriptor | ||
848 | * must be able to free with interrupts disabled | ||
849 | */ | ||
850 | ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL); | ||
851 | if (ctx) { | ||
852 | memset(ctx, 0, sizeof(pfm_context_t)); | ||
853 | DPRINT(("alloc ctx @%p\n", ctx)); | ||
854 | } | ||
855 | return ctx; | ||
856 | } | ||
857 | |||
858 | static void | ||
859 | pfm_context_free(pfm_context_t *ctx) | ||
860 | { | ||
861 | if (ctx) { | ||
862 | DPRINT(("free ctx @%p\n", ctx)); | ||
863 | kfree(ctx); | ||
864 | } | ||
865 | } | ||
866 | |||
867 | static void | ||
868 | pfm_mask_monitoring(struct task_struct *task) | ||
869 | { | ||
870 | pfm_context_t *ctx = PFM_GET_CTX(task); | ||
871 | struct thread_struct *th = &task->thread; | ||
872 | unsigned long mask, val, ovfl_mask; | ||
873 | int i; | ||
874 | |||
875 | DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid)); | ||
876 | |||
877 | ovfl_mask = pmu_conf->ovfl_val; | ||
878 | /* | ||
879 | * monitoring can only be masked as a result of a valid | ||
880 | * counter overflow. In UP, it means that the PMU still | ||
881 | * has an owner. Note that the owner can be different | ||
882 | * from the current task. However the PMU state belongs | ||
883 | * to the owner. | ||
884 | * In SMP, a valid overflow only happens when task is | ||
885 | * current. Therefore if we come here, we know that | ||
886 | * the PMU state belongs to the current task, therefore | ||
887 | * we can access the live registers. | ||
888 | * | ||
889 | * So in both cases, the live register contains the owner's | ||
890 | * state. We can ONLY touch the PMU registers and NOT the PSR. | ||
891 | * | ||
892 | * As a consequence of this call, the thread->pmds[] array | ||
893 | * contains stale information which must be ignored | ||
894 | * when context is reloaded AND monitoring is active (see | ||
895 | * pfm_restart). | ||
896 | */ | ||
897 | mask = ctx->ctx_used_pmds[0]; | ||
898 | for (i = 0; mask; i++, mask>>=1) { | ||
899 | /* skip non used pmds */ | ||
900 | if ((mask & 0x1) == 0) continue; | ||
901 | val = ia64_get_pmd(i); | ||
902 | |||
903 | if (PMD_IS_COUNTING(i)) { | ||
904 | /* | ||
905 | * we rebuild the full 64 bit value of the counter | ||
906 | */ | ||
907 | ctx->ctx_pmds[i].val += (val & ovfl_mask); | ||
908 | } else { | ||
909 | ctx->ctx_pmds[i].val = val; | ||
910 | } | ||
911 | DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n", | ||
912 | i, | ||
913 | ctx->ctx_pmds[i].val, | ||
914 | val & ovfl_mask)); | ||
915 | } | ||
916 | /* | ||
917 | * mask monitoring by setting the privilege level to 0 | ||
918 | * we cannot use psr.pp/psr.up for this, it is controlled by | ||
919 | * the user | ||
920 | * | ||
921 | * if task is current, modify actual registers, otherwise modify | ||
922 | * thread save state, i.e., what will be restored in pfm_load_regs() | ||
923 | */ | ||
924 | mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER; | ||
925 | for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) { | ||
926 | if ((mask & 0x1) == 0UL) continue; | ||
927 | ia64_set_pmc(i, th->pmcs[i] & ~0xfUL); | ||
928 | th->pmcs[i] &= ~0xfUL; | ||
929 | DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i])); | ||
930 | } | ||
931 | /* | ||
932 | * make all of this visible | ||
933 | */ | ||
934 | ia64_srlz_d(); | ||
935 | } | ||
936 | |||
937 | /* | ||
938 | * must always be done with task == current | ||
939 | * | ||
940 | * context must be in MASKED state when calling | ||
941 | */ | ||
942 | static void | ||
943 | pfm_restore_monitoring(struct task_struct *task) | ||
944 | { | ||
945 | pfm_context_t *ctx = PFM_GET_CTX(task); | ||
946 | struct thread_struct *th = &task->thread; | ||
947 | unsigned long mask, ovfl_mask; | ||
948 | unsigned long psr, val; | ||
949 | int i, is_system; | ||
950 | |||
951 | is_system = ctx->ctx_fl_system; | ||
952 | ovfl_mask = pmu_conf->ovfl_val; | ||
953 | |||
954 | if (task != current) { | ||
955 | printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid); | ||
956 | return; | ||
957 | } | ||
958 | if (ctx->ctx_state != PFM_CTX_MASKED) { | ||
959 | printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__, | ||
960 | task->pid, current->pid, ctx->ctx_state); | ||
961 | return; | ||
962 | } | ||
963 | psr = pfm_get_psr(); | ||
964 | /* | ||
965 | * monitoring is masked via the PMC. | ||
966 | * As we restore their value, we do not want each counter to | ||
967 | * restart right away. We stop monitoring using the PSR, | ||
968 | * restore the PMC (and PMD) and then re-establish the psr | ||
969 | * as it was. Note that there can be no pending overflow at | ||
970 | * this point, because monitoring was MASKED. | ||
971 | * | ||
972 | * system-wide session are pinned and self-monitoring | ||
973 | */ | ||
974 | if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) { | ||
975 | /* disable dcr pp */ | ||
976 | ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); | ||
977 | pfm_clear_psr_pp(); | ||
978 | } else { | ||
979 | pfm_clear_psr_up(); | ||
980 | } | ||
981 | /* | ||
982 | * first, we restore the PMD | ||
983 | */ | ||
984 | mask = ctx->ctx_used_pmds[0]; | ||
985 | for (i = 0; mask; i++, mask>>=1) { | ||
986 | /* skip non used pmds */ | ||
987 | if ((mask & 0x1) == 0) continue; | ||
988 | |||
989 | if (PMD_IS_COUNTING(i)) { | ||
990 | /* | ||
991 | * we split the 64bit value according to | ||
992 | * counter width | ||
993 | */ | ||
994 | val = ctx->ctx_pmds[i].val & ovfl_mask; | ||
995 | ctx->ctx_pmds[i].val &= ~ovfl_mask; | ||
996 | } else { | ||
997 | val = ctx->ctx_pmds[i].val; | ||
998 | } | ||
999 | ia64_set_pmd(i, val); | ||
1000 | |||
1001 | DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n", | ||
1002 | i, | ||
1003 | ctx->ctx_pmds[i].val, | ||
1004 | val)); | ||
1005 | } | ||
1006 | /* | ||
1007 | * restore the PMCs | ||
1008 | */ | ||
1009 | mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER; | ||
1010 | for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) { | ||
1011 | if ((mask & 0x1) == 0UL) continue; | ||
1012 | th->pmcs[i] = ctx->ctx_pmcs[i]; | ||
1013 | ia64_set_pmc(i, th->pmcs[i]); | ||
1014 | DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i])); | ||
1015 | } | ||
1016 | ia64_srlz_d(); | ||
1017 | |||
1018 | /* | ||
1019 | * must restore DBR/IBR because could be modified while masked | ||
1020 | * XXX: need to optimize | ||
1021 | */ | ||
1022 | if (ctx->ctx_fl_using_dbreg) { | ||
1023 | pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); | ||
1024 | pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); | ||
1025 | } | ||
1026 | |||
1027 | /* | ||
1028 | * now restore PSR | ||
1029 | */ | ||
1030 | if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) { | ||
1031 | /* enable dcr pp */ | ||
1032 | ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); | ||
1033 | ia64_srlz_i(); | ||
1034 | } | ||
1035 | pfm_set_psr_l(psr); | ||
1036 | } | ||
1037 | |||
1038 | static inline void | ||
1039 | pfm_save_pmds(unsigned long *pmds, unsigned long mask) | ||
1040 | { | ||
1041 | int i; | ||
1042 | |||
1043 | ia64_srlz_d(); | ||
1044 | |||
1045 | for (i=0; mask; i++, mask>>=1) { | ||
1046 | if (mask & 0x1) pmds[i] = ia64_get_pmd(i); | ||
1047 | } | ||
1048 | } | ||
1049 | |||
1050 | /* | ||
1051 | * reload from thread state (used for ctxsw only) | ||
1052 | */ | ||
1053 | static inline void | ||
1054 | pfm_restore_pmds(unsigned long *pmds, unsigned long mask) | ||
1055 | { | ||
1056 | int i; | ||
1057 | unsigned long val, ovfl_val = pmu_conf->ovfl_val; | ||
1058 | |||
1059 | for (i=0; mask; i++, mask>>=1) { | ||
1060 | if ((mask & 0x1) == 0) continue; | ||
1061 | val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i]; | ||
1062 | ia64_set_pmd(i, val); | ||
1063 | } | ||
1064 | ia64_srlz_d(); | ||
1065 | } | ||
1066 | |||
1067 | /* | ||
1068 | * propagate PMD from context to thread-state | ||
1069 | */ | ||
1070 | static inline void | ||
1071 | pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx) | ||
1072 | { | ||
1073 | struct thread_struct *thread = &task->thread; | ||
1074 | unsigned long ovfl_val = pmu_conf->ovfl_val; | ||
1075 | unsigned long mask = ctx->ctx_all_pmds[0]; | ||
1076 | unsigned long val; | ||
1077 | int i; | ||
1078 | |||
1079 | DPRINT(("mask=0x%lx\n", mask)); | ||
1080 | |||
1081 | for (i=0; mask; i++, mask>>=1) { | ||
1082 | |||
1083 | val = ctx->ctx_pmds[i].val; | ||
1084 | |||
1085 | /* | ||
1086 | * We break up the 64 bit value into 2 pieces | ||
1087 | * the lower bits go to the machine state in the | ||
1088 | * thread (will be reloaded on ctxsw in). | ||
1089 | * The upper part stays in the soft-counter. | ||
1090 | */ | ||
1091 | if (PMD_IS_COUNTING(i)) { | ||
1092 | ctx->ctx_pmds[i].val = val & ~ovfl_val; | ||
1093 | val &= ovfl_val; | ||
1094 | } | ||
1095 | thread->pmds[i] = val; | ||
1096 | |||
1097 | DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n", | ||
1098 | i, | ||
1099 | thread->pmds[i], | ||
1100 | ctx->ctx_pmds[i].val)); | ||
1101 | } | ||
1102 | } | ||
1103 | |||
1104 | /* | ||
1105 | * propagate PMC from context to thread-state | ||
1106 | */ | ||
1107 | static inline void | ||
1108 | pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx) | ||
1109 | { | ||
1110 | struct thread_struct *thread = &task->thread; | ||
1111 | unsigned long mask = ctx->ctx_all_pmcs[0]; | ||
1112 | int i; | ||
1113 | |||
1114 | DPRINT(("mask=0x%lx\n", mask)); | ||
1115 | |||
1116 | for (i=0; mask; i++, mask>>=1) { | ||
1117 | /* masking 0 with ovfl_val yields 0 */ | ||
1118 | thread->pmcs[i] = ctx->ctx_pmcs[i]; | ||
1119 | DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i])); | ||
1120 | } | ||
1121 | } | ||
1122 | |||
1123 | |||
1124 | |||
1125 | static inline void | ||
1126 | pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask) | ||
1127 | { | ||
1128 | int i; | ||
1129 | |||
1130 | for (i=0; mask; i++, mask>>=1) { | ||
1131 | if ((mask & 0x1) == 0) continue; | ||
1132 | ia64_set_pmc(i, pmcs[i]); | ||
1133 | } | ||
1134 | ia64_srlz_d(); | ||
1135 | } | ||
1136 | |||
1137 | static inline int | ||
1138 | pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b) | ||
1139 | { | ||
1140 | return memcmp(a, b, sizeof(pfm_uuid_t)); | ||
1141 | } | ||
1142 | |||
1143 | static inline int | ||
1144 | pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs) | ||
1145 | { | ||
1146 | int ret = 0; | ||
1147 | if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs); | ||
1148 | return ret; | ||
1149 | } | ||
1150 | |||
1151 | static inline int | ||
1152 | pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size) | ||
1153 | { | ||
1154 | int ret = 0; | ||
1155 | if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size); | ||
1156 | return ret; | ||
1157 | } | ||
1158 | |||
1159 | |||
1160 | static inline int | ||
1161 | pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, | ||
1162 | int cpu, void *arg) | ||
1163 | { | ||
1164 | int ret = 0; | ||
1165 | if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg); | ||
1166 | return ret; | ||
1167 | } | ||
1168 | |||
1169 | static inline int | ||
1170 | pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags, | ||
1171 | int cpu, void *arg) | ||
1172 | { | ||
1173 | int ret = 0; | ||
1174 | if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg); | ||
1175 | return ret; | ||
1176 | } | ||
1177 | |||
1178 | static inline int | ||
1179 | pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs) | ||
1180 | { | ||
1181 | int ret = 0; | ||
1182 | if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs); | ||
1183 | return ret; | ||
1184 | } | ||
1185 | |||
1186 | static inline int | ||
1187 | pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs) | ||
1188 | { | ||
1189 | int ret = 0; | ||
1190 | if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs); | ||
1191 | return ret; | ||
1192 | } | ||
1193 | |||
1194 | static pfm_buffer_fmt_t * | ||
1195 | __pfm_find_buffer_fmt(pfm_uuid_t uuid) | ||
1196 | { | ||
1197 | struct list_head * pos; | ||
1198 | pfm_buffer_fmt_t * entry; | ||
1199 | |||
1200 | list_for_each(pos, &pfm_buffer_fmt_list) { | ||
1201 | entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list); | ||
1202 | if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0) | ||
1203 | return entry; | ||
1204 | } | ||
1205 | return NULL; | ||
1206 | } | ||
1207 | |||
1208 | /* | ||
1209 | * find a buffer format based on its uuid | ||
1210 | */ | ||
1211 | static pfm_buffer_fmt_t * | ||
1212 | pfm_find_buffer_fmt(pfm_uuid_t uuid) | ||
1213 | { | ||
1214 | pfm_buffer_fmt_t * fmt; | ||
1215 | spin_lock(&pfm_buffer_fmt_lock); | ||
1216 | fmt = __pfm_find_buffer_fmt(uuid); | ||
1217 | spin_unlock(&pfm_buffer_fmt_lock); | ||
1218 | return fmt; | ||
1219 | } | ||
1220 | |||
1221 | int | ||
1222 | pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt) | ||
1223 | { | ||
1224 | int ret = 0; | ||
1225 | |||
1226 | /* some sanity checks */ | ||
1227 | if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL; | ||
1228 | |||
1229 | /* we need at least a handler */ | ||
1230 | if (fmt->fmt_handler == NULL) return -EINVAL; | ||
1231 | |||
1232 | /* | ||
1233 | * XXX: need check validity of fmt_arg_size | ||
1234 | */ | ||
1235 | |||
1236 | spin_lock(&pfm_buffer_fmt_lock); | ||
1237 | |||
1238 | if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) { | ||
1239 | printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name); | ||
1240 | ret = -EBUSY; | ||
1241 | goto out; | ||
1242 | } | ||
1243 | list_add(&fmt->fmt_list, &pfm_buffer_fmt_list); | ||
1244 | printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name); | ||
1245 | |||
1246 | out: | ||
1247 | spin_unlock(&pfm_buffer_fmt_lock); | ||
1248 | return ret; | ||
1249 | } | ||
1250 | EXPORT_SYMBOL(pfm_register_buffer_fmt); | ||
1251 | |||
1252 | int | ||
1253 | pfm_unregister_buffer_fmt(pfm_uuid_t uuid) | ||
1254 | { | ||
1255 | pfm_buffer_fmt_t *fmt; | ||
1256 | int ret = 0; | ||
1257 | |||
1258 | spin_lock(&pfm_buffer_fmt_lock); | ||
1259 | |||
1260 | fmt = __pfm_find_buffer_fmt(uuid); | ||
1261 | if (!fmt) { | ||
1262 | printk(KERN_ERR "perfmon: cannot unregister format, not found\n"); | ||
1263 | ret = -EINVAL; | ||
1264 | goto out; | ||
1265 | } | ||
1266 | list_del_init(&fmt->fmt_list); | ||
1267 | printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name); | ||
1268 | |||
1269 | out: | ||
1270 | spin_unlock(&pfm_buffer_fmt_lock); | ||
1271 | return ret; | ||
1272 | |||
1273 | } | ||
1274 | EXPORT_SYMBOL(pfm_unregister_buffer_fmt); | ||
1275 | |||
1276 | static int | ||
1277 | pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) | ||
1278 | { | ||
1279 | unsigned long flags; | ||
1280 | /* | ||
1281 | * validity checks on cpu_mask have been done upstream | ||
1282 | */ | ||
1283 | LOCK_PFS(flags); | ||
1284 | |||
1285 | DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n", | ||
1286 | pfm_sessions.pfs_sys_sessions, | ||
1287 | pfm_sessions.pfs_task_sessions, | ||
1288 | pfm_sessions.pfs_sys_use_dbregs, | ||
1289 | is_syswide, | ||
1290 | cpu)); | ||
1291 | |||
1292 | if (is_syswide) { | ||
1293 | /* | ||
1294 | * cannot mix system wide and per-task sessions | ||
1295 | */ | ||
1296 | if (pfm_sessions.pfs_task_sessions > 0UL) { | ||
1297 | DPRINT(("system wide not possible, %u conflicting task_sessions\n", | ||
1298 | pfm_sessions.pfs_task_sessions)); | ||
1299 | goto abort; | ||
1300 | } | ||
1301 | |||
1302 | if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict; | ||
1303 | |||
1304 | DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id())); | ||
1305 | |||
1306 | pfm_sessions.pfs_sys_session[cpu] = task; | ||
1307 | |||
1308 | pfm_sessions.pfs_sys_sessions++ ; | ||
1309 | |||
1310 | } else { | ||
1311 | if (pfm_sessions.pfs_sys_sessions) goto abort; | ||
1312 | pfm_sessions.pfs_task_sessions++; | ||
1313 | } | ||
1314 | |||
1315 | DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n", | ||
1316 | pfm_sessions.pfs_sys_sessions, | ||
1317 | pfm_sessions.pfs_task_sessions, | ||
1318 | pfm_sessions.pfs_sys_use_dbregs, | ||
1319 | is_syswide, | ||
1320 | cpu)); | ||
1321 | |||
1322 | UNLOCK_PFS(flags); | ||
1323 | |||
1324 | return 0; | ||
1325 | |||
1326 | error_conflict: | ||
1327 | DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n", | ||
1328 | pfm_sessions.pfs_sys_session[cpu]->pid, | ||
1329 | smp_processor_id())); | ||
1330 | abort: | ||
1331 | UNLOCK_PFS(flags); | ||
1332 | |||
1333 | return -EBUSY; | ||
1334 | |||
1335 | } | ||
1336 | |||
1337 | static int | ||
1338 | pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu) | ||
1339 | { | ||
1340 | unsigned long flags; | ||
1341 | /* | ||
1342 | * validity checks on cpu_mask have been done upstream | ||
1343 | */ | ||
1344 | LOCK_PFS(flags); | ||
1345 | |||
1346 | DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n", | ||
1347 | pfm_sessions.pfs_sys_sessions, | ||
1348 | pfm_sessions.pfs_task_sessions, | ||
1349 | pfm_sessions.pfs_sys_use_dbregs, | ||
1350 | is_syswide, | ||
1351 | cpu)); | ||
1352 | |||
1353 | |||
1354 | if (is_syswide) { | ||
1355 | pfm_sessions.pfs_sys_session[cpu] = NULL; | ||
1356 | /* | ||
1357 | * would not work with perfmon+more than one bit in cpu_mask | ||
1358 | */ | ||
1359 | if (ctx && ctx->ctx_fl_using_dbreg) { | ||
1360 | if (pfm_sessions.pfs_sys_use_dbregs == 0) { | ||
1361 | printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx); | ||
1362 | } else { | ||
1363 | pfm_sessions.pfs_sys_use_dbregs--; | ||
1364 | } | ||
1365 | } | ||
1366 | pfm_sessions.pfs_sys_sessions--; | ||
1367 | } else { | ||
1368 | pfm_sessions.pfs_task_sessions--; | ||
1369 | } | ||
1370 | DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n", | ||
1371 | pfm_sessions.pfs_sys_sessions, | ||
1372 | pfm_sessions.pfs_task_sessions, | ||
1373 | pfm_sessions.pfs_sys_use_dbregs, | ||
1374 | is_syswide, | ||
1375 | cpu)); | ||
1376 | |||
1377 | UNLOCK_PFS(flags); | ||
1378 | |||
1379 | return 0; | ||
1380 | } | ||
1381 | |||
1382 | /* | ||
1383 | * removes virtual mapping of the sampling buffer. | ||
1384 | * IMPORTANT: cannot be called with interrupts disabled, e.g. inside | ||
1385 | * a PROTECT_CTX() section. | ||
1386 | */ | ||
1387 | static int | ||
1388 | pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size) | ||
1389 | { | ||
1390 | int r; | ||
1391 | |||
1392 | /* sanity checks */ | ||
1393 | if (task->mm == NULL || size == 0UL || vaddr == NULL) { | ||
1394 | printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm); | ||
1395 | return -EINVAL; | ||
1396 | } | ||
1397 | |||
1398 | DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size)); | ||
1399 | |||
1400 | /* | ||
1401 | * does the actual unmapping | ||
1402 | */ | ||
1403 | down_write(&task->mm->mmap_sem); | ||
1404 | |||
1405 | DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size)); | ||
1406 | |||
1407 | r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0); | ||
1408 | |||
1409 | up_write(&task->mm->mmap_sem); | ||
1410 | if (r !=0) { | ||
1411 | printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size); | ||
1412 | } | ||
1413 | |||
1414 | DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r)); | ||
1415 | |||
1416 | return 0; | ||
1417 | } | ||
1418 | |||
1419 | /* | ||
1420 | * free actual physical storage used by sampling buffer | ||
1421 | */ | ||
1422 | #if 0 | ||
1423 | static int | ||
1424 | pfm_free_smpl_buffer(pfm_context_t *ctx) | ||
1425 | { | ||
1426 | pfm_buffer_fmt_t *fmt; | ||
1427 | |||
1428 | if (ctx->ctx_smpl_hdr == NULL) goto invalid_free; | ||
1429 | |||
1430 | /* | ||
1431 | * we won't use the buffer format anymore | ||
1432 | */ | ||
1433 | fmt = ctx->ctx_buf_fmt; | ||
1434 | |||
1435 | DPRINT(("sampling buffer @%p size %lu vaddr=%p\n", | ||
1436 | ctx->ctx_smpl_hdr, | ||
1437 | ctx->ctx_smpl_size, | ||
1438 | ctx->ctx_smpl_vaddr)); | ||
1439 | |||
1440 | pfm_buf_fmt_exit(fmt, current, NULL, NULL); | ||
1441 | |||
1442 | /* | ||
1443 | * free the buffer | ||
1444 | */ | ||
1445 | pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size); | ||
1446 | |||
1447 | ctx->ctx_smpl_hdr = NULL; | ||
1448 | ctx->ctx_smpl_size = 0UL; | ||
1449 | |||
1450 | return 0; | ||
1451 | |||
1452 | invalid_free: | ||
1453 | printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid); | ||
1454 | return -EINVAL; | ||
1455 | } | ||
1456 | #endif | ||
1457 | |||
1458 | static inline void | ||
1459 | pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt) | ||
1460 | { | ||
1461 | if (fmt == NULL) return; | ||
1462 | |||
1463 | pfm_buf_fmt_exit(fmt, current, NULL, NULL); | ||
1464 | |||
1465 | } | ||
1466 | |||
1467 | /* | ||
1468 | * pfmfs should _never_ be mounted by userland - too much of security hassle, | ||
1469 | * no real gain from having the whole whorehouse mounted. So we don't need | ||
1470 | * any operations on the root directory. However, we need a non-trivial | ||
1471 | * d_name - pfm: will go nicely and kill the special-casing in procfs. | ||
1472 | */ | ||
1473 | static struct vfsmount *pfmfs_mnt; | ||
1474 | |||
1475 | static int __init | ||
1476 | init_pfm_fs(void) | ||
1477 | { | ||
1478 | int err = register_filesystem(&pfm_fs_type); | ||
1479 | if (!err) { | ||
1480 | pfmfs_mnt = kern_mount(&pfm_fs_type); | ||
1481 | err = PTR_ERR(pfmfs_mnt); | ||
1482 | if (IS_ERR(pfmfs_mnt)) | ||
1483 | unregister_filesystem(&pfm_fs_type); | ||
1484 | else | ||
1485 | err = 0; | ||
1486 | } | ||
1487 | return err; | ||
1488 | } | ||
1489 | |||
1490 | static void __exit | ||
1491 | exit_pfm_fs(void) | ||
1492 | { | ||
1493 | unregister_filesystem(&pfm_fs_type); | ||
1494 | mntput(pfmfs_mnt); | ||
1495 | } | ||
1496 | |||
1497 | static ssize_t | ||
1498 | pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) | ||
1499 | { | ||
1500 | pfm_context_t *ctx; | ||
1501 | pfm_msg_t *msg; | ||
1502 | ssize_t ret; | ||
1503 | unsigned long flags; | ||
1504 | DECLARE_WAITQUEUE(wait, current); | ||
1505 | if (PFM_IS_FILE(filp) == 0) { | ||
1506 | printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", current->pid); | ||
1507 | return -EINVAL; | ||
1508 | } | ||
1509 | |||
1510 | ctx = (pfm_context_t *)filp->private_data; | ||
1511 | if (ctx == NULL) { | ||
1512 | printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid); | ||
1513 | return -EINVAL; | ||
1514 | } | ||
1515 | |||
1516 | /* | ||
1517 | * check even when there is no message | ||
1518 | */ | ||
1519 | if (size < sizeof(pfm_msg_t)) { | ||
1520 | DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t))); | ||
1521 | return -EINVAL; | ||
1522 | } | ||
1523 | |||
1524 | PROTECT_CTX(ctx, flags); | ||
1525 | |||
1526 | /* | ||
1527 | * put ourselves on the wait queue | ||
1528 | */ | ||
1529 | add_wait_queue(&ctx->ctx_msgq_wait, &wait); | ||
1530 | |||
1531 | |||
1532 | for(;;) { | ||
1533 | /* | ||
1534 | * check wait queue | ||
1535 | */ | ||
1536 | |||
1537 | set_current_state(TASK_INTERRUPTIBLE); | ||
1538 | |||
1539 | DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); | ||
1540 | |||
1541 | ret = 0; | ||
1542 | if(PFM_CTXQ_EMPTY(ctx) == 0) break; | ||
1543 | |||
1544 | UNPROTECT_CTX(ctx, flags); | ||
1545 | |||
1546 | /* | ||
1547 | * check non-blocking read | ||
1548 | */ | ||
1549 | ret = -EAGAIN; | ||
1550 | if(filp->f_flags & O_NONBLOCK) break; | ||
1551 | |||
1552 | /* | ||
1553 | * check pending signals | ||
1554 | */ | ||
1555 | if(signal_pending(current)) { | ||
1556 | ret = -EINTR; | ||
1557 | break; | ||
1558 | } | ||
1559 | /* | ||
1560 | * no message, so wait | ||
1561 | */ | ||
1562 | schedule(); | ||
1563 | |||
1564 | PROTECT_CTX(ctx, flags); | ||
1565 | } | ||
1566 | DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret)); | ||
1567 | set_current_state(TASK_RUNNING); | ||
1568 | remove_wait_queue(&ctx->ctx_msgq_wait, &wait); | ||
1569 | |||
1570 | if (ret < 0) goto abort; | ||
1571 | |||
1572 | ret = -EINVAL; | ||
1573 | msg = pfm_get_next_msg(ctx); | ||
1574 | if (msg == NULL) { | ||
1575 | printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid); | ||
1576 | goto abort_locked; | ||
1577 | } | ||
1578 | |||
1579 | DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type)); | ||
1580 | |||
1581 | ret = -EFAULT; | ||
1582 | if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t); | ||
1583 | |||
1584 | abort_locked: | ||
1585 | UNPROTECT_CTX(ctx, flags); | ||
1586 | abort: | ||
1587 | return ret; | ||
1588 | } | ||
1589 | |||
1590 | static ssize_t | ||
1591 | pfm_write(struct file *file, const char __user *ubuf, | ||
1592 | size_t size, loff_t *ppos) | ||
1593 | { | ||
1594 | DPRINT(("pfm_write called\n")); | ||
1595 | return -EINVAL; | ||
1596 | } | ||
1597 | |||
1598 | static unsigned int | ||
1599 | pfm_poll(struct file *filp, poll_table * wait) | ||
1600 | { | ||
1601 | pfm_context_t *ctx; | ||
1602 | unsigned long flags; | ||
1603 | unsigned int mask = 0; | ||
1604 | |||
1605 | if (PFM_IS_FILE(filp) == 0) { | ||
1606 | printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid); | ||
1607 | return 0; | ||
1608 | } | ||
1609 | |||
1610 | ctx = (pfm_context_t *)filp->private_data; | ||
1611 | if (ctx == NULL) { | ||
1612 | printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid); | ||
1613 | return 0; | ||
1614 | } | ||
1615 | |||
1616 | |||
1617 | DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd)); | ||
1618 | |||
1619 | poll_wait(filp, &ctx->ctx_msgq_wait, wait); | ||
1620 | |||
1621 | PROTECT_CTX(ctx, flags); | ||
1622 | |||
1623 | if (PFM_CTXQ_EMPTY(ctx) == 0) | ||
1624 | mask = POLLIN | POLLRDNORM; | ||
1625 | |||
1626 | UNPROTECT_CTX(ctx, flags); | ||
1627 | |||
1628 | DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask)); | ||
1629 | |||
1630 | return mask; | ||
1631 | } | ||
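/*
 * Editor's note (illustration only, not part of the original perfmon source):
 * pfm_read() and pfm_poll() above implement the notification side of the
 * context file descriptor. The sketch below, kept under #if 0, shows how a
 * user-level monitoring tool might consume overflow notification messages.
 * It assumes a user-space build with the perfmon definitions visible
 * (pfm_msg_t, PFM_MSG_OVFL), typically via <perfmon/perfmon.h> from libpfm;
 * that header path is an assumption, not something this file defines.
 */
#if 0
#include <poll.h>
#include <unistd.h>
#include <perfmon/perfmon.h>	/* pfm_msg_t, PFM_MSG_OVFL (assumed location) */

static void wait_for_notification(int ctx_fd)
{
	struct pollfd pfd = { .fd = ctx_fd, .events = POLLIN };
	pfm_msg_t msg;

	/* block until the kernel queues a message on the context */
	if (poll(&pfd, 1, -1) < 1) return;

	/* read() must be given at least sizeof(pfm_msg_t), see pfm_read() */
	if (read(ctx_fd, &msg, sizeof(msg)) != sizeof(msg)) return;

	if (msg.pfm_gen_msg.msg_type == PFM_MSG_OVFL) {
		/* handle the counter overflow notification here */
	}
}
#endif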
1632 | |||
1633 | static int | ||
1634 | pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) | ||
1635 | { | ||
1636 | DPRINT(("pfm_ioctl called\n")); | ||
1637 | return -EINVAL; | ||
1638 | } | ||
1639 | |||
1640 | /* | ||
1641 | * interrupt cannot be masked when coming here | ||
1642 | */ | ||
1643 | static inline int | ||
1644 | pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on) | ||
1645 | { | ||
1646 | int ret; | ||
1647 | |||
1648 | ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue); | ||
1649 | |||
1650 | DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n", | ||
1651 | current->pid, | ||
1652 | fd, | ||
1653 | on, | ||
1654 | ctx->ctx_async_queue, ret)); | ||
1655 | |||
1656 | return ret; | ||
1657 | } | ||
1658 | |||
1659 | static int | ||
1660 | pfm_fasync(int fd, struct file *filp, int on) | ||
1661 | { | ||
1662 | pfm_context_t *ctx; | ||
1663 | int ret; | ||
1664 | |||
1665 | if (PFM_IS_FILE(filp) == 0) { | ||
1666 | printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid); | ||
1667 | return -EBADF; | ||
1668 | } | ||
1669 | |||
1670 | ctx = (pfm_context_t *)filp->private_data; | ||
1671 | if (ctx == NULL) { | ||
1672 | printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid); | ||
1673 | return -EBADF; | ||
1674 | } | ||
1675 | /* | ||
1676 | * we cannot mask interrupts during this call because this may | ||
1677 | * go to sleep if memory is not readily available. | ||
1678 | * | ||
1679 | * We are protected from the context disappearing by the get_fd()/put_fd() | ||
1680 | * done in caller. Serialization of this function is ensured by caller. | ||
1681 | */ | ||
1682 | ret = pfm_do_fasync(fd, filp, ctx, on); | ||
1683 | |||
1684 | |||
1685 | DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n", | ||
1686 | fd, | ||
1687 | on, | ||
1688 | ctx->ctx_async_queue, ret)); | ||
1689 | |||
1690 | return ret; | ||
1691 | } | ||
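/*
 * Editor's note (illustration only, not part of the original source):
 * pfm_fasync() is reached through the standard fcntl() asynchronous
 * notification path. A user-level tool that prefers SIGIO delivery over a
 * blocking read() could arm it roughly as follows; this is a minimal sketch
 * using only standard POSIX calls.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int arm_sigio(int ctx_fd)
{
	/* direct SIGIO for this descriptor to the calling process */
	if (fcntl(ctx_fd, F_SETOWN, getpid()) == -1) return -1;

	/* setting O_ASYNC ends up in pfm_fasync() via fasync_helper() */
	return fcntl(ctx_fd, F_SETFL, fcntl(ctx_fd, F_GETFL) | O_ASYNC);
}
#endif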
1692 | |||
1693 | #ifdef CONFIG_SMP | ||
1694 | /* | ||
1695 | * this function is exclusively called from pfm_close(). | ||
1696 | * The context is not protected at that time, nor are interrupts | ||
1697 | * on the remote CPU. That's necessary to avoid deadlocks. | ||
1698 | */ | ||
1699 | static void | ||
1700 | pfm_syswide_force_stop(void *info) | ||
1701 | { | ||
1702 | pfm_context_t *ctx = (pfm_context_t *)info; | ||
1703 | struct pt_regs *regs = ia64_task_regs(current); | ||
1704 | struct task_struct *owner; | ||
1705 | unsigned long flags; | ||
1706 | int ret; | ||
1707 | |||
1708 | if (ctx->ctx_cpu != smp_processor_id()) { | ||
1709 | printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n", | ||
1710 | ctx->ctx_cpu, | ||
1711 | smp_processor_id()); | ||
1712 | return; | ||
1713 | } | ||
1714 | owner = GET_PMU_OWNER(); | ||
1715 | if (owner != ctx->ctx_task) { | ||
1716 | printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n", | ||
1717 | smp_processor_id(), | ||
1718 | owner->pid, ctx->ctx_task->pid); | ||
1719 | return; | ||
1720 | } | ||
1721 | if (GET_PMU_CTX() != ctx) { | ||
1722 | printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n", | ||
1723 | smp_processor_id(), | ||
1724 | GET_PMU_CTX(), ctx); | ||
1725 | return; | ||
1726 | } | ||
1727 | |||
1728 | DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid)); | ||
1729 | /* | ||
1730 | * the context is already protected in pfm_close(), we simply | ||
1731 | * need to mask interrupts to avoid a PMU interrupt race on | ||
1732 | * this CPU | ||
1733 | */ | ||
1734 | local_irq_save(flags); | ||
1735 | |||
1736 | ret = pfm_context_unload(ctx, NULL, 0, regs); | ||
1737 | if (ret) { | ||
1738 | DPRINT(("context_unload returned %d\n", ret)); | ||
1739 | } | ||
1740 | |||
1741 | /* | ||
1742 | * unmask interrupts, PMU interrupts are now spurious here | ||
1743 | */ | ||
1744 | local_irq_restore(flags); | ||
1745 | } | ||
1746 | |||
1747 | static void | ||
1748 | pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx) | ||
1749 | { | ||
1750 | int ret; | ||
1751 | |||
1752 | DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu)); | ||
1753 | ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1); | ||
1754 | DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret)); | ||
1755 | } | ||
1756 | #endif /* CONFIG_SMP */ | ||
1757 | |||
1758 | /* | ||
1759 | * called for each close(). Partially free resources. | ||
1760 | * When caller is self-monitoring, the context is unloaded. | ||
1761 | */ | ||
1762 | static int | ||
1763 | pfm_flush(struct file *filp) | ||
1764 | { | ||
1765 | pfm_context_t *ctx; | ||
1766 | struct task_struct *task; | ||
1767 | struct pt_regs *regs; | ||
1768 | unsigned long flags; | ||
1769 | unsigned long smpl_buf_size = 0UL; | ||
1770 | void *smpl_buf_vaddr = NULL; | ||
1771 | int state, is_system; | ||
1772 | |||
1773 | if (PFM_IS_FILE(filp) == 0) { | ||
1774 | DPRINT(("bad magic\n")); | ||
1775 | return -EBADF; | ||
1776 | } | ||
1777 | |||
1778 | ctx = (pfm_context_t *)filp->private_data; | ||
1779 | if (ctx == NULL) { | ||
1780 | printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid); | ||
1781 | return -EBADF; | ||
1782 | } | ||
1783 | |||
1784 | /* | ||
1785 | * remove our file from the async queue, if we use this mode. | ||
1786 | * This can be done without the context being protected. We come | ||
1787 | * here when the context has become unreachable by other tasks. | ||
1788 | * | ||
1789 | * We may still have active monitoring at this point and we may | ||
1790 | * end up in pfm_overflow_handler(). However, fasync_helper() | ||
1791 | * operates with interrupts disabled and it cleans up the | ||
1792 | * queue. If the PMU handler is called prior to entering | ||
1793 | * fasync_helper() then it will send a signal. If it is | ||
1794 | * invoked after, it will find an empty queue and no | ||
1795 | * signal will be sent. In both cases, we are safe. | ||
1796 | */ | ||
1797 | if (filp->f_flags & FASYNC) { | ||
1798 | DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue)); | ||
1799 | pfm_do_fasync (-1, filp, ctx, 0); | ||
1800 | } | ||
1801 | |||
1802 | PROTECT_CTX(ctx, flags); | ||
1803 | |||
1804 | state = ctx->ctx_state; | ||
1805 | is_system = ctx->ctx_fl_system; | ||
1806 | |||
1807 | task = PFM_CTX_TASK(ctx); | ||
1808 | regs = ia64_task_regs(task); | ||
1809 | |||
1810 | DPRINT(("ctx_state=%d is_current=%d\n", | ||
1811 | state, | ||
1812 | task == current ? 1 : 0)); | ||
1813 | |||
1814 | /* | ||
1815 | * if state == UNLOADED, then task is NULL | ||
1816 | */ | ||
1817 | |||
1818 | /* | ||
1819 | * we must stop and unload because we are losing access to the context. | ||
1820 | */ | ||
1821 | if (task == current) { | ||
1822 | #ifdef CONFIG_SMP | ||
1823 | /* | ||
1824 | * the task IS the owner but it migrated to another CPU: that's bad | ||
1825 | * but we must handle this cleanly. Unfortunately, the kernel does | ||
1826 | * not provide a mechanism to block migration (while the context is loaded). | ||
1827 | * | ||
1828 | * We need to release the resource on the ORIGINAL cpu. | ||
1829 | */ | ||
1830 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | ||
1831 | |||
1832 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | ||
1833 | /* | ||
1834 | * keep context protected but unmask interrupt for IPI | ||
1835 | */ | ||
1836 | local_irq_restore(flags); | ||
1837 | |||
1838 | pfm_syswide_cleanup_other_cpu(ctx); | ||
1839 | |||
1840 | /* | ||
1841 | * restore interrupt masking | ||
1842 | */ | ||
1843 | local_irq_save(flags); | ||
1844 | |||
1845 | /* | ||
1846 | * context is unloaded at this point | ||
1847 | */ | ||
1848 | } else | ||
1849 | #endif /* CONFIG_SMP */ | ||
1850 | { | ||
1851 | |||
1852 | DPRINT(("forcing unload\n")); | ||
1853 | /* | ||
1854 | * stop and unload, returning with state UNLOADED | ||
1855 | * and session unreserved. | ||
1856 | */ | ||
1857 | pfm_context_unload(ctx, NULL, 0, regs); | ||
1858 | |||
1859 | DPRINT(("ctx_state=%d\n", ctx->ctx_state)); | ||
1860 | } | ||
1861 | } | ||
1862 | |||
1863 | /* | ||
1864 | * remove virtual mapping, if any, for the calling task. | ||
1865 | * cannot reset ctx field until last user is calling close(). | ||
1866 | * | ||
1867 | * ctx_smpl_vaddr must never be cleared because it is needed | ||
1868 | * by every task with access to the context | ||
1869 | * | ||
1870 | * When called from do_exit(), the mm context is gone already, therefore | ||
1871 | * mm is NULL, i.e., the VMA is already gone and we do not have to | ||
1872 | * do anything here | ||
1873 | */ | ||
1874 | if (ctx->ctx_smpl_vaddr && current->mm) { | ||
1875 | smpl_buf_vaddr = ctx->ctx_smpl_vaddr; | ||
1876 | smpl_buf_size = ctx->ctx_smpl_size; | ||
1877 | } | ||
1878 | |||
1879 | UNPROTECT_CTX(ctx, flags); | ||
1880 | |||
1881 | /* | ||
1882 | * if there was a mapping, then we systematically remove it | ||
1883 | * at this point. Cannot be done inside critical section | ||
1884 | * because some VM function reenables interrupts. | ||
1885 | * | ||
1886 | */ | ||
1887 | if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size); | ||
1888 | |||
1889 | return 0; | ||
1890 | } | ||
1891 | /* | ||
1892 | * called either on explicit close() or from exit_files(). | ||
1893 | * Only the LAST user of the file gets to this point, i.e., it is | ||
1894 | * called only ONCE. | ||
1895 | * | ||
1896 | * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero | ||
1897 | * (fput()),i.e, last task to access the file. Nobody else can access the | ||
1898 | * file at this point. | ||
1899 | * | ||
1900 | * When called from exit_files(), the VMA has been freed because exit_mm() | ||
1901 | * is executed before exit_files(). | ||
1902 | * | ||
1903 | * When called from exit_files(), the current task is not yet ZOMBIE but we | ||
1904 | * flush the PMU state to the context. | ||
1905 | */ | ||
1906 | static int | ||
1907 | pfm_close(struct inode *inode, struct file *filp) | ||
1908 | { | ||
1909 | pfm_context_t *ctx; | ||
1910 | struct task_struct *task; | ||
1911 | struct pt_regs *regs; | ||
1912 | DECLARE_WAITQUEUE(wait, current); | ||
1913 | unsigned long flags; | ||
1914 | unsigned long smpl_buf_size = 0UL; | ||
1915 | void *smpl_buf_addr = NULL; | ||
1916 | int free_possible = 1; | ||
1917 | int state, is_system; | ||
1918 | |||
1919 | DPRINT(("pfm_close called private=%p\n", filp->private_data)); | ||
1920 | |||
1921 | if (PFM_IS_FILE(filp) == 0) { | ||
1922 | DPRINT(("bad magic\n")); | ||
1923 | return -EBADF; | ||
1924 | } | ||
1925 | |||
1926 | ctx = (pfm_context_t *)filp->private_data; | ||
1927 | if (ctx == NULL) { | ||
1928 | printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid); | ||
1929 | return -EBADF; | ||
1930 | } | ||
1931 | |||
1932 | PROTECT_CTX(ctx, flags); | ||
1933 | |||
1934 | state = ctx->ctx_state; | ||
1935 | is_system = ctx->ctx_fl_system; | ||
1936 | |||
1937 | task = PFM_CTX_TASK(ctx); | ||
1938 | regs = ia64_task_regs(task); | ||
1939 | |||
1940 | DPRINT(("ctx_state=%d is_current=%d\n", | ||
1941 | state, | ||
1942 | task == current ? 1 : 0)); | ||
1943 | |||
1944 | /* | ||
1945 | * if task == current, then pfm_flush() unloaded the context | ||
1946 | */ | ||
1947 | if (state == PFM_CTX_UNLOADED) goto doit; | ||
1948 | |||
1949 | /* | ||
1950 | * context is loaded/masked and task != current, we need to | ||
1951 | * either force an unload or go zombie | ||
1952 | */ | ||
1953 | |||
1954 | /* | ||
1955 | * The task is currently blocked or will block after an overflow. | ||
1956 | * we must force it to wakeup to get out of the | ||
1957 | * MASKED state and transition to the unloaded state by itself. | ||
1958 | * | ||
1959 | * This situation is only possible for per-task mode | ||
1960 | */ | ||
1961 | if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) { | ||
1962 | |||
1963 | /* | ||
1964 | * set a "partial" zombie state to be checked | ||
1965 | * upon return from down() in pfm_handle_work(). | ||
1966 | * | ||
1967 | * We cannot use the ZOMBIE state, because it is checked | ||
1968 | * by pfm_load_regs() which is called upon wakeup from down(). | ||
1969 | * In such case, it would free the context and then we would | ||
1970 | * return to pfm_handle_work() which would access the | ||
1971 | * stale context. Instead, we set a flag invisible to pfm_load_regs() | ||
1972 | * but visible to pfm_handle_work(). | ||
1973 | * | ||
1974 | * For some window of time, we have a zombie context with | ||
1975 | * ctx_state = MASKED and not ZOMBIE | ||
1976 | */ | ||
1977 | ctx->ctx_fl_going_zombie = 1; | ||
1978 | |||
1979 | /* | ||
1980 | * force task to wake up from MASKED state | ||
1981 | */ | ||
1982 | up(&ctx->ctx_restart_sem); | ||
1983 | |||
1984 | DPRINT(("waking up ctx_state=%d\n", state)); | ||
1985 | |||
1986 | /* | ||
1987 | * put ourselves to sleep waiting for the other | ||
1988 | * task to report completion | ||
1989 | * | ||
1990 | * the context is protected by mutex, therefore there | ||
1991 | * is no risk of being notified of completion before | ||
1992 | * being actually on the waitq. | ||
1993 | */ | ||
1994 | set_current_state(TASK_INTERRUPTIBLE); | ||
1995 | add_wait_queue(&ctx->ctx_zombieq, &wait); | ||
1996 | |||
1997 | UNPROTECT_CTX(ctx, flags); | ||
1998 | |||
1999 | /* | ||
2000 | * XXX: check for signals : | ||
2001 | * - ok for explicit close | ||
2002 | * - not ok when coming from exit_files() | ||
2003 | */ | ||
2004 | schedule(); | ||
2005 | |||
2006 | |||
2007 | PROTECT_CTX(ctx, flags); | ||
2008 | |||
2009 | |||
2010 | remove_wait_queue(&ctx->ctx_zombieq, &wait); | ||
2011 | set_current_state(TASK_RUNNING); | ||
2012 | |||
2013 | /* | ||
2014 | * context is unloaded at this point | ||
2015 | */ | ||
2016 | DPRINT(("after zombie wakeup ctx_state=%d\n", state)); | ||
2017 | } | ||
2018 | else if (task != current) { | ||
2019 | #ifdef CONFIG_SMP | ||
2020 | /* | ||
2021 | * switch context to zombie state | ||
2022 | */ | ||
2023 | ctx->ctx_state = PFM_CTX_ZOMBIE; | ||
2024 | |||
2025 | DPRINT(("zombie ctx for [%d]\n", task->pid)); | ||
2026 | /* | ||
2027 | * cannot free the context on the spot. deferred until | ||
2028 | * the task notices the ZOMBIE state | ||
2029 | */ | ||
2030 | free_possible = 0; | ||
2031 | #else | ||
2032 | pfm_context_unload(ctx, NULL, 0, regs); | ||
2033 | #endif | ||
2034 | } | ||
2035 | |||
2036 | doit: | ||
2037 | /* reload state, may have changed during opening of critical section */ | ||
2038 | state = ctx->ctx_state; | ||
2039 | |||
2040 | /* | ||
2041 | * the context is still attached to a task (possibly current) | ||
2042 | * we cannot destroy it right now | ||
2043 | */ | ||
2044 | |||
2045 | /* | ||
2046 | * we must free the sampling buffer right here because | ||
2047 | * we cannot rely on it being cleaned up later by the | ||
2048 | * monitored task. It is not possible to free vmalloc'ed | ||
2049 | * memory in pfm_load_regs(). Instead, we remove the buffer | ||
2050 | * now. Should there be a subsequent PMU overflow originally | ||
2051 | * meant for sampling, it will be converted to spurious | ||
2052 | * and that's fine because the monitoring tool is gone anyway. | ||
2053 | */ | ||
2054 | if (ctx->ctx_smpl_hdr) { | ||
2055 | smpl_buf_addr = ctx->ctx_smpl_hdr; | ||
2056 | smpl_buf_size = ctx->ctx_smpl_size; | ||
2057 | /* no more sampling */ | ||
2058 | ctx->ctx_smpl_hdr = NULL; | ||
2059 | ctx->ctx_fl_is_sampling = 0; | ||
2060 | } | ||
2061 | |||
2062 | DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n", | ||
2063 | state, | ||
2064 | free_possible, | ||
2065 | smpl_buf_addr, | ||
2066 | smpl_buf_size)); | ||
2067 | |||
2068 | if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt); | ||
2069 | |||
2070 | /* | ||
2071 | * if UNLOADED, the session has already been unreserved. | ||
2072 | */ | ||
2073 | if (state == PFM_CTX_ZOMBIE) { | ||
2074 | pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu); | ||
2075 | } | ||
2076 | |||
2077 | /* | ||
2078 | * disconnect file descriptor from context must be done | ||
2079 | * before we unlock. | ||
2080 | */ | ||
2081 | filp->private_data = NULL; | ||
2082 | |||
2083 | /* | ||
2084 | * if we free on the spot, the context is now completely unreachable | ||
2085 | * from the caller's side. The monitored task side is also cut, so we | ||
2086 | * can free it safely. | ||
2087 | * | ||
2088 | * If we have a deferred free, only the caller side is disconnected. | ||
2089 | */ | ||
2090 | UNPROTECT_CTX(ctx, flags); | ||
2091 | |||
2092 | /* | ||
2093 | * All memory free operations (especially for vmalloc'ed memory) | ||
2094 | * MUST be done with interrupts ENABLED. | ||
2095 | */ | ||
2096 | if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size); | ||
2097 | |||
2098 | /* | ||
2099 | * return the memory used by the context | ||
2100 | */ | ||
2101 | if (free_possible) pfm_context_free(ctx); | ||
2102 | |||
2103 | return 0; | ||
2104 | } | ||
2105 | |||
2106 | static int | ||
2107 | pfm_no_open(struct inode *irrelevant, struct file *dontcare) | ||
2108 | { | ||
2109 | DPRINT(("pfm_no_open called\n")); | ||
2110 | return -ENXIO; | ||
2111 | } | ||
2112 | |||
2113 | |||
2114 | |||
2115 | static struct file_operations pfm_file_ops = { | ||
2116 | .llseek = no_llseek, | ||
2117 | .read = pfm_read, | ||
2118 | .write = pfm_write, | ||
2119 | .poll = pfm_poll, | ||
2120 | .ioctl = pfm_ioctl, | ||
2121 | .open = pfm_no_open, /* special open code to disallow open via /proc */ | ||
2122 | .fasync = pfm_fasync, | ||
2123 | .release = pfm_close, | ||
2124 | .flush = pfm_flush | ||
2125 | }; | ||
2126 | |||
2127 | static int | ||
2128 | pfmfs_delete_dentry(struct dentry *dentry) | ||
2129 | { | ||
2130 | return 1; | ||
2131 | } | ||
2132 | |||
2133 | static struct dentry_operations pfmfs_dentry_operations = { | ||
2134 | .d_delete = pfmfs_delete_dentry, | ||
2135 | }; | ||
2136 | |||
2137 | |||
2138 | static int | ||
2139 | pfm_alloc_fd(struct file **cfile) | ||
2140 | { | ||
2141 | int fd, ret = 0; | ||
2142 | struct file *file = NULL; | ||
2143 | struct inode * inode; | ||
2144 | char name[32]; | ||
2145 | struct qstr this; | ||
2146 | |||
2147 | fd = get_unused_fd(); | ||
2148 | if (fd < 0) return -ENFILE; | ||
2149 | |||
2150 | ret = -ENFILE; | ||
2151 | |||
2152 | file = get_empty_filp(); | ||
2153 | if (!file) goto out; | ||
2154 | |||
2155 | /* | ||
2156 | * allocate a new inode | ||
2157 | */ | ||
2158 | inode = new_inode(pfmfs_mnt->mnt_sb); | ||
2159 | if (!inode) goto out; | ||
2160 | |||
2161 | DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode)); | ||
2162 | |||
2163 | inode->i_mode = S_IFCHR|S_IRUGO; | ||
2164 | inode->i_uid = current->fsuid; | ||
2165 | inode->i_gid = current->fsgid; | ||
2166 | |||
2167 | sprintf(name, "[%lu]", inode->i_ino); | ||
2168 | this.name = name; | ||
2169 | this.len = strlen(name); | ||
2170 | this.hash = inode->i_ino; | ||
2171 | |||
2172 | ret = -ENOMEM; | ||
2173 | |||
2174 | /* | ||
2175 | * allocate a new dcache entry | ||
2176 | */ | ||
2177 | file->f_dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this); | ||
2178 | if (!file->f_dentry) goto out; | ||
2179 | |||
2180 | file->f_dentry->d_op = &pfmfs_dentry_operations; | ||
2181 | |||
2182 | d_add(file->f_dentry, inode); | ||
2183 | file->f_vfsmnt = mntget(pfmfs_mnt); | ||
2184 | file->f_mapping = inode->i_mapping; | ||
2185 | |||
2186 | file->f_op = &pfm_file_ops; | ||
2187 | file->f_mode = FMODE_READ; | ||
2188 | file->f_flags = O_RDONLY; | ||
2189 | file->f_pos = 0; | ||
2190 | |||
2191 | /* | ||
2192 | * may have to delay until context is attached? | ||
2193 | */ | ||
2194 | fd_install(fd, file); | ||
2195 | |||
2196 | /* | ||
2197 | * the file structure we will use | ||
2198 | */ | ||
2199 | *cfile = file; | ||
2200 | |||
2201 | return fd; | ||
2202 | out: | ||
2203 | if (file) put_filp(file); | ||
2204 | put_unused_fd(fd); | ||
2205 | return ret; | ||
2206 | } | ||
2207 | |||
2208 | static void | ||
2209 | pfm_free_fd(int fd, struct file *file) | ||
2210 | { | ||
2211 | struct files_struct *files = current->files; | ||
2212 | |||
2213 | /* | ||
2214 | * there is no fd_uninstall(), so we do it here | ||
2215 | */ | ||
2216 | spin_lock(&files->file_lock); | ||
2217 | files->fd[fd] = NULL; | ||
2218 | spin_unlock(&files->file_lock); | ||
2219 | |||
2220 | if (file) put_filp(file); | ||
2221 | put_unused_fd(fd); | ||
2222 | } | ||
2223 | |||
2224 | static int | ||
2225 | pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size) | ||
2226 | { | ||
2227 | DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size)); | ||
2228 | |||
2229 | while (size > 0) { | ||
2230 | unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT; | ||
2231 | |||
2232 | |||
2233 | if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY)) | ||
2234 | return -ENOMEM; | ||
2235 | |||
2236 | addr += PAGE_SIZE; | ||
2237 | buf += PAGE_SIZE; | ||
2238 | size -= PAGE_SIZE; | ||
2239 | } | ||
2240 | return 0; | ||
2241 | } | ||
2242 | |||
2243 | /* | ||
2244 | * allocates a sampling buffer and remaps it into the user address space of the task | ||
2245 | */ | ||
2246 | static int | ||
2247 | pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr) | ||
2248 | { | ||
2249 | struct mm_struct *mm = task->mm; | ||
2250 | struct vm_area_struct *vma = NULL; | ||
2251 | unsigned long size; | ||
2252 | void *smpl_buf; | ||
2253 | |||
2254 | |||
2255 | /* | ||
2256 | * the fixed header + requested size and align to page boundary | ||
2257 | */ | ||
2258 | size = PAGE_ALIGN(rsize); | ||
2259 | |||
2260 | DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size)); | ||
2261 | |||
2262 | /* | ||
2263 | * check requested size to avoid Denial-of-service attacks | ||
2264 | * XXX: may have to refine this test | ||
2265 | * Check against address space limit. | ||
2266 | * | ||
2267 | * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur) | ||
2268 | * return -ENOMEM; | ||
2269 | */ | ||
2270 | if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) | ||
2271 | return -ENOMEM; | ||
2272 | |||
2273 | /* | ||
2274 | * We do the easy to undo allocations first. | ||
2275 | * | ||
2276 | * pfm_rvmalloc() clears the buffer, so there is no leak | ||
2277 | */ | ||
2278 | smpl_buf = pfm_rvmalloc(size); | ||
2279 | if (smpl_buf == NULL) { | ||
2280 | DPRINT(("Can't allocate sampling buffer\n")); | ||
2281 | return -ENOMEM; | ||
2282 | } | ||
2283 | |||
2284 | DPRINT(("smpl_buf @%p\n", smpl_buf)); | ||
2285 | |||
2286 | /* allocate vma */ | ||
2287 | vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); | ||
2288 | if (!vma) { | ||
2289 | DPRINT(("Cannot allocate vma\n")); | ||
2290 | goto error_kmem; | ||
2291 | } | ||
2292 | memset(vma, 0, sizeof(*vma)); | ||
2293 | |||
2294 | /* | ||
2295 | * partially initialize the vma for the sampling buffer | ||
2296 | */ | ||
2297 | vma->vm_mm = mm; | ||
2298 | vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED; | ||
2299 | vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */ | ||
2300 | |||
2301 | /* | ||
2302 | * Now we have everything we need and we can initialize | ||
2303 | * and connect all the data structures | ||
2304 | */ | ||
2305 | |||
2306 | ctx->ctx_smpl_hdr = smpl_buf; | ||
2307 | ctx->ctx_smpl_size = size; /* aligned size */ | ||
2308 | |||
2309 | /* | ||
2310 | * Let's do the difficult operations next. | ||
2311 | * | ||
2312 | * now we atomically find some area in the address space and | ||
2313 | * remap the buffer in it. | ||
2314 | */ | ||
2315 | down_write(&task->mm->mmap_sem); | ||
2316 | |||
2317 | /* find some free area in address space, must have mmap sem held */ | ||
2318 | vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0); | ||
2319 | if (vma->vm_start == 0UL) { | ||
2320 | DPRINT(("Cannot find unmapped area for size %ld\n", size)); | ||
2321 | up_write(&task->mm->mmap_sem); | ||
2322 | goto error; | ||
2323 | } | ||
2324 | vma->vm_end = vma->vm_start + size; | ||
2325 | vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; | ||
2326 | |||
2327 | DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start)); | ||
2328 | |||
2329 | /* can only be applied to current task, need to have the mm semaphore held when called */ | ||
2330 | if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) { | ||
2331 | DPRINT(("Can't remap buffer\n")); | ||
2332 | up_write(&task->mm->mmap_sem); | ||
2333 | goto error; | ||
2334 | } | ||
2335 | |||
2336 | /* | ||
2337 | * now insert the vma in the vm list for the process, must be | ||
2338 | * done with mmap lock held | ||
2339 | */ | ||
2340 | insert_vm_struct(mm, vma); | ||
2341 | |||
2342 | mm->total_vm += size >> PAGE_SHIFT; | ||
2343 | vm_stat_account(vma); | ||
2344 | up_write(&task->mm->mmap_sem); | ||
2345 | |||
2346 | /* | ||
2347 | * keep track of user level virtual address | ||
2348 | */ | ||
2349 | ctx->ctx_smpl_vaddr = (void *)vma->vm_start; | ||
2350 | *(unsigned long *)user_vaddr = vma->vm_start; | ||
2351 | |||
2352 | return 0; | ||
2353 | |||
2354 | error: | ||
2355 | kmem_cache_free(vm_area_cachep, vma); | ||
2356 | error_kmem: | ||
2357 | pfm_rvfree(smpl_buf, size); | ||
2358 | |||
2359 | return -ENOMEM; | ||
2360 | } | ||
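/*
 * Editor's note (illustration only): the allocation above is bounded by the
 * caller's RLIMIT_MEMLOCK, so a tool that requests a large sampling buffer
 * may have to raise that limit before creating the context. A minimal
 * user-side sketch, assuming the process has the privilege to raise its
 * limit; standard POSIX calls only.
 */
#if 0
#include <sys/resource.h>

static int allow_big_smpl_buffer(unsigned long bytes)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MEMLOCK, &rl)) return -1;
	if (rl.rlim_cur < bytes) {
		rl.rlim_cur = bytes;
		if (rl.rlim_max < bytes) rl.rlim_max = bytes;
		return setrlimit(RLIMIT_MEMLOCK, &rl);
	}
	return 0;
}
#endif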
2361 | |||
2362 | /* | ||
2363 | * XXX: do something better here | ||
2364 | */ | ||
2365 | static int | ||
2366 | pfm_bad_permissions(struct task_struct *task) | ||
2367 | { | ||
2368 | /* inspired by ptrace_attach() */ | ||
2369 | DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n", | ||
2370 | current->uid, | ||
2371 | current->gid, | ||
2372 | task->euid, | ||
2373 | task->suid, | ||
2374 | task->uid, | ||
2375 | task->egid, | ||
2376 | task->sgid)); | ||
2377 | |||
2378 | return ((current->uid != task->euid) | ||
2379 | || (current->uid != task->suid) | ||
2380 | || (current->uid != task->uid) | ||
2381 | || (current->gid != task->egid) | ||
2382 | || (current->gid != task->sgid) | ||
2383 | || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE); | ||
2384 | } | ||
2385 | |||
2386 | static int | ||
2387 | pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx) | ||
2388 | { | ||
2389 | int ctx_flags; | ||
2390 | |||
2391 | /* valid signal */ | ||
2392 | |||
2393 | ctx_flags = pfx->ctx_flags; | ||
2394 | |||
2395 | if (ctx_flags & PFM_FL_SYSTEM_WIDE) { | ||
2396 | |||
2397 | /* | ||
2398 | * cannot block in this mode | ||
2399 | */ | ||
2400 | if (ctx_flags & PFM_FL_NOTIFY_BLOCK) { | ||
2401 | DPRINT(("cannot use blocking mode when in system wide monitoring\n")); | ||
2402 | return -EINVAL; | ||
2403 | } | ||
2404 | } else { | ||
2405 | } | ||
2406 | /* probably more to add here */ | ||
2407 | |||
2408 | return 0; | ||
2409 | } | ||
2410 | |||
2411 | static int | ||
2412 | pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags, | ||
2413 | unsigned int cpu, pfarg_context_t *arg) | ||
2414 | { | ||
2415 | pfm_buffer_fmt_t *fmt = NULL; | ||
2416 | unsigned long size = 0UL; | ||
2417 | void *uaddr = NULL; | ||
2418 | void *fmt_arg = NULL; | ||
2419 | int ret = 0; | ||
2420 | #define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1) | ||
2421 | |||
2422 | /* invoke and lock buffer format, if found */ | ||
2423 | fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id); | ||
2424 | if (fmt == NULL) { | ||
2425 | DPRINT(("[%d] cannot find buffer format\n", task->pid)); | ||
2426 | return -EINVAL; | ||
2427 | } | ||
2428 | |||
2429 | /* | ||
2430 | * buffer argument MUST be contiguous to pfarg_context_t | ||
2431 | */ | ||
2432 | if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg); | ||
2433 | |||
2434 | ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg); | ||
2435 | |||
2436 | DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret)); | ||
2437 | |||
2438 | if (ret) goto error; | ||
2439 | |||
2440 | /* link buffer format and context */ | ||
2441 | ctx->ctx_buf_fmt = fmt; | ||
2442 | |||
2443 | /* | ||
2444 | * check if buffer format wants to use perfmon buffer allocation/mapping service | ||
2445 | */ | ||
2446 | ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size); | ||
2447 | if (ret) goto error; | ||
2448 | |||
2449 | if (size) { | ||
2450 | /* | ||
2451 | * buffer is always remapped into the caller's address space | ||
2452 | */ | ||
2453 | ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr); | ||
2454 | if (ret) goto error; | ||
2455 | |||
2456 | /* keep track of user address of buffer */ | ||
2457 | arg->ctx_smpl_vaddr = uaddr; | ||
2458 | } | ||
2459 | ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg); | ||
2460 | |||
2461 | error: | ||
2462 | return ret; | ||
2463 | } | ||
2464 | |||
2465 | static void | ||
2466 | pfm_reset_pmu_state(pfm_context_t *ctx) | ||
2467 | { | ||
2468 | int i; | ||
2469 | |||
2470 | /* | ||
2471 | * install reset values for PMC. | ||
2472 | */ | ||
2473 | for (i=1; PMC_IS_LAST(i) == 0; i++) { | ||
2474 | if (PMC_IS_IMPL(i) == 0) continue; | ||
2475 | ctx->ctx_pmcs[i] = PMC_DFL_VAL(i); | ||
2476 | DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i])); | ||
2477 | } | ||
2478 | /* | ||
2479 | * PMD registers are set to 0UL when the context is memset() | ||
2480 | */ | ||
2481 | |||
2482 | /* | ||
2483 | * On context switch restore, we must restore ALL pmc and ALL pmd even | ||
2484 | * when they are not actively used by the task. In UP, the incoming process | ||
2485 | * may otherwise pick up left over PMC, PMD state from the previous process. | ||
2486 | * As opposed to PMD, stale PMC can cause harm to the incoming | ||
2487 | * process because they may change what is being measured. | ||
2488 | * Therefore, we must systematically reinstall the entire | ||
2489 | * PMC state. In SMP, the same thing is possible on the | ||
2490 | * same CPU but also between 2 CPUs. | ||
2491 | * | ||
2492 | * The problem with PMD is information leaking especially | ||
2493 | * to user level when psr.sp=0 | ||
2494 | * | ||
2495 | * There is unfortunately no easy way to avoid this problem | ||
2496 | * on either UP or SMP. This definitively slows down the | ||
2497 | * pfm_load_regs() function. | ||
2498 | */ | ||
2499 | |||
2500 | /* | ||
2501 | * bitmask of all PMCs accessible to this context | ||
2502 | * | ||
2503 | * PMC0 is treated differently. | ||
2504 | */ | ||
2505 | ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1; | ||
2506 | |||
2507 | /* | ||
2508 | * bitmask of all PMDs that are accessible to this context | ||
2509 | */ | ||
2510 | ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0]; | ||
2511 | |||
2512 | DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0])); | ||
2513 | |||
2514 | /* | ||
2515 | * useful in case of re-enable after disable | ||
2516 | */ | ||
2517 | ctx->ctx_used_ibrs[0] = 0UL; | ||
2518 | ctx->ctx_used_dbrs[0] = 0UL; | ||
2519 | } | ||
2520 | |||
2521 | static int | ||
2522 | pfm_ctx_getsize(void *arg, size_t *sz) | ||
2523 | { | ||
2524 | pfarg_context_t *req = (pfarg_context_t *)arg; | ||
2525 | pfm_buffer_fmt_t *fmt; | ||
2526 | |||
2527 | *sz = 0; | ||
2528 | |||
2529 | if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0; | ||
2530 | |||
2531 | fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id); | ||
2532 | if (fmt == NULL) { | ||
2533 | DPRINT(("cannot find buffer format\n")); | ||
2534 | return -EINVAL; | ||
2535 | } | ||
2536 | /* get just enough to copy in user parameters */ | ||
2537 | *sz = fmt->fmt_arg_size; | ||
2538 | DPRINT(("arg_size=%lu\n", *sz)); | ||
2539 | |||
2540 | return 0; | ||
2541 | } | ||
2542 | |||
2543 | |||
2544 | |||
2545 | /* | ||
2546 | * cannot attach if : | ||
2547 | * - kernel task | ||
2548 | * - task not owned by caller | ||
2549 | * - task incompatible with context mode | ||
2550 | */ | ||
2551 | static int | ||
2552 | pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task) | ||
2553 | { | ||
2554 | /* | ||
2555 | * no kernel task or task not owned by caller | ||
2556 | */ | ||
2557 | if (task->mm == NULL) { | ||
2558 | DPRINT(("task [%d] has no memory context (kernel thread)\n", task->pid)); | ||
2559 | return -EPERM; | ||
2560 | } | ||
2561 | if (pfm_bad_permissions(task)) { | ||
2562 | DPRINT(("no permission to attach to [%d]\n", task->pid)); | ||
2563 | return -EPERM; | ||
2564 | } | ||
2565 | /* | ||
2566 | * cannot block in self-monitoring mode | ||
2567 | */ | ||
2568 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) { | ||
2569 | DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid)); | ||
2570 | return -EINVAL; | ||
2571 | } | ||
2572 | |||
2573 | if (task->exit_state == EXIT_ZOMBIE) { | ||
2574 | DPRINT(("cannot attach to zombie task [%d]\n", task->pid)); | ||
2575 | return -EBUSY; | ||
2576 | } | ||
2577 | |||
2578 | /* | ||
2579 | * always ok for self | ||
2580 | */ | ||
2581 | if (task == current) return 0; | ||
2582 | |||
2583 | if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) { | ||
2584 | DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state)); | ||
2585 | return -EBUSY; | ||
2586 | } | ||
2587 | /* | ||
2588 | * make sure the task is off any CPU | ||
2589 | */ | ||
2590 | wait_task_inactive(task); | ||
2591 | |||
2592 | /* more to come... */ | ||
2593 | |||
2594 | return 0; | ||
2595 | } | ||
2596 | |||
2597 | static int | ||
2598 | pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task) | ||
2599 | { | ||
2600 | struct task_struct *p = current; | ||
2601 | int ret; | ||
2602 | |||
2603 | /* XXX: need to add more checks here */ | ||
2604 | if (pid < 2) return -EPERM; | ||
2605 | |||
2606 | if (pid != current->pid) { | ||
2607 | |||
2608 | read_lock(&tasklist_lock); | ||
2609 | |||
2610 | p = find_task_by_pid(pid); | ||
2611 | |||
2612 | /* make sure task cannot go away while we operate on it */ | ||
2613 | if (p) get_task_struct(p); | ||
2614 | |||
2615 | read_unlock(&tasklist_lock); | ||
2616 | |||
2617 | if (p == NULL) return -ESRCH; | ||
2618 | } | ||
2619 | |||
2620 | ret = pfm_task_incompatible(ctx, p); | ||
2621 | if (ret == 0) { | ||
2622 | *task = p; | ||
2623 | } else if (p != current) { | ||
2624 | pfm_put_task(p); | ||
2625 | } | ||
2626 | return ret; | ||
2627 | } | ||
2628 | |||
2629 | |||
2630 | |||
2631 | static int | ||
2632 | pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
2633 | { | ||
2634 | pfarg_context_t *req = (pfarg_context_t *)arg; | ||
2635 | struct file *filp; | ||
2636 | int ctx_flags; | ||
2637 | int ret; | ||
2638 | |||
2639 | /* let's check the arguments first */ | ||
2640 | ret = pfarg_is_sane(current, req); | ||
2641 | if (ret < 0) return ret; | ||
2642 | |||
2643 | ctx_flags = req->ctx_flags; | ||
2644 | |||
2645 | ret = -ENOMEM; | ||
2646 | |||
2647 | ctx = pfm_context_alloc(); | ||
2648 | if (!ctx) goto error; | ||
2649 | |||
2650 | ret = pfm_alloc_fd(&filp); | ||
2651 | if (ret < 0) goto error_file; | ||
2652 | |||
2653 | req->ctx_fd = ctx->ctx_fd = ret; | ||
2654 | |||
2655 | /* | ||
2656 | * attach context to file | ||
2657 | */ | ||
2658 | filp->private_data = ctx; | ||
2659 | |||
2660 | /* | ||
2661 | * does the user want to sample? | ||
2662 | */ | ||
2663 | if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) { | ||
2664 | ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req); | ||
2665 | if (ret) goto buffer_error; | ||
2666 | } | ||
2667 | |||
2668 | /* | ||
2669 | * init context protection lock | ||
2670 | */ | ||
2671 | spin_lock_init(&ctx->ctx_lock); | ||
2672 | |||
2673 | /* | ||
2674 | * context is unloaded | ||
2675 | */ | ||
2676 | ctx->ctx_state = PFM_CTX_UNLOADED; | ||
2677 | |||
2678 | /* | ||
2679 | * initialization of context's flags | ||
2680 | */ | ||
2681 | ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0; | ||
2682 | ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0; | ||
2683 | ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */ | ||
2684 | ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0; | ||
2685 | /* | ||
2686 | * will move to set properties | ||
2687 | * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0; | ||
2688 | */ | ||
2689 | |||
2690 | /* | ||
2691 | * init restart semaphore to locked | ||
2692 | */ | ||
2693 | sema_init(&ctx->ctx_restart_sem, 0); | ||
2694 | |||
2695 | /* | ||
2696 | * activation is used in SMP only | ||
2697 | */ | ||
2698 | ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; | ||
2699 | SET_LAST_CPU(ctx, -1); | ||
2700 | |||
2701 | /* | ||
2702 | * initialize notification message queue | ||
2703 | */ | ||
2704 | ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0; | ||
2705 | init_waitqueue_head(&ctx->ctx_msgq_wait); | ||
2706 | init_waitqueue_head(&ctx->ctx_zombieq); | ||
2707 | |||
2708 | DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n", | ||
2709 | ctx, | ||
2710 | ctx_flags, | ||
2711 | ctx->ctx_fl_system, | ||
2712 | ctx->ctx_fl_block, | ||
2713 | ctx->ctx_fl_excl_idle, | ||
2714 | ctx->ctx_fl_no_msg, | ||
2715 | ctx->ctx_fd)); | ||
2716 | |||
2717 | /* | ||
2718 | * initialize soft PMU state | ||
2719 | */ | ||
2720 | pfm_reset_pmu_state(ctx); | ||
2721 | |||
2722 | return 0; | ||
2723 | |||
2724 | buffer_error: | ||
2725 | pfm_free_fd(ctx->ctx_fd, filp); | ||
2726 | |||
2727 | if (ctx->ctx_buf_fmt) { | ||
2728 | pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs); | ||
2729 | } | ||
2730 | error_file: | ||
2731 | pfm_context_free(ctx); | ||
2732 | |||
2733 | error: | ||
2734 | return ret; | ||
2735 | } | ||
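/*
 * Editor's note (illustration only, not part of the original source):
 * from user level, pfm_context_create() is reached through the perfmonctl()
 * system call with the PFM_CREATE_CONTEXT command. The sketch below assumes
 * a perfmonctl() wrapper is available (glibc may not provide one; libpfm or
 * syscall(2) can be used instead) and that the perfmon definitions are
 * visible via <perfmon/perfmon.h>, which is an assumed header path.
 */
#if 0
#include <string.h>
#include <perfmon/perfmon.h>	/* pfarg_context_t, PFM_CREATE_CONTEXT (assumed location) */

static int create_monitoring_context(void)
{
	pfarg_context_t ctx;

	memset(&ctx, 0, sizeof(ctx));
	/* per-task context, no sampling format: ctx_smpl_buf_id left as the null uuid */
	if (perfmonctl(0, PFM_CREATE_CONTEXT, &ctx, 1) == -1)
		return -1;

	/* the kernel returns the new descriptor in ctx_fd (see pfm_context_create()) */
	return ctx.ctx_fd;
}
#endif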
2736 | |||
2737 | static inline unsigned long | ||
2738 | pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset) | ||
2739 | { | ||
2740 | unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset; | ||
2741 | unsigned long new_seed, old_seed = reg->seed, mask = reg->mask; | ||
2742 | extern unsigned long carta_random32 (unsigned long seed); | ||
2743 | |||
2744 | if (reg->flags & PFM_REGFL_RANDOM) { | ||
2745 | new_seed = carta_random32(old_seed); | ||
2746 | val -= (old_seed & mask); /* counter values are negative numbers! */ | ||
2747 | if ((mask >> 32) != 0) | ||
2748 | /* construct a full 64-bit random value: */ | ||
2749 | new_seed |= carta_random32(old_seed >> 32) << 32; | ||
2750 | reg->seed = new_seed; | ||
2751 | } | ||
2752 | reg->lval = val; | ||
2753 | return val; | ||
2754 | } | ||
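/*
 * Editor's note (illustration only): perfmon counters count upward and
 * overflow when they wrap, which is why the comment above calls counter
 * values "negative numbers". A sampling period of N events is expressed by
 * priming the 64-bit virtualized counter with the two's complement of N.
 * A hedged user-side sketch of how the reset values are typically filled
 * in a pfarg_reg_t before PFM_WRITE_PMDS:
 */
#if 0
static void set_sampling_period(pfarg_reg_t *pd, unsigned long n_events)
{
	/* -n_events as an unsigned value: the counter overflows after n_events increments */
	pd->reg_value       = -n_events;	/* initial counter value */
	pd->reg_long_reset  = -n_events;	/* reset applied on explicit restart */
	pd->reg_short_reset = -n_events;	/* reset applied after an overflow */
}
#endif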
2755 | |||
2756 | static void | ||
2757 | pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset) | ||
2758 | { | ||
2759 | unsigned long mask = ovfl_regs[0]; | ||
2760 | unsigned long reset_others = 0UL; | ||
2761 | unsigned long val; | ||
2762 | int i; | ||
2763 | |||
2764 | /* | ||
2765 | * now restore reset value on sampling overflowed counters | ||
2766 | */ | ||
2767 | mask >>= PMU_FIRST_COUNTER; | ||
2768 | for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) { | ||
2769 | |||
2770 | if ((mask & 0x1UL) == 0UL) continue; | ||
2771 | |||
2772 | ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); | ||
2773 | reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; | ||
2774 | |||
2775 | DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val)); | ||
2776 | } | ||
2777 | |||
2778 | /* | ||
2779 | * Now take care of resetting the other registers | ||
2780 | */ | ||
2781 | for(i = 0; reset_others; i++, reset_others >>= 1) { | ||
2782 | |||
2783 | if ((reset_others & 0x1) == 0) continue; | ||
2784 | |||
2785 | ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset); | ||
2786 | |||
2787 | DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n", | ||
2788 | is_long_reset ? "long" : "short", i, val)); | ||
2789 | } | ||
2790 | } | ||
2791 | |||
2792 | static void | ||
2793 | pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset) | ||
2794 | { | ||
2795 | unsigned long mask = ovfl_regs[0]; | ||
2796 | unsigned long reset_others = 0UL; | ||
2797 | unsigned long val; | ||
2798 | int i; | ||
2799 | |||
2800 | DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset)); | ||
2801 | |||
2802 | if (ctx->ctx_state == PFM_CTX_MASKED) { | ||
2803 | pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset); | ||
2804 | return; | ||
2805 | } | ||
2806 | |||
2807 | /* | ||
2808 | * now restore reset value on sampling overflowed counters | ||
2809 | */ | ||
2810 | mask >>= PMU_FIRST_COUNTER; | ||
2811 | for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) { | ||
2812 | |||
2813 | if ((mask & 0x1UL) == 0UL) continue; | ||
2814 | |||
2815 | val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); | ||
2816 | reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; | ||
2817 | |||
2818 | DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val)); | ||
2819 | |||
2820 | pfm_write_soft_counter(ctx, i, val); | ||
2821 | } | ||
2822 | |||
2823 | /* | ||
2824 | * Now take care of resetting the other registers | ||
2825 | */ | ||
2826 | for(i = 0; reset_others; i++, reset_others >>= 1) { | ||
2827 | |||
2828 | if ((reset_others & 0x1) == 0) continue; | ||
2829 | |||
2830 | val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset); | ||
2831 | |||
2832 | if (PMD_IS_COUNTING(i)) { | ||
2833 | pfm_write_soft_counter(ctx, i, val); | ||
2834 | } else { | ||
2835 | ia64_set_pmd(i, val); | ||
2836 | } | ||
2837 | DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n", | ||
2838 | is_long_reset ? "long" : "short", i, val)); | ||
2839 | } | ||
2840 | ia64_srlz_d(); | ||
2841 | } | ||
2842 | |||
2843 | static int | ||
2844 | pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
2845 | { | ||
2846 | struct thread_struct *thread = NULL; | ||
2847 | struct task_struct *task; | ||
2848 | pfarg_reg_t *req = (pfarg_reg_t *)arg; | ||
2849 | unsigned long value, pmc_pm; | ||
2850 | unsigned long smpl_pmds, reset_pmds, impl_pmds; | ||
2851 | unsigned int cnum, reg_flags, flags, pmc_type; | ||
2852 | int i, can_access_pmu = 0, is_loaded, is_system, expert_mode; | ||
2853 | int is_monitor, is_counting, state; | ||
2854 | int ret = -EINVAL; | ||
2855 | pfm_reg_check_t wr_func; | ||
2856 | #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z)) | ||
2857 | |||
2858 | state = ctx->ctx_state; | ||
2859 | is_loaded = state == PFM_CTX_LOADED ? 1 : 0; | ||
2860 | is_system = ctx->ctx_fl_system; | ||
2861 | task = ctx->ctx_task; | ||
2862 | impl_pmds = pmu_conf->impl_pmds[0]; | ||
2863 | |||
2864 | if (state == PFM_CTX_ZOMBIE) return -EINVAL; | ||
2865 | |||
2866 | if (is_loaded) { | ||
2867 | thread = &task->thread; | ||
2868 | /* | ||
2869 | * In system wide and when the context is loaded, access can only happen | ||
2870 | * when the caller is running on the CPU being monitored by the session. | ||
2871 | * It does not have to be the owner (ctx_task) of the context per se. | ||
2872 | */ | ||
2873 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | ||
2874 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | ||
2875 | return -EBUSY; | ||
2876 | } | ||
2877 | can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; | ||
2878 | } | ||
2879 | expert_mode = pfm_sysctl.expert_mode; | ||
2880 | |||
2881 | for (i = 0; i < count; i++, req++) { | ||
2882 | |||
2883 | cnum = req->reg_num; | ||
2884 | reg_flags = req->reg_flags; | ||
2885 | value = req->reg_value; | ||
2886 | smpl_pmds = req->reg_smpl_pmds[0]; | ||
2887 | reset_pmds = req->reg_reset_pmds[0]; | ||
2888 | flags = 0; | ||
2889 | |||
2890 | |||
2891 | if (cnum >= PMU_MAX_PMCS) { | ||
2892 | DPRINT(("pmc%u is invalid\n", cnum)); | ||
2893 | goto error; | ||
2894 | } | ||
2895 | |||
2896 | pmc_type = pmu_conf->pmc_desc[cnum].type; | ||
2897 | pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1; | ||
2898 | is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0; | ||
2899 | is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0; | ||
2900 | |||
2901 | /* | ||
2902 | * we reject all non implemented PMC as well | ||
2903 | * as attempts to modify PMC[0-3] which are used | ||
2904 | * as status registers by the PMU | ||
2905 | */ | ||
2906 | if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) { | ||
2907 | DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type)); | ||
2908 | goto error; | ||
2909 | } | ||
2910 | wr_func = pmu_conf->pmc_desc[cnum].write_check; | ||
2911 | /* | ||
2912 | * If the PMC is a monitor, then if the value is not the default: | ||
2913 | * - system-wide session: PMCx.pm=1 (privileged monitor) | ||
2914 | * - per-task : PMCx.pm=0 (user monitor) | ||
2915 | */ | ||
2916 | if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) { | ||
2917 | DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n", | ||
2918 | cnum, | ||
2919 | pmc_pm, | ||
2920 | is_system)); | ||
2921 | goto error; | ||
2922 | } | ||
2923 | |||
2924 | if (is_counting) { | ||
2925 | /* | ||
2926 | * enforce generation of overflow interrupt. Necessary on all | ||
2927 | * CPUs. | ||
2928 | */ | ||
2929 | value |= 1 << PMU_PMC_OI; | ||
2930 | |||
2931 | if (reg_flags & PFM_REGFL_OVFL_NOTIFY) { | ||
2932 | flags |= PFM_REGFL_OVFL_NOTIFY; | ||
2933 | } | ||
2934 | |||
2935 | if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM; | ||
2936 | |||
2937 | /* verify validity of smpl_pmds */ | ||
2938 | if ((smpl_pmds & impl_pmds) != smpl_pmds) { | ||
2939 | DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum)); | ||
2940 | goto error; | ||
2941 | } | ||
2942 | |||
2943 | /* verify validity of reset_pmds */ | ||
2944 | if ((reset_pmds & impl_pmds) != reset_pmds) { | ||
2945 | DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum)); | ||
2946 | goto error; | ||
2947 | } | ||
2948 | } else { | ||
2949 | if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) { | ||
2950 | DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum)); | ||
2951 | goto error; | ||
2952 | } | ||
2953 | /* eventid on non-counting monitors are ignored */ | ||
2954 | } | ||
2955 | |||
2956 | /* | ||
2957 | * execute write checker, if any | ||
2958 | */ | ||
2959 | if (likely(expert_mode == 0 && wr_func)) { | ||
2960 | ret = (*wr_func)(task, ctx, cnum, &value, regs); | ||
2961 | if (ret) goto error; | ||
2962 | ret = -EINVAL; | ||
2963 | } | ||
2964 | |||
2965 | /* | ||
2966 | * no error on this register | ||
2967 | */ | ||
2968 | PFM_REG_RETFLAG_SET(req->reg_flags, 0); | ||
2969 | |||
2970 | /* | ||
2971 | * Now we commit the changes to the software state | ||
2972 | */ | ||
2973 | |||
2974 | /* | ||
2975 | * update overflow information | ||
2976 | */ | ||
2977 | if (is_counting) { | ||
2978 | /* | ||
2979 | * full flag update each time a register is programmed | ||
2980 | */ | ||
2981 | ctx->ctx_pmds[cnum].flags = flags; | ||
2982 | |||
2983 | ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds; | ||
2984 | ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds; | ||
2985 | ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid; | ||
2986 | |||
2987 | /* | ||
2988 | * Mark all PMDS to be accessed as used. | ||
2989 | * | ||
2990 | * We do not keep track of PMC because we have to | ||
2991 | * systematically restore ALL of them. | ||
2992 | * | ||
2993 | * We do not update the used_monitors mask, because | ||
2994 | * if we have not programmed them, then they will be in | ||
2995 | * a quiescent state, therefore we will not need to | ||
2996 | * mask/restore them when the context is MASKED. | ||
2997 | */ | ||
2998 | CTX_USED_PMD(ctx, reset_pmds); | ||
2999 | CTX_USED_PMD(ctx, smpl_pmds); | ||
3000 | /* | ||
3001 | * make sure we do not try to reset on | ||
3002 | * restart because we have established new values | ||
3003 | */ | ||
3004 | if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum; | ||
3005 | } | ||
3006 | /* | ||
3007 | * Needed in case the user does not initialize the equivalent | ||
3008 | * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no | ||
3009 | * possible leak here. | ||
3010 | */ | ||
3011 | CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]); | ||
3012 | |||
3013 | /* | ||
3014 | * keep track of the monitor PMC that we are using. | ||
3015 | * we save the value of the pmc in ctx_pmcs[] and if | ||
3016 | * the monitoring is not stopped for the context we also | ||
3017 | * place it in the saved state area so that it will be | ||
3018 | * picked up later by the context switch code. | ||
3019 | * | ||
3020 | * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs(). | ||
3021 | * | ||
3022 | * The value in thread->pmcs[] may be modified on overflow, i.e., when | ||
3023 | * monitoring needs to be stopped. | ||
3024 | */ | ||
3025 | if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum); | ||
3026 | |||
3027 | /* | ||
3028 | * update context state | ||
3029 | */ | ||
3030 | ctx->ctx_pmcs[cnum] = value; | ||
3031 | |||
3032 | if (is_loaded) { | ||
3033 | /* | ||
3034 | * write thread state | ||
3035 | */ | ||
3036 | if (is_system == 0) thread->pmcs[cnum] = value; | ||
3037 | |||
3038 | /* | ||
3039 | * write hardware register if we can | ||
3040 | */ | ||
3041 | if (can_access_pmu) { | ||
3042 | ia64_set_pmc(cnum, value); | ||
3043 | } | ||
3044 | #ifdef CONFIG_SMP | ||
3045 | else { | ||
3046 | /* | ||
3047 | * per-task SMP only here | ||
3048 | * | ||
3049 | * we are guaranteed that the task is not running on the other CPU, | ||
3050 | * we indicate that this PMC will need to be reloaded if the task | ||
3051 | * is rescheduled on the CPU it ran last on. | ||
3052 | */ | ||
3053 | ctx->ctx_reload_pmcs[0] |= 1UL << cnum; | ||
3054 | } | ||
3055 | #endif | ||
3056 | } | ||
3057 | |||
3058 | DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n", | ||
3059 | cnum, | ||
3060 | value, | ||
3061 | is_loaded, | ||
3062 | can_access_pmu, | ||
3063 | flags, | ||
3064 | ctx->ctx_all_pmcs[0], | ||
3065 | ctx->ctx_used_pmds[0], | ||
3066 | ctx->ctx_pmds[cnum].eventid, | ||
3067 | smpl_pmds, | ||
3068 | reset_pmds, | ||
3069 | ctx->ctx_reload_pmcs[0], | ||
3070 | ctx->ctx_used_monitors[0], | ||
3071 | ctx->ctx_ovfl_regs[0])); | ||
3072 | } | ||
3073 | |||
3074 | /* | ||
3075 | * make sure the changes are visible | ||
3076 | */ | ||
3077 | if (can_access_pmu) ia64_srlz_d(); | ||
3078 | |||
3079 | return 0; | ||
3080 | error: | ||
3081 | PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); | ||
3082 | return ret; | ||
3083 | } | ||
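/*
 * Editor's note (illustration only): pfm_write_pmcs() is driven from user
 * level through perfmonctl() with the PFM_WRITE_PMCS command. The sketch
 * below programs one counting monitor with overflow notification enabled;
 * the actual PMC value (the event encoding) is PMU-specific and is normally
 * obtained from a library such as libpfm, so it is left as a parameter.
 * The perfmonctl() wrapper and the <perfmon/perfmon.h> path are assumptions.
 */
#if 0
#include <string.h>
#include <perfmon/perfmon.h>	/* pfarg_reg_t, PFM_WRITE_PMCS, PFM_REGFL_OVFL_NOTIFY */

static int program_counter(int ctx_fd, unsigned int pmc, unsigned long pmc_val)
{
	pfarg_reg_t pc;

	memset(&pc, 0, sizeof(pc));
	pc.reg_num   = pmc;
	pc.reg_value = pmc_val;			/* event encoding, e.g. from libpfm */
	pc.reg_flags = PFM_REGFL_OVFL_NOTIFY;	/* queue a message on overflow */

	return perfmonctl(ctx_fd, PFM_WRITE_PMCS, &pc, 1);
}
#endif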
3084 | |||
3085 | static int | ||
3086 | pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
3087 | { | ||
3088 | struct thread_struct *thread = NULL; | ||
3089 | struct task_struct *task; | ||
3090 | pfarg_reg_t *req = (pfarg_reg_t *)arg; | ||
3091 | unsigned long value, hw_value, ovfl_mask; | ||
3092 | unsigned int cnum; | ||
3093 | int i, can_access_pmu = 0, state; | ||
3094 | int is_counting, is_loaded, is_system, expert_mode; | ||
3095 | int ret = -EINVAL; | ||
3096 | pfm_reg_check_t wr_func; | ||
3097 | |||
3098 | |||
3099 | state = ctx->ctx_state; | ||
3100 | is_loaded = state == PFM_CTX_LOADED ? 1 : 0; | ||
3101 | is_system = ctx->ctx_fl_system; | ||
3102 | ovfl_mask = pmu_conf->ovfl_val; | ||
3103 | task = ctx->ctx_task; | ||
3104 | |||
3105 | if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL; | ||
3106 | |||
3107 | /* | ||
3108 | * on both UP and SMP, we can only write to the PMD when the task is | ||
3109 | * the owner of the local PMU. | ||
3110 | */ | ||
3111 | if (likely(is_loaded)) { | ||
3112 | thread = &task->thread; | ||
3113 | /* | ||
3114 | * In system wide and when the context is loaded, access can only happen | ||
3115 | * when the caller is running on the CPU being monitored by the session. | ||
3116 | * It does not have to be the owner (ctx_task) of the context per se. | ||
3117 | */ | ||
3118 | if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { | ||
3119 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | ||
3120 | return -EBUSY; | ||
3121 | } | ||
3122 | can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; | ||
3123 | } | ||
3124 | expert_mode = pfm_sysctl.expert_mode; | ||
3125 | |||
3126 | for (i = 0; i < count; i++, req++) { | ||
3127 | |||
3128 | cnum = req->reg_num; | ||
3129 | value = req->reg_value; | ||
3130 | |||
3131 | if (!PMD_IS_IMPL(cnum)) { | ||
3132 | DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum)); | ||
3133 | goto abort_mission; | ||
3134 | } | ||
3135 | is_counting = PMD_IS_COUNTING(cnum); | ||
3136 | wr_func = pmu_conf->pmd_desc[cnum].write_check; | ||
3137 | |||
3138 | /* | ||
3139 | * execute write checker, if any | ||
3140 | */ | ||
3141 | if (unlikely(expert_mode == 0 && wr_func)) { | ||
3142 | unsigned long v = value; | ||
3143 | |||
3144 | ret = (*wr_func)(task, ctx, cnum, &v, regs); | ||
3145 | if (ret) goto abort_mission; | ||
3146 | |||
3147 | value = v; | ||
3148 | ret = -EINVAL; | ||
3149 | } | ||
3150 | |||
3151 | /* | ||
3152 | * no error on this register | ||
3153 | */ | ||
3154 | PFM_REG_RETFLAG_SET(req->reg_flags, 0); | ||
3155 | |||
3156 | /* | ||
3157 | * now commit changes to software state | ||
3158 | */ | ||
3159 | hw_value = value; | ||
3160 | |||
3161 | /* | ||
3162 | * update virtualized (64bits) counter | ||
3163 | */ | ||
3164 | if (is_counting) { | ||
3165 | /* | ||
3166 | * write context state | ||
3167 | */ | ||
3168 | ctx->ctx_pmds[cnum].lval = value; | ||
3169 | |||
3170 | /* | ||
3171 | * when the context is loaded we use the split value | ||
3172 | */ | ||
3173 | if (is_loaded) { | ||
3174 | hw_value = value & ovfl_mask; | ||
3175 | value = value & ~ovfl_mask; | ||
3176 | } | ||
3177 | } | ||
3178 | /* | ||
3179 | * update reset values (not just for counters) | ||
3180 | */ | ||
3181 | ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset; | ||
3182 | ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset; | ||
3183 | |||
3184 | /* | ||
3185 | * update randomization parameters (not just for counters) | ||
3186 | */ | ||
3187 | ctx->ctx_pmds[cnum].seed = req->reg_random_seed; | ||
3188 | ctx->ctx_pmds[cnum].mask = req->reg_random_mask; | ||
3189 | |||
3190 | /* | ||
3191 | * update context value | ||
3192 | */ | ||
3193 | ctx->ctx_pmds[cnum].val = value; | ||
3194 | |||
3195 | /* | ||
3196 | * Keep track of what we use | ||
3197 | * | ||
3198 | * We do not keep track of PMC because we have to | ||
3199 | * systematically restore ALL of them. | ||
3200 | */ | ||
3201 | CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum)); | ||
3202 | |||
3203 | /* | ||
3204 | * mark this PMD register used as well | ||
3205 | */ | ||
3206 | CTX_USED_PMD(ctx, RDEP(cnum)); | ||
3207 | |||
3208 | /* | ||
3209 | * make sure we do not try to reset on | ||
3210 | * restart because we have established new values | ||
3211 | */ | ||
3212 | if (is_counting && state == PFM_CTX_MASKED) { | ||
3213 | ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum); | ||
3214 | } | ||
3215 | |||
3216 | if (is_loaded) { | ||
3217 | /* | ||
3218 | * write thread state | ||
3219 | */ | ||
3220 | if (is_system == 0) thread->pmds[cnum] = hw_value; | ||
3221 | |||
3222 | /* | ||
3223 | * write hardware register if we can | ||
3224 | */ | ||
3225 | if (can_access_pmu) { | ||
3226 | ia64_set_pmd(cnum, hw_value); | ||
3227 | } else { | ||
3228 | #ifdef CONFIG_SMP | ||
3229 | /* | ||
3230 | * we are guaranteed that the task is not running on the other CPU, | ||
3231 | * we indicate that this PMD will need to be reloaded if the task | ||
3232 | * is rescheduled on the CPU it ran last on. | ||
3233 | */ | ||
3234 | ctx->ctx_reload_pmds[0] |= 1UL << cnum; | ||
3235 | #endif | ||
3236 | } | ||
3237 | } | ||
3238 | |||
3239 | DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx " | ||
3240 | "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n", | ||
3241 | cnum, | ||
3242 | value, | ||
3243 | is_loaded, | ||
3244 | can_access_pmu, | ||
3245 | hw_value, | ||
3246 | ctx->ctx_pmds[cnum].val, | ||
3247 | ctx->ctx_pmds[cnum].short_reset, | ||
3248 | ctx->ctx_pmds[cnum].long_reset, | ||
3249 | PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N', | ||
3250 | ctx->ctx_pmds[cnum].seed, | ||
3251 | ctx->ctx_pmds[cnum].mask, | ||
3252 | ctx->ctx_used_pmds[0], | ||
3253 | ctx->ctx_pmds[cnum].reset_pmds[0], | ||
3254 | ctx->ctx_reload_pmds[0], | ||
3255 | ctx->ctx_all_pmds[0], | ||
3256 | ctx->ctx_ovfl_regs[0])); | ||
3257 | } | ||
3258 | |||
3259 | /* | ||
3260 | * make changes visible | ||
3261 | */ | ||
3262 | if (can_access_pmu) ia64_srlz_d(); | ||
3263 | |||
3264 | return 0; | ||
3265 | |||
3266 | abort_mission: | ||
3267 | /* | ||
3268 | * for now, we have only one possibility for error | ||
3269 | */ | ||
3270 | PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); | ||
3271 | return ret; | ||
3272 | } | ||
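| /* | ||
|  * Editorial usage sketch (not part of this commit, assuming the usual | ||
|  * PFM_WRITE_PMDS command macro): to set up overflow-based sampling, a | ||
|  * user-level tool typically programs a counting PMD through the request | ||
|  * fields handled above. The register number and period are illustrative. | ||
|  * | ||
|  *	pfarg_reg_t pd; | ||
|  * | ||
|  *	memset(&pd, 0, sizeof(pd)); | ||
|  *	pd.reg_num         = 4;			// hypothetical counting PMD | ||
|  *	pd.reg_value       = ~0UL - 100000 + 1;	// -period: overflow after 100000 events | ||
|  *	pd.reg_long_reset  = pd.reg_value;	// reload value on user-level restart | ||
|  *	pd.reg_short_reset = pd.reg_value;	// reload value on kernel-level reset | ||
|  *	pd.reg_random_seed = 12345;		// optional period randomization | ||
|  *	pd.reg_random_mask = 0xfff; | ||
|  *	perfmonctl(ctx_fd, PFM_WRITE_PMDS, &pd, 1); | ||
|  */ | ||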
3273 | |||
3274 | /* | ||
3275 | * Because of PROTECT_CONTEXT(), interrupts are masked while we are in this function. | ||
3276 | * Therefore we know we do not have to worry about the PMU overflow interrupt. If an | ||
3277 | * interrupt is delivered during the call, it will be kept pending until we leave, making | ||
3278 | * it appear as if it had been generated at the UNPROTECT_CONTEXT(). At least we are | ||
3279 | * guaranteed to return consistent data to the user, even if it may simply be stale. It is not | ||
3280 | * trivial to handle the overflow while inside the call because we may end up in | ||
3281 | * some module sampling-buffer code and cause deadlocks. | ||
3282 | */ | ||
3283 | static int | ||
3284 | pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
3285 | { | ||
3286 | struct thread_struct *thread = NULL; | ||
3287 | struct task_struct *task; | ||
3288 | unsigned long val = 0UL, lval, ovfl_mask, sval; | ||
3289 | pfarg_reg_t *req = (pfarg_reg_t *)arg; | ||
3290 | unsigned int cnum, reg_flags = 0; | ||
3291 | int i, can_access_pmu = 0, state; | ||
3292 | int is_loaded, is_system, is_counting, expert_mode; | ||
3293 | int ret = -EINVAL; | ||
3294 | pfm_reg_check_t rd_func; | ||
3295 | |||
3296 | /* | ||
3297 | * access is possible when loaded only for | ||
3298 | * self-monitoring tasks or in UP mode | ||
3299 | */ | ||
3300 | |||
3301 | state = ctx->ctx_state; | ||
3302 | is_loaded = state == PFM_CTX_LOADED ? 1 : 0; | ||
3303 | is_system = ctx->ctx_fl_system; | ||
3304 | ovfl_mask = pmu_conf->ovfl_val; | ||
3305 | task = ctx->ctx_task; | ||
3306 | |||
3307 | if (state == PFM_CTX_ZOMBIE) return -EINVAL; | ||
3308 | |||
3309 | if (likely(is_loaded)) { | ||
3310 | thread = &task->thread; | ||
3311 | /* | ||
3312 | * In system wide and when the context is loaded, access can only happen | ||
3313 | * when the caller is running on the CPU being monitored by the session. | ||
3314 | * It does not have to be the owner (ctx_task) of the context per se. | ||
3315 | */ | ||
3316 | if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { | ||
3317 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | ||
3318 | return -EBUSY; | ||
3319 | } | ||
3320 | /* | ||
3321 | * on UP, this can be true even when not self-monitoring | ||
3322 | */ | ||
3323 | can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; | ||
3324 | |||
3325 | if (can_access_pmu) ia64_srlz_d(); | ||
3326 | } | ||
3327 | expert_mode = pfm_sysctl.expert_mode; | ||
3328 | |||
3329 | DPRINT(("ld=%d apmu=%d ctx_state=%d\n", | ||
3330 | is_loaded, | ||
3331 | can_access_pmu, | ||
3332 | state)); | ||
3333 | |||
3334 | /* | ||
3335 | * on both UP and SMP, we can only read the PMD from the hardware register when | ||
3336 | * the task is the owner of the local PMU. | ||
3337 | */ | ||
3338 | |||
3339 | for (i = 0; i < count; i++, req++) { | ||
3340 | |||
3341 | cnum = req->reg_num; | ||
3342 | reg_flags = req->reg_flags; | ||
3343 | |||
3344 | if (unlikely(!PMD_IS_IMPL(cnum))) goto error; | ||
3345 | /* | ||
3346 | * we can only read the registers that we use. That includes | ||
3347 | * the ones we explicitly initialize AND the ones we want included | ||
3348 | * in the sampling buffer (smpl_regs). | ||
3349 | * | ||
3350 | * Having this restriction allows optimization in the ctxsw routine | ||
3351 | * without compromising security (leaks) | ||
3352 | */ | ||
3353 | if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error; | ||
3354 | |||
3355 | sval = ctx->ctx_pmds[cnum].val; | ||
3356 | lval = ctx->ctx_pmds[cnum].lval; | ||
3357 | is_counting = PMD_IS_COUNTING(cnum); | ||
3358 | |||
3359 | /* | ||
3360 | * If the task is not the current one, then we check if the | ||
3361 | * PMU state is still in the local live register due to lazy ctxsw. | ||
3362 | * If true, then we read directly from the registers. | ||
3363 | */ | ||
3364 | if (can_access_pmu){ | ||
3365 | val = ia64_get_pmd(cnum); | ||
3366 | } else { | ||
3367 | /* | ||
3368 | * context has been saved | ||
3369 | * if context is zombie, then task does not exist anymore. | ||
3370 | * In this case, we use the full value saved in the context (pfm_flush_regs()). | ||
3371 | */ | ||
3372 | val = is_loaded ? thread->pmds[cnum] : 0UL; | ||
3373 | } | ||
3374 | rd_func = pmu_conf->pmd_desc[cnum].read_check; | ||
3375 | |||
3376 | if (is_counting) { | ||
3377 | /* | ||
3378 | * XXX: need to check for overflow when loaded | ||
3379 | */ | ||
3380 | val &= ovfl_mask; | ||
3381 | val += sval; | ||
3382 | } | ||
3383 | |||
3384 | /* | ||
3385 | * execute read checker, if any | ||
3386 | */ | ||
3387 | if (unlikely(expert_mode == 0 && rd_func)) { | ||
3388 | unsigned long v = val; | ||
3389 | ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs); | ||
3390 | if (ret) goto error; | ||
3391 | val = v; | ||
3392 | ret = -EINVAL; | ||
3393 | } | ||
3394 | |||
3395 | PFM_REG_RETFLAG_SET(reg_flags, 0); | ||
3396 | |||
3397 | DPRINT(("pmd[%u]=0x%lx\n", cnum, val)); | ||
3398 | |||
3399 | /* | ||
3400 | * update the register return value; abort everything if a problem occurs during the copy. | ||
3401 | * we only modify the reg_flags field. Skipping the access check here is fine because | ||
3402 | * access has been verified upfront in sys_perfmonctl(). | ||
3403 | */ | ||
3404 | req->reg_value = val; | ||
3405 | req->reg_flags = reg_flags; | ||
3406 | req->reg_last_reset_val = lval; | ||
3407 | } | ||
3408 | |||
3409 | return 0; | ||
3410 | |||
3411 | error: | ||
3412 | PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); | ||
3413 | return ret; | ||
3414 | } | ||
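| /* | ||
|  * Worked example of the counter virtualization above (editorial; assumes a | ||
|  * PMU whose counters implement 47 bits, i.e. ovfl_val == (1UL << 47) - 1): | ||
|  * | ||
|  *   - pfm_write_pmds() with reg_value = (1UL << 47) + 5 stores | ||
|  *     hw_value = 5 in the hardware PMD and keeps the upper bits, | ||
|  *     ctx_pmds[].val = 1UL << 47, in software. | ||
|  *   - after the hardware counter advances to 0x123, pfm_read_pmds() | ||
|  *     returns (0x123 & ovfl_mask) + ctx_pmds[].val = (1UL << 47) + 0x123, | ||
|  *     i.e. a full 64-bit view of the counter. | ||
|  */ | ||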
3415 | |||
3416 | int | ||
3417 | pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) | ||
3418 | { | ||
3419 | pfm_context_t *ctx; | ||
3420 | |||
3421 | if (req == NULL) return -EINVAL; | ||
3422 | |||
3423 | ctx = GET_PMU_CTX(); | ||
3424 | |||
3425 | if (ctx == NULL) return -EINVAL; | ||
3426 | |||
3427 | /* | ||
3428 | * for now limit to current task, which is enough when calling | ||
3429 | * from overflow handler | ||
3430 | */ | ||
3431 | if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; | ||
3432 | |||
3433 | return pfm_write_pmcs(ctx, req, nreq, regs); | ||
3434 | } | ||
3435 | EXPORT_SYMBOL(pfm_mod_write_pmcs); | ||
3436 | |||
3437 | int | ||
3438 | pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) | ||
3439 | { | ||
3440 | pfm_context_t *ctx; | ||
3441 | |||
3442 | if (req == NULL) return -EINVAL; | ||
3443 | |||
3444 | ctx = GET_PMU_CTX(); | ||
3445 | |||
3446 | if (ctx == NULL) return -EINVAL; | ||
3447 | |||
3448 | /* | ||
3449 | * for now limit to current task, which is enough when calling | ||
3450 | * from overflow handler | ||
3451 | */ | ||
3452 | if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; | ||
3453 | |||
3454 | return pfm_read_pmds(ctx, req, nreq, regs); | ||
3455 | } | ||
3456 | EXPORT_SYMBOL(pfm_mod_read_pmds); | ||
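| /* | ||
|  * Editorial sketch (not part of this commit): a sampling-format module could, | ||
|  * in principle, drive the two helpers above from its overflow handler for the | ||
|  * current task. The handler name, register number, and PMC value below are | ||
|  * illustrative assumptions, not an existing interface. | ||
|  * | ||
|  *	static int my_fmt_adjust(struct pt_regs *regs) | ||
|  *	{ | ||
|  *		pfarg_reg_t rd, wr; | ||
|  * | ||
|  *		memset(&rd, 0, sizeof(rd)); | ||
|  *		rd.reg_num = 4;			// hypothetical counting PMD | ||
|  *		if (pfm_mod_read_pmds(current, &rd, 1, regs)) return -EINVAL; | ||
|  * | ||
|  *		memset(&wr, 0, sizeof(wr)); | ||
|  *		wr.reg_num   = 4;		// hypothetical monitor PMC | ||
|  *		wr.reg_value = 0;		// illustrative: disable this monitor | ||
|  *		return pfm_mod_write_pmcs(current, &wr, 1, regs); | ||
|  *	} | ||
|  */ | ||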
3457 | |||
3458 | /* | ||
3459 | * Only call this function when a process is trying to | ||
3460 | * write the debug registers (reading is always allowed) | ||
3461 | */ | ||
3462 | int | ||
3463 | pfm_use_debug_registers(struct task_struct *task) | ||
3464 | { | ||
3465 | pfm_context_t *ctx = task->thread.pfm_context; | ||
3466 | unsigned long flags; | ||
3467 | int ret = 0; | ||
3468 | |||
3469 | if (pmu_conf->use_rr_dbregs == 0) return 0; | ||
3470 | |||
3471 | DPRINT(("called for [%d]\n", task->pid)); | ||
3472 | |||
3473 | /* | ||
3474 | * do it only once | ||
3475 | */ | ||
3476 | if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0; | ||
3477 | |||
3478 | /* | ||
3479 | * Even on SMP, we do not need to use an atomic here because | ||
3480 | * the only way in is via ptrace() and this is possible only when the | ||
3481 | * process is stopped. Even in the case where the ctxsw out is not totally | ||
3482 | * completed by the time we come here, there is no way the 'stopped' process | ||
3483 | * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine. | ||
3484 | * So this is always safe. | ||
3485 | */ | ||
3486 | if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1; | ||
3487 | |||
3488 | LOCK_PFS(flags); | ||
3489 | |||
3490 | /* | ||
3491 | * We cannot allow setting breakpoints when system wide monitoring | ||
3492 | * sessions are using the debug registers. | ||
3493 | */ | ||
3494 | if (pfm_sessions.pfs_sys_use_dbregs > 0) | ||
3495 | ret = -1; | ||
3496 | else | ||
3497 | pfm_sessions.pfs_ptrace_use_dbregs++; | ||
3498 | |||
3499 | DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n", | ||
3500 | pfm_sessions.pfs_ptrace_use_dbregs, | ||
3501 | pfm_sessions.pfs_sys_use_dbregs, | ||
3502 | task->pid, ret)); | ||
3503 | |||
3504 | UNLOCK_PFS(flags); | ||
3505 | |||
3506 | return ret; | ||
3507 | } | ||
3508 | |||
3509 | /* | ||
3510 | * This function is called for every task that exits with the | ||
3511 | * IA64_THREAD_DBG_VALID set. This indicates a task which was | ||
3512 | * able to use the debug registers for debugging purposes via | ||
3513 | * ptrace(). Therefore we know it was not using them for | ||
3514 | * performance monitoring, so we only decrement the number | ||
3515 | * of "ptraced" debug register users to keep the count up to date. | ||
3516 | */ | ||
3517 | int | ||
3518 | pfm_release_debug_registers(struct task_struct *task) | ||
3519 | { | ||
3520 | unsigned long flags; | ||
3521 | int ret; | ||
3522 | |||
3523 | if (pmu_conf->use_rr_dbregs == 0) return 0; | ||
3524 | |||
3525 | LOCK_PFS(flags); | ||
3526 | if (pfm_sessions.pfs_ptrace_use_dbregs == 0) { | ||
3527 | printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid); | ||
3528 | ret = -1; | ||
3529 | } else { | ||
3530 | pfm_sessions.pfs_ptrace_use_dbregs--; | ||
3531 | ret = 0; | ||
3532 | } | ||
3533 | UNLOCK_PFS(flags); | ||
3534 | |||
3535 | return ret; | ||
3536 | } | ||
3537 | |||
3538 | static int | ||
3539 | pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
3540 | { | ||
3541 | struct task_struct *task; | ||
3542 | pfm_buffer_fmt_t *fmt; | ||
3543 | pfm_ovfl_ctrl_t rst_ctrl; | ||
3544 | int state, is_system; | ||
3545 | int ret = 0; | ||
3546 | |||
3547 | state = ctx->ctx_state; | ||
3548 | fmt = ctx->ctx_buf_fmt; | ||
3549 | is_system = ctx->ctx_fl_system; | ||
3550 | task = PFM_CTX_TASK(ctx); | ||
3551 | |||
3552 | switch(state) { | ||
3553 | case PFM_CTX_MASKED: | ||
3554 | break; | ||
3555 | case PFM_CTX_LOADED: | ||
3556 | if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break; | ||
3557 | /* fall through */ | ||
3558 | case PFM_CTX_UNLOADED: | ||
3559 | case PFM_CTX_ZOMBIE: | ||
3560 | DPRINT(("invalid state=%d\n", state)); | ||
3561 | return -EBUSY; | ||
3562 | default: | ||
3563 | DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state)); | ||
3564 | return -EINVAL; | ||
3565 | } | ||
3566 | |||
3567 | /* | ||
3568 | * In system wide and when the context is loaded, access can only happen | ||
3569 | * when the caller is running on the CPU being monitored by the session. | ||
3570 | * It does not have to be the owner (ctx_task) of the context per se. | ||
3571 | */ | ||
3572 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | ||
3573 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | ||
3574 | return -EBUSY; | ||
3575 | } | ||
3576 | |||
3577 | /* sanity check */ | ||
3578 | if (unlikely(task == NULL)) { | ||
3579 | printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid); | ||
3580 | return -EINVAL; | ||
3581 | } | ||
3582 | |||
3583 | if (task == current || is_system) { | ||
3584 | |||
3585 | fmt = ctx->ctx_buf_fmt; | ||
3586 | |||
3587 | DPRINT(("restarting self %d ovfl=0x%lx\n", | ||
3588 | task->pid, | ||
3589 | ctx->ctx_ovfl_regs[0])); | ||
3590 | |||
3591 | if (CTX_HAS_SMPL(ctx)) { | ||
3592 | |||
3593 | prefetch(ctx->ctx_smpl_hdr); | ||
3594 | |||
3595 | rst_ctrl.bits.mask_monitoring = 0; | ||
3596 | rst_ctrl.bits.reset_ovfl_pmds = 0; | ||
3597 | |||
3598 | if (state == PFM_CTX_LOADED) | ||
3599 | ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); | ||
3600 | else | ||
3601 | ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); | ||
3602 | } else { | ||
3603 | rst_ctrl.bits.mask_monitoring = 0; | ||
3604 | rst_ctrl.bits.reset_ovfl_pmds = 1; | ||
3605 | } | ||
3606 | |||
3607 | if (ret == 0) { | ||
3608 | if (rst_ctrl.bits.reset_ovfl_pmds) | ||
3609 | pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET); | ||
3610 | |||
3611 | if (rst_ctrl.bits.mask_monitoring == 0) { | ||
3612 | DPRINT(("resuming monitoring for [%d]\n", task->pid)); | ||
3613 | |||
3614 | if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task); | ||
3615 | } else { | ||
3616 | DPRINT(("keeping monitoring stopped for [%d]\n", task->pid)); | ||
3617 | |||
3618 | // cannot use pfm_stop_monitoring(task, regs); | ||
3619 | } | ||
3620 | } | ||
3621 | /* | ||
3622 | * clear overflowed PMD mask to remove any stale information | ||
3623 | */ | ||
3624 | ctx->ctx_ovfl_regs[0] = 0UL; | ||
3625 | |||
3626 | /* | ||
3627 | * back to LOADED state | ||
3628 | */ | ||
3629 | ctx->ctx_state = PFM_CTX_LOADED; | ||
3630 | |||
3631 | /* | ||
3632 | * XXX: not really useful for self monitoring | ||
3633 | */ | ||
3634 | ctx->ctx_fl_can_restart = 0; | ||
3635 | |||
3636 | return 0; | ||
3637 | } | ||
3638 | |||
3639 | /* | ||
3640 | * restart another task | ||
3641 | */ | ||
3642 | |||
3643 | /* | ||
3644 | * When PFM_CTX_MASKED, we cannot issue a restart before the previous | ||
3645 | * one is seen by the task. | ||
3646 | */ | ||
3647 | if (state == PFM_CTX_MASKED) { | ||
3648 | if (ctx->ctx_fl_can_restart == 0) return -EINVAL; | ||
3649 | /* | ||
3650 | * will prevent subsequent restart before this one is | ||
3651 | * seen by other task | ||
3652 | */ | ||
3653 | ctx->ctx_fl_can_restart = 0; | ||
3654 | } | ||
3655 | |||
3656 | /* | ||
3657 | * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e. | ||
3658 | * the task is blocked or on its way to block. That's the normal | ||
3659 | * restart path. If the monitoring is not masked, then the task | ||
3660 | * can be actively monitoring and we cannot directly intervene. | ||
3661 | * Therefore we use the trap mechanism to catch the task and | ||
3662 | * force it to reset the buffer/reset PMDs. | ||
3663 | * | ||
3664 | * if non-blocking, then we ensure that the task will go into | ||
3665 | * pfm_handle_work() before returning to user mode. | ||
3666 | * | ||
3667 | * We cannot explicitly reset another task, it MUST always | ||
3668 | * be done by the task itself. This works for system wide because | ||
3669 | * the tool that is controlling the session is logically doing | ||
3670 | * "self-monitoring". | ||
3671 | */ | ||
3672 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { | ||
3673 | DPRINT(("unblocking [%d] \n", task->pid)); | ||
3674 | up(&ctx->ctx_restart_sem); | ||
3675 | } else { | ||
3676 | DPRINT(("[%d] armed exit trap\n", task->pid)); | ||
3677 | |||
3678 | ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET; | ||
3679 | |||
3680 | PFM_SET_WORK_PENDING(task, 1); | ||
3681 | |||
3682 | pfm_set_task_notify(task); | ||
3683 | |||
3684 | /* | ||
3685 | * XXX: send reschedule if task runs on another CPU | ||
3686 | */ | ||
3687 | } | ||
3688 | return 0; | ||
3689 | } | ||
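| /* | ||
|  * Editorial note: the typical user-level sequence that reaches the function | ||
|  * above (assuming the usual PFM_RESTART command macro from <asm/perfmon.h>) | ||
|  * is: the monitoring tool wakes up on an overflow notification, consumes the | ||
|  * samples, then re-arms monitoring with | ||
|  * | ||
|  *	perfmonctl(ctx_fd, PFM_RESTART, NULL, 0); | ||
|  * | ||
|  * which, per the command table below, takes no argument (count == 0). | ||
|  */ | ||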
3690 | |||
3691 | static int | ||
3692 | pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
3693 | { | ||
3694 | unsigned int m = *(unsigned int *)arg; | ||
3695 | |||
3696 | pfm_sysctl.debug = m == 0 ? 0 : 1; | ||
3697 | |||
3698 | pfm_debug_var = pfm_sysctl.debug; | ||
3699 | |||
3700 | printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off"); | ||
3701 | |||
3702 | if (m == 0) { | ||
3703 | memset(pfm_stats, 0, sizeof(pfm_stats)); | ||
3704 | for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL; | ||
3705 | } | ||
3706 | return 0; | ||
3707 | } | ||
3708 | |||
3709 | /* | ||
3710 | * arg can be NULL and count can be zero for this function | ||
3711 | */ | ||
3712 | static int | ||
3713 | pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
3714 | { | ||
3715 | struct thread_struct *thread = NULL; | ||
3716 | struct task_struct *task; | ||
3717 | pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg; | ||
3718 | unsigned long flags; | ||
3719 | dbreg_t dbreg; | ||
3720 | unsigned int rnum; | ||
3721 | int first_time; | ||
3722 | int ret = 0, state; | ||
3723 | int i, can_access_pmu = 0; | ||
3724 | int is_system, is_loaded; | ||
3725 | |||
3726 | if (pmu_conf->use_rr_dbregs == 0) return -EINVAL; | ||
3727 | |||
3728 | state = ctx->ctx_state; | ||
3729 | is_loaded = state == PFM_CTX_LOADED ? 1 : 0; | ||
3730 | is_system = ctx->ctx_fl_system; | ||
3731 | task = ctx->ctx_task; | ||
3732 | |||
3733 | if (state == PFM_CTX_ZOMBIE) return -EINVAL; | ||
3734 | |||
3735 | /* | ||
3736 | * on both UP and SMP, we can only write to the debug registers when the task is | ||
3737 | * the owner of the local PMU. | ||
3738 | */ | ||
3739 | if (is_loaded) { | ||
3740 | thread = &task->thread; | ||
3741 | /* | ||
3742 | * In system wide and when the context is loaded, access can only happen | ||
3743 | * when the caller is running on the CPU being monitored by the session. | ||
3744 | * It does not have to be the owner (ctx_task) of the context per se. | ||
3745 | */ | ||
3746 | if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { | ||
3747 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | ||
3748 | return -EBUSY; | ||
3749 | } | ||
3750 | can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0; | ||
3751 | } | ||
3752 | |||
3753 | /* | ||
3754 | * we do not need to check for ipsr.db because we clear ibr.x, dbr.r, and dbr.w, | ||
3755 | * ensuring that no real breakpoint can be installed via this call. | ||
3756 | * | ||
3757 | * IMPORTANT: regs can be NULL in this function | ||
3758 | */ | ||
3759 | |||
3760 | first_time = ctx->ctx_fl_using_dbreg == 0; | ||
3761 | |||
3762 | /* | ||
3763 | * don't bother if we are loaded and task is being debugged | ||
3764 | */ | ||
3765 | if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) { | ||
3766 | DPRINT(("debug registers already in use for [%d]\n", task->pid)); | ||
3767 | return -EBUSY; | ||
3768 | } | ||
3769 | |||
3770 | /* | ||
3771 | * check for debug registers in system wide mode | ||
3772 | * | ||
3773 | * Even though a check is done in pfm_context_load(), | ||
3774 | * we must repeat it here, in case the registers are | ||
3775 | * written after the context is loaded. | ||
3776 | */ | ||
3777 | if (is_loaded) { | ||
3778 | LOCK_PFS(flags); | ||
3779 | |||
3780 | if (first_time && is_system) { | ||
3781 | if (pfm_sessions.pfs_ptrace_use_dbregs) | ||
3782 | ret = -EBUSY; | ||
3783 | else | ||
3784 | pfm_sessions.pfs_sys_use_dbregs++; | ||
3785 | } | ||
3786 | UNLOCK_PFS(flags); | ||
3787 | } | ||
3788 | |||
3789 | if (ret != 0) return ret; | ||
3790 | |||
3791 | /* | ||
3792 | * mark ourself as user of the debug registers for | ||
3793 | * perfmon purposes. | ||
3794 | */ | ||
3795 | ctx->ctx_fl_using_dbreg = 1; | ||
3796 | |||
3797 | /* | ||
3798 | * clear hardware registers to make sure we don't | ||
3799 | * pick up stale state. | ||
3800 | * | ||
3801 | * for a system wide session, we do not use | ||
3802 | * thread.dbr, thread.ibr because this process | ||
3803 | * never leaves the current CPU and the state | ||
3804 | * is shared by all processes running on it | ||
3805 | */ | ||
3806 | if (first_time && can_access_pmu) { | ||
3807 | DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid)); | ||
3808 | for (i=0; i < pmu_conf->num_ibrs; i++) { | ||
3809 | ia64_set_ibr(i, 0UL); | ||
3810 | ia64_dv_serialize_instruction(); | ||
3811 | } | ||
3812 | ia64_srlz_i(); | ||
3813 | for (i=0; i < pmu_conf->num_dbrs; i++) { | ||
3814 | ia64_set_dbr(i, 0UL); | ||
3815 | ia64_dv_serialize_data(); | ||
3816 | } | ||
3817 | ia64_srlz_d(); | ||
3818 | } | ||
3819 | |||
3820 | /* | ||
3821 | * Now install the values into the registers | ||
3822 | */ | ||
3823 | for (i = 0; i < count; i++, req++) { | ||
3824 | |||
3825 | rnum = req->dbreg_num; | ||
3826 | dbreg.val = req->dbreg_value; | ||
3827 | |||
3828 | ret = -EINVAL; | ||
3829 | |||
3830 | if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) { | ||
3831 | DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n", | ||
3832 | rnum, dbreg.val, mode, i, count)); | ||
3833 | |||
3834 | goto abort_mission; | ||
3835 | } | ||
3836 | |||
3837 | /* | ||
3838 | * make sure we do not install an enabled breakpoint | ||
3839 | */ | ||
3840 | if (rnum & 0x1) { | ||
3841 | if (mode == PFM_CODE_RR) | ||
3842 | dbreg.ibr.ibr_x = 0; | ||
3843 | else | ||
3844 | dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0; | ||
3845 | } | ||
3846 | |||
3847 | PFM_REG_RETFLAG_SET(req->dbreg_flags, 0); | ||
3848 | |||
3849 | /* | ||
3850 | * Debug registers, just like PMC, can only be modified | ||
3851 | * by a kernel call. Moreover, perfmon() accesses to those | ||
3852 | * registers are centralized in this routine. The hardware | ||
3853 | * does not modify the value of these registers, therefore, | ||
3854 | * if we save them as they are written, we can avoid having | ||
3855 | * to save them on context switch out. This is made possible | ||
3856 | * by the fact that when perfmon uses debug registers, ptrace() | ||
3857 | * won't be able to modify them concurrently. | ||
3858 | */ | ||
3859 | if (mode == PFM_CODE_RR) { | ||
3860 | CTX_USED_IBR(ctx, rnum); | ||
3861 | |||
3862 | if (can_access_pmu) { | ||
3863 | ia64_set_ibr(rnum, dbreg.val); | ||
3864 | ia64_dv_serialize_instruction(); | ||
3865 | } | ||
3866 | |||
3867 | ctx->ctx_ibrs[rnum] = dbreg.val; | ||
3868 | |||
3869 | DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n", | ||
3870 | rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu)); | ||
3871 | } else { | ||
3872 | CTX_USED_DBR(ctx, rnum); | ||
3873 | |||
3874 | if (can_access_pmu) { | ||
3875 | ia64_set_dbr(rnum, dbreg.val); | ||
3876 | ia64_dv_serialize_data(); | ||
3877 | } | ||
3878 | ctx->ctx_dbrs[rnum] = dbreg.val; | ||
3879 | |||
3880 | DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n", | ||
3881 | rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu)); | ||
3882 | } | ||
3883 | } | ||
3884 | |||
3885 | return 0; | ||
3886 | |||
3887 | abort_mission: | ||
3888 | /* | ||
3889 | * in case it was our first attempt, we undo the global modifications | ||
3890 | */ | ||
3891 | if (first_time) { | ||
3892 | LOCK_PFS(flags); | ||
3893 | if (ctx->ctx_fl_system) { | ||
3894 | pfm_sessions.pfs_sys_use_dbregs--; | ||
3895 | } | ||
3896 | UNLOCK_PFS(flags); | ||
3897 | ctx->ctx_fl_using_dbreg = 0; | ||
3898 | } | ||
3899 | /* | ||
3900 | * install error return flag | ||
3901 | */ | ||
3902 | PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL); | ||
3903 | |||
3904 | return ret; | ||
3905 | } | ||
3906 | |||
3907 | static int | ||
3908 | pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
3909 | { | ||
3910 | return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs); | ||
3911 | } | ||
3912 | |||
3913 | static int | ||
3914 | pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
3915 | { | ||
3916 | return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs); | ||
3917 | } | ||
3918 | |||
3919 | int | ||
3920 | pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) | ||
3921 | { | ||
3922 | pfm_context_t *ctx; | ||
3923 | |||
3924 | if (req == NULL) return -EINVAL; | ||
3925 | |||
3926 | ctx = GET_PMU_CTX(); | ||
3927 | |||
3928 | if (ctx == NULL) return -EINVAL; | ||
3929 | |||
3930 | /* | ||
3931 | * for now limit to current task, which is enough when calling | ||
3932 | * from overflow handler | ||
3933 | */ | ||
3934 | if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; | ||
3935 | |||
3936 | return pfm_write_ibrs(ctx, req, nreq, regs); | ||
3937 | } | ||
3938 | EXPORT_SYMBOL(pfm_mod_write_ibrs); | ||
3939 | |||
3940 | int | ||
3941 | pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) | ||
3942 | { | ||
3943 | pfm_context_t *ctx; | ||
3944 | |||
3945 | if (req == NULL) return -EINVAL; | ||
3946 | |||
3947 | ctx = GET_PMU_CTX(); | ||
3948 | |||
3949 | if (ctx == NULL) return -EINVAL; | ||
3950 | |||
3951 | /* | ||
3952 | * for now limit to current task, which is enough when calling | ||
3953 | * from overflow handler | ||
3954 | */ | ||
3955 | if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; | ||
3956 | |||
3957 | return pfm_write_dbrs(ctx, req, nreq, regs); | ||
3958 | } | ||
3959 | EXPORT_SYMBOL(pfm_mod_write_dbrs); | ||
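| /* | ||
|  * Editorial sketch (not part of this commit): a user-level tool restricting | ||
|  * monitoring to a code address range would typically fill an even/odd IBR | ||
|  * pair and push it through perfmonctl(). The command macro name and the | ||
|  * exact mask encoding are assumptions here; the symbols are illustrative. | ||
|  * | ||
|  *	pfarg_dbreg_t db[2]; | ||
|  * | ||
|  *	memset(db, 0, sizeof(db)); | ||
|  *	db[0].dbreg_num   = 0;			// even IBR: start address | ||
|  *	db[0].dbreg_value = code_start; | ||
|  *	db[1].dbreg_num   = 1;			// odd IBR: mask/config (ibr.x is cleared above) | ||
|  *	db[1].dbreg_value = range_mask; | ||
|  *	perfmonctl(ctx_fd, PFM_WRITE_IBRS, db, 2); | ||
|  */ | ||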
3960 | |||
3961 | |||
3962 | static int | ||
3963 | pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
3964 | { | ||
3965 | pfarg_features_t *req = (pfarg_features_t *)arg; | ||
3966 | |||
3967 | req->ft_version = PFM_VERSION; | ||
3968 | return 0; | ||
3969 | } | ||
3970 | |||
3971 | static int | ||
3972 | pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
3973 | { | ||
3974 | struct pt_regs *tregs; | ||
3975 | struct task_struct *task = PFM_CTX_TASK(ctx); | ||
3976 | int state, is_system; | ||
3977 | |||
3978 | state = ctx->ctx_state; | ||
3979 | is_system = ctx->ctx_fl_system; | ||
3980 | |||
3981 | /* | ||
3982 | * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE) | ||
3983 | */ | ||
3984 | if (state == PFM_CTX_UNLOADED) return -EINVAL; | ||
3985 | |||
3986 | /* | ||
3987 | * In system wide and when the context is loaded, access can only happen | ||
3988 | * when the caller is running on the CPU being monitored by the session. | ||
3989 | * It does not have to be the owner (ctx_task) of the context per se. | ||
3990 | */ | ||
3991 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | ||
3992 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | ||
3993 | return -EBUSY; | ||
3994 | } | ||
3995 | DPRINT(("task [%d] ctx_state=%d is_system=%d\n", | ||
3996 | PFM_CTX_TASK(ctx)->pid, | ||
3997 | state, | ||
3998 | is_system)); | ||
3999 | /* | ||
4000 | * in system mode, we need to update the PMU directly | ||
4001 | * and the user level state of the caller, which may not | ||
4002 | * necessarily be the creator of the context. | ||
4003 | */ | ||
4004 | if (is_system) { | ||
4005 | /* | ||
4006 | * Update local PMU first | ||
4007 | * | ||
4008 | * disable dcr pp | ||
4009 | */ | ||
4010 | ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP); | ||
4011 | ia64_srlz_i(); | ||
4012 | |||
4013 | /* | ||
4014 | * update local cpuinfo | ||
4015 | */ | ||
4016 | PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP); | ||
4017 | |||
4018 | /* | ||
4019 | * stop monitoring, does srlz.i | ||
4020 | */ | ||
4021 | pfm_clear_psr_pp(); | ||
4022 | |||
4023 | /* | ||
4024 | * stop monitoring in the caller | ||
4025 | */ | ||
4026 | ia64_psr(regs)->pp = 0; | ||
4027 | |||
4028 | return 0; | ||
4029 | } | ||
4030 | /* | ||
4031 | * per-task mode | ||
4032 | */ | ||
4033 | |||
4034 | if (task == current) { | ||
4035 | /* stop monitoring at kernel level */ | ||
4036 | pfm_clear_psr_up(); | ||
4037 | |||
4038 | /* | ||
4039 | * stop monitoring at the user level | ||
4040 | */ | ||
4041 | ia64_psr(regs)->up = 0; | ||
4042 | } else { | ||
4043 | tregs = ia64_task_regs(task); | ||
4044 | |||
4045 | /* | ||
4046 | * stop monitoring at the user level | ||
4047 | */ | ||
4048 | ia64_psr(tregs)->up = 0; | ||
4049 | |||
4050 | /* | ||
4051 | * monitoring disabled in kernel at next reschedule | ||
4052 | */ | ||
4053 | ctx->ctx_saved_psr_up = 0; | ||
4054 | DPRINT(("task=[%d]\n", task->pid)); | ||
4055 | } | ||
4056 | return 0; | ||
4057 | } | ||
4058 | |||
4059 | |||
4060 | static int | ||
4061 | pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
4062 | { | ||
4063 | struct pt_regs *tregs; | ||
4064 | int state, is_system; | ||
4065 | |||
4066 | state = ctx->ctx_state; | ||
4067 | is_system = ctx->ctx_fl_system; | ||
4068 | |||
4069 | if (state != PFM_CTX_LOADED) return -EINVAL; | ||
4070 | |||
4071 | /* | ||
4072 | * In system wide and when the context is loaded, access can only happen | ||
4073 | * when the caller is running on the CPU being monitored by the session. | ||
4074 | * It does not have to be the owner (ctx_task) of the context per se. | ||
4075 | */ | ||
4076 | if (is_system && ctx->ctx_cpu != smp_processor_id()) { | ||
4077 | DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); | ||
4078 | return -EBUSY; | ||
4079 | } | ||
4080 | |||
4081 | /* | ||
4082 | * in system mode, we need to update the PMU directly | ||
4083 | * and the user level state of the caller, which may not | ||
4084 | * necessarily be the creator of the context. | ||
4085 | */ | ||
4086 | if (is_system) { | ||
4087 | |||
4088 | /* | ||
4089 | * set user level psr.pp for the caller | ||
4090 | */ | ||
4091 | ia64_psr(regs)->pp = 1; | ||
4092 | |||
4093 | /* | ||
4094 | * now update the local PMU and cpuinfo | ||
4095 | */ | ||
4096 | PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP); | ||
4097 | |||
4098 | /* | ||
4099 | * start monitoring at kernel level | ||
4100 | */ | ||
4101 | pfm_set_psr_pp(); | ||
4102 | |||
4103 | /* enable dcr pp */ | ||
4104 | ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP); | ||
4105 | ia64_srlz_i(); | ||
4106 | |||
4107 | return 0; | ||
4108 | } | ||
4109 | |||
4110 | /* | ||
4111 | * per-process mode | ||
4112 | */ | ||
4113 | |||
4114 | if (ctx->ctx_task == current) { | ||
4115 | |||
4116 | /* start monitoring at kernel level */ | ||
4117 | pfm_set_psr_up(); | ||
4118 | |||
4119 | /* | ||
4120 | * activate monitoring at user level | ||
4121 | */ | ||
4122 | ia64_psr(regs)->up = 1; | ||
4123 | |||
4124 | } else { | ||
4125 | tregs = ia64_task_regs(ctx->ctx_task); | ||
4126 | |||
4127 | /* | ||
4128 | * start monitoring at the kernel level the next | ||
4129 | * time the task is scheduled | ||
4130 | */ | ||
4131 | ctx->ctx_saved_psr_up = IA64_PSR_UP; | ||
4132 | |||
4133 | /* | ||
4134 | * activate monitoring at user level | ||
4135 | */ | ||
4136 | ia64_psr(tregs)->up = 1; | ||
4137 | } | ||
4138 | return 0; | ||
4139 | } | ||
4140 | |||
4141 | static int | ||
4142 | pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
4143 | { | ||
4144 | pfarg_reg_t *req = (pfarg_reg_t *)arg; | ||
4145 | unsigned int cnum; | ||
4146 | int i; | ||
4147 | int ret = -EINVAL; | ||
4148 | |||
4149 | for (i = 0; i < count; i++, req++) { | ||
4150 | |||
4151 | cnum = req->reg_num; | ||
4152 | |||
4153 | if (!PMC_IS_IMPL(cnum)) goto abort_mission; | ||
4154 | |||
4155 | req->reg_value = PMC_DFL_VAL(cnum); | ||
4156 | |||
4157 | PFM_REG_RETFLAG_SET(req->reg_flags, 0); | ||
4158 | |||
4159 | DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value)); | ||
4160 | } | ||
4161 | return 0; | ||
4162 | |||
4163 | abort_mission: | ||
4164 | PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL); | ||
4165 | return ret; | ||
4166 | } | ||
4167 | |||
4168 | static int | ||
4169 | pfm_check_task_exist(pfm_context_t *ctx) | ||
4170 | { | ||
4171 | struct task_struct *g, *t; | ||
4172 | int ret = -ESRCH; | ||
4173 | |||
4174 | read_lock(&tasklist_lock); | ||
4175 | |||
4176 | do_each_thread (g, t) { | ||
4177 | if (t->thread.pfm_context == ctx) { | ||
4178 | ret = 0; | ||
4179 | break; | ||
4180 | } | ||
4181 | } while_each_thread (g, t); | ||
4182 | |||
4183 | read_unlock(&tasklist_lock); | ||
4184 | |||
4185 | DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx)); | ||
4186 | |||
4187 | return ret; | ||
4188 | } | ||
4189 | |||
4190 | static int | ||
4191 | pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
4192 | { | ||
4193 | struct task_struct *task; | ||
4194 | struct thread_struct *thread; | ||
4195 | pfm_context_t *old; | ||
4196 | unsigned long flags; | ||
4197 | #ifndef CONFIG_SMP | ||
4198 | struct task_struct *owner_task = NULL; | ||
4199 | #endif | ||
4200 | pfarg_load_t *req = (pfarg_load_t *)arg; | ||
4201 | unsigned long *pmcs_source, *pmds_source; | ||
4202 | int the_cpu; | ||
4203 | int ret = 0; | ||
4204 | int state, is_system, set_dbregs = 0; | ||
4205 | |||
4206 | state = ctx->ctx_state; | ||
4207 | is_system = ctx->ctx_fl_system; | ||
4208 | /* | ||
4209 | * can only load from the unloaded state | ||
4210 | */ | ||
4211 | if (state != PFM_CTX_UNLOADED) { | ||
4212 | DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", | ||
4213 | req->load_pid, | ||
4214 | ctx->ctx_state)); | ||
4215 | return -EINVAL; | ||
4216 | } | ||
4217 | |||
4218 | DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); | ||
4219 | |||
4220 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) { | ||
4221 | DPRINT(("cannot use blocking mode on self\n")); | ||
4222 | return -EINVAL; | ||
4223 | } | ||
4224 | |||
4225 | ret = pfm_get_task(ctx, req->load_pid, &task); | ||
4226 | if (ret) { | ||
4227 | DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret)); | ||
4228 | return ret; | ||
4229 | } | ||
4230 | |||
4231 | ret = -EINVAL; | ||
4232 | |||
4233 | /* | ||
4234 | * system wide is self monitoring only | ||
4235 | */ | ||
4236 | if (is_system && task != current) { | ||
4237 | DPRINT(("system wide is self monitoring only load_pid=%d\n", | ||
4238 | req->load_pid)); | ||
4239 | goto error; | ||
4240 | } | ||
4241 | |||
4242 | thread = &task->thread; | ||
4243 | |||
4244 | ret = 0; | ||
4245 | /* | ||
4246 | * cannot load a context which is using range restrictions, | ||
4247 | * into a task that is being debugged. | ||
4248 | */ | ||
4249 | if (ctx->ctx_fl_using_dbreg) { | ||
4250 | if (thread->flags & IA64_THREAD_DBG_VALID) { | ||
4251 | ret = -EBUSY; | ||
4252 | DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid)); | ||
4253 | goto error; | ||
4254 | } | ||
4255 | LOCK_PFS(flags); | ||
4256 | |||
4257 | if (is_system) { | ||
4258 | if (pfm_sessions.pfs_ptrace_use_dbregs) { | ||
4259 | DPRINT(("cannot load [%d] dbregs in use\n", task->pid)); | ||
4260 | ret = -EBUSY; | ||
4261 | } else { | ||
4262 | pfm_sessions.pfs_sys_use_dbregs++; | ||
4263 | DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs)); | ||
4264 | set_dbregs = 1; | ||
4265 | } | ||
4266 | } | ||
4267 | |||
4268 | UNLOCK_PFS(flags); | ||
4269 | |||
4270 | if (ret) goto error; | ||
4271 | } | ||
4272 | |||
4273 | /* | ||
4274 | * SMP system-wide monitoring implies self-monitoring. | ||
4275 | * | ||
4276 | * The programming model expects the task to | ||
4277 | * be pinned on a CPU throughout the session. | ||
4278 | * Here we take note of the current CPU at the | ||
4279 | * time the context is loaded. No call from | ||
4280 | * another CPU will be allowed. | ||
4281 | * | ||
4282 | * The pinning via sched_setaffinity() | ||
4283 | * must be done by the calling task prior | ||
4284 | * to this call. | ||
4285 | * | ||
4286 | * systemwide: keep track of CPU this session is supposed to run on | ||
4287 | */ | ||
4288 | the_cpu = ctx->ctx_cpu = smp_processor_id(); | ||
4289 | |||
4290 | ret = -EBUSY; | ||
4291 | /* | ||
4292 | * now reserve the session | ||
4293 | */ | ||
4294 | ret = pfm_reserve_session(current, is_system, the_cpu); | ||
4295 | if (ret) goto error; | ||
4296 | |||
4297 | /* | ||
4298 | * task is necessarily stopped at this point. | ||
4299 | * | ||
4300 | * If the previous context was zombie, then it got removed in | ||
4301 | * pfm_save_regs(). Therefore we should not see it here. | ||
4302 | * If we see a context, then this is an active context | ||
4303 | * | ||
4304 | * XXX: needs to be atomic | ||
4305 | */ | ||
4306 | DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n", | ||
4307 | thread->pfm_context, ctx)); | ||
4308 | |||
4309 | old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *)); | ||
4310 | if (old != NULL) { | ||
4311 | DPRINT(("load_pid [%d] already has a context\n", req->load_pid)); | ||
4312 | goto error_unres; | ||
4313 | } | ||
4314 | |||
4315 | pfm_reset_msgq(ctx); | ||
4316 | |||
4317 | ctx->ctx_state = PFM_CTX_LOADED; | ||
4318 | |||
4319 | /* | ||
4320 | * link context to task | ||
4321 | */ | ||
4322 | ctx->ctx_task = task; | ||
4323 | |||
4324 | if (is_system) { | ||
4325 | /* | ||
4326 | * we load as stopped | ||
4327 | */ | ||
4328 | PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE); | ||
4329 | PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP); | ||
4330 | |||
4331 | if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE); | ||
4332 | } else { | ||
4333 | thread->flags |= IA64_THREAD_PM_VALID; | ||
4334 | } | ||
4335 | |||
4336 | /* | ||
4337 | * propagate into thread-state | ||
4338 | */ | ||
4339 | pfm_copy_pmds(task, ctx); | ||
4340 | pfm_copy_pmcs(task, ctx); | ||
4341 | |||
4342 | pmcs_source = thread->pmcs; | ||
4343 | pmds_source = thread->pmds; | ||
4344 | |||
4345 | /* | ||
4346 | * always the case for system-wide | ||
4347 | */ | ||
4348 | if (task == current) { | ||
4349 | |||
4350 | if (is_system == 0) { | ||
4351 | |||
4352 | /* allow user level control */ | ||
4353 | ia64_psr(regs)->sp = 0; | ||
4354 | DPRINT(("clearing psr.sp for [%d]\n", task->pid)); | ||
4355 | |||
4356 | SET_LAST_CPU(ctx, smp_processor_id()); | ||
4357 | INC_ACTIVATION(); | ||
4358 | SET_ACTIVATION(ctx); | ||
4359 | #ifndef CONFIG_SMP | ||
4360 | /* | ||
4361 | * push the other task out, if any | ||
4362 | */ | ||
4363 | owner_task = GET_PMU_OWNER(); | ||
4364 | if (owner_task) pfm_lazy_save_regs(owner_task); | ||
4365 | #endif | ||
4366 | } | ||
4367 | /* | ||
4368 | * load all PMD from ctx to PMU (as opposed to thread state) | ||
4369 | * restore all PMC from ctx to PMU | ||
4370 | */ | ||
4371 | pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]); | ||
4372 | pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]); | ||
4373 | |||
4374 | ctx->ctx_reload_pmcs[0] = 0UL; | ||
4375 | ctx->ctx_reload_pmds[0] = 0UL; | ||
4376 | |||
4377 | /* | ||
4378 | * guaranteed safe by earlier check against DBG_VALID | ||
4379 | */ | ||
4380 | if (ctx->ctx_fl_using_dbreg) { | ||
4381 | pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); | ||
4382 | pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); | ||
4383 | } | ||
4384 | /* | ||
4385 | * set new ownership | ||
4386 | */ | ||
4387 | SET_PMU_OWNER(task, ctx); | ||
4388 | |||
4389 | DPRINT(("context loaded on PMU for [%d]\n", task->pid)); | ||
4390 | } else { | ||
4391 | /* | ||
4392 | * when not current, task MUST be stopped, so this is safe | ||
4393 | */ | ||
4394 | regs = ia64_task_regs(task); | ||
4395 | |||
4396 | /* force a full reload */ | ||
4397 | ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; | ||
4398 | SET_LAST_CPU(ctx, -1); | ||
4399 | |||
4400 | /* initial saved psr (stopped) */ | ||
4401 | ctx->ctx_saved_psr_up = 0UL; | ||
4402 | ia64_psr(regs)->up = ia64_psr(regs)->pp = 0; | ||
4403 | } | ||
4404 | |||
4405 | ret = 0; | ||
4406 | |||
4407 | error_unres: | ||
4408 | if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu); | ||
4409 | error: | ||
4410 | /* | ||
4411 | * we must undo the dbregs setting (for system-wide) | ||
4412 | */ | ||
4413 | if (ret && set_dbregs) { | ||
4414 | LOCK_PFS(flags); | ||
4415 | pfm_sessions.pfs_sys_use_dbregs--; | ||
4416 | UNLOCK_PFS(flags); | ||
4417 | } | ||
4418 | /* | ||
4419 | * release task, there is now a link with the context | ||
4420 | */ | ||
4421 | if (is_system == 0 && task != current) { | ||
4422 | pfm_put_task(task); | ||
4423 | |||
4424 | if (ret == 0) { | ||
4425 | ret = pfm_check_task_exist(ctx); | ||
4426 | if (ret) { | ||
4427 | ctx->ctx_state = PFM_CTX_UNLOADED; | ||
4428 | ctx->ctx_task = NULL; | ||
4429 | } | ||
4430 | } | ||
4431 | } | ||
4432 | return ret; | ||
4433 | } | ||
4434 | |||
4435 | /* | ||
4436 | * in this function, we do not need to increase the use count | ||
4437 | * for the task via get_task_struct(), because we hold the | ||
4438 | * context lock. If the task were to disappear while having | ||
4439 | * a context attached, it would go through pfm_exit_thread() | ||
4440 | * which also grabs the context lock and would therefore be blocked | ||
4441 | * until we are done here. | ||
4442 | */ | ||
4443 | static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx); | ||
4444 | |||
4445 | static int | ||
4446 | pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | ||
4447 | { | ||
4448 | struct task_struct *task = PFM_CTX_TASK(ctx); | ||
4449 | struct pt_regs *tregs; | ||
4450 | int prev_state, is_system; | ||
4451 | int ret; | ||
4452 | |||
4453 | DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1)); | ||
4454 | |||
4455 | prev_state = ctx->ctx_state; | ||
4456 | is_system = ctx->ctx_fl_system; | ||
4457 | |||
4458 | /* | ||
4459 | * unload only when necessary | ||
4460 | */ | ||
4461 | if (prev_state == PFM_CTX_UNLOADED) { | ||
4462 | DPRINT(("ctx_state=%d, nothing to do\n", prev_state)); | ||
4463 | return 0; | ||
4464 | } | ||
4465 | |||
4466 | /* | ||
4467 | * clear psr and dcr bits | ||
4468 | */ | ||
4469 | ret = pfm_stop(ctx, NULL, 0, regs); | ||
4470 | if (ret) return ret; | ||
4471 | |||
4472 | ctx->ctx_state = PFM_CTX_UNLOADED; | ||
4473 | |||
4474 | /* | ||
4475 | * in system mode, we need to update the PMU directly | ||
4476 | * and the user level state of the caller, which may not | ||
4477 | * necessarily be the creator of the context. | ||
4478 | */ | ||
4479 | if (is_system) { | ||
4480 | |||
4481 | /* | ||
4482 | * Update cpuinfo | ||
4483 | * | ||
4484 | * local PMU is taken care of in pfm_stop() | ||
4485 | */ | ||
4486 | PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE); | ||
4487 | PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE); | ||
4488 | |||
4489 | /* | ||
4490 | * save PMDs in context | ||
4491 | * release ownership | ||
4492 | */ | ||
4493 | pfm_flush_pmds(current, ctx); | ||
4494 | |||
4495 | /* | ||
4496 | * at this point we are done with the PMU | ||
4497 | * so we can unreserve the resource. | ||
4498 | */ | ||
4499 | if (prev_state != PFM_CTX_ZOMBIE) | ||
4500 | pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu); | ||
4501 | |||
4502 | /* | ||
4503 | * disconnect context from task | ||
4504 | */ | ||
4505 | task->thread.pfm_context = NULL; | ||
4506 | /* | ||
4507 | * disconnect task from context | ||
4508 | */ | ||
4509 | ctx->ctx_task = NULL; | ||
4510 | |||
4511 | /* | ||
4512 | * There is nothing more to cleanup here. | ||
4513 | */ | ||
4514 | return 0; | ||
4515 | } | ||
4516 | |||
4517 | /* | ||
4518 | * per-task mode | ||
4519 | */ | ||
4520 | tregs = task == current ? regs : ia64_task_regs(task); | ||
4521 | |||
4522 | if (task == current) { | ||
4523 | /* | ||
4524 | * cancel user level control | ||
4525 | */ | ||
4526 | ia64_psr(regs)->sp = 1; | ||
4527 | |||
4528 | DPRINT(("setting psr.sp for [%d]\n", task->pid)); | ||
4529 | } | ||
4530 | /* | ||
4531 | * save PMDs to context | ||
4532 | * release ownership | ||
4533 | */ | ||
4534 | pfm_flush_pmds(task, ctx); | ||
4535 | |||
4536 | /* | ||
4537 | * at this point we are done with the PMU | ||
4538 | * so we can unreserve the resource. | ||
4539 | * | ||
4540 | * when state was ZOMBIE, we have already unreserved. | ||
4541 | */ | ||
4542 | if (prev_state != PFM_CTX_ZOMBIE) | ||
4543 | pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu); | ||
4544 | |||
4545 | /* | ||
4546 | * reset activation counter and psr | ||
4547 | */ | ||
4548 | ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; | ||
4549 | SET_LAST_CPU(ctx, -1); | ||
4550 | |||
4551 | /* | ||
4552 | * PMU state will not be restored | ||
4553 | */ | ||
4554 | task->thread.flags &= ~IA64_THREAD_PM_VALID; | ||
4555 | |||
4556 | /* | ||
4557 | * break links between context and task | ||
4558 | */ | ||
4559 | task->thread.pfm_context = NULL; | ||
4560 | ctx->ctx_task = NULL; | ||
4561 | |||
4562 | PFM_SET_WORK_PENDING(task, 0); | ||
4563 | |||
4564 | ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; | ||
4565 | ctx->ctx_fl_can_restart = 0; | ||
4566 | ctx->ctx_fl_going_zombie = 0; | ||
4567 | |||
4568 | DPRINT(("disconnected [%d] from context\n", task->pid)); | ||
4569 | |||
4570 | return 0; | ||
4571 | } | ||
4572 | |||
4573 | |||
4574 | /* | ||
4575 | * called only from exit_thread(): task == current | ||
4576 | * we come here only if current has a context attached (loaded or masked) | ||
4577 | */ | ||
4578 | void | ||
4579 | pfm_exit_thread(struct task_struct *task) | ||
4580 | { | ||
4581 | pfm_context_t *ctx; | ||
4582 | unsigned long flags; | ||
4583 | struct pt_regs *regs = ia64_task_regs(task); | ||
4584 | int ret, state; | ||
4585 | int free_ok = 0; | ||
4586 | |||
4587 | ctx = PFM_GET_CTX(task); | ||
4588 | |||
4589 | PROTECT_CTX(ctx, flags); | ||
4590 | |||
4591 | DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid)); | ||
4592 | |||
4593 | state = ctx->ctx_state; | ||
4594 | switch(state) { | ||
4595 | case PFM_CTX_UNLOADED: | ||
4596 | /* | ||
4597 | * only comes to this function if pfm_context is not NULL, i.e., cannot | ||
4598 | * be in unloaded state | ||
4599 | */ | ||
4600 | printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid); | ||
4601 | break; | ||
4602 | case PFM_CTX_LOADED: | ||
4603 | case PFM_CTX_MASKED: | ||
4604 | ret = pfm_context_unload(ctx, NULL, 0, regs); | ||
4605 | if (ret) { | ||
4606 | printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret); | ||
4607 | } | ||
4608 | DPRINT(("ctx unloaded for current state was %d\n", state)); | ||
4609 | |||
4610 | pfm_end_notify_user(ctx); | ||
4611 | break; | ||
4612 | case PFM_CTX_ZOMBIE: | ||
4613 | ret = pfm_context_unload(ctx, NULL, 0, regs); | ||
4614 | if (ret) { | ||
4615 | printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret); | ||
4616 | } | ||
4617 | free_ok = 1; | ||
4618 | break; | ||
4619 | default: | ||
4620 | printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state); | ||
4621 | break; | ||
4622 | } | ||
4623 | UNPROTECT_CTX(ctx, flags); | ||
4624 | |||
4625 | { u64 psr = pfm_get_psr(); | ||
4626 | BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); | ||
4627 | BUG_ON(GET_PMU_OWNER()); | ||
4628 | BUG_ON(ia64_psr(regs)->up); | ||
4629 | BUG_ON(ia64_psr(regs)->pp); | ||
4630 | } | ||
4631 | |||
4632 | /* | ||
4633 | * All memory free operations (especially for vmalloc'ed memory) | ||
4634 | * MUST be done with interrupts ENABLED. | ||
4635 | */ | ||
4636 | if (free_ok) pfm_context_free(ctx); | ||
4637 | } | ||
4638 | |||
4639 | /* | ||
4640 | * functions MUST be listed in increasing order of their index (see perfmon.h) | ||
4641 | */ | ||
4642 | #define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz } | ||
4643 | #define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL } | ||
4644 | #define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP) | ||
4645 | #define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW) | ||
4646 | #define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL} | ||
4647 | |||
4648 | static pfm_cmd_desc_t pfm_cmd_tab[]={ | ||
4649 | /* 0 */PFM_CMD_NONE, | ||
4650 | /* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), | ||
4651 | /* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), | ||
4652 | /* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), | ||
4653 | /* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS), | ||
4654 | /* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS), | ||
4655 | /* 6 */PFM_CMD_NONE, | ||
4656 | /* 7 */PFM_CMD_NONE, | ||
4657 | /* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize), | ||
4658 | /* 9 */PFM_CMD_NONE, | ||
4659 | /* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW), | ||
4660 | /* 11 */PFM_CMD_NONE, | ||
4661 | /* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL), | ||
4662 | /* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL), | ||
4663 | /* 14 */PFM_CMD_NONE, | ||
4664 | /* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL), | ||
4665 | /* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL), | ||
4666 | /* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS), | ||
4667 | /* 18 */PFM_CMD_NONE, | ||
4668 | /* 19 */PFM_CMD_NONE, | ||
4669 | /* 20 */PFM_CMD_NONE, | ||
4670 | /* 21 */PFM_CMD_NONE, | ||
4671 | /* 22 */PFM_CMD_NONE, | ||
4672 | /* 23 */PFM_CMD_NONE, | ||
4673 | /* 24 */PFM_CMD_NONE, | ||
4674 | /* 25 */PFM_CMD_NONE, | ||
4675 | /* 26 */PFM_CMD_NONE, | ||
4676 | /* 27 */PFM_CMD_NONE, | ||
4677 | /* 28 */PFM_CMD_NONE, | ||
4678 | /* 29 */PFM_CMD_NONE, | ||
4679 | /* 30 */PFM_CMD_NONE, | ||
4680 | /* 31 */PFM_CMD_NONE, | ||
4681 | /* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL), | ||
4682 | /* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL) | ||
4683 | }; | ||
4684 | #define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t)) | ||
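| /* | ||
|  * Editorial usage sketch (assuming the usual PFM_* command macros from | ||
|  * <asm/perfmon.h> and that pfarg_context_t returns the context file | ||
|  * descriptor): a self-monitoring tool drives the table above roughly in | ||
|  * this order; register numbers and values are illustrative only. | ||
|  * | ||
|  *	pfarg_context_t c = {0}; | ||
|  *	pfarg_reg_t pc = {0}, pd = {0}; | ||
|  *	pfarg_load_t ld = {0}; | ||
|  * | ||
|  *	perfmonctl(0, PFM_CREATE_CONTEXT, &c, 1);	// cmd  8: yields ctx_fd (assumed field) | ||
|  *	pc.reg_num = 4; pc.reg_value = event_sel;	// hypothetical event encoding | ||
|  *	perfmonctl(ctx_fd, PFM_WRITE_PMCS, &pc, 1);	// cmd  1 | ||
|  *	pd.reg_num = 4;					// matching counter, starts at 0 | ||
|  *	perfmonctl(ctx_fd, PFM_WRITE_PMDS, &pd, 1);	// cmd  2 | ||
|  *	ld.load_pid = getpid(); | ||
|  *	perfmonctl(ctx_fd, PFM_LOAD_CONTEXT, &ld, 1);	// cmd 16 | ||
|  *	perfmonctl(ctx_fd, PFM_START, NULL, 0);		// cmd  5 | ||
|  * | ||
|  *	(run the monitored code) | ||
|  * | ||
|  *	perfmonctl(ctx_fd, PFM_STOP, NULL, 0);		// cmd  4 | ||
|  *	perfmonctl(ctx_fd, PFM_READ_PMDS, &pd, 1);	// cmd  3: pd.reg_value holds the count | ||
|  */ | ||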
4685 | |||
4686 | static int | ||
4687 | pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags) | ||
4688 | { | ||
4689 | struct task_struct *task; | ||
4690 | int state, old_state; | ||
4691 | |||
4692 | recheck: | ||
4693 | state = ctx->ctx_state; | ||
4694 | task = ctx->ctx_task; | ||
4695 | |||
4696 | if (task == NULL) { | ||
4697 | DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state)); | ||
4698 | return 0; | ||
4699 | } | ||
4700 | |||
4701 | DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n", | ||
4702 | ctx->ctx_fd, | ||
4703 | state, | ||
4704 | task->pid, | ||
4705 | task->state, PFM_CMD_STOPPED(cmd))); | ||
4706 | |||
4707 | /* | ||
4708 | * self-monitoring always ok. | ||
4709 | * | ||
4710 | * for system-wide the caller can either be the creator of the | ||
4711 | * context (i.e., the task to which the context is attached) OR | ||
4712 | * a task running on the same CPU as the session. | ||
4713 | */ | ||
4714 | if (task == current || ctx->ctx_fl_system) return 0; | ||
4715 | |||
4716 | /* | ||
4717 | * if context is UNLOADED we are safe to go | ||
4718 | */ | ||
4719 | if (state == PFM_CTX_UNLOADED) return 0; | ||
4720 | |||
4721 | /* | ||
4722 | * no command can operate on a zombie context | ||
4723 | */ | ||
4724 | if (state == PFM_CTX_ZOMBIE) { | ||
4725 | DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); | ||
4726 | return -EINVAL; | ||
4727 | } | ||
4728 | |||
4729 | /* | ||
4730 | * context is LOADED or MASKED. Some commands may need to have | ||
4731 | * the task stopped. | ||
4732 | * | ||
4733 | * We could lift this restriction for UP but it would mean that | ||
4734 | * the user has no guarantee the task would not run between | ||
4735 | * two successive calls to perfmonctl(). That's probably OK. | ||
4736 | * If the user wants to ensure the task does not run, then | ||
4737 | * the task must be stopped. | ||
4738 | */ | ||
4739 | if (PFM_CMD_STOPPED(cmd)) { | ||
4740 | if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) { | ||
4741 | DPRINT(("[%d] task not in stopped state\n", task->pid)); | ||
4742 | return -EBUSY; | ||
4743 | } | ||
4744 | /* | ||
4745 | * task is now stopped, wait for ctxsw out | ||
4746 | * | ||
4747 | * This is an interesting point in the code. | ||
4748 | * We need to unprotect the context because | ||
4749 | * the pfm_save_regs() routine needs to grab | ||
4750 | * the same lock. There is danger in doing | ||
4751 | * this because it leaves a window open for | ||
4752 | * another task to get access to the context | ||
4753 | * and possibly change its state. The one thing | ||
4754 | * that is not possible is for the context to disappear | ||
4755 | * because we are protected by the VFS layer, i.e., | ||
4756 | * get_fd()/put_fd(). | ||
4757 | */ | ||
4758 | old_state = state; | ||
4759 | |||
4760 | UNPROTECT_CTX(ctx, flags); | ||
4761 | |||
4762 | wait_task_inactive(task); | ||
4763 | |||
4764 | PROTECT_CTX(ctx, flags); | ||
4765 | |||
4766 | /* | ||
4767 | * we must recheck to verify if state has changed | ||
4768 | */ | ||
4769 | if (ctx->ctx_state != old_state) { | ||
4770 | DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state)); | ||
4771 | goto recheck; | ||
4772 | } | ||
4773 | } | ||
4774 | return 0; | ||
4775 | } | ||
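
/*
 * Example of the race the recheck above guards against (added for
 * exposition, reasoning only from the code in this file): while we slept in
 * wait_task_inactive() with the context unlocked, a PMU overflow interrupt
 * on the remote CPU may have run pfm_overflow_handler() and moved the
 * context from PFM_CTX_LOADED to PFM_CTX_MASKED, so the command has to be
 * re-validated against the new state rather than the stale one.
 */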
4776 | |||
4777 | /* | ||
4778 | * system-call entry point (must return long) | ||
4779 | */ | ||
4780 | asmlinkage long | ||
4781 | sys_perfmonctl (int fd, int cmd, void __user *arg, int count) | ||
4782 | { | ||
4783 | struct file *file = NULL; | ||
4784 | pfm_context_t *ctx = NULL; | ||
4785 | unsigned long flags = 0UL; | ||
4786 | void *args_k = NULL; | ||
4787 | long ret; /* will expand int return types */ | ||
4788 | size_t base_sz, sz, xtra_sz = 0; | ||
4789 | int narg, completed_args = 0, call_made = 0, cmd_flags; | ||
4790 | int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); | ||
4791 | int (*getsize)(void *arg, size_t *sz); | ||
4792 | #define PFM_MAX_ARGSIZE 4096 | ||
4793 | |||
4794 | /* | ||
4795 | * reject any call if perfmon was disabled at initialization | ||
4796 | */ | ||
4797 | if (unlikely(pmu_conf == NULL)) return -ENOSYS; | ||
4798 | |||
4799 | if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) { | ||
4800 | DPRINT(("invalid cmd=%d\n", cmd)); | ||
4801 | return -EINVAL; | ||
4802 | } | ||
4803 | |||
4804 | func = pfm_cmd_tab[cmd].cmd_func; | ||
4805 | narg = pfm_cmd_tab[cmd].cmd_narg; | ||
4806 | base_sz = pfm_cmd_tab[cmd].cmd_argsize; | ||
4807 | getsize = pfm_cmd_tab[cmd].cmd_getsize; | ||
4808 | cmd_flags = pfm_cmd_tab[cmd].cmd_flags; | ||
4809 | |||
4810 | if (unlikely(func == NULL)) { | ||
4811 | DPRINT(("invalid cmd=%d\n", cmd)); | ||
4812 | return -EINVAL; | ||
4813 | } | ||
4814 | |||
4815 | DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n", | ||
4816 | PFM_CMD_NAME(cmd), | ||
4817 | cmd, | ||
4818 | narg, | ||
4819 | base_sz, | ||
4820 | count)); | ||
4821 | |||
4822 | /* | ||
4823 | * check if number of arguments matches what the command expects | ||
4824 | */ | ||
4825 | if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count))) | ||
4826 | return -EINVAL; | ||
4827 | |||
4828 | restart_args: | ||
4829 | sz = xtra_sz + base_sz*count; | ||
4830 | /* | ||
4831 | * limit abuse to min page size | ||
4832 | */ | ||
4833 | if (unlikely(sz > PFM_MAX_ARGSIZE)) { | ||
4834 | printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz); | ||
4835 | return -E2BIG; | ||
4836 | } | ||
4837 | |||
4838 | /* | ||
4839 | * allocate default-sized argument buffer | ||
4840 | */ | ||
4841 | if (likely(count && args_k == NULL)) { | ||
4842 | args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL); | ||
4843 | if (args_k == NULL) return -ENOMEM; | ||
4844 | } | ||
4845 | |||
4846 | ret = -EFAULT; | ||
4847 | |||
4848 | /* | ||
4849 | * copy arguments | ||
4850 | * | ||
4851 | 	 * assume sz = 0 for commands without parameters | ||
4852 | */ | ||
4853 | if (sz && copy_from_user(args_k, arg, sz)) { | ||
4854 | DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg)); | ||
4855 | goto error_args; | ||
4856 | } | ||
4857 | |||
4858 | /* | ||
4859 | * check if command supports extra parameters | ||
4860 | */ | ||
4861 | if (completed_args == 0 && getsize) { | ||
4862 | /* | ||
4863 | * get extra parameters size (based on main argument) | ||
4864 | */ | ||
4865 | ret = (*getsize)(args_k, &xtra_sz); | ||
4866 | if (ret) goto error_args; | ||
4867 | |||
4868 | completed_args = 1; | ||
4869 | |||
4870 | DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz)); | ||
4871 | |||
4872 | /* retry if necessary */ | ||
4873 | if (likely(xtra_sz)) goto restart_args; | ||
4874 | } | ||
4875 | |||
4876 | if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd; | ||
4877 | |||
4878 | ret = -EBADF; | ||
4879 | |||
4880 | file = fget(fd); | ||
4881 | if (unlikely(file == NULL)) { | ||
4882 | DPRINT(("invalid fd %d\n", fd)); | ||
4883 | goto error_args; | ||
4884 | } | ||
4885 | if (unlikely(PFM_IS_FILE(file) == 0)) { | ||
4886 | DPRINT(("fd %d not related to perfmon\n", fd)); | ||
4887 | goto error_args; | ||
4888 | } | ||
4889 | |||
4890 | ctx = (pfm_context_t *)file->private_data; | ||
4891 | if (unlikely(ctx == NULL)) { | ||
4892 | DPRINT(("no context for fd %d\n", fd)); | ||
4893 | goto error_args; | ||
4894 | } | ||
4895 | prefetch(&ctx->ctx_state); | ||
4896 | |||
4897 | PROTECT_CTX(ctx, flags); | ||
4898 | |||
4899 | /* | ||
4900 | * check task is stopped | ||
4901 | */ | ||
4902 | ret = pfm_check_task_state(ctx, cmd, flags); | ||
4903 | if (unlikely(ret)) goto abort_locked; | ||
4904 | |||
4905 | skip_fd: | ||
4906 | ret = (*func)(ctx, args_k, count, ia64_task_regs(current)); | ||
4907 | |||
4908 | call_made = 1; | ||
4909 | |||
4910 | abort_locked: | ||
4911 | if (likely(ctx)) { | ||
4912 | DPRINT(("context unlocked\n")); | ||
4913 | UNPROTECT_CTX(ctx, flags); | ||
4914 | fput(file); | ||
4915 | } | ||
4916 | |||
4917 | /* copy argument back to user, if needed */ | ||
4918 | if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT; | ||
4919 | |||
4920 | error_args: | ||
4921 | if (args_k) kfree(args_k); | ||
4922 | |||
4923 | DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret)); | ||
4924 | |||
4925 | return ret; | ||
4926 | } | ||
4927 | |||
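/*
 * Illustrative user-level sketch of driving the entry point above (added
 * for exposition; it is not part of the original file and is never built).
 * It assumes the perfmonctl() wrapper and the PFM_LOAD_CONTEXT/PFM_START
 * command names from <perfmon/perfmon.h>, and the load_pid field of
 * pfarg_load_t; treat those as assumptions, not a reference.
 */
#if 0
#include <sys/types.h>
#include <string.h>
#include <perfmon/perfmon.h>

static int pfm_example_attach_and_start(int ctx_fd, pid_t target)
{
	pfarg_load_t load;

	memset(&load, 0, sizeof(load));
	load.load_pid = target;	/* task the context gets attached to */

	/* cmd 0x10 dispatches to pfm_context_load(), so count must be 1 */
	if (perfmonctl(ctx_fd, PFM_LOAD_CONTEXT, &load, 1) == -1)
		return -1;

	/* PFM_START takes no argument: count == 0, nothing is copied in */
	return perfmonctl(ctx_fd, PFM_START, NULL, 0);
}
#endif
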
4928 | static void | ||
4929 | pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs) | ||
4930 | { | ||
4931 | pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt; | ||
4932 | pfm_ovfl_ctrl_t rst_ctrl; | ||
4933 | int state; | ||
4934 | int ret = 0; | ||
4935 | |||
4936 | state = ctx->ctx_state; | ||
4937 | /* | ||
4938 | * Unlock sampling buffer and reset index atomically | ||
4939 | * XXX: not really needed when blocking | ||
4940 | */ | ||
4941 | if (CTX_HAS_SMPL(ctx)) { | ||
4942 | |||
4943 | rst_ctrl.bits.mask_monitoring = 0; | ||
4944 | rst_ctrl.bits.reset_ovfl_pmds = 0; | ||
4945 | |||
4946 | if (state == PFM_CTX_LOADED) | ||
4947 | ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); | ||
4948 | else | ||
4949 | ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); | ||
4950 | } else { | ||
4951 | rst_ctrl.bits.mask_monitoring = 0; | ||
4952 | rst_ctrl.bits.reset_ovfl_pmds = 1; | ||
4953 | } | ||
4954 | |||
4955 | if (ret == 0) { | ||
4956 | if (rst_ctrl.bits.reset_ovfl_pmds) { | ||
4957 | pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET); | ||
4958 | } | ||
4959 | if (rst_ctrl.bits.mask_monitoring == 0) { | ||
4960 | DPRINT(("resuming monitoring\n")); | ||
4961 | if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current); | ||
4962 | } else { | ||
4963 | DPRINT(("stopping monitoring\n")); | ||
4964 | //pfm_stop_monitoring(current, regs); | ||
4965 | } | ||
4966 | ctx->ctx_state = PFM_CTX_LOADED; | ||
4967 | } | ||
4968 | } | ||
4969 | |||
4970 | /* | ||
4971 | * context MUST BE LOCKED when calling | ||
4972 | * can only be called for current | ||
4973 | */ | ||
4974 | static void | ||
4975 | pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs) | ||
4976 | { | ||
4977 | int ret; | ||
4978 | |||
4979 | DPRINT(("entering for [%d]\n", current->pid)); | ||
4980 | |||
4981 | ret = pfm_context_unload(ctx, NULL, 0, regs); | ||
4982 | if (ret) { | ||
4983 | 		printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", current->pid, ret); | ||
4984 | } | ||
4985 | |||
4986 | /* | ||
4987 | * and wakeup controlling task, indicating we are now disconnected | ||
4988 | */ | ||
4989 | wake_up_interruptible(&ctx->ctx_zombieq); | ||
4990 | |||
4991 | /* | ||
4992 | * given that context is still locked, the controlling | ||
4993 | * task will only get access when we return from | ||
4994 | * pfm_handle_work(). | ||
4995 | */ | ||
4996 | } | ||
4997 | |||
4998 | static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds); | ||
4999 | |||
5000 | void | ||
5001 | pfm_handle_work(void) | ||
5002 | { | ||
5003 | pfm_context_t *ctx; | ||
5004 | struct pt_regs *regs; | ||
5005 | unsigned long flags; | ||
5006 | unsigned long ovfl_regs; | ||
5007 | unsigned int reason; | ||
5008 | int ret; | ||
5009 | |||
5010 | ctx = PFM_GET_CTX(current); | ||
5011 | if (ctx == NULL) { | ||
5012 | printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid); | ||
5013 | return; | ||
5014 | } | ||
5015 | |||
5016 | PROTECT_CTX(ctx, flags); | ||
5017 | |||
5018 | PFM_SET_WORK_PENDING(current, 0); | ||
5019 | |||
5020 | pfm_clear_task_notify(); | ||
5021 | |||
5022 | regs = ia64_task_regs(current); | ||
5023 | |||
5024 | /* | ||
5025 | * extract reason for being here and clear | ||
5026 | */ | ||
5027 | reason = ctx->ctx_fl_trap_reason; | ||
5028 | ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; | ||
5029 | ovfl_regs = ctx->ctx_ovfl_regs[0]; | ||
5030 | |||
5031 | DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state)); | ||
5032 | |||
5033 | /* | ||
5034 | * must be done before we check for simple-reset mode | ||
5035 | */ | ||
5036 | if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie; | ||
5037 | |||
5038 | |||
5039 | //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking; | ||
5040 | if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking; | ||
5041 | |||
5042 | UNPROTECT_CTX(ctx, flags); | ||
5043 | |||
5044 | /* | ||
5045 | * pfm_handle_work() is currently called with interrupts disabled. | ||
5046 | * The down_interruptible call may sleep, therefore we | ||
5047 | * must re-enable interrupts to avoid deadlocks. It is | ||
5048 | * safe to do so because this function is called ONLY | ||
5049 | * when returning to user level (PUStk=1), in which case | ||
5050 | * there is no risk of kernel stack overflow due to deep | ||
5051 | * interrupt nesting. | ||
5052 | */ | ||
5053 | BUG_ON(flags & IA64_PSR_I); | ||
5054 | local_irq_enable(); | ||
5055 | |||
5056 | DPRINT(("before block sleeping\n")); | ||
5057 | |||
5058 | /* | ||
5059 | * may go through without blocking on SMP systems | ||
5060 | * if restart has been received already by the time we call down() | ||
5061 | */ | ||
5062 | ret = down_interruptible(&ctx->ctx_restart_sem); | ||
5063 | |||
5064 | DPRINT(("after block sleeping ret=%d\n", ret)); | ||
5065 | |||
5066 | /* | ||
5067 | * disable interrupts to restore state we had upon entering | ||
5068 | * this function | ||
5069 | */ | ||
5070 | local_irq_disable(); | ||
5071 | |||
5072 | PROTECT_CTX(ctx, flags); | ||
5073 | |||
5074 | /* | ||
5075 | * we need to read the ovfl_regs only after wake-up | ||
5076 | * because we may have had pfm_write_pmds() in between | ||
5077 | 	 * and that can change PMD values, and therefore | ||
5078 | * ovfl_regs is reset for these new PMD values. | ||
5079 | */ | ||
5080 | ovfl_regs = ctx->ctx_ovfl_regs[0]; | ||
5081 | |||
5082 | if (ctx->ctx_fl_going_zombie) { | ||
5083 | do_zombie: | ||
5084 | DPRINT(("context is zombie, bailing out\n")); | ||
5085 | pfm_context_force_terminate(ctx, regs); | ||
5086 | goto nothing_to_do; | ||
5087 | } | ||
5088 | /* | ||
5089 | * in case of interruption of down() we don't restart anything | ||
5090 | */ | ||
5091 | if (ret < 0) goto nothing_to_do; | ||
5092 | |||
5093 | skip_blocking: | ||
5094 | pfm_resume_after_ovfl(ctx, ovfl_regs, regs); | ||
5095 | ctx->ctx_ovfl_regs[0] = 0UL; | ||
5096 | |||
5097 | nothing_to_do: | ||
5098 | |||
5099 | UNPROTECT_CTX(ctx, flags); | ||
5100 | } | ||
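
/*
 * Sketch of the unblocking side of the handshake above (exposition only;
 * PFM_RESTART is assumed to be the user-visible name, from <asm/perfmon.h>,
 * of the restart command): the monitored task sleeps in
 * down_interruptible(&ctx->ctx_restart_sem) until the controlling tool has
 * consumed the overflow message from the context file descriptor and calls
 *
 *	perfmonctl(ctx_fd, PFM_RESTART, NULL, 0);
 *
 * which ups ctx_restart_sem and lets pfm_resume_after_ovfl() run here.
 */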
5101 | |||
5102 | static int | ||
5103 | pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg) | ||
5104 | { | ||
5105 | if (ctx->ctx_state == PFM_CTX_ZOMBIE) { | ||
5106 | DPRINT(("ignoring overflow notification, owner is zombie\n")); | ||
5107 | return 0; | ||
5108 | } | ||
5109 | |||
5110 | DPRINT(("waking up somebody\n")); | ||
5111 | |||
5112 | if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait); | ||
5113 | |||
5114 | /* | ||
5115 | * safe, we are not in intr handler, nor in ctxsw when | ||
5116 | * we come here | ||
5117 | */ | ||
5118 | kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN); | ||
5119 | |||
5120 | return 0; | ||
5121 | } | ||
5122 | |||
5123 | static int | ||
5124 | pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds) | ||
5125 | { | ||
5126 | pfm_msg_t *msg = NULL; | ||
5127 | |||
5128 | if (ctx->ctx_fl_no_msg == 0) { | ||
5129 | msg = pfm_get_new_msg(ctx); | ||
5130 | if (msg == NULL) { | ||
5131 | printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n"); | ||
5132 | return -1; | ||
5133 | } | ||
5134 | |||
5135 | msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL; | ||
5136 | msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd; | ||
5137 | msg->pfm_ovfl_msg.msg_active_set = 0; | ||
5138 | msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds; | ||
5139 | msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL; | ||
5140 | msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL; | ||
5141 | msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL; | ||
5142 | msg->pfm_ovfl_msg.msg_tstamp = 0UL; | ||
5143 | } | ||
5144 | |||
5145 | DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n", | ||
5146 | msg, | ||
5147 | ctx->ctx_fl_no_msg, | ||
5148 | ctx->ctx_fd, | ||
5149 | ovfl_pmds)); | ||
5150 | |||
5151 | return pfm_notify_user(ctx, msg); | ||
5152 | } | ||
5153 | |||
5154 | static int | ||
5155 | pfm_end_notify_user(pfm_context_t *ctx) | ||
5156 | { | ||
5157 | pfm_msg_t *msg; | ||
5158 | |||
5159 | msg = pfm_get_new_msg(ctx); | ||
5160 | if (msg == NULL) { | ||
5161 | printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n"); | ||
5162 | return -1; | ||
5163 | } | ||
5164 | /* no leak */ | ||
5165 | memset(msg, 0, sizeof(*msg)); | ||
5166 | |||
5167 | msg->pfm_end_msg.msg_type = PFM_MSG_END; | ||
5168 | msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd; | ||
5169 | msg->pfm_ovfl_msg.msg_tstamp = 0UL; | ||
5170 | |||
5171 | DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n", | ||
5172 | msg, | ||
5173 | ctx->ctx_fl_no_msg, | ||
5174 | ctx->ctx_fd)); | ||
5175 | |||
5176 | return pfm_notify_user(ctx, msg); | ||
5177 | } | ||
5178 | |||
5179 | /* | ||
5180 | * main overflow processing routine. | ||
5181 |  * it can be called from the interrupt path or explicitly during the context switch code | ||
5182 | */ | ||
5183 | static void | ||
5184 | pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs) | ||
5185 | { | ||
5186 | pfm_ovfl_arg_t *ovfl_arg; | ||
5187 | unsigned long mask; | ||
5188 | unsigned long old_val, ovfl_val, new_val; | ||
5189 | unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds; | ||
5190 | unsigned long tstamp; | ||
5191 | pfm_ovfl_ctrl_t ovfl_ctrl; | ||
5192 | unsigned int i, has_smpl; | ||
5193 | int must_notify = 0; | ||
5194 | |||
5195 | if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring; | ||
5196 | |||
5197 | /* | ||
5198 | * sanity test. Should never happen | ||
5199 | */ | ||
5200 | if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check; | ||
5201 | |||
5202 | tstamp = ia64_get_itc(); | ||
5203 | mask = pmc0 >> PMU_FIRST_COUNTER; | ||
5204 | ovfl_val = pmu_conf->ovfl_val; | ||
5205 | has_smpl = CTX_HAS_SMPL(ctx); | ||
5206 | |||
5207 | DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s " | ||
5208 | "used_pmds=0x%lx\n", | ||
5209 | pmc0, | ||
5210 | task ? task->pid: -1, | ||
5211 | (regs ? regs->cr_iip : 0), | ||
5212 | CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking", | ||
5213 | ctx->ctx_used_pmds[0])); | ||
5214 | |||
5215 | |||
5216 | /* | ||
5217 | * first we update the virtual counters | ||
5218 | * assume there was a prior ia64_srlz_d() issued | ||
5219 | */ | ||
5220 | for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) { | ||
5221 | |||
5222 | /* skip pmd which did not overflow */ | ||
5223 | if ((mask & 0x1) == 0) continue; | ||
5224 | |||
5225 | /* | ||
5226 | * Note that the pmd is not necessarily 0 at this point as qualified events | ||
5227 | * may have happened before the PMU was frozen. The residual count is not | ||
5228 | * taken into consideration here but will be with any read of the pmd via | ||
5229 | * pfm_read_pmds(). | ||
5230 | */ | ||
5231 | old_val = new_val = ctx->ctx_pmds[i].val; | ||
5232 | new_val += 1 + ovfl_val; | ||
5233 | ctx->ctx_pmds[i].val = new_val; | ||
5234 | |||
5235 | /* | ||
5236 | * check for overflow condition | ||
5237 | */ | ||
5238 | if (likely(old_val > new_val)) { | ||
5239 | ovfl_pmds |= 1UL << i; | ||
5240 | if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i; | ||
5241 | } | ||
5242 | |||
5243 | DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n", | ||
5244 | i, | ||
5245 | new_val, | ||
5246 | old_val, | ||
5247 | ia64_get_pmd(i) & ovfl_val, | ||
5248 | ovfl_pmds, | ||
5249 | ovfl_notify)); | ||
5250 | } | ||
5251 | |||
5252 | /* | ||
5253 | * there was no 64-bit overflow, nothing else to do | ||
5254 | */ | ||
5255 | if (ovfl_pmds == 0UL) return; | ||
5256 | |||
5257 | /* | ||
5258 | * reset all control bits | ||
5259 | */ | ||
5260 | ovfl_ctrl.val = 0; | ||
5261 | reset_pmds = 0UL; | ||
5262 | |||
5263 | /* | ||
5264 | * if a sampling format module exists, then we "cache" the overflow by | ||
5265 | * calling the module's handler() routine. | ||
5266 | */ | ||
5267 | if (has_smpl) { | ||
5268 | unsigned long start_cycles, end_cycles; | ||
5269 | unsigned long pmd_mask; | ||
5270 | int j, k, ret = 0; | ||
5271 | int this_cpu = smp_processor_id(); | ||
5272 | |||
5273 | pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER; | ||
5274 | ovfl_arg = &ctx->ctx_ovfl_arg; | ||
5275 | |||
5276 | prefetch(ctx->ctx_smpl_hdr); | ||
5277 | |||
5278 | for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) { | ||
5279 | |||
5280 | mask = 1UL << i; | ||
5281 | |||
5282 | if ((pmd_mask & 0x1) == 0) continue; | ||
5283 | |||
5284 | ovfl_arg->ovfl_pmd = (unsigned char )i; | ||
5285 | ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0; | ||
5286 | ovfl_arg->active_set = 0; | ||
5287 | ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */ | ||
5288 | ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0]; | ||
5289 | |||
5290 | ovfl_arg->pmd_value = ctx->ctx_pmds[i].val; | ||
5291 | ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval; | ||
5292 | ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid; | ||
5293 | |||
5294 | /* | ||
5295 | * copy values of pmds of interest. Sampling format may copy them | ||
5296 | * into sampling buffer. | ||
5297 | */ | ||
5298 | if (smpl_pmds) { | ||
5299 | for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) { | ||
5300 | if ((smpl_pmds & 0x1) == 0) continue; | ||
5301 | ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j); | ||
5302 | DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1])); | ||
5303 | } | ||
5304 | } | ||
5305 | |||
5306 | pfm_stats[this_cpu].pfm_smpl_handler_calls++; | ||
5307 | |||
5308 | start_cycles = ia64_get_itc(); | ||
5309 | |||
5310 | /* | ||
5311 | * call custom buffer format record (handler) routine | ||
5312 | */ | ||
5313 | ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp); | ||
5314 | |||
5315 | end_cycles = ia64_get_itc(); | ||
5316 | |||
5317 | /* | ||
5318 | * For those controls, we take the union because they have | ||
5319 | * an all or nothing behavior. | ||
5320 | */ | ||
5321 | ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user; | ||
5322 | ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task; | ||
5323 | ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring; | ||
5324 | /* | ||
5325 | * build the bitmask of pmds to reset now | ||
5326 | */ | ||
5327 | if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask; | ||
5328 | |||
5329 | pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles; | ||
5330 | } | ||
5331 | /* | ||
5332 | * when the module cannot handle the rest of the overflows, we abort right here | ||
5333 | */ | ||
5334 | if (ret && pmd_mask) { | ||
5335 | DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n", | ||
5336 | pmd_mask<<PMU_FIRST_COUNTER)); | ||
5337 | } | ||
5338 | /* | ||
5339 | * remove the pmds we reset now from the set of pmds to reset in pfm_restart() | ||
5340 | */ | ||
5341 | ovfl_pmds &= ~reset_pmds; | ||
5342 | } else { | ||
5343 | /* | ||
5344 | * when no sampling module is used, then the default | ||
5345 | * is to notify on overflow if requested by user | ||
5346 | */ | ||
5347 | ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0; | ||
5348 | ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0; | ||
5349 | ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */ | ||
5350 | ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1; | ||
5351 | /* | ||
5352 | * if needed, we reset all overflowed pmds | ||
5353 | */ | ||
5354 | if (ovfl_notify == 0) reset_pmds = ovfl_pmds; | ||
5355 | } | ||
5356 | |||
5357 | DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds)); | ||
5358 | |||
5359 | /* | ||
5360 | * reset the requested PMD registers using the short reset values | ||
5361 | */ | ||
5362 | if (reset_pmds) { | ||
5363 | unsigned long bm = reset_pmds; | ||
5364 | pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET); | ||
5365 | } | ||
5366 | |||
5367 | if (ovfl_notify && ovfl_ctrl.bits.notify_user) { | ||
5368 | /* | ||
5369 | * keep track of what to reset when unblocking | ||
5370 | */ | ||
5371 | ctx->ctx_ovfl_regs[0] = ovfl_pmds; | ||
5372 | |||
5373 | /* | ||
5374 | * check for blocking context | ||
5375 | */ | ||
5376 | if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) { | ||
5377 | |||
5378 | ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK; | ||
5379 | |||
5380 | /* | ||
5381 | 		 * set the perfmon-specific pending work flag for the task | ||
5382 | */ | ||
5383 | PFM_SET_WORK_PENDING(task, 1); | ||
5384 | |||
5385 | /* | ||
5386 | * when coming from ctxsw, current still points to the | ||
5387 | * previous task, therefore we must work with task and not current. | ||
5388 | */ | ||
5389 | pfm_set_task_notify(task); | ||
5390 | } | ||
5391 | /* | ||
5392 | * defer until state is changed (shorten spin window). the context is locked | ||
5393 | 		 * anyway, so the signal receiver would just spin for nothing. | ||
5394 | */ | ||
5395 | must_notify = 1; | ||
5396 | } | ||
5397 | |||
5398 | DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n", | ||
5399 | GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1, | ||
5400 | PFM_GET_WORK_PENDING(task), | ||
5401 | ctx->ctx_fl_trap_reason, | ||
5402 | ovfl_pmds, | ||
5403 | ovfl_notify, | ||
5404 | ovfl_ctrl.bits.mask_monitoring ? 1 : 0)); | ||
5405 | /* | ||
5406 | * in case monitoring must be stopped, we toggle the psr bits | ||
5407 | */ | ||
5408 | if (ovfl_ctrl.bits.mask_monitoring) { | ||
5409 | pfm_mask_monitoring(task); | ||
5410 | ctx->ctx_state = PFM_CTX_MASKED; | ||
5411 | ctx->ctx_fl_can_restart = 1; | ||
5412 | } | ||
5413 | |||
5414 | /* | ||
5415 | * send notification now | ||
5416 | */ | ||
5417 | if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify); | ||
5418 | |||
5419 | return; | ||
5420 | |||
5421 | sanity_check: | ||
5422 | printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n", | ||
5423 | smp_processor_id(), | ||
5424 | task ? task->pid : -1, | ||
5425 | pmc0); | ||
5426 | return; | ||
5427 | |||
5428 | stop_monitoring: | ||
5429 | /* | ||
5430 | * in SMP, zombie context is never restored but reclaimed in pfm_load_regs(). | ||
5431 | * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can | ||
5432 | 	 * come here as zombie only if the task is the current task, in which case we | ||
5433 | 	 * can access the PMU hardware directly. | ||
5434 | * | ||
5435 | * Note that zombies do have PM_VALID set. So here we do the minimal. | ||
5436 | * | ||
5437 | 	 * The context was zombified because it could not be reclaimed at the time | ||
5438 | * the monitoring program exited. At this point, the PMU reservation has been | ||
5439 | 	 * returned, the sampling buffer has been freed. We must convert this call | ||
5440 | * into a spurious interrupt. However, we must also avoid infinite overflows | ||
5441 | * by stopping monitoring for this task. We can only come here for a per-task | ||
5442 | * context. All we need to do is to stop monitoring using the psr bits which | ||
5443 | 	 * are always task private. By re-enabling secure monitoring, we ensure that | ||
5444 | * the monitored task will not be able to re-activate monitoring. | ||
5445 | * The task will eventually be context switched out, at which point the context | ||
5446 | * will be reclaimed (that includes releasing ownership of the PMU). | ||
5447 | * | ||
5448 | 	 * So there might be a window of time where the number of per-task sessions is zero | ||
5449 | 	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie | ||
5450 | 	 * context. This is safe because if a per-task session comes in, it will push this one | ||
5451 | 	 * out and, by virtue of pfm_save_regs(), this one will disappear. If a system wide | ||
5452 | 	 * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will | ||
5453 | * also push our zombie context out. | ||
5454 | * | ||
5455 | * Overall pretty hairy stuff.... | ||
5456 | */ | ||
5457 | DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1)); | ||
5458 | pfm_clear_psr_up(); | ||
5459 | ia64_psr(regs)->up = 0; | ||
5460 | ia64_psr(regs)->sp = 1; | ||
5461 | return; | ||
5462 | } | ||
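
/*
 * Worked example of the virtual 64-bit counter arithmetic used above and in
 * pfm_flush_pmds() (the 47-bit width is purely illustrative; the real mask
 * comes from pmu_conf->ovfl_val): with 47-bit hardware counters,
 * ovfl_val = 2^47 - 1, so every hardware wrap adds 1 + ovfl_val = 2^47 to
 * the software value in ctx_pmds[i].val while the low 47 bits keep counting
 * in the hardware PMD. The full counter value is therefore
 *
 *	ctx_pmds[i].val + (ia64_get_pmd(i) & ovfl_val)
 *
 * and a wrap of the 64-bit software value itself is detected by the
 * unsigned test "old_val > new_val" after the addition.
 */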
5463 | |||
5464 | static int | ||
5465 | pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs) | ||
5466 | { | ||
5467 | struct task_struct *task; | ||
5468 | pfm_context_t *ctx; | ||
5469 | unsigned long flags; | ||
5470 | u64 pmc0; | ||
5471 | int this_cpu = smp_processor_id(); | ||
5472 | int retval = 0; | ||
5473 | |||
5474 | pfm_stats[this_cpu].pfm_ovfl_intr_count++; | ||
5475 | |||
5476 | /* | ||
5477 | * srlz.d done before arriving here | ||
5478 | */ | ||
5479 | pmc0 = ia64_get_pmc(0); | ||
5480 | |||
5481 | task = GET_PMU_OWNER(); | ||
5482 | ctx = GET_PMU_CTX(); | ||
5483 | |||
5484 | /* | ||
5485 | * if we have some pending bits set | ||
5486 | * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1 | ||
5487 | */ | ||
5488 | if (PMC0_HAS_OVFL(pmc0) && task) { | ||
5489 | /* | ||
5490 | * we assume that pmc0.fr is always set here | ||
5491 | */ | ||
5492 | |||
5493 | /* sanity check */ | ||
5494 | if (!ctx) goto report_spurious1; | ||
5495 | |||
5496 | if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0) | ||
5497 | goto report_spurious2; | ||
5498 | |||
5499 | PROTECT_CTX_NOPRINT(ctx, flags); | ||
5500 | |||
5501 | pfm_overflow_handler(task, ctx, pmc0, regs); | ||
5502 | |||
5503 | UNPROTECT_CTX_NOPRINT(ctx, flags); | ||
5504 | |||
5505 | } else { | ||
5506 | pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++; | ||
5507 | retval = -1; | ||
5508 | } | ||
5509 | /* | ||
5510 | * keep it unfrozen at all times | ||
5511 | */ | ||
5512 | pfm_unfreeze_pmu(); | ||
5513 | |||
5514 | return retval; | ||
5515 | |||
5516 | report_spurious1: | ||
5517 | printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n", | ||
5518 | this_cpu, task->pid); | ||
5519 | pfm_unfreeze_pmu(); | ||
5520 | return -1; | ||
5521 | report_spurious2: | ||
5522 | printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n", | ||
5523 | this_cpu, | ||
5524 | task->pid); | ||
5525 | pfm_unfreeze_pmu(); | ||
5526 | return -1; | ||
5527 | } | ||
5528 | |||
5529 | static irqreturn_t | ||
5530 | pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs) | ||
5531 | { | ||
5532 | unsigned long start_cycles, total_cycles; | ||
5533 | unsigned long min, max; | ||
5534 | int this_cpu; | ||
5535 | int ret; | ||
5536 | |||
5537 | this_cpu = get_cpu(); | ||
5538 | min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min; | ||
5539 | max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max; | ||
5540 | |||
5541 | start_cycles = ia64_get_itc(); | ||
5542 | |||
5543 | ret = pfm_do_interrupt_handler(irq, arg, regs); | ||
5544 | |||
5545 | total_cycles = ia64_get_itc(); | ||
5546 | |||
5547 | /* | ||
5548 | * don't measure spurious interrupts | ||
5549 | */ | ||
5550 | if (likely(ret == 0)) { | ||
5551 | total_cycles -= start_cycles; | ||
5552 | |||
5553 | if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles; | ||
5554 | if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles; | ||
5555 | |||
5556 | pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles; | ||
5557 | } | ||
5558 | put_cpu_no_resched(); | ||
5559 | return IRQ_HANDLED; | ||
5560 | } | ||
5561 | |||
5562 | /* | ||
5563 | * /proc/perfmon interface, for debug only | ||
5564 | */ | ||
5565 | |||
5566 | #define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1) | ||
5567 | |||
5568 | static void * | ||
5569 | pfm_proc_start(struct seq_file *m, loff_t *pos) | ||
5570 | { | ||
5571 | if (*pos == 0) { | ||
5572 | return PFM_PROC_SHOW_HEADER; | ||
5573 | } | ||
5574 | |||
5575 | while (*pos <= NR_CPUS) { | ||
5576 | if (cpu_online(*pos - 1)) { | ||
5577 | return (void *)*pos; | ||
5578 | } | ||
5579 | ++*pos; | ||
5580 | } | ||
5581 | return NULL; | ||
5582 | } | ||
5583 | |||
5584 | static void * | ||
5585 | pfm_proc_next(struct seq_file *m, void *v, loff_t *pos) | ||
5586 | { | ||
5587 | ++*pos; | ||
5588 | return pfm_proc_start(m, pos); | ||
5589 | } | ||
5590 | |||
5591 | static void | ||
5592 | pfm_proc_stop(struct seq_file *m, void *v) | ||
5593 | { | ||
5594 | } | ||
5595 | |||
5596 | static void | ||
5597 | pfm_proc_show_header(struct seq_file *m) | ||
5598 | { | ||
5599 | struct list_head * pos; | ||
5600 | pfm_buffer_fmt_t * entry; | ||
5601 | unsigned long flags; | ||
5602 | |||
5603 | seq_printf(m, | ||
5604 | "perfmon version : %u.%u\n" | ||
5605 | "model : %s\n" | ||
5606 | "fastctxsw : %s\n" | ||
5607 | "expert mode : %s\n" | ||
5608 | "ovfl_mask : 0x%lx\n" | ||
5609 | "PMU flags : 0x%x\n", | ||
5610 | PFM_VERSION_MAJ, PFM_VERSION_MIN, | ||
5611 | pmu_conf->pmu_name, | ||
5612 | pfm_sysctl.fastctxsw > 0 ? "Yes": "No", | ||
5613 | pfm_sysctl.expert_mode > 0 ? "Yes": "No", | ||
5614 | pmu_conf->ovfl_val, | ||
5615 | pmu_conf->flags); | ||
5616 | |||
5617 | LOCK_PFS(flags); | ||
5618 | |||
5619 | seq_printf(m, | ||
5620 | "proc_sessions : %u\n" | ||
5621 | "sys_sessions : %u\n" | ||
5622 | "sys_use_dbregs : %u\n" | ||
5623 | "ptrace_use_dbregs : %u\n", | ||
5624 | pfm_sessions.pfs_task_sessions, | ||
5625 | pfm_sessions.pfs_sys_sessions, | ||
5626 | pfm_sessions.pfs_sys_use_dbregs, | ||
5627 | pfm_sessions.pfs_ptrace_use_dbregs); | ||
5628 | |||
5629 | UNLOCK_PFS(flags); | ||
5630 | |||
5631 | spin_lock(&pfm_buffer_fmt_lock); | ||
5632 | |||
5633 | list_for_each(pos, &pfm_buffer_fmt_list) { | ||
5634 | entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list); | ||
5635 | seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n", | ||
5636 | entry->fmt_uuid[0], | ||
5637 | entry->fmt_uuid[1], | ||
5638 | entry->fmt_uuid[2], | ||
5639 | entry->fmt_uuid[3], | ||
5640 | entry->fmt_uuid[4], | ||
5641 | entry->fmt_uuid[5], | ||
5642 | entry->fmt_uuid[6], | ||
5643 | entry->fmt_uuid[7], | ||
5644 | entry->fmt_uuid[8], | ||
5645 | entry->fmt_uuid[9], | ||
5646 | entry->fmt_uuid[10], | ||
5647 | entry->fmt_uuid[11], | ||
5648 | entry->fmt_uuid[12], | ||
5649 | entry->fmt_uuid[13], | ||
5650 | entry->fmt_uuid[14], | ||
5651 | entry->fmt_uuid[15], | ||
5652 | entry->fmt_name); | ||
5653 | } | ||
5654 | spin_unlock(&pfm_buffer_fmt_lock); | ||
5655 | |||
5656 | } | ||
5657 | |||
5658 | static int | ||
5659 | pfm_proc_show(struct seq_file *m, void *v) | ||
5660 | { | ||
5661 | unsigned long psr; | ||
5662 | unsigned int i; | ||
5663 | int cpu; | ||
5664 | |||
5665 | if (v == PFM_PROC_SHOW_HEADER) { | ||
5666 | pfm_proc_show_header(m); | ||
5667 | return 0; | ||
5668 | } | ||
5669 | |||
5670 | /* show info for CPU (v - 1) */ | ||
5671 | |||
5672 | cpu = (long)v - 1; | ||
5673 | seq_printf(m, | ||
5674 | "CPU%-2d overflow intrs : %lu\n" | ||
5675 | "CPU%-2d overflow cycles : %lu\n" | ||
5676 | "CPU%-2d overflow min : %lu\n" | ||
5677 | "CPU%-2d overflow max : %lu\n" | ||
5678 | "CPU%-2d smpl handler calls : %lu\n" | ||
5679 | "CPU%-2d smpl handler cycles : %lu\n" | ||
5680 | "CPU%-2d spurious intrs : %lu\n" | ||
5681 | "CPU%-2d replay intrs : %lu\n" | ||
5682 | "CPU%-2d syst_wide : %d\n" | ||
5683 | "CPU%-2d dcr_pp : %d\n" | ||
5684 | "CPU%-2d exclude idle : %d\n" | ||
5685 | "CPU%-2d owner : %d\n" | ||
5686 | "CPU%-2d context : %p\n" | ||
5687 | "CPU%-2d activations : %lu\n", | ||
5688 | cpu, pfm_stats[cpu].pfm_ovfl_intr_count, | ||
5689 | cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles, | ||
5690 | cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min, | ||
5691 | cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max, | ||
5692 | cpu, pfm_stats[cpu].pfm_smpl_handler_calls, | ||
5693 | cpu, pfm_stats[cpu].pfm_smpl_handler_cycles, | ||
5694 | cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count, | ||
5695 | cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count, | ||
5696 | cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0, | ||
5697 | cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0, | ||
5698 | cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0, | ||
5699 | cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1, | ||
5700 | cpu, pfm_get_cpu_data(pmu_ctx, cpu), | ||
5701 | cpu, pfm_get_cpu_data(pmu_activation_number, cpu)); | ||
5702 | |||
5703 | if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) { | ||
5704 | |||
5705 | psr = pfm_get_psr(); | ||
5706 | |||
5707 | ia64_srlz_d(); | ||
5708 | |||
5709 | seq_printf(m, | ||
5710 | "CPU%-2d psr : 0x%lx\n" | ||
5711 | "CPU%-2d pmc0 : 0x%lx\n", | ||
5712 | cpu, psr, | ||
5713 | cpu, ia64_get_pmc(0)); | ||
5714 | |||
5715 | for (i=0; PMC_IS_LAST(i) == 0; i++) { | ||
5716 | if (PMC_IS_COUNTING(i) == 0) continue; | ||
5717 | seq_printf(m, | ||
5718 | "CPU%-2d pmc%u : 0x%lx\n" | ||
5719 | "CPU%-2d pmd%u : 0x%lx\n", | ||
5720 | cpu, i, ia64_get_pmc(i), | ||
5721 | cpu, i, ia64_get_pmd(i)); | ||
5722 | } | ||
5723 | } | ||
5724 | return 0; | ||
5725 | } | ||
5726 | |||
5727 | struct seq_operations pfm_seq_ops = { | ||
5728 | .start = pfm_proc_start, | ||
5729 | .next = pfm_proc_next, | ||
5730 | .stop = pfm_proc_stop, | ||
5731 | .show = pfm_proc_show | ||
5732 | }; | ||
5733 | |||
5734 | static int | ||
5735 | pfm_proc_open(struct inode *inode, struct file *file) | ||
5736 | { | ||
5737 | return seq_open(file, &pfm_seq_ops); | ||
5738 | } | ||
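
/*
 * Usage sketch (exposition only): "cat /proc/perfmon" walks the iterator
 * above. Position 0 returns the PFM_PROC_SHOW_HEADER sentinel, which makes
 * pfm_proc_show() print the global header; positions 1..NR_CPUS map to
 * online CPU (*pos - 1) and print that CPU's overflow statistics.
 */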
5739 | |||
5740 | |||
5741 | /* | ||
5742 | * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens | ||
5743 | * during pfm_enable() hence before pfm_start(). We cannot assume monitoring | ||
5744 | * is active or inactive based on mode. We must rely on the value in | ||
5745 | * local_cpu_data->pfm_syst_info | ||
5746 | */ | ||
5747 | void | ||
5748 | pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin) | ||
5749 | { | ||
5750 | struct pt_regs *regs; | ||
5751 | unsigned long dcr; | ||
5752 | unsigned long dcr_pp; | ||
5753 | |||
5754 | dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0; | ||
5755 | |||
5756 | /* | ||
5757 | * pid 0 is guaranteed to be the idle task. There is one such task with pid 0 | ||
5758 | * on every CPU, so we can rely on the pid to identify the idle task. | ||
5759 | */ | ||
5760 | if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) { | ||
5761 | regs = ia64_task_regs(task); | ||
5762 | ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0; | ||
5763 | return; | ||
5764 | } | ||
5765 | /* | ||
5766 | * if monitoring has started | ||
5767 | */ | ||
5768 | if (dcr_pp) { | ||
5769 | dcr = ia64_getreg(_IA64_REG_CR_DCR); | ||
5770 | /* | ||
5771 | * context switching in? | ||
5772 | */ | ||
5773 | if (is_ctxswin) { | ||
5774 | /* mask monitoring for the idle task */ | ||
5775 | ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP); | ||
5776 | pfm_clear_psr_pp(); | ||
5777 | ia64_srlz_i(); | ||
5778 | return; | ||
5779 | } | ||
5780 | /* | ||
5781 | * context switching out | ||
5782 | * restore monitoring for next task | ||
5783 | * | ||
5784 | 		 * Due to inlining, this odd if-then-else construction generates | ||
5785 | * better code. | ||
5786 | */ | ||
5787 | ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP); | ||
5788 | pfm_set_psr_pp(); | ||
5789 | ia64_srlz_i(); | ||
5790 | } | ||
5791 | } | ||
5792 | |||
5793 | #ifdef CONFIG_SMP | ||
5794 | |||
5795 | static void | ||
5796 | pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs) | ||
5797 | { | ||
5798 | struct task_struct *task = ctx->ctx_task; | ||
5799 | |||
5800 | ia64_psr(regs)->up = 0; | ||
5801 | ia64_psr(regs)->sp = 1; | ||
5802 | |||
5803 | if (GET_PMU_OWNER() == task) { | ||
5804 | DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid)); | ||
5805 | SET_PMU_OWNER(NULL, NULL); | ||
5806 | } | ||
5807 | |||
5808 | /* | ||
5809 | * disconnect the task from the context and vice-versa | ||
5810 | */ | ||
5811 | PFM_SET_WORK_PENDING(task, 0); | ||
5812 | |||
5813 | task->thread.pfm_context = NULL; | ||
5814 | task->thread.flags &= ~IA64_THREAD_PM_VALID; | ||
5815 | |||
5816 | DPRINT(("force cleanup for [%d]\n", task->pid)); | ||
5817 | } | ||
5818 | |||
5819 | |||
5820 | /* | ||
5821 | * in 2.6, interrupts are masked when we come here and the runqueue lock is held | ||
5822 | */ | ||
5823 | void | ||
5824 | pfm_save_regs(struct task_struct *task) | ||
5825 | { | ||
5826 | pfm_context_t *ctx; | ||
5827 | struct thread_struct *t; | ||
5828 | unsigned long flags; | ||
5829 | u64 psr; | ||
5830 | |||
5831 | |||
5832 | ctx = PFM_GET_CTX(task); | ||
5833 | if (ctx == NULL) return; | ||
5834 | t = &task->thread; | ||
5835 | |||
5836 | /* | ||
5837 | * we always come here with interrupts ALREADY disabled by | ||
5838 | * the scheduler. So we simply need to protect against concurrent | ||
5839 | * access, not CPU concurrency. | ||
5840 | */ | ||
5841 | flags = pfm_protect_ctx_ctxsw(ctx); | ||
5842 | |||
5843 | if (ctx->ctx_state == PFM_CTX_ZOMBIE) { | ||
5844 | struct pt_regs *regs = ia64_task_regs(task); | ||
5845 | |||
5846 | pfm_clear_psr_up(); | ||
5847 | |||
5848 | pfm_force_cleanup(ctx, regs); | ||
5849 | |||
5850 | BUG_ON(ctx->ctx_smpl_hdr); | ||
5851 | |||
5852 | pfm_unprotect_ctx_ctxsw(ctx, flags); | ||
5853 | |||
5854 | pfm_context_free(ctx); | ||
5855 | return; | ||
5856 | } | ||
5857 | |||
5858 | /* | ||
5859 | * save current PSR: needed because we modify it | ||
5860 | */ | ||
5861 | ia64_srlz_d(); | ||
5862 | psr = pfm_get_psr(); | ||
5863 | |||
5864 | BUG_ON(psr & (IA64_PSR_I)); | ||
5865 | |||
5866 | /* | ||
5867 | * stop monitoring: | ||
5868 | * This is the last instruction which may generate an overflow | ||
5869 | * | ||
5870 | 	 * We do not need to set psr.sp because it is irrelevant in the kernel. | ||
5871 | * It will be restored from ipsr when going back to user level | ||
5872 | */ | ||
5873 | pfm_clear_psr_up(); | ||
5874 | |||
5875 | /* | ||
5876 | * keep a copy of psr.up (for reload) | ||
5877 | */ | ||
5878 | ctx->ctx_saved_psr_up = psr & IA64_PSR_UP; | ||
5879 | |||
5880 | /* | ||
5881 | * release ownership of this PMU. | ||
5882 | * PM interrupts are masked, so nothing | ||
5883 | * can happen. | ||
5884 | */ | ||
5885 | SET_PMU_OWNER(NULL, NULL); | ||
5886 | |||
5887 | /* | ||
5888 | 	 * we systematically save the PMDs as we have no | ||
5889 | 	 * guarantee we will be scheduled on that same | ||
5890 | * CPU again. | ||
5891 | */ | ||
5892 | pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]); | ||
5893 | |||
5894 | /* | ||
5895 | 	 * save pmc0; ia64_srlz_d() was done in pfm_save_pmds(). | ||
5896 | * we will need it on the restore path to check | ||
5897 | * for pending overflow. | ||
5898 | */ | ||
5899 | t->pmcs[0] = ia64_get_pmc(0); | ||
5900 | |||
5901 | /* | ||
5902 | * unfreeze PMU if had pending overflows | ||
5903 | 	 * unfreeze PMU if it had pending overflows | ||
5904 | if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); | ||
5905 | |||
5906 | /* | ||
5907 | * finally, allow context access. | ||
5908 | * interrupts will still be masked after this call. | ||
5909 | */ | ||
5910 | pfm_unprotect_ctx_ctxsw(ctx, flags); | ||
5911 | } | ||
5912 | |||
5913 | #else /* !CONFIG_SMP */ | ||
5914 | void | ||
5915 | pfm_save_regs(struct task_struct *task) | ||
5916 | { | ||
5917 | pfm_context_t *ctx; | ||
5918 | u64 psr; | ||
5919 | |||
5920 | ctx = PFM_GET_CTX(task); | ||
5921 | if (ctx == NULL) return; | ||
5922 | |||
5923 | /* | ||
5924 | * save current PSR: needed because we modify it | ||
5925 | */ | ||
5926 | psr = pfm_get_psr(); | ||
5927 | |||
5928 | BUG_ON(psr & (IA64_PSR_I)); | ||
5929 | |||
5930 | /* | ||
5931 | * stop monitoring: | ||
5932 | * This is the last instruction which may generate an overflow | ||
5933 | * | ||
5934 | 	 * We do not need to set psr.sp because it is irrelevant in the kernel. | ||
5935 | * It will be restored from ipsr when going back to user level | ||
5936 | */ | ||
5937 | pfm_clear_psr_up(); | ||
5938 | |||
5939 | /* | ||
5940 | * keep a copy of psr.up (for reload) | ||
5941 | */ | ||
5942 | ctx->ctx_saved_psr_up = psr & IA64_PSR_UP; | ||
5943 | } | ||
5944 | |||
5945 | static void | ||
5946 | pfm_lazy_save_regs (struct task_struct *task) | ||
5947 | { | ||
5948 | pfm_context_t *ctx; | ||
5949 | struct thread_struct *t; | ||
5950 | unsigned long flags; | ||
5951 | |||
5952 | { u64 psr = pfm_get_psr(); | ||
5953 | BUG_ON(psr & IA64_PSR_UP); | ||
5954 | } | ||
5955 | |||
5956 | ctx = PFM_GET_CTX(task); | ||
5957 | t = &task->thread; | ||
5958 | |||
5959 | /* | ||
5960 | * we need to mask PMU overflow here to | ||
5961 | * make sure that we maintain pmc0 until | ||
5962 | * we save it. overflow interrupts are | ||
5963 | * treated as spurious if there is no | ||
5964 | * owner. | ||
5965 | * | ||
5966 | * XXX: I don't think this is necessary | ||
5967 | */ | ||
5968 | PROTECT_CTX(ctx,flags); | ||
5969 | |||
5970 | /* | ||
5971 | * release ownership of this PMU. | ||
5972 | * must be done before we save the registers. | ||
5973 | * | ||
5974 | * after this call any PMU interrupt is treated | ||
5975 | * as spurious. | ||
5976 | */ | ||
5977 | SET_PMU_OWNER(NULL, NULL); | ||
5978 | |||
5979 | /* | ||
5980 | * save all the pmds we use | ||
5981 | */ | ||
5982 | pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]); | ||
5983 | |||
5984 | /* | ||
5985 | 	 * save pmc0; ia64_srlz_d() was done in pfm_save_pmds(). | ||
5986 | 	 * it is needed to check for pending overflow | ||
5987 | * on the restore path | ||
5988 | */ | ||
5989 | t->pmcs[0] = ia64_get_pmc(0); | ||
5990 | |||
5991 | /* | ||
5992 | * unfreeze PMU if had pending overflows | ||
5993 | 	 * unfreeze PMU if it had pending overflows | ||
5994 | if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); | ||
5995 | |||
5996 | /* | ||
5997 | 	 * now we can unmask PMU interrupts, they will | ||
5998 | * be treated as purely spurious and we will not | ||
5999 | * lose any information | ||
6000 | */ | ||
6001 | UNPROTECT_CTX(ctx,flags); | ||
6002 | } | ||
6003 | #endif /* CONFIG_SMP */ | ||
6004 | |||
6005 | #ifdef CONFIG_SMP | ||
6006 | /* | ||
6007 | * in 2.6, interrupts are masked when we come here and the runqueue lock is held | ||
6008 | */ | ||
6009 | void | ||
6010 | pfm_load_regs (struct task_struct *task) | ||
6011 | { | ||
6012 | pfm_context_t *ctx; | ||
6013 | struct thread_struct *t; | ||
6014 | unsigned long pmc_mask = 0UL, pmd_mask = 0UL; | ||
6015 | unsigned long flags; | ||
6016 | u64 psr, psr_up; | ||
6017 | int need_irq_resend; | ||
6018 | |||
6019 | ctx = PFM_GET_CTX(task); | ||
6020 | if (unlikely(ctx == NULL)) return; | ||
6021 | |||
6022 | BUG_ON(GET_PMU_OWNER()); | ||
6023 | |||
6024 | t = &task->thread; | ||
6025 | /* | ||
6026 | * possible on unload | ||
6027 | */ | ||
6028 | if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) return; | ||
6029 | |||
6030 | /* | ||
6031 | * we always come here with interrupts ALREADY disabled by | ||
6032 | * the scheduler. So we simply need to protect against concurrent | ||
6033 | * access, not CPU concurrency. | ||
6034 | */ | ||
6035 | flags = pfm_protect_ctx_ctxsw(ctx); | ||
6036 | psr = pfm_get_psr(); | ||
6037 | |||
6038 | need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND; | ||
6039 | |||
6040 | BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); | ||
6041 | BUG_ON(psr & IA64_PSR_I); | ||
6042 | |||
6043 | if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) { | ||
6044 | struct pt_regs *regs = ia64_task_regs(task); | ||
6045 | |||
6046 | BUG_ON(ctx->ctx_smpl_hdr); | ||
6047 | |||
6048 | pfm_force_cleanup(ctx, regs); | ||
6049 | |||
6050 | pfm_unprotect_ctx_ctxsw(ctx, flags); | ||
6051 | |||
6052 | /* | ||
6053 | * this one (kmalloc'ed) is fine with interrupts disabled | ||
6054 | */ | ||
6055 | pfm_context_free(ctx); | ||
6056 | |||
6057 | return; | ||
6058 | } | ||
6059 | |||
6060 | /* | ||
6061 | * we restore ALL the debug registers to avoid picking up | ||
6062 | * stale state. | ||
6063 | */ | ||
6064 | if (ctx->ctx_fl_using_dbreg) { | ||
6065 | pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); | ||
6066 | pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); | ||
6067 | } | ||
6068 | /* | ||
6069 | * retrieve saved psr.up | ||
6070 | */ | ||
6071 | psr_up = ctx->ctx_saved_psr_up; | ||
6072 | |||
6073 | /* | ||
6074 | * if we were the last user of the PMU on that CPU, | ||
6075 | * then nothing to do except restore psr | ||
6076 | */ | ||
6077 | if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) { | ||
6078 | |||
6079 | /* | ||
6080 | * retrieve partial reload masks (due to user modifications) | ||
6081 | */ | ||
6082 | pmc_mask = ctx->ctx_reload_pmcs[0]; | ||
6083 | pmd_mask = ctx->ctx_reload_pmds[0]; | ||
6084 | |||
6085 | } else { | ||
6086 | /* | ||
6087 | * To avoid leaking information to the user level when psr.sp=0, | ||
6088 | * we must reload ALL implemented pmds (even the ones we don't use). | ||
6089 | * In the kernel we only allow PFM_READ_PMDS on registers which | ||
6090 | * we initialized or requested (sampling) so there is no risk there. | ||
6091 | */ | ||
6092 | pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0]; | ||
6093 | |||
6094 | /* | ||
6095 | * ALL accessible PMCs are systematically reloaded, unused registers | ||
6096 | * get their default (from pfm_reset_pmu_state()) values to avoid picking | ||
6097 | * up stale configuration. | ||
6098 | * | ||
6099 | * PMC0 is never in the mask. It is always restored separately. | ||
6100 | */ | ||
6101 | pmc_mask = ctx->ctx_all_pmcs[0]; | ||
6102 | } | ||
6103 | /* | ||
6104 | * when context is MASKED, we will restore PMC with plm=0 | ||
6105 | * and PMD with stale information, but that's ok, nothing | ||
6106 | * will be captured. | ||
6107 | * | ||
6108 | * XXX: optimize here | ||
6109 | */ | ||
6110 | if (pmd_mask) pfm_restore_pmds(t->pmds, pmd_mask); | ||
6111 | if (pmc_mask) pfm_restore_pmcs(t->pmcs, pmc_mask); | ||
6112 | |||
6113 | /* | ||
6114 | * check for pending overflow at the time the state | ||
6115 | * was saved. | ||
6116 | */ | ||
6117 | if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) { | ||
6118 | /* | ||
6119 | * reload pmc0 with the overflow information | ||
6120 | * On McKinley PMU, this will trigger a PMU interrupt | ||
6121 | */ | ||
6122 | ia64_set_pmc(0, t->pmcs[0]); | ||
6123 | ia64_srlz_d(); | ||
6124 | t->pmcs[0] = 0UL; | ||
6125 | |||
6126 | /* | ||
6127 | * will replay the PMU interrupt | ||
6128 | */ | ||
6129 | if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR); | ||
6130 | |||
6131 | pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++; | ||
6132 | } | ||
6133 | |||
6134 | /* | ||
6135 | * we just did a reload, so we reset the partial reload fields | ||
6136 | */ | ||
6137 | ctx->ctx_reload_pmcs[0] = 0UL; | ||
6138 | ctx->ctx_reload_pmds[0] = 0UL; | ||
6139 | |||
6140 | SET_LAST_CPU(ctx, smp_processor_id()); | ||
6141 | |||
6142 | /* | ||
6143 | 	 * bump the activation value for this PMU | ||
6144 | */ | ||
6145 | INC_ACTIVATION(); | ||
6146 | /* | ||
6147 | * record current activation for this context | ||
6148 | */ | ||
6149 | SET_ACTIVATION(ctx); | ||
6150 | |||
6151 | /* | ||
6152 | * establish new ownership. | ||
6153 | */ | ||
6154 | SET_PMU_OWNER(task, ctx); | ||
6155 | |||
6156 | /* | ||
6157 | * restore the psr.up bit. measurement | ||
6158 | * is active again. | ||
6159 | * no PMU interrupt can happen at this point | ||
6160 | * because we still have interrupts disabled. | ||
6161 | */ | ||
6162 | if (likely(psr_up)) pfm_set_psr_up(); | ||
6163 | |||
6164 | /* | ||
6165 | * allow concurrent access to context | ||
6166 | */ | ||
6167 | pfm_unprotect_ctx_ctxsw(ctx, flags); | ||
6168 | } | ||
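
/*
 * Sketch of the activation shortcut used above (names as in this file):
 * every load bumps a per-CPU activation counter (INC_ACTIVATION()) and tags
 * the context with it (SET_ACTIVATION()). On the next switch-in, if the
 * context was last active on this CPU and the tag still matches, i.e.
 *
 *	GET_LAST_CPU(ctx) == smp_processor_id() &&
 *	ctx->ctx_last_activation == GET_ACTIVATION()
 *
 * then no other context has touched the PMU in between and only the
 * registers modified from user level since the last save
 * (ctx_reload_pmcs[0]/ctx_reload_pmds[0]) need to be rewritten; otherwise
 * the full ctx_all_pmcs[0]/ctx_all_pmds[0] sets are reloaded to avoid
 * picking up stale state.
 */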
6169 | #else /* !CONFIG_SMP */ | ||
6170 | /* | ||
6171 | * reload PMU state for UP kernels | ||
6172 | * in 2.5 we come here with interrupts disabled | ||
6173 | */ | ||
6174 | void | ||
6175 | pfm_load_regs (struct task_struct *task) | ||
6176 | { | ||
6177 | struct thread_struct *t; | ||
6178 | pfm_context_t *ctx; | ||
6179 | struct task_struct *owner; | ||
6180 | unsigned long pmd_mask, pmc_mask; | ||
6181 | u64 psr, psr_up; | ||
6182 | int need_irq_resend; | ||
6183 | |||
6184 | owner = GET_PMU_OWNER(); | ||
6185 | ctx = PFM_GET_CTX(task); | ||
6186 | t = &task->thread; | ||
6187 | psr = pfm_get_psr(); | ||
6188 | |||
6189 | BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP)); | ||
6190 | BUG_ON(psr & IA64_PSR_I); | ||
6191 | |||
6192 | /* | ||
6193 | * we restore ALL the debug registers to avoid picking up | ||
6194 | * stale state. | ||
6195 | * | ||
6196 | * This must be done even when the task is still the owner | ||
6197 | * as the registers may have been modified via ptrace() | ||
6198 | * (not perfmon) by the previous task. | ||
6199 | */ | ||
6200 | if (ctx->ctx_fl_using_dbreg) { | ||
6201 | pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); | ||
6202 | pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); | ||
6203 | } | ||
6204 | |||
6205 | /* | ||
6206 | 	 * retrieve saved psr.up | ||
6207 | */ | ||
6208 | psr_up = ctx->ctx_saved_psr_up; | ||
6209 | need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND; | ||
6210 | |||
6211 | /* | ||
6212 | * short path, our state is still there, just | ||
6213 | * need to restore psr and we go | ||
6214 | * | ||
6215 | * we do not touch either PMC nor PMD. the psr is not touched | ||
6216 | 	 * by the overflow_handler. So we are safe w.r.t. interrupt | ||
6217 | * concurrency even without interrupt masking. | ||
6218 | */ | ||
6219 | if (likely(owner == task)) { | ||
6220 | if (likely(psr_up)) pfm_set_psr_up(); | ||
6221 | return; | ||
6222 | } | ||
6223 | |||
6224 | /* | ||
6225 | * someone else is still using the PMU, first push it out and | ||
6226 | 	 * then we'll be able to install our stuff! | ||
6227 | * | ||
6228 | * Upon return, there will be no owner for the current PMU | ||
6229 | */ | ||
6230 | if (owner) pfm_lazy_save_regs(owner); | ||
6231 | |||
6232 | /* | ||
6233 | * To avoid leaking information to the user level when psr.sp=0, | ||
6234 | * we must reload ALL implemented pmds (even the ones we don't use). | ||
6235 | * In the kernel we only allow PFM_READ_PMDS on registers which | ||
6236 | * we initialized or requested (sampling) so there is no risk there. | ||
6237 | */ | ||
6238 | pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0]; | ||
6239 | |||
6240 | /* | ||
6241 | * ALL accessible PMCs are systematically reloaded, unused registers | ||
6242 | * get their default (from pfm_reset_pmu_state()) values to avoid picking | ||
6243 | * up stale configuration. | ||
6244 | * | ||
6245 | * PMC0 is never in the mask. It is always restored separately | ||
6246 | */ | ||
6247 | pmc_mask = ctx->ctx_all_pmcs[0]; | ||
6248 | |||
6249 | pfm_restore_pmds(t->pmds, pmd_mask); | ||
6250 | pfm_restore_pmcs(t->pmcs, pmc_mask); | ||
6251 | |||
6252 | /* | ||
6253 | * check for pending overflow at the time the state | ||
6254 | * was saved. | ||
6255 | */ | ||
6256 | if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) { | ||
6257 | /* | ||
6258 | * reload pmc0 with the overflow information | ||
6259 | * On McKinley PMU, this will trigger a PMU interrupt | ||
6260 | */ | ||
6261 | ia64_set_pmc(0, t->pmcs[0]); | ||
6262 | ia64_srlz_d(); | ||
6263 | |||
6264 | t->pmcs[0] = 0UL; | ||
6265 | |||
6266 | /* | ||
6267 | * will replay the PMU interrupt | ||
6268 | */ | ||
6269 | if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR); | ||
6270 | |||
6271 | pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++; | ||
6272 | } | ||
6273 | |||
6274 | /* | ||
6275 | * establish new ownership. | ||
6276 | */ | ||
6277 | SET_PMU_OWNER(task, ctx); | ||
6278 | |||
6279 | /* | ||
6280 | * restore the psr.up bit. measurement | ||
6281 | * is active again. | ||
6282 | * no PMU interrupt can happen at this point | ||
6283 | * because we still have interrupts disabled. | ||
6284 | */ | ||
6285 | if (likely(psr_up)) pfm_set_psr_up(); | ||
6286 | } | ||
6287 | #endif /* CONFIG_SMP */ | ||
6288 | |||
6289 | /* | ||
6290 | * this function assumes monitoring is stopped | ||
6291 | */ | ||
6292 | static void | ||
6293 | pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) | ||
6294 | { | ||
6295 | u64 pmc0; | ||
6296 | unsigned long mask2, val, pmd_val, ovfl_val; | ||
6297 | int i, can_access_pmu = 0; | ||
6298 | int is_self; | ||
6299 | |||
6300 | /* | ||
6301 | * is the caller the task being monitored (or which initiated the | ||
6302 | * session for system wide measurements) | ||
6303 | */ | ||
6304 | is_self = ctx->ctx_task == task ? 1 : 0; | ||
6305 | |||
6306 | /* | ||
6307 | 	 * can access PMU if task is the owner of the PMU state on the current CPU | ||
6308 | * or if we are running on the CPU bound to the context in system-wide mode | ||
6309 | * (that is not necessarily the task the context is attached to in this mode). | ||
6310 | * In system-wide we always have can_access_pmu true because a task running on an | ||
6311 | * invalid processor is flagged earlier in the call stack (see pfm_stop). | ||
6312 | */ | ||
6313 | can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id()); | ||
6314 | if (can_access_pmu) { | ||
6315 | /* | ||
6316 | * Mark the PMU as not owned | ||
6317 | * This will cause the interrupt handler to do nothing in case an overflow | ||
6318 | * interrupt was in-flight | ||
6319 | * This also guarantees that pmc0 will contain the final state | ||
6320 | * It virtually gives us full control on overflow processing from that point | ||
6321 | * on. | ||
6322 | */ | ||
6323 | SET_PMU_OWNER(NULL, NULL); | ||
6324 | DPRINT(("releasing ownership\n")); | ||
6325 | |||
6326 | /* | ||
6327 | * read current overflow status: | ||
6328 | * | ||
6329 | * we are guaranteed to read the final stable state | ||
6330 | */ | ||
6331 | ia64_srlz_d(); | ||
6332 | pmc0 = ia64_get_pmc(0); /* slow */ | ||
6333 | |||
6334 | /* | ||
6335 | * reset freeze bit, overflow status information destroyed | ||
6336 | */ | ||
6337 | pfm_unfreeze_pmu(); | ||
6338 | } else { | ||
6339 | pmc0 = task->thread.pmcs[0]; | ||
6340 | /* | ||
6341 | * clear whatever overflow status bits there were | ||
6342 | */ | ||
6343 | task->thread.pmcs[0] = 0; | ||
6344 | } | ||
6345 | ovfl_val = pmu_conf->ovfl_val; | ||
6346 | /* | ||
6347 | * we save all the used pmds | ||
6348 | * we take care of overflows for counting PMDs | ||
6349 | * | ||
6350 | * XXX: sampling situation is not taken into account here | ||
6351 | */ | ||
6352 | mask2 = ctx->ctx_used_pmds[0]; | ||
6353 | |||
6354 | DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2)); | ||
6355 | |||
6356 | for (i = 0; mask2; i++, mask2>>=1) { | ||
6357 | |||
6358 | /* skip non used pmds */ | ||
6359 | if ((mask2 & 0x1) == 0) continue; | ||
6360 | |||
6361 | /* | ||
6362 | 		 * can_access_pmu is always true in system wide mode | ||
6363 | */ | ||
6364 | val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : task->thread.pmds[i]; | ||
6365 | |||
6366 | if (PMD_IS_COUNTING(i)) { | ||
6367 | DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n", | ||
6368 | task->pid, | ||
6369 | i, | ||
6370 | ctx->ctx_pmds[i].val, | ||
6371 | val & ovfl_val)); | ||
6372 | |||
6373 | /* | ||
6374 | * we rebuild the full 64 bit value of the counter | ||
6375 | */ | ||
6376 | val = ctx->ctx_pmds[i].val + (val & ovfl_val); | ||
6377 | |||
6378 | /* | ||
6379 | * now everything is in ctx_pmds[] and we need | ||
6380 | * to clear the saved context from save_regs() such that | ||
6381 | * pfm_read_pmds() gets the correct value | ||
6382 | */ | ||
6383 | pmd_val = 0UL; | ||
6384 | |||
6385 | /* | ||
6386 | * take care of overflow inline | ||
6387 | */ | ||
6388 | if (pmc0 & (1UL << i)) { | ||
6389 | val += 1 + ovfl_val; | ||
6390 | DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i)); | ||
6391 | } | ||
6392 | } | ||
6393 | |||
6394 | DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val)); | ||
6395 | |||
6396 | if (is_self) task->thread.pmds[i] = pmd_val; | ||
6397 | |||
6398 | ctx->ctx_pmds[i].val = val; | ||
6399 | } | ||
6400 | } | ||
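/*
 * Editor's sketch (not part of perfmon): the counter arithmetic used in
 * pfm_flush_pmds() above, pulled out into a standalone helper for clarity.
 * The helper name is hypothetical.  ovfl_val is assumed to be a mask of the
 * implemented counter bits, e.g. (1UL << 47) - 1 for a 47-bit hardware
 * counter.
 */
static inline unsigned long
pfm_sketch_rebuild_counter(unsigned long soft_val,	/* ctx_pmds[i].val */
			   unsigned long hw_pmd,	/* value read from PMD[i] */
			   unsigned long ovfl_val,	/* pmu_conf->ovfl_val */
			   int overflowed)		/* bit i was set in pmc0 */
{
	/* software-maintained high bits + implemented hardware low bits */
	unsigned long val = soft_val + (hw_pmd & ovfl_val);

	/* a pending overflow adds one full counter wrap, i.e. 2^width */
	if (overflowed)
		val += 1 + ovfl_val;

	return val;
}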
6401 | |||
6402 | static struct irqaction perfmon_irqaction = { | ||
6403 | .handler = pfm_interrupt_handler, | ||
6404 | .flags = SA_INTERRUPT, | ||
6405 | .name = "perfmon" | ||
6406 | }; | ||
6407 | |||
6408 | /* | ||
6409 | * perfmon initialization routine, called from the initcall() table | ||
6410 | */ | ||
6411 | static int init_pfm_fs(void); | ||
6412 | |||
6413 | static int __init | ||
6414 | pfm_probe_pmu(void) | ||
6415 | { | ||
6416 | pmu_config_t **p; | ||
6417 | int family; | ||
6418 | |||
6419 | family = local_cpu_data->family; | ||
6420 | p = pmu_confs; | ||
6421 | |||
6422 | while(*p) { | ||
6423 | if ((*p)->probe) { | ||
6424 | if ((*p)->probe() == 0) goto found; | ||
6425 | } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) { | ||
6426 | goto found; | ||
6427 | } | ||
6428 | p++; | ||
6429 | } | ||
6430 | return -1; | ||
6431 | found: | ||
6432 | pmu_conf = *p; | ||
6433 | return 0; | ||
6434 | } | ||
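/*
 * Editor's note, for illustration only: how pfm_probe_pmu() matches an
 * entry of pmu_confs[].  The initializer below is hypothetical and shows
 * only a few of the fields referenced in this file; a pmu_family of 0xff
 * acts as a catch-all when no family-specific description matches.
 *
 *	static pmu_config_t pmu_conf_example = {
 *		.pmu_name	= "Example PMU",
 *		.pmu_family	= 0x20,	// compared against local_cpu_data->family
 *		.probe		= NULL,	// no probe routine: match on family alone
 *	};
 */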
6435 | |||
6436 | static struct file_operations pfm_proc_fops = { | ||
6437 | .open = pfm_proc_open, | ||
6438 | .read = seq_read, | ||
6439 | .llseek = seq_lseek, | ||
6440 | .release = seq_release, | ||
6441 | }; | ||
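/*
 * Editor's note: /proc/perfmon is exposed through the seq_file interface.
 * pfm_proc_open (defined earlier in this file) attaches the iterator, and
 * the generic seq_read/seq_lseek/seq_release helpers handle the rest, so
 * this file_operations table contains no perfmon-specific read path.
 */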
6442 | |||
6443 | int __init | ||
6444 | pfm_init(void) | ||
6445 | { | ||
6446 | unsigned int n, n_counters, i; | ||
6447 | |||
6448 | printk("perfmon: version %u.%u IRQ %u\n", | ||
6449 | PFM_VERSION_MAJ, | ||
6450 | PFM_VERSION_MIN, | ||
6451 | IA64_PERFMON_VECTOR); | ||
6452 | |||
6453 | if (pfm_probe_pmu()) { | ||
6454 | printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n", | ||
6455 | local_cpu_data->family); | ||
6456 | return -ENODEV; | ||
6457 | } | ||
6458 | |||
6459 | /* | ||
6460 | * compute the number of implemented PMD/PMC from the | ||
6461 | * description tables | ||
6462 | */ | ||
6463 | n = 0; | ||
6464 | for (i=0; PMC_IS_LAST(i) == 0; i++) { | ||
6465 | if (PMC_IS_IMPL(i) == 0) continue; | ||
6466 | pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63); | ||
6467 | n++; | ||
6468 | } | ||
6469 | pmu_conf->num_pmcs = n; | ||
6470 | |||
6471 | n = 0; n_counters = 0; | ||
6472 | for (i=0; PMD_IS_LAST(i) == 0; i++) { | ||
6473 | if (PMD_IS_IMPL(i) == 0) continue; | ||
6474 | pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63); | ||
6475 | n++; | ||
6476 | if (PMD_IS_COUNTING(i)) n_counters++; | ||
6477 | } | ||
6478 | pmu_conf->num_pmds = n; | ||
6479 | pmu_conf->num_counters = n_counters; | ||
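/*
 * Editor's note: the impl_pmcs[]/impl_pmds[] updates above build bitmaps
 * spread across 64-bit words: register i is recorded in word i>>6 at bit
 * i&63.  For example, a (hypothetical) implemented PMD 70 would set
 * impl_pmds[1] |= 1UL << 6.
 */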
6480 | |||
6481 | /* | ||
6482 | * sanity checks on the number of debug registers | ||
6483 | */ | ||
6484 | if (pmu_conf->use_rr_dbregs) { | ||
6485 | if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) { | ||
6486 | printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs); | ||
6487 | pmu_conf = NULL; | ||
6488 | return -1; | ||
6489 | } | ||
6490 | if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) { | ||
6491 | printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_ibrs); | ||
6492 | pmu_conf = NULL; | ||
6493 | return -1; | ||
6494 | } | ||
6495 | } | ||
6496 | |||
6497 | printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n", | ||
6498 | pmu_conf->pmu_name, | ||
6499 | pmu_conf->num_pmcs, | ||
6500 | pmu_conf->num_pmds, | ||
6501 | pmu_conf->num_counters, | ||
6502 | ffz(pmu_conf->ovfl_val)); | ||
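/*
 * Editor's note: ffz() returns the index of the first zero bit, so when
 * ovfl_val is a contiguous low mask such as (1UL << 47) - 1, the value
 * printed above is the counter width in bits (47 in that example).
 */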
6503 | |||
6504 | /* sanity check */ | ||
6505 | if (pmu_conf->num_pmds >= IA64_NUM_PMD_REGS || pmu_conf->num_pmcs >= IA64_NUM_PMC_REGS) { | ||
6506 | printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n"); | ||
6507 | pmu_conf = NULL; | ||
6508 | return -1; | ||
6509 | } | ||
6510 | |||
6511 | /* | ||
6512 | * create /proc/perfmon (mostly for debugging purposes) | ||
6513 | */ | ||
6514 | perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL); | ||
6515 | if (perfmon_dir == NULL) { | ||
6516 | printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n"); | ||
6517 | pmu_conf = NULL; | ||
6518 | return -1; | ||
6519 | } | ||
6520 | /* | ||
6521 | * install customized file operations for /proc/perfmon entry | ||
6522 | */ | ||
6523 | perfmon_dir->proc_fops = &pfm_proc_fops; | ||
6524 | |||
6525 | /* | ||
6526 | * create /proc/sys/kernel/perfmon (for debugging purposes) | ||
6527 | */ | ||
6528 | pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0); | ||
6529 | |||
6530 | /* | ||
6531 | * initialize all our spinlocks | ||
6532 | */ | ||
6533 | spin_lock_init(&pfm_sessions.pfs_lock); | ||
6534 | spin_lock_init(&pfm_buffer_fmt_lock); | ||
6535 | |||
6536 | init_pfm_fs(); | ||
6537 | |||
6538 | for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL; | ||
6539 | |||
6540 | return 0; | ||
6541 | } | ||
6542 | |||
6543 | __initcall(pfm_init); | ||
6544 | |||
6545 | /* | ||
6546 | * this function is called before pfm_init() | ||
6547 | */ | ||
6548 | void | ||
6549 | pfm_init_percpu (void) | ||
6550 | { | ||
6551 | /* | ||
6552 | * make sure no measurement is active | ||
6553 | * (may inherit programmed PMCs from EFI). | ||
6554 | */ | ||
6555 | pfm_clear_psr_pp(); | ||
6556 | pfm_clear_psr_up(); | ||
6557 | |||
6558 | /* | ||
6559 | * we run with the PMU not frozen at all times | ||
6560 | */ | ||
6561 | pfm_unfreeze_pmu(); | ||
6562 | |||
6563 | if (smp_processor_id() == 0) | ||
6564 | register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction); | ||
6565 | |||
6566 | ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR); | ||
6567 | ia64_srlz_d(); | ||
6568 | } | ||
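/*
 * Editor's note: pfm_init_percpu() runs on every CPU during early boot,
 * before pfm_init() (see the comment above).  Each CPU points CR.PMV at
 * IA64_PERFMON_VECTOR so that PMU overflow interrupts reach
 * pfm_interrupt_handler(), while the irqaction itself is registered only
 * once, from the boot CPU.
 */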
6569 | |||
6570 | /* | ||
6571 | * used for debug purposes only | ||
6572 | */ | ||
6573 | void | ||
6574 | dump_pmu_state(const char *from) | ||
6575 | { | ||
6576 | struct task_struct *task; | ||
6577 | struct thread_struct *t; | ||
6578 | struct pt_regs *regs; | ||
6579 | pfm_context_t *ctx; | ||
6580 | unsigned long psr, dcr, info, flags; | ||
6581 | int i, this_cpu; | ||
6582 | |||
6583 | local_irq_save(flags); | ||
6584 | |||
6585 | this_cpu = smp_processor_id(); | ||
6586 | regs = ia64_task_regs(current); | ||
6587 | info = PFM_CPUINFO_GET(); | ||
6588 | dcr = ia64_getreg(_IA64_REG_CR_DCR); | ||
6589 | |||
6590 | if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) { | ||
6591 | local_irq_restore(flags); | ||
6592 | return; | ||
6593 | } | ||
6594 | |||
6595 | printk("CPU%d from %s() current [%d] iip=0x%lx %s\n", | ||
6596 | this_cpu, | ||
6597 | from, | ||
6598 | current->pid, | ||
6599 | regs->cr_iip, | ||
6600 | current->comm); | ||
6601 | |||
6602 | task = GET_PMU_OWNER(); | ||
6603 | ctx = GET_PMU_CTX(); | ||
6604 | |||
6605 | printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx); | ||
6606 | |||
6607 | psr = pfm_get_psr(); | ||
6608 | |||
6609 | printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n", | ||
6610 | this_cpu, | ||
6611 | ia64_get_pmc(0), | ||
6612 | psr & IA64_PSR_PP ? 1 : 0, | ||
6613 | psr & IA64_PSR_UP ? 1 : 0, | ||
6614 | dcr & IA64_DCR_PP ? 1 : 0, | ||
6615 | info, | ||
6616 | ia64_psr(regs)->up, | ||
6617 | ia64_psr(regs)->pp); | ||
6618 | |||
6619 | ia64_psr(regs)->up = 0; | ||
6620 | ia64_psr(regs)->pp = 0; | ||
6621 | |||
6622 | t = ¤t->thread; | ||
6623 | |||
6624 | for (i=1; PMC_IS_LAST(i) == 0; i++) { | ||
6625 | if (PMC_IS_IMPL(i) == 0) continue; | ||
6626 | printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]); | ||
6627 | } | ||
6628 | |||
6629 | for (i=1; PMD_IS_LAST(i) == 0; i++) { | ||
6630 | if (PMD_IS_IMPL(i) == 0) continue; | ||
6631 | printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]); | ||
6632 | } | ||
6633 | |||
6634 | if (ctx) { | ||
6635 | printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n", | ||
6636 | this_cpu, | ||
6637 | ctx->ctx_state, | ||
6638 | ctx->ctx_smpl_vaddr, | ||
6639 | ctx->ctx_smpl_hdr, | ||
6640 | ctx->ctx_msgq_head, | ||
6641 | ctx->ctx_msgq_tail, | ||
6642 | ctx->ctx_saved_psr_up); | ||
6643 | } | ||
6644 | local_irq_restore(flags); | ||
6645 | } | ||
6646 | |||
6647 | /* | ||
6648 | * called from process.c:copy_thread(). task is the new child. | ||
6649 | */ | ||
6650 | void | ||
6651 | pfm_inherit(struct task_struct *task, struct pt_regs *regs) | ||
6652 | { | ||
6653 | struct thread_struct *thread; | ||
6654 | |||
6655 | DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid)); | ||
6656 | |||
6657 | thread = &task->thread; | ||
6658 | |||
6659 | /* | ||
6660 | * cut links inherited from parent (current) | ||
6661 | */ | ||
6662 | thread->pfm_context = NULL; | ||
6663 | |||
6664 | PFM_SET_WORK_PENDING(task, 0); | ||
6665 | |||
6666 | /* | ||
6667 | * the psr bits are already set properly in copy_thread() | ||
6668 | */ | ||
6669 | } | ||
6670 | #else /* !CONFIG_PERFMON */ | ||
6671 | asmlinkage long | ||
6672 | sys_perfmonctl (int fd, int cmd, void *arg, int count) | ||
6673 | { | ||
6674 | return -ENOSYS; | ||
6675 | } | ||
6676 | #endif /* CONFIG_PERFMON */ | ||