author		Linus Torvalds <torvalds@linux-foundation.org>	2009-06-20 14:29:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-20 14:29:32 -0400
commit		12e24f34cb0d55efd08c18b2112507d4bf498008 (patch)
tree		83b07be17b8ef45f42360a3b9159b3aaae3fbad4 /include
parent		1eb51c33b21ffa3fceb634d1d6bcd6488c79bc26 (diff)
parent		eadc84cc01e04f9f74ec2de0c9355be035c7b396 (diff)
Merge branch 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (49 commits)
  perfcounter: Handle some IO return values
  perf_counter: Push perf_sample_data through the swcounter code
  perf_counter tools: Define and use our own u64, s64 etc. definitions
  perf_counter: Close race in perf_lock_task_context()
  perf_counter, x86: Improve interactions with fast-gup
  perf_counter: Simplify and fix task migration counting
  perf_counter tools: Add a data file header
  perf_counter: Update userspace callchain sampling uses
  perf_counter: Make callchain samples extensible
  perf report: Filter to parent set by default
  perf_counter tools: Handle lost events
  perf_counter: Add event overlow handling
  fs: Provide empty .set_page_dirty() aop for anon inodes
  perf_counter: tools: Makefile tweaks for 64-bit powerpc
  perf_counter: powerpc: Add processor back-end for MPC7450 family
  perf_counter: powerpc: Make powerpc perf_counter code safe for 32-bit kernels
  perf_counter: powerpc: Change how processor-specific back-ends get selected
  perf_counter: powerpc: Use unsigned long for register and constraint values
  perf_counter: powerpc: Enable use of software counters on 32-bit powerpc
  perf_counter tools: Add and use isprint()
  ...
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/kmap_types.h	 5
-rw-r--r--	include/linux/mm.h			 6
-rw-r--r--	include/linux/perf_counter.h		79
3 files changed, 59 insertions(+), 31 deletions(-)
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 54e8b3d956b7..eddbce0f9fb9 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -24,7 +24,10 @@ D(12) KM_SOFTIRQ1,
 D(13)	KM_SYNC_ICACHE,
 D(14)	KM_SYNC_DCACHE,
 D(15)	KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */
-D(16)	KM_TYPE_NR
+D(16)	KM_IRQ_PTE,
+D(17)	KM_NMI,
+D(18)	KM_NMI_PTE,
+D(19)	KM_TYPE_NR
 };
 
 #undef D
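
The three new slots come from the NMI-safe fast-gup work listed in the merge above. A hedged sketch of how any kmap type slot is used with the two-argument kmap_atomic() form of this kernel generation; the page pointer and the surrounding function are hypothetical, not part of this patch.

	#include <linux/highmem.h>

	/* Sketch only: map a page through one of the new per-context slots.
	 * "page" is a hypothetical struct page * owned by the caller. */
	static void touch_page_in_nmi(struct page *page)
	{
		void *vaddr = kmap_atomic(page, KM_NMI);

		/* ... read or write the mapping ... */

		kunmap_atomic(vaddr, KM_NMI);
	}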
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d88d6fc530ad..cf260d848eb9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -854,6 +854,12 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 			  unsigned long end, unsigned long newflags);
 
 /*
+ * doesn't attempt to fault and will return short.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			  struct page **pages);
+
+/*
  * A callback you can register to apply pressure to ageable caches.
  *
  * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
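
As the new comment notes, this variant never faults and may pin fewer pages than requested. A hedged in-kernel usage sketch; the wrapper function, its name, and the caller-supplied address are illustrative only, the prototype is the one added above.

	#include <linux/mm.h>

	/* Sketch only: try to pin one user page without sleeping or faulting. */
	static int peek_user_page(unsigned long addr, struct page **pagep)
	{
		if (__get_user_pages_fast(addr & PAGE_MASK, 1, 0, pagep) != 1)
			return -EFAULT;	/* not present; a faulting path would be needed */

		return 0;		/* caller must put_page(*pagep) when done */
	}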
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 1b3118a1023a..89698d8aba5c 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -236,10 +236,16 @@ struct perf_counter_mmap_page {
 	/*
 	 * Control data for the mmap() data buffer.
 	 *
-	 * User-space reading this value should issue an rmb(), on SMP capable
-	 * platforms, after reading this value -- see perf_counter_wakeup().
+	 * User-space reading the @data_head value should issue an rmb(), on
+	 * SMP capable platforms, after reading this value -- see
+	 * perf_counter_wakeup().
+	 *
+	 * When the mapping is PROT_WRITE the @data_tail value should be
+	 * written by userspace to reflect the last read data. In this case
+	 * the kernel will not over-write unread data.
 	 */
 	__u64	data_head;		/* head in the data section */
+	__u64	data_tail;		/* user-space written tail */
 };
 
 #define PERF_EVENT_MISC_CPUMODE_MASK	(3 << 0)
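
A minimal user-space sketch of the protocol described in the comment above. Everything except the data_head/data_tail fields is an assumption: "base" is the start of the mmap()ed region, page_size the size of its first (metadata) page, rmb() stands for whatever read barrier the architecture needs, and consume_records() is a hypothetical helper.

	#include <linux/perf_counter.h>

	static void drain_ring_buffer(void *base, unsigned long page_size)
	{
		struct perf_counter_mmap_page *pc = base;	/* first mmap()ed page */
		__u64 head, tail;

		head = pc->data_head;
		rmb();			/* order the head load before reading records */

		tail = pc->data_tail;
		consume_records((char *)base + page_size, tail, head);	/* hypothetical */

		pc->data_tail = head;	/* PROT_WRITE mapping: report what was consumed */
	}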
@@ -275,6 +281,15 @@ enum perf_event_type {
 
 	/*
 	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u64				id;
+	 *	u64				lost;
+	 * };
+	 */
+	PERF_EVENT_LOST			= 2,
+
+	/*
+	 * struct {
 	 *	struct perf_event_header	header;
 	 *
 	 *	u32				pid, tid;
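
The hunk above adds the PERF_EVENT_LOST record type with the layout shown in its comment. A purely illustrative reader-side check; the struct name, the running total, and the handler are hypothetical, only the field layout and the record type come from this header.

	#include <linux/perf_counter.h>

	struct lost_event {
		struct perf_event_header header;	/* header.type == PERF_EVENT_LOST */
		__u64 id;				/* which counter dropped records */
		__u64 lost;				/* how many records were dropped */
	};

	static __u64 total_lost;			/* hypothetical running total */

	static void handle_event(struct perf_event_header *hdr)
	{
		if (hdr->type == PERF_EVENT_LOST)
			total_lost += ((struct lost_event *)hdr)->lost;
	}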
@@ -313,30 +328,39 @@ enum perf_event_type {
 
 	/*
 	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
-	 * will be PERF_RECORD_*
+	 * will be PERF_SAMPLE_*
 	 *
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *
-	 *	{ u64			ip;	  } && PERF_RECORD_IP
-	 *	{ u32			pid, tid; } && PERF_RECORD_TID
-	 *	{ u64			time;     } && PERF_RECORD_TIME
-	 *	{ u64			addr;     } && PERF_RECORD_ADDR
-	 *	{ u64			config;   } && PERF_RECORD_CONFIG
-	 *	{ u32			cpu, res; } && PERF_RECORD_CPU
+	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
+	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
+	 *	{ u64			time;     } && PERF_SAMPLE_TIME
+	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
+	 *	{ u64			config;   } && PERF_SAMPLE_CONFIG
+	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
 	 *
 	 *	{ u64			nr;
-	 *	  { u64 id, val; }	cnt[nr];  } && PERF_RECORD_GROUP
+	 *	  { u64 id, val; }	cnt[nr];  } && PERF_SAMPLE_GROUP
 	 *
-	 *	{ u16			nr,
-	 *				hv,
-	 *				kernel,
-	 *				user;
-	 *	  u64			ips[nr];  } && PERF_RECORD_CALLCHAIN
+	 *	{ u64			nr,
+	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
 	 * };
 	 */
 };
 
+enum perf_callchain_context {
+	PERF_CONTEXT_HV			= (__u64)-32,
+	PERF_CONTEXT_KERNEL		= (__u64)-128,
+	PERF_CONTEXT_USER		= (__u64)-512,
+
+	PERF_CONTEXT_GUEST		= (__u64)-2048,
+	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
+	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,
+
+	PERF_CONTEXT_MAX		= (__u64)-4095,
+};
+
 #ifdef __KERNEL__
 /*
  * Kernel-internal data types and definitions:
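
With the old per-sample u16 hv/kernel/user counts gone, a consumer now has to split the ip[] stream on the PERF_CONTEXT_* sentinels added above. The enum values come from this header; the walker itself is only a hedged sketch of one way a reader might do it.

	#include <stdio.h>
	#include <linux/perf_counter.h>

	/* Sketch: split a PERF_SAMPLE_CALLCHAIN ip stream on the sentinel values. */
	static void walk_callchain(const __u64 *ips, __u64 nr)
	{
		__u64 context = PERF_CONTEXT_MAX;	/* unknown until a marker is seen */
		__u64 i;

		for (i = 0; i < nr; i++) {
			if (ips[i] >= PERF_CONTEXT_MAX) {
				context = ips[i];	/* entries that follow belong here */
				continue;
			}
			printf("%s %#llx\n",
			       context == PERF_CONTEXT_KERNEL ? "kernel" :
			       context == PERF_CONTEXT_HV     ? "hv" : "user",
			       (unsigned long long)ips[i]);
		}
	}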
@@ -356,6 +380,13 @@ enum perf_event_type {
 #include <linux/pid_namespace.h>
 #include <asm/atomic.h>
 
+#define PERF_MAX_STACK_DEPTH		255
+
+struct perf_callchain_entry {
+	__u64				nr;
+	__u64				ip[PERF_MAX_STACK_DEPTH];
+};
+
 struct task_struct;
 
 /**
@@ -414,6 +445,7 @@ struct file;
 struct perf_mmap_data {
 	struct rcu_head			rcu_head;
 	int				nr_pages;	/* nr of data pages  */
+	int				writable;	/* are we writable   */
 	int				nr_locked;	/* nr pages mlocked  */
 
 	atomic_t			poll;		/* POLL_ for wakeups */
@@ -423,8 +455,8 @@ struct perf_mmap_data {
 	atomic_long_t			done_head;	/* completed head    */
 
 	atomic_t			lock;		/* concurrent writes */
-
 	atomic_t			wakeup;		/* needs a wakeup    */
+	atomic_t			lost;		/* nr records lost   */
 
 	struct perf_counter_mmap_page   *user_page;
 	void				*data_pages[0];
@@ -604,6 +636,7 @@ extern void perf_counter_task_tick(struct task_struct *task, int cpu);
 extern int perf_counter_init_task(struct task_struct *child);
 extern void perf_counter_exit_task(struct task_struct *child);
 extern void perf_counter_free_task(struct task_struct *task);
+extern void set_perf_counter_pending(void);
 extern void perf_counter_do_pending(void);
 extern void perf_counter_print_debug(void);
 extern void __perf_disable(void);
@@ -649,18 +682,6 @@ static inline void perf_counter_mmap(struct vm_area_struct *vma)
 extern void perf_counter_comm(struct task_struct *tsk);
 extern void perf_counter_fork(struct task_struct *tsk);
 
-extern void perf_counter_task_migration(struct task_struct *task, int cpu);
-
-#define MAX_STACK_DEPTH			255
-
-struct perf_callchain_entry {
-	u16				nr;
-	u16				hv;
-	u16				kernel;
-	u16				user;
-	u64				ip[MAX_STACK_DEPTH];
-};
-
 extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 
 extern int sysctl_perf_counter_paranoid;
@@ -701,8 +722,6 @@ static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
 static inline void perf_counter_comm(struct task_struct *tsk)		{ }
 static inline void perf_counter_fork(struct task_struct *tsk)		{ }
 static inline void perf_counter_init(void)				{ }
-static inline void perf_counter_task_migration(struct task_struct *task,
-							int cpu)		{ }
 #endif
 
 #endif /* __KERNEL__ */