Diffstat (limited to 'kernel/events/internal.h')
-rw-r--r--	kernel/events/internal.h	96
1 file changed, 96 insertions, 0 deletions
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
new file mode 100644
index 000000000000..09097dd8116c
--- /dev/null
+++ b/kernel/events/internal.h
@@ -0,0 +1,96 @@
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

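/*
 * Without the vmalloc backing, each data page is a separate order-0
 * allocation, so the effective page order is always zero.
 */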
static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

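/*
 * Total size of the data area in bytes: nr_pages data pages, each
 * PAGE_SIZE << page_order(rb) bytes long.
 */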
static unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

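/*
 * Copy @len bytes from @buf into the ring buffer behind @handle,
 * advancing to the next data page whenever the current one fills up.
 * The "& (nr_pages - 1)" wrap relies on nr_pages being a power of two.
 */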
static inline void
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned int len)
{
	do {
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct ring_buffer *rb = handle->rb;

			handle->page++;
			handle->page &= rb->nr_pages - 1;
			handle->addr = rb->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(rb);
		}
	} while (len);
}

#endif /* _KERNEL_EVENTS_INTERNAL_H */
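For context, the sketch below shows how a producer typically reaches __output_copy(): the core perf output code reserves space in the ring buffer, copies the record, then publishes the new head. It is illustrative only; perf_output_begin(), perf_output_end(), perf_output_put() and perf_output_copy() live outside this header, their exact signatures here are assumptions, and example_emit_record() is a hypothetical caller, not part of this patch.

/*
 * Illustrative sketch only -- not part of this patch. Assumes the perf
 * output helpers declared in <linux/perf_event.h>; signatures may differ.
 */
static void example_emit_record(struct perf_event *event,
				const void *payload, unsigned int len)
{
	struct perf_output_handle handle;
	struct perf_event_header header = {
		.type = PERF_RECORD_SAMPLE,
		.size = sizeof(header) + len,
	};

	/* Reserve header.size bytes in the ring buffer; bail out if full. */
	if (perf_output_begin(&handle, event, header.size))
		return;

	/*
	 * Both helpers funnel into __output_copy(), which wraps across
	 * rb->data_pages[] as handle->size runs out.
	 */
	perf_output_put(&handle, header);
	perf_output_copy(&handle, payload, len);

	/* Publish the new head and wake up readers if needed. */
	perf_output_end(&handle);
}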