Diffstat (limited to 'drivers/oprofile/cpu_buffer.h')
-rw-r--r--	drivers/oprofile/cpu_buffer.h	71
1 file changed, 41 insertions(+), 30 deletions(-)
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 895763f065e9..aacb0f0bc566 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -15,6 +15,7 @@
 #include <linux/workqueue.h>
 #include <linux/cache.h>
 #include <linux/sched.h>
+#include <linux/ring_buffer.h>
 
 struct task_struct;
 
@@ -32,6 +33,12 @@ struct op_sample {
 	unsigned long event;
 };
 
+struct op_entry {
+	struct ring_buffer_event *event;
+	struct op_sample *sample;
+	unsigned long irq_flags;
+};
+
 struct oprofile_cpu_buffer {
 	volatile unsigned long head_pos;
 	volatile unsigned long tail_pos;
@@ -39,7 +46,6 @@ struct oprofile_cpu_buffer {
 	struct task_struct *last_task;
 	int last_is_kernel;
 	int tracing;
-	struct op_sample *buffer;
 	unsigned long sample_received;
 	unsigned long sample_lost_overflow;
 	unsigned long backtrace_aborted;
@@ -48,6 +54,8 @@ struct oprofile_cpu_buffer {
 	struct delayed_work work;
 };
 
+extern struct ring_buffer *op_ring_buffer_read;
+extern struct ring_buffer *op_ring_buffer_write;
 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 /*
@@ -64,46 +72,49 @@ static inline void cpu_buffer_reset(int cpu)
 	cpu_buf->last_task = NULL;
 }
 
-static inline
-struct op_sample *cpu_buffer_write_entry(struct oprofile_cpu_buffer *cpu_buf)
+static inline int cpu_buffer_write_entry(struct op_entry *entry)
 {
-	return &cpu_buf->buffer[cpu_buf->head_pos];
-}
+	entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
+						sizeof(struct op_sample),
+						&entry->irq_flags);
+	if (entry->event)
+		entry->sample = ring_buffer_event_data(entry->event);
+	else
+		entry->sample = NULL;
 
-static inline
-void cpu_buffer_write_commit(struct oprofile_cpu_buffer *b)
-{
-	unsigned long new_head = b->head_pos + 1;
+	if (!entry->sample)
+		return -ENOMEM;
 
-	/*
-	 * Ensure anything written to the slot before we increment is
-	 * visible
-	 */
-	wmb();
+	return 0;
+}
 
-	if (new_head < b->buffer_size)
-		b->head_pos = new_head;
-	else
-		b->head_pos = 0;
+static inline int cpu_buffer_write_commit(struct op_entry *entry)
+{
+	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
+					 entry->irq_flags);
 }
 
-static inline
-struct op_sample *cpu_buffer_read_entry(struct oprofile_cpu_buffer *cpu_buf)
+static inline struct op_sample *cpu_buffer_read_entry(int cpu)
 {
-	return &cpu_buf->buffer[cpu_buf->tail_pos];
+	struct ring_buffer_event *e;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		return ring_buffer_event_data(e);
+	if (ring_buffer_swap_cpu(op_ring_buffer_read,
+				 op_ring_buffer_write,
+				 cpu))
+		return NULL;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		return ring_buffer_event_data(e);
+	return NULL;
 }
 
 /* "acquire" as many cpu buffer slots as we can */
-static inline
-unsigned long cpu_buffer_entries(struct oprofile_cpu_buffer *b)
+static inline unsigned long cpu_buffer_entries(int cpu)
 {
-	unsigned long head = b->head_pos;
-	unsigned long tail = b->tail_pos;
-
-	if (head >= tail)
-		return head - tail;
-
-	return head + (b->buffer_size - tail);
+	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
+		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
 }
 
 /* transient events for the CPU buffer -> event buffer */
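
For context, the write side of the new API is used in two steps: cpu_buffer_write_entry() reserves a slot in op_ring_buffer_write and exposes it through entry->sample, and cpu_buffer_write_commit() publishes the filled sample to the read side. A minimal sketch of a caller follows; it is not part of this patch, the helper name is hypothetical (loosely modeled on the add_sample() path in cpu_buffer.c), and it assumes struct op_sample pairs an eip field with the event field shown above.

/* Illustrative only: a hypothetical caller of the new write API. */
static int oprofile_add_sample(unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	int ret;

	/* Reserve an op_sample slot in the per-cpu write ring buffer. */
	ret = cpu_buffer_write_entry(&entry);
	if (ret)
		return ret;

	/* Fill the reserved sample in place ('eip' assumed, see note above). */
	entry.sample->eip = pc;
	entry.sample->event = event;

	/* Publish the sample so cpu_buffer_read_entry() can consume it. */
	return cpu_buffer_write_commit(&entry);
}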