author		Robert Richter <robert.richter@amd.com>	2008-12-16 10:19:54 -0500
committer	Robert Richter <robert.richter@amd.com>	2008-12-29 09:19:19 -0500
commit		9966718daee592fbdc523703b2d8200009642506 (patch)
tree		8f2509353e8e22fbeb52112f4b0bcd9152553a12 /drivers/oprofile/cpu_buffer.c
parent		6d2c53f3cd81e33eec17aa99845d43e599986982 (diff)
oprofile: remove ring buffer inline functions in cpu_buffer.h

This patch moves ring buffer inline functions to cpu_buffer.c.

Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'drivers/oprofile/cpu_buffer.c')
-rw-r--r--	drivers/oprofile/cpu_buffer.c	48
1 file changed, 46 insertions(+), 2 deletions(-)
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index e52c085cd186..cd67d4dd30b7 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -45,8 +45,8 @@
  * can be changed to a single buffer solution when the ring buffer
  * access is implemented as non-locking atomic code.
  */
-struct ring_buffer *op_ring_buffer_read;
-struct ring_buffer *op_ring_buffer_write;
+static struct ring_buffer *op_ring_buffer_read;
+static struct ring_buffer *op_ring_buffer_write;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
@@ -145,6 +145,50 @@ void end_cpu_work(void)
 	flush_scheduled_work();
 }
 
+int op_cpu_buffer_write_entry(struct op_entry *entry)
+{
+	entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
+						sizeof(struct op_sample),
+						&entry->irq_flags);
+	if (entry->event)
+		entry->sample = ring_buffer_event_data(entry->event);
+	else
+		entry->sample = NULL;
+
+	if (!entry->sample)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int op_cpu_buffer_write_commit(struct op_entry *entry)
+{
+	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
+					 entry->irq_flags);
+}
+
+struct op_sample *op_cpu_buffer_read_entry(int cpu)
+{
+	struct ring_buffer_event *e;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		return ring_buffer_event_data(e);
+	if (ring_buffer_swap_cpu(op_ring_buffer_read,
+				 op_ring_buffer_write,
+				 cpu))
+		return NULL;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		return ring_buffer_event_data(e);
+	return NULL;
+}
+
+unsigned long op_cpu_buffer_entries(int cpu)
+{
+	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
+		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
+}
+
 static inline int
 add_sample(struct oprofile_cpu_buffer *cpu_buf,
 	   unsigned long pc, unsigned long event)
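
For reference, here is a minimal, hypothetical caller sketch (not part of this commit) showing how a producer in cpu_buffer.c might use the write path exported above. The function name example_add_sample and the op_sample fields eip and event are assumptions drawn from the oprofile code of this period, not something introduced by this patch.

/*
 * Hypothetical usage sketch: reserve a slot in the write ring buffer,
 * fill in the sample, then commit it so the sync path can consume it.
 */
static int example_add_sample(unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	int ret;

	ret = op_cpu_buffer_write_entry(&entry);	/* reserve a slot */
	if (ret)
		return ret;				/* buffer full: -ENOMEM */

	entry.sample->eip = pc;				/* fill the reserved sample */
	entry.sample->event = event;

	return op_cpu_buffer_write_commit(&entry);	/* publish the entry */
}

A consumer would then drain samples per CPU with op_cpu_buffer_read_entry(cpu), which first reads from the read buffer and, once it is empty, swaps in the write buffer via ring_buffer_swap_cpu() and retries.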