author	Robert Richter <robert.richter@amd.com>	2008-12-16 10:19:54 -0500
committer	Robert Richter <robert.richter@amd.com>	2008-12-29 09:19:19 -0500
commit	9966718daee592fbdc523703b2d8200009642506 (patch)
tree	8f2509353e8e22fbeb52112f4b0bcd9152553a12 /drivers/oprofile
parent	6d2c53f3cd81e33eec17aa99845d43e599986982 (diff)
oprofile: remove ring buffer inline functions in cpu_buffer.h
This patch moves ring buffer inline functions to cpu_buffer.c.

Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'drivers/oprofile')
-rw-r--r--	drivers/oprofile/cpu_buffer.c	48
-rw-r--r--	drivers/oprofile/cpu_buffer.h	50
2 files changed, 50 insertions, 48 deletions
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index e52c085cd186..cd67d4dd30b7 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -45,8 +45,8 @@
  * can be changed to a single buffer solution when the ring buffer
  * access is implemented as non-locking atomic code.
  */
-struct ring_buffer *op_ring_buffer_read;
-struct ring_buffer *op_ring_buffer_write;
+static struct ring_buffer *op_ring_buffer_read;
+static struct ring_buffer *op_ring_buffer_write;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
@@ -145,6 +145,50 @@ void end_cpu_work(void)
 	flush_scheduled_work();
 }
 
+int op_cpu_buffer_write_entry(struct op_entry *entry)
+{
+	entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
+						sizeof(struct op_sample),
+						&entry->irq_flags);
+	if (entry->event)
+		entry->sample = ring_buffer_event_data(entry->event);
+	else
+		entry->sample = NULL;
+
+	if (!entry->sample)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int op_cpu_buffer_write_commit(struct op_entry *entry)
+{
+	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
+					 entry->irq_flags);
+}
+
+struct op_sample *op_cpu_buffer_read_entry(int cpu)
+{
+	struct ring_buffer_event *e;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		return ring_buffer_event_data(e);
+	if (ring_buffer_swap_cpu(op_ring_buffer_read,
+				 op_ring_buffer_write,
+				 cpu))
+		return NULL;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		return ring_buffer_event_data(e);
+	return NULL;
+}
+
+unsigned long op_cpu_buffer_entries(int cpu)
+{
+	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
+		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
+}
+
 static inline int
 add_sample(struct oprofile_cpu_buffer *cpu_buf,
 	   unsigned long pc, unsigned long event)
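For context, the add_sample() helper visible in the trailing context lines above is the natural caller of this write-side pair. A minimal sketch of how such a producer would use the new out-of-line interface (hypothetical function name; it also assumes struct op_sample carries eip and event members, which this hunk does not show):

	/* Sketch of a producer: reserve a slot, fill it, commit it.
	 * Hypothetical example; assumes struct op_sample has eip/event fields. */
	static int example_add_sample(unsigned long pc, unsigned long event)
	{
		struct op_entry entry;
		int ret;

		ret = op_cpu_buffer_write_entry(&entry);
		if (ret)
			return ret;	/* -ENOMEM: no slot could be reserved */

		entry.sample->eip = pc;
		entry.sample->event = event;

		return op_cpu_buffer_write_commit(&entry);
	}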
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 83d491e273fe..cd28abc06960 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -54,8 +54,6 @@ struct oprofile_cpu_buffer {
 	struct delayed_work work;
 };
 
-extern struct ring_buffer *op_ring_buffer_read;
-extern struct ring_buffer *op_ring_buffer_write;
 DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 /*
@@ -72,50 +70,10 @@ static inline void op_cpu_buffer_reset(int cpu)
 	cpu_buf->last_task = NULL;
 }
 
-static inline int op_cpu_buffer_write_entry(struct op_entry *entry)
-{
-	entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
-						sizeof(struct op_sample),
-						&entry->irq_flags);
-	if (entry->event)
-		entry->sample = ring_buffer_event_data(entry->event);
-	else
-		entry->sample = NULL;
-
-	if (!entry->sample)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static inline int op_cpu_buffer_write_commit(struct op_entry *entry)
-{
-	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
-					 entry->irq_flags);
-}
-
-static inline struct op_sample *op_cpu_buffer_read_entry(int cpu)
-{
-	struct ring_buffer_event *e;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		return ring_buffer_event_data(e);
-	if (ring_buffer_swap_cpu(op_ring_buffer_read,
-				 op_ring_buffer_write,
-				 cpu))
-		return NULL;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		return ring_buffer_event_data(e);
-	return NULL;
-}
-
-/* "acquire" as many cpu buffer slots as we can */
-static inline unsigned long op_cpu_buffer_entries(int cpu)
-{
-	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
-		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
-}
+int op_cpu_buffer_write_entry(struct op_entry *entry);
+int op_cpu_buffer_write_commit(struct op_entry *entry);
+struct op_sample *op_cpu_buffer_read_entry(int cpu);
+unsigned long op_cpu_buffer_entries(int cpu);
 
 /* transient events for the CPU buffer -> event buffer */
 #define CPU_IS_KERNEL 1
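On the read side, op_cpu_buffer_read_entry() first tries to consume from the read buffer and, only when that is empty, swaps it with the write buffer via ring_buffer_swap_cpu() and retries once. A sketch of the drain loop a consumer could run on top of this interface (hypothetical function; the real consumer that syncs samples into the event buffer is not part of this patch):

	/* Hypothetical consumer loop built on the new interface. */
	static void example_drain_cpu_buffer(int cpu)
	{
		unsigned long avail = op_cpu_buffer_entries(cpu);
		struct op_sample *sample;

		while (avail--) {
			sample = op_cpu_buffer_read_entry(cpu);
			if (!sample)
				break;	/* both buffers drained */
			/* process *sample here, e.g. copy it into the event buffer */
		}
	}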