Diffstat (limited to 'drivers/oprofile/cpu_buffer.c')
 drivers/oprofile/cpu_buffer.c | 23 +++--------------------
 1 file changed, 3 insertions(+), 20 deletions(-)
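As the diffstat suggests, the patch below is a pure cleanup: it collapses runs of consecutive blank lines into single blank lines and rewrites the two "inline static void" declarations (add_sample and add_code) as "static inline void". As an illustrative aside, not part of the patch, the small standalone sketch below shows the two specifier orders involved; the function names are made up for the example. Both orders are accepted by GCC, but C99 lists placing a storage-class specifier anywhere but first among the declaration specifiers as an obsolescent feature, so "static inline" is the conventional spelling.

/* Illustrative sketch only -- not taken from cpu_buffer.c. */
#include <stdio.h>

inline static int twice_old(int x)	/* specifier order the patch removes */
{
	return x + x;
}

static inline int twice_new(int x)	/* specifier order the patch adopts */
{
	return x + x;
}

int main(void)
{
	printf("%d %d\n", twice_old(21), twice_new(21));
	return 0;
}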
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index e9b1772a3a2..026f671ea55 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -42,8 +42,7 @@ void free_cpu_buffers(void)
 		vfree(cpu_buffer[i].buffer);
 	}
 }
 
-
 int alloc_cpu_buffers(void)
 {
 	int i;
@@ -74,7 +73,6 @@ fail:
 	free_cpu_buffers();
 	return -ENOMEM;
 }
 
-
 void start_cpu_work(void)
 {
@@ -93,7 +91,6 @@ void start_cpu_work(void)
 	}
 }
 
-
 void end_cpu_work(void)
 {
 	int i;
@@ -109,7 +106,6 @@ void end_cpu_work(void)
 	flush_scheduled_work();
 }
 
-
 /* Resets the cpu buffer to a sane state. */
 void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
 {
@@ -121,7 +117,6 @@ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
 	cpu_buf->last_task = NULL;
 }
 
-
 /* compute number of available slots in cpu_buffer queue */
 static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
 {
@@ -134,7 +129,6 @@ static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
 	return tail + (b->buffer_size - head) - 1;
 }
 
-
 static void increment_head(struct oprofile_cpu_buffer * b)
 {
 	unsigned long new_head = b->head_pos + 1;
@@ -149,10 +143,7 @@ static void increment_head(struct oprofile_cpu_buffer * b)
 	b->head_pos = 0;
 }
 
-
-
-
-inline static void
+static inline void
 add_sample(struct oprofile_cpu_buffer * cpu_buf,
            unsigned long pc, unsigned long event)
 {
@@ -162,14 +153,12 @@ add_sample(struct oprofile_cpu_buffer * cpu_buf,
 	increment_head(cpu_buf);
 }
 
-
-inline static void
+static inline void
 add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
 {
 	add_sample(buffer, ESCAPE_CODE, value);
 }
 
-
 /* This must be safe from any context. It's safe writing here
  * because of the head/tail separation of the writer and reader
  * of the CPU buffer.
@@ -223,13 +212,11 @@ static int oprofile_begin_trace(struct oprofile_cpu_buffer * cpu_buf)
 	return 1;
 }
 
-
 static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
 {
 	cpu_buf->tracing = 0;
 }
 
-
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 {
 	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
@@ -251,14 +238,12 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 	oprofile_end_trace(cpu_buf);
 }
 
-
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
 {
 	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
 	log_sample(cpu_buf, pc, is_kernel, event);
 }
 
-
 void oprofile_add_trace(unsigned long pc)
 {
 	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
@@ -283,8 +268,6 @@ void oprofile_add_trace(unsigned long pc)
 	add_sample(cpu_buf, pc, 0);
 }
 
-
-
 /*
  * This serves to avoid cpu buffer overflow, and makes sure
  * the task mortuary progresses