author      Len Brown <len.brown@intel.com>   2009-01-09 03:39:43 -0500
committer   Len Brown <len.brown@intel.com>   2009-01-09 03:39:43 -0500
commit      b2576e1d4408e134e2188c967b1f28af39cd79d4 (patch)
tree        004f3c82faab760f304ce031d6d2f572e7746a50 /kernel/trace/ring_buffer.c
parent      3cc8a5f4ba91f67bbdb81a43a99281a26aab8d77 (diff)
parent      2150edc6c5cf00f7adb54538b9ea2a3e9cedca3f (diff)

Merge branch 'linus' into release
Diffstat (limited to 'kernel/trace/ring_buffer.c')

 -rw-r--r--   kernel/trace/ring_buffer.c   787
 1 file changed, 563 insertions, 224 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 668bbb5ef2bd..a9d9760dc7b6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -18,8 +18,46 @@ | |||
18 | 18 | ||
19 | #include "trace.h" | 19 | #include "trace.h" |
20 | 20 | ||
21 | /* Global flag to disable all recording to ring buffers */ | 21 | /* |
22 | static int ring_buffers_off __read_mostly; | 22 | * A fast way to enable or disable all ring buffers is to |
23 | * call tracing_on or tracing_off. Turning off the ring buffers | ||
24 | * prevents all ring buffers from being recorded to. | ||
25 | * Turning this switch on makes it OK to write to the | ||
26 | * ring buffer, if the ring buffer is enabled itself. | ||
27 | * | ||
28 | * There are three layers that must be on in order to write | ||
29 | * to the ring buffer. | ||
30 | * | ||
31 | * 1) This global flag must be set. | ||
32 | * 2) The ring buffer must be enabled for recording. | ||
33 | * 3) The per cpu buffer must be enabled for recording. | ||
34 | * | ||
35 | * In case of an anomaly, this global flag has a bit set that | ||
36 | * will permanently disable all ring buffers. | ||
37 | */ | ||
38 | |||
39 | /* | ||
40 | * Global flag to disable all recording to ring buffers | ||
41 | * This has two bits: ON, DISABLED | ||
42 | * | ||
43 | * ON DISABLED | ||
44 | * ---- ---------- | ||
45 | * 0 0 : ring buffers are off | ||
46 | * 1 0 : ring buffers are on | ||
47 | * X 1 : ring buffers are permanently disabled | ||
48 | */ | ||
49 | |||
50 | enum { | ||
51 | RB_BUFFERS_ON_BIT = 0, | ||
52 | RB_BUFFERS_DISABLED_BIT = 1, | ||
53 | }; | ||
54 | |||
55 | enum { | ||
56 | RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT, | ||
57 | RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT, | ||
58 | }; | ||
59 | |||
60 | static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; | ||
23 | 61 | ||
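
Illustrative aside (not part of the commit): with the two bits above, the write path only has to compare one word, which is exactly what the later hunks do with "ring_buffer_flags != RB_BUFFERS_ON". A minimal userspace sketch of that gating, with the kernel's atomic set_bit()/clear_bit() calls modeled as plain bitwise operations:

    /* Userspace model of the ON/DISABLED gate; not kernel code. */
    #include <stdio.h>

    enum { RB_BUFFERS_ON = 1 << 0, RB_BUFFERS_DISABLED = 1 << 1 };

    static long ring_buffer_flags = RB_BUFFERS_ON;

    /* Writes are allowed only while the flag word is exactly "ON". */
    static int writes_allowed(void)
    {
            return ring_buffer_flags == RB_BUFFERS_ON;
    }

    int main(void)
    {
            printf("%d\n", writes_allowed());          /* 1: on */
            ring_buffer_flags &= ~RB_BUFFERS_ON;       /* tracing_off() */
            printf("%d\n", writes_allowed());          /* 0: off */
            ring_buffer_flags |= RB_BUFFERS_ON;        /* tracing_on() */
            ring_buffer_flags |= RB_BUFFERS_DISABLED;  /* tracing_off_permanent() */
            printf("%d\n", writes_allowed());          /* 0: disabled wins */
            return 0;
    }

Once the DISABLED bit is set, no combination of tracing_on()/tracing_off() can make the comparison succeed again, which is the permanent-disable behaviour the comment describes.
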
24 | /** | 62 | /** |
25 | * tracing_on - enable all tracing buffers | 63 | * tracing_on - enable all tracing buffers |
@@ -29,8 +67,9 @@ static int ring_buffers_off __read_mostly; | |||
29 | */ | 67 | */ |
30 | void tracing_on(void) | 68 | void tracing_on(void) |
31 | { | 69 | { |
32 | ring_buffers_off = 0; | 70 | set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); |
33 | } | 71 | } |
72 | EXPORT_SYMBOL_GPL(tracing_on); | ||
34 | 73 | ||
35 | /** | 74 | /** |
36 | * tracing_off - turn off all tracing buffers | 75 | * tracing_off - turn off all tracing buffers |
@@ -42,8 +81,22 @@ void tracing_on(void) | |||
42 | */ | 81 | */ |
43 | void tracing_off(void) | 82 | void tracing_off(void) |
44 | { | 83 | { |
45 | ring_buffers_off = 1; | 84 | clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); |
46 | } | 85 | } |
86 | EXPORT_SYMBOL_GPL(tracing_off); | ||
87 | |||
88 | /** | ||
89 | * tracing_off_permanent - permanently disable ring buffers | ||
90 | * | ||
91 | * This function, once called, will disable all ring buffers | ||
92 | * permanently. | ||
93 | */ | ||
94 | void tracing_off_permanent(void) | ||
95 | { | ||
96 | set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); | ||
97 | } | ||
98 | |||
99 | #include "trace.h" | ||
47 | 100 | ||
48 | /* Up this if you want to test the TIME_EXTENTS and normalization */ | 101 | /* Up this if you want to test the TIME_EXTENTS and normalization */ |
49 | #define DEBUG_SHIFT 0 | 102 | #define DEBUG_SHIFT 0 |
@@ -56,16 +109,18 @@ u64 ring_buffer_time_stamp(int cpu) | |||
56 | preempt_disable_notrace(); | 109 | preempt_disable_notrace(); |
57 | /* shift to debug/test normalization and TIME_EXTENTS */ | 110 | /* shift to debug/test normalization and TIME_EXTENTS */ |
58 | time = sched_clock() << DEBUG_SHIFT; | 111 | time = sched_clock() << DEBUG_SHIFT; |
59 | preempt_enable_notrace(); | 112 | preempt_enable_no_resched_notrace(); |
60 | 113 | ||
61 | return time; | 114 | return time; |
62 | } | 115 | } |
116 | EXPORT_SYMBOL_GPL(ring_buffer_time_stamp); | ||
63 | 117 | ||
64 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) | 118 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) |
65 | { | 119 | { |
66 | /* Just stupid testing the normalize function and deltas */ | 120 | /* Just stupid testing the normalize function and deltas */ |
67 | *ts >>= DEBUG_SHIFT; | 121 | *ts >>= DEBUG_SHIFT; |
68 | } | 122 | } |
123 | EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); | ||
69 | 124 | ||
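
Illustrative aside (not from the commit): DEBUG_SHIFT scales the clock up on the time-stamp side and back down in the normalize path, so reported times are unchanged while the inflated values exercise the TIME_EXTENTS handling. A rough userspace model, with a stand-in for sched_clock():

    /* Round-trip of the DEBUG_SHIFT scaling; plain C stand-ins only. */
    #include <stdio.h>
    #include <stdint.h>

    #define DEBUG_SHIFT 2   /* 0 in the real code; non-zero only for testing */

    static uint64_t fake_sched_clock(void)
    {
            return 123456789ULL;
    }

    int main(void)
    {
            uint64_t ts = fake_sched_clock() << DEBUG_SHIFT;  /* stamp path */
            ts >>= DEBUG_SHIFT;                               /* normalize path */
            printf("%llu\n", (unsigned long long)ts);         /* original value */
            return 0;
    }
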
70 | #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) | 125 | #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) |
71 | #define RB_ALIGNMENT_SHIFT 2 | 126 | #define RB_ALIGNMENT_SHIFT 2 |
@@ -115,6 +170,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event) | |||
115 | { | 170 | { |
116 | return rb_event_length(event); | 171 | return rb_event_length(event); |
117 | } | 172 | } |
173 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); | ||
118 | 174 | ||
119 | /* inline for ring buffer fast paths */ | 175 | /* inline for ring buffer fast paths */ |
120 | static inline void * | 176 | static inline void * |
@@ -136,28 +192,33 @@ void *ring_buffer_event_data(struct ring_buffer_event *event) | |||
136 | { | 192 | { |
137 | return rb_event_data(event); | 193 | return rb_event_data(event); |
138 | } | 194 | } |
195 | EXPORT_SYMBOL_GPL(ring_buffer_event_data); | ||
139 | 196 | ||
140 | #define for_each_buffer_cpu(buffer, cpu) \ | 197 | #define for_each_buffer_cpu(buffer, cpu) \ |
141 | for_each_cpu_mask(cpu, buffer->cpumask) | 198 | for_each_cpu(cpu, buffer->cpumask) |
142 | 199 | ||
143 | #define TS_SHIFT 27 | 200 | #define TS_SHIFT 27 |
144 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) | 201 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) |
145 | #define TS_DELTA_TEST (~TS_MASK) | 202 | #define TS_DELTA_TEST (~TS_MASK) |
146 | 203 | ||
147 | /* | 204 | struct buffer_data_page { |
148 | * This hack stolen from mm/slob.c. | ||
149 | * We can store per page timing information in the page frame of the page. | ||
150 | * Thanks to Peter Zijlstra for suggesting this idea. | ||
151 | */ | ||
152 | struct buffer_page { | ||
153 | u64 time_stamp; /* page time stamp */ | 205 | u64 time_stamp; /* page time stamp */ |
154 | local_t write; /* index for next write */ | ||
155 | local_t commit; /* write committed index */ | 206 | local_t commit; /* write committed index */
207 | unsigned char data[]; /* data of buffer page */ | ||
208 | }; | ||
209 | |||
210 | struct buffer_page { | ||
211 | local_t write; /* index for next write */ | ||
156 | unsigned read; /* index for next read */ | 212 | unsigned read; /* index for next read */ |
157 | struct list_head list; /* list of free pages */ | 213 | struct list_head list; /* list of free pages */ |
158 | void *page; /* Actual data page */ | 214 | struct buffer_data_page *page; /* Actual data page */ |
159 | }; | 215 | }; |
160 | 216 | ||
217 | static void rb_init_page(struct buffer_data_page *bpage) | ||
218 | { | ||
219 | local_set(&bpage->commit, 0); | ||
220 | } | ||
221 | |||
161 | /* | 222 | /* |
162 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing | 223 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing |
163 | * this issue out. | 224 | * this issue out. |
@@ -179,7 +240,7 @@ static inline int test_time_stamp(u64 delta) | |||
179 | return 0; | 240 | return 0; |
180 | } | 241 | } |
181 | 242 | ||
182 | #define BUF_PAGE_SIZE PAGE_SIZE | 243 | #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page)) |
183 | 244 | ||
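
Illustrative aside: buffer_data_page keeps the time stamp and commit counter inside the data page itself, which is why BUF_PAGE_SIZE above shrinks from PAGE_SIZE by the size of that header. A rough userspace model, assuming a 4 KB page and a plain long in place of local_t:

    /* Model of the new page layout; sizes are illustrative only. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL

    struct buffer_data_page {
            uint64_t time_stamp;      /* page time stamp */
            long commit;              /* committed write index (local_t in-kernel) */
            unsigned char data[];     /* event payload starts here */
    };

    #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))

    int main(void)
    {
            printf("header %zu bytes, payload %lu bytes per page\n",
                   sizeof(struct buffer_data_page),
                   (unsigned long)BUF_PAGE_SIZE);
            return 0;
    }
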
184 | /* | 245 | /* |
185 | * head_page == tail_page && head == tail then buffer is empty. | 246 | * head_page == tail_page && head == tail then buffer is empty. |
@@ -187,7 +248,8 @@ static inline int test_time_stamp(u64 delta) | |||
187 | struct ring_buffer_per_cpu { | 248 | struct ring_buffer_per_cpu { |
188 | int cpu; | 249 | int cpu; |
189 | struct ring_buffer *buffer; | 250 | struct ring_buffer *buffer; |
190 | spinlock_t lock; | 251 | spinlock_t reader_lock; /* serialize readers */ |
252 | raw_spinlock_t lock; | ||
191 | struct lock_class_key lock_key; | 253 | struct lock_class_key lock_key; |
192 | struct list_head pages; | 254 | struct list_head pages; |
193 | struct buffer_page *head_page; /* read from head */ | 255 | struct buffer_page *head_page; /* read from head */ |
@@ -202,11 +264,10 @@ struct ring_buffer_per_cpu { | |||
202 | }; | 264 | }; |
203 | 265 | ||
204 | struct ring_buffer { | 266 | struct ring_buffer { |
205 | unsigned long size; | ||
206 | unsigned pages; | 267 | unsigned pages; |
207 | unsigned flags; | 268 | unsigned flags; |
208 | int cpus; | 269 | int cpus; |
209 | cpumask_t cpumask; | 270 | cpumask_var_t cpumask; |
210 | atomic_t record_disabled; | 271 | atomic_t record_disabled; |
211 | 272 | ||
212 | struct mutex mutex; | 273 | struct mutex mutex; |
@@ -221,32 +282,16 @@ struct ring_buffer_iter { | |||
221 | u64 read_stamp; | 282 | u64 read_stamp; |
222 | }; | 283 | }; |
223 | 284 | ||
285 | /* buffer may be either ring_buffer or ring_buffer_per_cpu */ | ||
224 | #define RB_WARN_ON(buffer, cond) \ | 286 | #define RB_WARN_ON(buffer, cond) \ |
225 | do { \ | 287 | ({ \ |
226 | if (unlikely(cond)) { \ | 288 | int _____ret = unlikely(cond); \ |
227 | atomic_inc(&buffer->record_disabled); \ | 289 | if (_____ret) { \ |
228 | WARN_ON(1); \ | ||
229 | } \ | ||
230 | } while (0) | ||
231 | |||
232 | #define RB_WARN_ON_RET(buffer, cond) \ | ||
233 | do { \ | ||
234 | if (unlikely(cond)) { \ | ||
235 | atomic_inc(&buffer->record_disabled); \ | ||
236 | WARN_ON(1); \ | ||
237 | return -1; \ | ||
238 | } \ | ||
239 | } while (0) | ||
240 | |||
241 | #define RB_WARN_ON_ONCE(buffer, cond) \ | ||
242 | do { \ | ||
243 | static int once; \ | ||
244 | if (unlikely(cond) && !once) { \ | ||
245 | once++; \ | ||
246 | atomic_inc(&buffer->record_disabled); \ | 290 | atomic_inc(&buffer->record_disabled); \ |
247 | WARN_ON(1); \ | 291 | WARN_ON(1); \ |
248 | } \ | 292 | } \ |
249 | } while (0) | 293 | _____ret; \ |
294 | }) | ||
250 | 295 | ||
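
Illustrative aside: the rewritten RB_WARN_ON uses the GNU statement-expression extension, so one macro can disable recording, warn, and still hand the condition back for the new "if (RB_WARN_ON(...)) return ..." call sites. A compilable userspace sketch of the same pattern, with WARN_ON() and record_disabled modeled by fprintf() and a counter:

    /* Statement-expression macro that both warns and returns the condition.
     * Requires GCC/Clang (GNU C extension), like the kernel original. */
    #include <stdio.h>

    static int record_disabled;

    #define MY_WARN_ON(cond)                                 \
    ({                                                       \
            int _____ret = !!(cond);                         \
            if (_____ret) {                                  \
                    record_disabled++;                       \
                    fprintf(stderr, "warning at %s:%d\n",    \
                            __FILE__, __LINE__);             \
            }                                                \
            _____ret;                                        \
    })

    int main(void)
    {
            int head = 1, next = 2;

            if (MY_WARN_ON(head == next))   /* false: no warning, keep going */
                    return 1;
            if (MY_WARN_ON(head != next))   /* true: warns once, then bails */
                    return 1;
            return 0;
    }
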
251 | /** | 296 | /** |
252 | * check_pages - integrity check of buffer pages | 297 | * check_pages - integrity check of buffer pages |
@@ -258,16 +303,20 @@ struct ring_buffer_iter { | |||
258 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) | 303 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) |
259 | { | 304 | { |
260 | struct list_head *head = &cpu_buffer->pages; | 305 | struct list_head *head = &cpu_buffer->pages; |
261 | struct buffer_page *page, *tmp; | 306 | struct buffer_page *bpage, *tmp; |
262 | 307 | ||
263 | RB_WARN_ON_RET(cpu_buffer, head->next->prev != head); | 308 | if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) |
264 | RB_WARN_ON_RET(cpu_buffer, head->prev->next != head); | 309 | return -1; |
310 | if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) | ||
311 | return -1; | ||
265 | 312 | ||
266 | list_for_each_entry_safe(page, tmp, head, list) { | 313 | list_for_each_entry_safe(bpage, tmp, head, list) { |
267 | RB_WARN_ON_RET(cpu_buffer, | 314 | if (RB_WARN_ON(cpu_buffer, |
268 | page->list.next->prev != &page->list); | 315 | bpage->list.next->prev != &bpage->list)) |
269 | RB_WARN_ON_RET(cpu_buffer, | 316 | return -1; |
270 | page->list.prev->next != &page->list); | 317 | if (RB_WARN_ON(cpu_buffer, |
318 | bpage->list.prev->next != &bpage->list)) | ||
319 | return -1; | ||
271 | } | 320 | } |
272 | 321 | ||
273 | return 0; | 322 | return 0; |
@@ -277,22 +326,23 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
277 | unsigned nr_pages) | 326 | unsigned nr_pages) |
278 | { | 327 | { |
279 | struct list_head *head = &cpu_buffer->pages; | 328 | struct list_head *head = &cpu_buffer->pages; |
280 | struct buffer_page *page, *tmp; | 329 | struct buffer_page *bpage, *tmp; |
281 | unsigned long addr; | 330 | unsigned long addr; |
282 | LIST_HEAD(pages); | 331 | LIST_HEAD(pages); |
283 | unsigned i; | 332 | unsigned i; |
284 | 333 | ||
285 | for (i = 0; i < nr_pages; i++) { | 334 | for (i = 0; i < nr_pages; i++) { |
286 | page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), | 335 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), |
287 | GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); | 336 | GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); |
288 | if (!page) | 337 | if (!bpage) |
289 | goto free_pages; | 338 | goto free_pages; |
290 | list_add(&page->list, &pages); | 339 | list_add(&bpage->list, &pages); |
291 | 340 | ||
292 | addr = __get_free_page(GFP_KERNEL); | 341 | addr = __get_free_page(GFP_KERNEL); |
293 | if (!addr) | 342 | if (!addr) |
294 | goto free_pages; | 343 | goto free_pages; |
295 | page->page = (void *)addr; | 344 | bpage->page = (void *)addr; |
345 | rb_init_page(bpage->page); | ||
296 | } | 346 | } |
297 | 347 | ||
298 | list_splice(&pages, head); | 348 | list_splice(&pages, head); |
@@ -302,9 +352,9 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
302 | return 0; | 352 | return 0; |
303 | 353 | ||
304 | free_pages: | 354 | free_pages: |
305 | list_for_each_entry_safe(page, tmp, &pages, list) { | 355 | list_for_each_entry_safe(bpage, tmp, &pages, list) { |
306 | list_del_init(&page->list); | 356 | list_del_init(&bpage->list); |
307 | free_buffer_page(page); | 357 | free_buffer_page(bpage); |
308 | } | 358 | } |
309 | return -ENOMEM; | 359 | return -ENOMEM; |
310 | } | 360 | } |
@@ -313,7 +363,7 @@ static struct ring_buffer_per_cpu * | |||
313 | rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | 363 | rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) |
314 | { | 364 | { |
315 | struct ring_buffer_per_cpu *cpu_buffer; | 365 | struct ring_buffer_per_cpu *cpu_buffer; |
316 | struct buffer_page *page; | 366 | struct buffer_page *bpage; |
317 | unsigned long addr; | 367 | unsigned long addr; |
318 | int ret; | 368 | int ret; |
319 | 369 | ||
@@ -324,19 +374,21 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
324 | 374 | ||
325 | cpu_buffer->cpu = cpu; | 375 | cpu_buffer->cpu = cpu; |
326 | cpu_buffer->buffer = buffer; | 376 | cpu_buffer->buffer = buffer; |
327 | spin_lock_init(&cpu_buffer->lock); | 377 | spin_lock_init(&cpu_buffer->reader_lock); |
378 | cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
328 | INIT_LIST_HEAD(&cpu_buffer->pages); | 379 | INIT_LIST_HEAD(&cpu_buffer->pages); |
329 | 380 | ||
330 | page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), | 381 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), |
331 | GFP_KERNEL, cpu_to_node(cpu)); | 382 | GFP_KERNEL, cpu_to_node(cpu)); |
332 | if (!page) | 383 | if (!bpage) |
333 | goto fail_free_buffer; | 384 | goto fail_free_buffer; |
334 | 385 | ||
335 | cpu_buffer->reader_page = page; | 386 | cpu_buffer->reader_page = bpage; |
336 | addr = __get_free_page(GFP_KERNEL); | 387 | addr = __get_free_page(GFP_KERNEL); |
337 | if (!addr) | 388 | if (!addr) |
338 | goto fail_free_reader; | 389 | goto fail_free_reader; |
339 | page->page = (void *)addr; | 390 | bpage->page = (void *)addr; |
391 | rb_init_page(bpage->page); | ||
340 | 392 | ||
341 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 393 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); |
342 | 394 | ||
@@ -361,14 +413,14 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
361 | static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) | 413 | static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) |
362 | { | 414 | { |
363 | struct list_head *head = &cpu_buffer->pages; | 415 | struct list_head *head = &cpu_buffer->pages; |
364 | struct buffer_page *page, *tmp; | 416 | struct buffer_page *bpage, *tmp; |
365 | 417 | ||
366 | list_del_init(&cpu_buffer->reader_page->list); | 418 | list_del_init(&cpu_buffer->reader_page->list); |
367 | free_buffer_page(cpu_buffer->reader_page); | 419 | free_buffer_page(cpu_buffer->reader_page); |
368 | 420 | ||
369 | list_for_each_entry_safe(page, tmp, head, list) { | 421 | list_for_each_entry_safe(bpage, tmp, head, list) { |
370 | list_del_init(&page->list); | 422 | list_del_init(&bpage->list); |
371 | free_buffer_page(page); | 423 | free_buffer_page(bpage); |
372 | } | 424 | } |
373 | kfree(cpu_buffer); | 425 | kfree(cpu_buffer); |
374 | } | 426 | } |
@@ -381,7 +433,7 @@ extern int ring_buffer_page_too_big(void); | |||
381 | 433 | ||
382 | /** | 434 | /** |
383 | * ring_buffer_alloc - allocate a new ring_buffer | 435 | * ring_buffer_alloc - allocate a new ring_buffer |
384 | * @size: the size in bytes that is needed. | 436 | * @size: the size in bytes per cpu that is needed. |
385 | * @flags: attributes to set for the ring buffer. | 437 | * @flags: attributes to set for the ring buffer. |
386 | * | 438 | * |
387 | * Currently the only flag that is available is the RB_FL_OVERWRITE | 439 | * Currently the only flag that is available is the RB_FL_OVERWRITE |
@@ -406,6 +458,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) | |||
406 | if (!buffer) | 458 | if (!buffer) |
407 | return NULL; | 459 | return NULL; |
408 | 460 | ||
461 | if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL)) | ||
462 | goto fail_free_buffer; | ||
463 | |||
409 | buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); | 464 | buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); |
410 | buffer->flags = flags; | 465 | buffer->flags = flags; |
411 | 466 | ||
@@ -413,14 +468,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) | |||
413 | if (buffer->pages == 1) | 468 | if (buffer->pages == 1) |
414 | buffer->pages++; | 469 | buffer->pages++; |
415 | 470 | ||
416 | buffer->cpumask = cpu_possible_map; | 471 | cpumask_copy(buffer->cpumask, cpu_possible_mask); |
417 | buffer->cpus = nr_cpu_ids; | 472 | buffer->cpus = nr_cpu_ids; |
418 | 473 | ||
419 | bsize = sizeof(void *) * nr_cpu_ids; | 474 | bsize = sizeof(void *) * nr_cpu_ids; |
420 | buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), | 475 | buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), |
421 | GFP_KERNEL); | 476 | GFP_KERNEL); |
422 | if (!buffer->buffers) | 477 | if (!buffer->buffers) |
423 | goto fail_free_buffer; | 478 | goto fail_free_cpumask; |
424 | 479 | ||
425 | for_each_buffer_cpu(buffer, cpu) { | 480 | for_each_buffer_cpu(buffer, cpu) { |
426 | buffer->buffers[cpu] = | 481 | buffer->buffers[cpu] = |
@@ -440,10 +495,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags) | |||
440 | } | 495 | } |
441 | kfree(buffer->buffers); | 496 | kfree(buffer->buffers); |
442 | 497 | ||
498 | fail_free_cpumask: | ||
499 | free_cpumask_var(buffer->cpumask); | ||
500 | |||
443 | fail_free_buffer: | 501 | fail_free_buffer: |
444 | kfree(buffer); | 502 | kfree(buffer); |
445 | return NULL; | 503 | return NULL; |
446 | } | 504 | } |
505 | EXPORT_SYMBOL_GPL(ring_buffer_alloc); | ||
447 | 506 | ||
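
Illustrative aside: because the cpumask is now a separate allocation, the error path gains a second unwind label, freeing in reverse order of allocation. A userspace sketch of that goto-unwind shape, with malloc()/free() standing in for the kernel allocators and for cpumask_var_t (all names here are made up for the sketch):

    /* Reverse-order cleanup via goto labels; userspace model only. */
    #include <stdlib.h>

    struct fake_buffer {
            unsigned long *cpumask;   /* stands in for cpumask_var_t */
            void **buffers;           /* per-cpu buffer pointers */
    };

    static struct fake_buffer *fake_buffer_alloc(int nr_cpu_ids)
    {
            struct fake_buffer *buffer;

            buffer = calloc(1, sizeof(*buffer));
            if (!buffer)
                    return NULL;

            buffer->cpumask = calloc(1, sizeof(unsigned long));
            if (!buffer->cpumask)
                    goto fail_free_buffer;

            buffer->buffers = calloc((size_t)nr_cpu_ids, sizeof(void *));
            if (!buffer->buffers)
                    goto fail_free_cpumask;

            return buffer;

     fail_free_cpumask:
            free(buffer->cpumask);
     fail_free_buffer:
            free(buffer);
            return NULL;
    }

    int main(void)
    {
            struct fake_buffer *b = fake_buffer_alloc(4);

            if (b) {
                    free(b->buffers);
                    free(b->cpumask);
                    free(b);
            }
            return 0;
    }
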
448 | /** | 507 | /** |
449 | * ring_buffer_free - free a ring buffer. | 508 | * ring_buffer_free - free a ring buffer. |
@@ -457,15 +516,18 @@ ring_buffer_free(struct ring_buffer *buffer) | |||
457 | for_each_buffer_cpu(buffer, cpu) | 516 | for_each_buffer_cpu(buffer, cpu) |
458 | rb_free_cpu_buffer(buffer->buffers[cpu]); | 517 | rb_free_cpu_buffer(buffer->buffers[cpu]); |
459 | 518 | ||
519 | free_cpumask_var(buffer->cpumask); | ||
520 | |||
460 | kfree(buffer); | 521 | kfree(buffer); |
461 | } | 522 | } |
523 | EXPORT_SYMBOL_GPL(ring_buffer_free); | ||
462 | 524 | ||
463 | static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); | 525 | static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); |
464 | 526 | ||
465 | static void | 527 | static void |
466 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | 528 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) |
467 | { | 529 | { |
468 | struct buffer_page *page; | 530 | struct buffer_page *bpage; |
469 | struct list_head *p; | 531 | struct list_head *p; |
470 | unsigned i; | 532 | unsigned i; |
471 | 533 | ||
@@ -473,13 +535,15 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
473 | synchronize_sched(); | 535 | synchronize_sched(); |
474 | 536 | ||
475 | for (i = 0; i < nr_pages; i++) { | 537 | for (i = 0; i < nr_pages; i++) { |
476 | BUG_ON(list_empty(&cpu_buffer->pages)); | 538 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) |
539 | return; | ||
477 | p = cpu_buffer->pages.next; | 540 | p = cpu_buffer->pages.next; |
478 | page = list_entry(p, struct buffer_page, list); | 541 | bpage = list_entry(p, struct buffer_page, list); |
479 | list_del_init(&page->list); | 542 | list_del_init(&bpage->list); |
480 | free_buffer_page(page); | 543 | free_buffer_page(bpage); |
481 | } | 544 | } |
482 | BUG_ON(list_empty(&cpu_buffer->pages)); | 545 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) |
546 | return; | ||
483 | 547 | ||
484 | rb_reset_cpu(cpu_buffer); | 548 | rb_reset_cpu(cpu_buffer); |
485 | 549 | ||
@@ -493,7 +557,7 @@ static void | |||
493 | rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | 557 | rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, |
494 | struct list_head *pages, unsigned nr_pages) | 558 | struct list_head *pages, unsigned nr_pages) |
495 | { | 559 | { |
496 | struct buffer_page *page; | 560 | struct buffer_page *bpage; |
497 | struct list_head *p; | 561 | struct list_head *p; |
498 | unsigned i; | 562 | unsigned i; |
499 | 563 | ||
@@ -501,11 +565,12 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
501 | synchronize_sched(); | 565 | synchronize_sched(); |
502 | 566 | ||
503 | for (i = 0; i < nr_pages; i++) { | 567 | for (i = 0; i < nr_pages; i++) { |
504 | BUG_ON(list_empty(pages)); | 568 | if (RB_WARN_ON(cpu_buffer, list_empty(pages))) |
569 | return; | ||
505 | p = pages->next; | 570 | p = pages->next; |
506 | page = list_entry(p, struct buffer_page, list); | 571 | bpage = list_entry(p, struct buffer_page, list); |
507 | list_del_init(&page->list); | 572 | list_del_init(&bpage->list); |
508 | list_add_tail(&page->list, &cpu_buffer->pages); | 573 | list_add_tail(&bpage->list, &cpu_buffer->pages); |
509 | } | 574 | } |
510 | rb_reset_cpu(cpu_buffer); | 575 | rb_reset_cpu(cpu_buffer); |
511 | 576 | ||
@@ -532,7 +597,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
532 | { | 597 | { |
533 | struct ring_buffer_per_cpu *cpu_buffer; | 598 | struct ring_buffer_per_cpu *cpu_buffer; |
534 | unsigned nr_pages, rm_pages, new_pages; | 599 | unsigned nr_pages, rm_pages, new_pages; |
535 | struct buffer_page *page, *tmp; | 600 | struct buffer_page *bpage, *tmp; |
536 | unsigned long buffer_size; | 601 | unsigned long buffer_size; |
537 | unsigned long addr; | 602 | unsigned long addr; |
538 | LIST_HEAD(pages); | 603 | LIST_HEAD(pages); |
@@ -562,7 +627,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
562 | if (size < buffer_size) { | 627 | if (size < buffer_size) { |
563 | 628 | ||
564 | /* easy case, just free pages */ | 629 | /* easy case, just free pages */ |
565 | BUG_ON(nr_pages >= buffer->pages); | 630 | if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) { |
631 | mutex_unlock(&buffer->mutex); | ||
632 | return -1; | ||
633 | } | ||
566 | 634 | ||
567 | rm_pages = buffer->pages - nr_pages; | 635 | rm_pages = buffer->pages - nr_pages; |
568 | 636 | ||
@@ -581,21 +649,26 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
581 | * add these pages to the cpu_buffers. Otherwise we just free | 649 | * add these pages to the cpu_buffers. Otherwise we just free |
582 | * them all and return -ENOMEM; | 650 | * them all and return -ENOMEM; |
583 | */ | 651 | */ |
584 | BUG_ON(nr_pages <= buffer->pages); | 652 | if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) { |
653 | mutex_unlock(&buffer->mutex); | ||
654 | return -1; | ||
655 | } | ||
656 | |||
585 | new_pages = nr_pages - buffer->pages; | 657 | new_pages = nr_pages - buffer->pages; |
586 | 658 | ||
587 | for_each_buffer_cpu(buffer, cpu) { | 659 | for_each_buffer_cpu(buffer, cpu) { |
588 | for (i = 0; i < new_pages; i++) { | 660 | for (i = 0; i < new_pages; i++) { |
589 | page = kzalloc_node(ALIGN(sizeof(*page), | 661 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), |
590 | cache_line_size()), | 662 | cache_line_size()), |
591 | GFP_KERNEL, cpu_to_node(cpu)); | 663 | GFP_KERNEL, cpu_to_node(cpu)); |
592 | if (!page) | 664 | if (!bpage) |
593 | goto free_pages; | 665 | goto free_pages; |
594 | list_add(&page->list, &pages); | 666 | list_add(&bpage->list, &pages); |
595 | addr = __get_free_page(GFP_KERNEL); | 667 | addr = __get_free_page(GFP_KERNEL); |
596 | if (!addr) | 668 | if (!addr) |
597 | goto free_pages; | 669 | goto free_pages; |
598 | page->page = (void *)addr; | 670 | bpage->page = (void *)addr; |
671 | rb_init_page(bpage->page); | ||
599 | } | 672 | } |
600 | } | 673 | } |
601 | 674 | ||
@@ -604,7 +677,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
604 | rb_insert_pages(cpu_buffer, &pages, new_pages); | 677 | rb_insert_pages(cpu_buffer, &pages, new_pages); |
605 | } | 678 | } |
606 | 679 | ||
607 | BUG_ON(!list_empty(&pages)); | 680 | if (RB_WARN_ON(buffer, !list_empty(&pages))) { |
681 | mutex_unlock(&buffer->mutex); | ||
682 | return -1; | ||
683 | } | ||
608 | 684 | ||
609 | out: | 685 | out: |
610 | buffer->pages = nr_pages; | 686 | buffer->pages = nr_pages; |
@@ -613,22 +689,29 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
613 | return size; | 689 | return size; |
614 | 690 | ||
615 | free_pages: | 691 | free_pages: |
616 | list_for_each_entry_safe(page, tmp, &pages, list) { | 692 | list_for_each_entry_safe(bpage, tmp, &pages, list) { |
617 | list_del_init(&page->list); | 693 | list_del_init(&bpage->list); |
618 | free_buffer_page(page); | 694 | free_buffer_page(bpage); |
619 | } | 695 | } |
620 | mutex_unlock(&buffer->mutex); | 696 | mutex_unlock(&buffer->mutex); |
621 | return -ENOMEM; | 697 | return -ENOMEM; |
622 | } | 698 | } |
699 | EXPORT_SYMBOL_GPL(ring_buffer_resize); | ||
623 | 700 | ||
624 | static inline int rb_null_event(struct ring_buffer_event *event) | 701 | static inline int rb_null_event(struct ring_buffer_event *event) |
625 | { | 702 | { |
626 | return event->type == RINGBUF_TYPE_PADDING; | 703 | return event->type == RINGBUF_TYPE_PADDING; |
627 | } | 704 | } |
628 | 705 | ||
629 | static inline void *__rb_page_index(struct buffer_page *page, unsigned index) | 706 | static inline void * |
707 | __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) | ||
708 | { | ||
709 | return bpage->data + index; | ||
710 | } | ||
711 | |||
712 | static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) | ||
630 | { | 713 | { |
631 | return page->page + index; | 714 | return bpage->page->data + index; |
632 | } | 715 | } |
633 | 716 | ||
634 | static inline struct ring_buffer_event * | 717 | static inline struct ring_buffer_event * |
@@ -658,7 +741,7 @@ static inline unsigned rb_page_write(struct buffer_page *bpage) | |||
658 | 741 | ||
659 | static inline unsigned rb_page_commit(struct buffer_page *bpage) | 742 | static inline unsigned rb_page_commit(struct buffer_page *bpage) |
660 | { | 743 | { |
661 | return local_read(&bpage->commit); | 744 | return local_read(&bpage->page->commit); |
662 | } | 745 | } |
663 | 746 | ||
664 | /* Size is determined by what has been commited */ | 747 | /* Size is determined by what has been commited */ |
@@ -693,7 +776,8 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer) | |||
693 | head += rb_event_length(event)) { | 776 | head += rb_event_length(event)) { |
694 | 777 | ||
695 | event = __rb_page_index(cpu_buffer->head_page, head); | 778 | event = __rb_page_index(cpu_buffer->head_page, head); |
696 | BUG_ON(rb_null_event(event)); | 779 | if (RB_WARN_ON(cpu_buffer, rb_null_event(event))) |
780 | return; | ||
697 | /* Only count data entries */ | 781 | /* Only count data entries */ |
698 | if (event->type != RINGBUF_TYPE_DATA) | 782 | if (event->type != RINGBUF_TYPE_DATA) |
699 | continue; | 783 | continue; |
@@ -703,14 +787,14 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer) | |||
703 | } | 787 | } |
704 | 788 | ||
705 | static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, | 789 | static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, |
706 | struct buffer_page **page) | 790 | struct buffer_page **bpage) |
707 | { | 791 | { |
708 | struct list_head *p = (*page)->list.next; | 792 | struct list_head *p = (*bpage)->list.next; |
709 | 793 | ||
710 | if (p == &cpu_buffer->pages) | 794 | if (p == &cpu_buffer->pages) |
711 | p = p->next; | 795 | p = p->next; |
712 | 796 | ||
713 | *page = list_entry(p, struct buffer_page, list); | 797 | *bpage = list_entry(p, struct buffer_page, list); |
714 | } | 798 | } |
715 | 799 | ||
716 | static inline unsigned | 800 | static inline unsigned |
@@ -746,16 +830,18 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, | |||
746 | addr &= PAGE_MASK; | 830 | addr &= PAGE_MASK; |
747 | 831 | ||
748 | while (cpu_buffer->commit_page->page != (void *)addr) { | 832 | while (cpu_buffer->commit_page->page != (void *)addr) { |
749 | RB_WARN_ON(cpu_buffer, | 833 | if (RB_WARN_ON(cpu_buffer, |
750 | cpu_buffer->commit_page == cpu_buffer->tail_page); | 834 | cpu_buffer->commit_page == cpu_buffer->tail_page)) |
751 | cpu_buffer->commit_page->commit = | 835 | return; |
836 | cpu_buffer->commit_page->page->commit = | ||
752 | cpu_buffer->commit_page->write; | 837 | cpu_buffer->commit_page->write; |
753 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 838 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); |
754 | cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; | 839 | cpu_buffer->write_stamp = |
840 | cpu_buffer->commit_page->page->time_stamp; | ||
755 | } | 841 | } |
756 | 842 | ||
757 | /* Now set the commit to the event's index */ | 843 | /* Now set the commit to the event's index */ |
758 | local_set(&cpu_buffer->commit_page->commit, index); | 844 | local_set(&cpu_buffer->commit_page->page->commit, index); |
759 | } | 845 | } |
760 | 846 | ||
761 | static inline void | 847 | static inline void |
@@ -769,25 +855,38 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | |||
769 | * back to us). This allows us to do a simple loop to | 855 | * back to us). This allows us to do a simple loop to |
770 | * assign the commit to the tail. | 856 | * assign the commit to the tail. |
771 | */ | 857 | */ |
858 | again: | ||
772 | while (cpu_buffer->commit_page != cpu_buffer->tail_page) { | 859 | while (cpu_buffer->commit_page != cpu_buffer->tail_page) { |
773 | cpu_buffer->commit_page->commit = | 860 | cpu_buffer->commit_page->page->commit = |
774 | cpu_buffer->commit_page->write; | 861 | cpu_buffer->commit_page->write; |
775 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 862 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); |
776 | cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; | 863 | cpu_buffer->write_stamp = |
864 | cpu_buffer->commit_page->page->time_stamp; | ||
777 | /* add barrier to keep gcc from optimizing too much */ | 865 | /* add barrier to keep gcc from optimizing too much */ |
778 | barrier(); | 866 | barrier(); |
779 | } | 867 | } |
780 | while (rb_commit_index(cpu_buffer) != | 868 | while (rb_commit_index(cpu_buffer) != |
781 | rb_page_write(cpu_buffer->commit_page)) { | 869 | rb_page_write(cpu_buffer->commit_page)) { |
782 | cpu_buffer->commit_page->commit = | 870 | cpu_buffer->commit_page->page->commit = |
783 | cpu_buffer->commit_page->write; | 871 | cpu_buffer->commit_page->write; |
784 | barrier(); | 872 | barrier(); |
785 | } | 873 | } |
874 | |||
875 | /* again, keep gcc from optimizing */ | ||
876 | barrier(); | ||
877 | |||
878 | /* | ||
879 | * If an interrupt came in just after the first while loop | ||
880 | * and pushed the tail page forward, we will be left with | ||
881 | * a dangling commit that will never go forward. | ||
882 | */ | ||
883 | if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page)) | ||
884 | goto again; | ||
786 | } | 885 | } |
787 | 886 | ||
788 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | 887 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) |
789 | { | 888 | { |
790 | cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp; | 889 | cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; |
791 | cpu_buffer->reader_page->read = 0; | 890 | cpu_buffer->reader_page->read = 0; |
792 | } | 891 | } |
793 | 892 | ||
@@ -806,7 +905,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter) | |||
806 | else | 905 | else |
807 | rb_inc_page(cpu_buffer, &iter->head_page); | 906 | rb_inc_page(cpu_buffer, &iter->head_page); |
808 | 907 | ||
809 | iter->read_stamp = iter->head_page->time_stamp; | 908 | iter->read_stamp = iter->head_page->page->time_stamp; |
810 | iter->head = 0; | 909 | iter->head = 0; |
811 | } | 910 | } |
812 | 911 | ||
@@ -880,12 +979,15 @@ static struct ring_buffer_event * | |||
880 | __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | 979 | __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, |
881 | unsigned type, unsigned long length, u64 *ts) | 980 | unsigned type, unsigned long length, u64 *ts) |
882 | { | 981 | { |
883 | struct buffer_page *tail_page, *head_page, *reader_page; | 982 | struct buffer_page *tail_page, *head_page, *reader_page, *commit_page; |
884 | unsigned long tail, write; | 983 | unsigned long tail, write; |
885 | struct ring_buffer *buffer = cpu_buffer->buffer; | 984 | struct ring_buffer *buffer = cpu_buffer->buffer; |
886 | struct ring_buffer_event *event; | 985 | struct ring_buffer_event *event; |
887 | unsigned long flags; | 986 | unsigned long flags; |
888 | 987 | ||
988 | commit_page = cpu_buffer->commit_page; | ||
989 | /* we just need to protect against interrupts */ | ||
990 | barrier(); | ||
889 | tail_page = cpu_buffer->tail_page; | 991 | tail_page = cpu_buffer->tail_page; |
890 | write = local_add_return(length, &tail_page->write); | 992 | write = local_add_return(length, &tail_page->write); |
891 | tail = write - length; | 993 | tail = write - length; |
@@ -894,7 +996,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
894 | if (write > BUF_PAGE_SIZE) { | 996 | if (write > BUF_PAGE_SIZE) { |
895 | struct buffer_page *next_page = tail_page; | 997 | struct buffer_page *next_page = tail_page; |
896 | 998 | ||
897 | spin_lock_irqsave(&cpu_buffer->lock, flags); | 999 | local_irq_save(flags); |
1000 | __raw_spin_lock(&cpu_buffer->lock); | ||
898 | 1001 | ||
899 | rb_inc_page(cpu_buffer, &next_page); | 1002 | rb_inc_page(cpu_buffer, &next_page); |
900 | 1003 | ||
@@ -902,14 +1005,15 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
902 | reader_page = cpu_buffer->reader_page; | 1005 | reader_page = cpu_buffer->reader_page; |
903 | 1006 | ||
904 | /* we grabbed the lock before incrementing */ | 1007 | /* we grabbed the lock before incrementing */ |
905 | RB_WARN_ON(cpu_buffer, next_page == reader_page); | 1008 | if (RB_WARN_ON(cpu_buffer, next_page == reader_page)) |
1009 | goto out_unlock; | ||
906 | 1010 | ||
907 | /* | 1011 | /* |
908 | * If for some reason, we had an interrupt storm that made | 1012 | * If for some reason, we had an interrupt storm that made |
909 | * it all the way around the buffer, bail, and warn | 1013 | * it all the way around the buffer, bail, and warn |
910 | * about it. | 1014 | * about it. |
911 | */ | 1015 | */ |
912 | if (unlikely(next_page == cpu_buffer->commit_page)) { | 1016 | if (unlikely(next_page == commit_page)) { |
913 | WARN_ON_ONCE(1); | 1017 | WARN_ON_ONCE(1); |
914 | goto out_unlock; | 1018 | goto out_unlock; |
915 | } | 1019 | } |
@@ -940,12 +1044,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
940 | */ | 1044 | */ |
941 | if (tail_page == cpu_buffer->tail_page) { | 1045 | if (tail_page == cpu_buffer->tail_page) { |
942 | local_set(&next_page->write, 0); | 1046 | local_set(&next_page->write, 0); |
943 | local_set(&next_page->commit, 0); | 1047 | local_set(&next_page->page->commit, 0); |
944 | cpu_buffer->tail_page = next_page; | 1048 | cpu_buffer->tail_page = next_page; |
945 | 1049 | ||
946 | /* reread the time stamp */ | 1050 | /* reread the time stamp */ |
947 | *ts = ring_buffer_time_stamp(cpu_buffer->cpu); | 1051 | *ts = ring_buffer_time_stamp(cpu_buffer->cpu); |
948 | cpu_buffer->tail_page->time_stamp = *ts; | 1052 | cpu_buffer->tail_page->page->time_stamp = *ts; |
949 | } | 1053 | } |
950 | 1054 | ||
951 | /* | 1055 | /* |
@@ -970,7 +1074,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
970 | rb_set_commit_to_write(cpu_buffer); | 1074 | rb_set_commit_to_write(cpu_buffer); |
971 | } | 1075 | } |
972 | 1076 | ||
973 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 1077 | __raw_spin_unlock(&cpu_buffer->lock); |
1078 | local_irq_restore(flags); | ||
974 | 1079 | ||
975 | /* fail and let the caller try again */ | 1080 | /* fail and let the caller try again */ |
976 | return ERR_PTR(-EAGAIN); | 1081 | return ERR_PTR(-EAGAIN); |
@@ -978,7 +1083,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
978 | 1083 | ||
979 | /* We reserved something on the buffer */ | 1084 | /* We reserved something on the buffer */ |
980 | 1085 | ||
981 | BUG_ON(write > BUF_PAGE_SIZE); | 1086 | if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE)) |
1087 | return NULL; | ||
982 | 1088 | ||
983 | event = __rb_page_index(tail_page, tail); | 1089 | event = __rb_page_index(tail_page, tail); |
984 | rb_update_event(event, type, length); | 1090 | rb_update_event(event, type, length); |
@@ -988,12 +1094,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
988 | * this page's time stamp. | 1094 | * this page's time stamp. |
989 | */ | 1095 | */ |
990 | if (!tail && rb_is_commit(cpu_buffer, event)) | 1096 | if (!tail && rb_is_commit(cpu_buffer, event)) |
991 | cpu_buffer->commit_page->time_stamp = *ts; | 1097 | cpu_buffer->commit_page->page->time_stamp = *ts; |
992 | 1098 | ||
993 | return event; | 1099 | return event; |
994 | 1100 | ||
995 | out_unlock: | 1101 | out_unlock: |
996 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 1102 | __raw_spin_unlock(&cpu_buffer->lock); |
1103 | local_irq_restore(flags); | ||
997 | return NULL; | 1104 | return NULL; |
998 | } | 1105 | } |
999 | 1106 | ||
@@ -1038,7 +1145,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, | |||
1038 | event->time_delta = *delta & TS_MASK; | 1145 | event->time_delta = *delta & TS_MASK; |
1039 | event->array[0] = *delta >> TS_SHIFT; | 1146 | event->array[0] = *delta >> TS_SHIFT; |
1040 | } else { | 1147 | } else { |
1041 | cpu_buffer->commit_page->time_stamp = *ts; | 1148 | cpu_buffer->commit_page->page->time_stamp = *ts; |
1042 | event->time_delta = 0; | 1149 | event->time_delta = 0; |
1043 | event->array[0] = 0; | 1150 | event->array[0] = 0; |
1044 | } | 1151 | } |
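
Illustrative aside: a time-extend event stores an oversized delta as TS_SHIFT (27) low bits in time_delta plus the remaining bits in array[0]; the read side reassembles them. A small userspace check that the split round-trips, using the TS_SHIFT/TS_MASK definitions from this file:

    /* Split-and-reassemble check for the 27-bit time_delta field. */
    #include <stdio.h>
    #include <stdint.h>

    #define TS_SHIFT 27
    #define TS_MASK  ((1ULL << TS_SHIFT) - 1)

    int main(void)
    {
            uint64_t delta   = (1ULL << 40) + 12345;   /* too big for 27 bits */
            uint32_t low     = delta & TS_MASK;        /* event->time_delta */
            uint32_t high    = delta >> TS_SHIFT;      /* event->array[0] */
            uint64_t rebuilt = ((uint64_t)high << TS_SHIFT) | low;

            printf("%d\n", rebuilt == delta);          /* prints 1 */
            return 0;
    }
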
@@ -1076,10 +1183,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, | |||
1076 | * storm or we have something buggy. | 1183 | * storm or we have something buggy. |
1077 | * Bail! | 1184 | * Bail! |
1078 | */ | 1185 | */ |
1079 | if (unlikely(++nr_loops > 1000)) { | 1186 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) |
1080 | RB_WARN_ON(cpu_buffer, 1); | ||
1081 | return NULL; | 1187 | return NULL; |
1082 | } | ||
1083 | 1188 | ||
1084 | ts = ring_buffer_time_stamp(cpu_buffer->cpu); | 1189 | ts = ring_buffer_time_stamp(cpu_buffer->cpu); |
1085 | 1190 | ||
@@ -1175,19 +1280,18 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, | |||
1175 | struct ring_buffer_event *event; | 1280 | struct ring_buffer_event *event; |
1176 | int cpu, resched; | 1281 | int cpu, resched; |
1177 | 1282 | ||
1178 | if (ring_buffers_off) | 1283 | if (ring_buffer_flags != RB_BUFFERS_ON) |
1179 | return NULL; | 1284 | return NULL; |
1180 | 1285 | ||
1181 | if (atomic_read(&buffer->record_disabled)) | 1286 | if (atomic_read(&buffer->record_disabled)) |
1182 | return NULL; | 1287 | return NULL; |
1183 | 1288 | ||
1184 | /* If we are tracing schedule, we don't want to recurse */ | 1289 | /* If we are tracing schedule, we don't want to recurse */ |
1185 | resched = need_resched(); | 1290 | resched = ftrace_preempt_disable(); |
1186 | preempt_disable_notrace(); | ||
1187 | 1291 | ||
1188 | cpu = raw_smp_processor_id(); | 1292 | cpu = raw_smp_processor_id(); |
1189 | 1293 | ||
1190 | if (!cpu_isset(cpu, buffer->cpumask)) | 1294 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1191 | goto out; | 1295 | goto out; |
1192 | 1296 | ||
1193 | cpu_buffer = buffer->buffers[cpu]; | 1297 | cpu_buffer = buffer->buffers[cpu]; |
@@ -1214,12 +1318,10 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, | |||
1214 | return event; | 1318 | return event; |
1215 | 1319 | ||
1216 | out: | 1320 | out: |
1217 | if (resched) | 1321 | ftrace_preempt_enable(resched); |
1218 | preempt_enable_no_resched_notrace(); | ||
1219 | else | ||
1220 | preempt_enable_notrace(); | ||
1221 | return NULL; | 1322 | return NULL; |
1222 | } | 1323 | } |
1324 | EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); | ||
1223 | 1325 | ||
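
Illustrative aside: the diff swaps the open-coded need_resched()/preempt_disable_notrace() pairs for ftrace_preempt_disable()/ftrace_preempt_enable() from trace.h, which bundle the same idiom: the disable side reports whether a reschedule was pending, and the enable side uses that flag to pick the matching re-enable variant. A userspace model of that pairing, with the kernel primitives reduced to counters and printf():

    /* Save-a-flag-on-disable, use-it-on-enable pairing; userspace model only. */
    #include <stdio.h>

    static int preempt_depth;
    static int resched_pending = 1;   /* pretend a reschedule was pending */

    static int model_preempt_disable(void)
    {
            int resched = resched_pending;

            preempt_depth++;
            return resched;
    }

    static void model_preempt_enable(int resched)
    {
            preempt_depth--;
            if (resched)
                    printf("depth %d: re-enabled without scheduling\n", preempt_depth);
            else
                    printf("depth %d: re-enabled, may schedule now\n", preempt_depth);
    }

    int main(void)
    {
            int resched = model_preempt_disable();

            /* ... reserve and commit on the ring buffer would happen here ... */
            model_preempt_enable(resched);
            return 0;
    }
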
1224 | static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, | 1326 | static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, |
1225 | struct ring_buffer_event *event) | 1327 | struct ring_buffer_event *event) |
@@ -1259,16 +1361,14 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, | |||
1259 | /* | 1361 | /* |
1260 | * Only the last preempt count needs to restore preemption. | 1362 | * Only the last preempt count needs to restore preemption. |
1261 | */ | 1363 | */ |
1262 | if (preempt_count() == 1) { | 1364 | if (preempt_count() == 1) |
1263 | if (per_cpu(rb_need_resched, cpu)) | 1365 | ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); |
1264 | preempt_enable_no_resched_notrace(); | 1366 | else |
1265 | else | ||
1266 | preempt_enable_notrace(); | ||
1267 | } else | ||
1268 | preempt_enable_no_resched_notrace(); | 1367 | preempt_enable_no_resched_notrace(); |
1269 | 1368 | ||
1270 | return 0; | 1369 | return 0; |
1271 | } | 1370 | } |
1371 | EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit); | ||
1272 | 1372 | ||
1273 | /** | 1373 | /** |
1274 | * ring_buffer_write - write data to the buffer without reserving | 1374 | * ring_buffer_write - write data to the buffer without reserving |
@@ -1294,18 +1394,17 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
1294 | int ret = -EBUSY; | 1394 | int ret = -EBUSY; |
1295 | int cpu, resched; | 1395 | int cpu, resched; |
1296 | 1396 | ||
1297 | if (ring_buffers_off) | 1397 | if (ring_buffer_flags != RB_BUFFERS_ON) |
1298 | return -EBUSY; | 1398 | return -EBUSY; |
1299 | 1399 | ||
1300 | if (atomic_read(&buffer->record_disabled)) | 1400 | if (atomic_read(&buffer->record_disabled)) |
1301 | return -EBUSY; | 1401 | return -EBUSY; |
1302 | 1402 | ||
1303 | resched = need_resched(); | 1403 | resched = ftrace_preempt_disable(); |
1304 | preempt_disable_notrace(); | ||
1305 | 1404 | ||
1306 | cpu = raw_smp_processor_id(); | 1405 | cpu = raw_smp_processor_id(); |
1307 | 1406 | ||
1308 | if (!cpu_isset(cpu, buffer->cpumask)) | 1407 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1309 | goto out; | 1408 | goto out; |
1310 | 1409 | ||
1311 | cpu_buffer = buffer->buffers[cpu]; | 1410 | cpu_buffer = buffer->buffers[cpu]; |
@@ -1327,13 +1426,11 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
1327 | 1426 | ||
1328 | ret = 0; | 1427 | ret = 0; |
1329 | out: | 1428 | out: |
1330 | if (resched) | 1429 | ftrace_preempt_enable(resched); |
1331 | preempt_enable_no_resched_notrace(); | ||
1332 | else | ||
1333 | preempt_enable_notrace(); | ||
1334 | 1430 | ||
1335 | return ret; | 1431 | return ret; |
1336 | } | 1432 | } |
1433 | EXPORT_SYMBOL_GPL(ring_buffer_write); | ||
1337 | 1434 | ||
1338 | static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) | 1435 | static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) |
1339 | { | 1436 | { |
@@ -1360,6 +1457,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer) | |||
1360 | { | 1457 | { |
1361 | atomic_inc(&buffer->record_disabled); | 1458 | atomic_inc(&buffer->record_disabled); |
1362 | } | 1459 | } |
1460 | EXPORT_SYMBOL_GPL(ring_buffer_record_disable); | ||
1363 | 1461 | ||
1364 | /** | 1462 | /** |
1365 | * ring_buffer_record_enable - enable writes to the buffer | 1463 | * ring_buffer_record_enable - enable writes to the buffer |
@@ -1372,6 +1470,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer) | |||
1372 | { | 1470 | { |
1373 | atomic_dec(&buffer->record_disabled); | 1471 | atomic_dec(&buffer->record_disabled); |
1374 | } | 1472 | } |
1473 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable); | ||
1375 | 1474 | ||
1376 | /** | 1475 | /** |
1377 | * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer | 1476 | * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer |
@@ -1387,12 +1486,13 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) | |||
1387 | { | 1486 | { |
1388 | struct ring_buffer_per_cpu *cpu_buffer; | 1487 | struct ring_buffer_per_cpu *cpu_buffer; |
1389 | 1488 | ||
1390 | if (!cpu_isset(cpu, buffer->cpumask)) | 1489 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1391 | return; | 1490 | return; |
1392 | 1491 | ||
1393 | cpu_buffer = buffer->buffers[cpu]; | 1492 | cpu_buffer = buffer->buffers[cpu]; |
1394 | atomic_inc(&cpu_buffer->record_disabled); | 1493 | atomic_inc(&cpu_buffer->record_disabled); |
1395 | } | 1494 | } |
1495 | EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); | ||
1396 | 1496 | ||
1397 | /** | 1497 | /** |
1398 | * ring_buffer_record_enable_cpu - enable writes to the buffer | 1498 | * ring_buffer_record_enable_cpu - enable writes to the buffer |
@@ -1406,12 +1506,13 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) | |||
1406 | { | 1506 | { |
1407 | struct ring_buffer_per_cpu *cpu_buffer; | 1507 | struct ring_buffer_per_cpu *cpu_buffer; |
1408 | 1508 | ||
1409 | if (!cpu_isset(cpu, buffer->cpumask)) | 1509 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1410 | return; | 1510 | return; |
1411 | 1511 | ||
1412 | cpu_buffer = buffer->buffers[cpu]; | 1512 | cpu_buffer = buffer->buffers[cpu]; |
1413 | atomic_dec(&cpu_buffer->record_disabled); | 1513 | atomic_dec(&cpu_buffer->record_disabled); |
1414 | } | 1514 | } |
1515 | EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); | ||
1415 | 1516 | ||
1416 | /** | 1517 | /** |
1417 | * ring_buffer_entries_cpu - get the number of entries in a cpu buffer | 1518 | * ring_buffer_entries_cpu - get the number of entries in a cpu buffer |
@@ -1422,12 +1523,13 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) | |||
1422 | { | 1523 | { |
1423 | struct ring_buffer_per_cpu *cpu_buffer; | 1524 | struct ring_buffer_per_cpu *cpu_buffer; |
1424 | 1525 | ||
1425 | if (!cpu_isset(cpu, buffer->cpumask)) | 1526 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1426 | return 0; | 1527 | return 0; |
1427 | 1528 | ||
1428 | cpu_buffer = buffer->buffers[cpu]; | 1529 | cpu_buffer = buffer->buffers[cpu]; |
1429 | return cpu_buffer->entries; | 1530 | return cpu_buffer->entries; |
1430 | } | 1531 | } |
1532 | EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); | ||
1431 | 1533 | ||
1432 | /** | 1534 | /** |
1433 | * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer | 1535 | * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer |
@@ -1438,12 +1540,13 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) | |||
1438 | { | 1540 | { |
1439 | struct ring_buffer_per_cpu *cpu_buffer; | 1541 | struct ring_buffer_per_cpu *cpu_buffer; |
1440 | 1542 | ||
1441 | if (!cpu_isset(cpu, buffer->cpumask)) | 1543 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1442 | return 0; | 1544 | return 0; |
1443 | 1545 | ||
1444 | cpu_buffer = buffer->buffers[cpu]; | 1546 | cpu_buffer = buffer->buffers[cpu]; |
1445 | return cpu_buffer->overrun; | 1547 | return cpu_buffer->overrun; |
1446 | } | 1548 | } |
1549 | EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); | ||
1447 | 1550 | ||
1448 | /** | 1551 | /** |
1449 | * ring_buffer_entries - get the number of entries in a buffer | 1552 | * ring_buffer_entries - get the number of entries in a buffer |
@@ -1466,6 +1569,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer) | |||
1466 | 1569 | ||
1467 | return entries; | 1570 | return entries; |
1468 | } | 1571 | } |
1572 | EXPORT_SYMBOL_GPL(ring_buffer_entries); | ||
1469 | 1573 | ||
1470 | /** | 1574 | /** |
1471 | * ring_buffer_overrun_cpu - get the number of overruns in buffer | 1575 | * ring_buffer_overrun_cpu - get the number of overruns in buffer |
@@ -1488,15 +1592,9 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer) | |||
1488 | 1592 | ||
1489 | return overruns; | 1593 | return overruns; |
1490 | } | 1594 | } |
1595 | EXPORT_SYMBOL_GPL(ring_buffer_overruns); | ||
1491 | 1596 | ||
1492 | /** | 1597 | static void rb_iter_reset(struct ring_buffer_iter *iter) |
1493 | * ring_buffer_iter_reset - reset an iterator | ||
1494 | * @iter: The iterator to reset | ||
1495 | * | ||
1496 | * Resets the iterator, so that it will start from the beginning | ||
1497 | * again. | ||
1498 | */ | ||
1499 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | ||
1500 | { | 1598 | { |
1501 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 1599 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
1502 | 1600 | ||
@@ -1511,10 +1609,28 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | |||
1511 | if (iter->head) | 1609 | if (iter->head) |
1512 | iter->read_stamp = cpu_buffer->read_stamp; | 1610 | iter->read_stamp = cpu_buffer->read_stamp; |
1513 | else | 1611 | else |
1514 | iter->read_stamp = iter->head_page->time_stamp; | 1612 | iter->read_stamp = iter->head_page->page->time_stamp; |
1515 | } | 1613 | } |
1516 | 1614 | ||
1517 | /** | 1615 | /** |
1616 | * ring_buffer_iter_reset - reset an iterator | ||
1617 | * @iter: The iterator to reset | ||
1618 | * | ||
1619 | * Resets the iterator, so that it will start from the beginning | ||
1620 | * again. | ||
1621 | */ | ||
1622 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | ||
1623 | { | ||
1624 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | ||
1625 | unsigned long flags; | ||
1626 | |||
1627 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
1628 | rb_iter_reset(iter); | ||
1629 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1630 | } | ||
1631 | EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); | ||
1632 | |||
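
Illustrative aside: the iterator reset is split into an unlocked helper (rb_iter_reset) and a public wrapper that takes the new reader_lock, so internal callers that already hold the lock can reuse the helper. A userspace sketch of that locked-wrapper/unlocked-helper split, with a pthread mutex standing in for the spinlock plus IRQ save (build with -pthread):

    /* Locked public entry point wrapping an unlocked internal helper. */
    #include <pthread.h>

    struct iter_model {
            pthread_mutex_t reader_lock;
            unsigned long head;
    };

    /* Helper: caller must already hold reader_lock. */
    static void iter_reset_locked(struct iter_model *it)
    {
            it->head = 0;
    }

    /* Public entry: takes the lock, then reuses the helper. */
    static void iter_reset(struct iter_model *it)
    {
            pthread_mutex_lock(&it->reader_lock);
            iter_reset_locked(it);
            pthread_mutex_unlock(&it->reader_lock);
    }

    int main(void)
    {
            struct iter_model it = {
                    .reader_lock = PTHREAD_MUTEX_INITIALIZER,
                    .head        = 42,
            };

            iter_reset(&it);
            return (int)it.head;   /* 0 */
    }
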
1633 | /** | ||
1518 | * ring_buffer_iter_empty - check if an iterator has no more to read | 1634 | * ring_buffer_iter_empty - check if an iterator has no more to read |
1519 | * @iter: The iterator to check | 1635 | * @iter: The iterator to check |
1520 | */ | 1636 | */ |
@@ -1527,6 +1643,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter) | |||
1527 | return iter->head_page == cpu_buffer->commit_page && | 1643 | return iter->head_page == cpu_buffer->commit_page && |
1528 | iter->head == rb_commit_index(cpu_buffer); | 1644 | iter->head == rb_commit_index(cpu_buffer); |
1529 | } | 1645 | } |
1646 | EXPORT_SYMBOL_GPL(ring_buffer_iter_empty); | ||
1530 | 1647 | ||
1531 | static void | 1648 | static void |
1532 | rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, | 1649 | rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, |
@@ -1597,7 +1714,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1597 | unsigned long flags; | 1714 | unsigned long flags; |
1598 | int nr_loops = 0; | 1715 | int nr_loops = 0; |
1599 | 1716 | ||
1600 | spin_lock_irqsave(&cpu_buffer->lock, flags); | 1717 | local_irq_save(flags); |
1718 | __raw_spin_lock(&cpu_buffer->lock); | ||
1601 | 1719 | ||
1602 | again: | 1720 | again: |
1603 | /* | 1721 | /* |
@@ -1606,8 +1724,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1606 | * a case where we will loop three times. There should be no | 1724 | * a case where we will loop three times. There should be no |
1607 | * reason to loop four times (that I know of). | 1725 | * reason to loop four times (that I know of). |
1608 | */ | 1726 | */ |
1609 | if (unlikely(++nr_loops > 3)) { | 1727 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { |
1610 | RB_WARN_ON(cpu_buffer, 1); | ||
1611 | reader = NULL; | 1728 | reader = NULL; |
1612 | goto out; | 1729 | goto out; |
1613 | } | 1730 | } |
@@ -1619,8 +1736,9 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1619 | goto out; | 1736 | goto out; |
1620 | 1737 | ||
1621 | /* Never should we have an index greater than the size */ | 1738 | /* Never should we have an index greater than the size */ |
1622 | RB_WARN_ON(cpu_buffer, | 1739 | if (RB_WARN_ON(cpu_buffer, |
1623 | cpu_buffer->reader_page->read > rb_page_size(reader)); | 1740 | cpu_buffer->reader_page->read > rb_page_size(reader))) |
1741 | goto out; | ||
1624 | 1742 | ||
1625 | /* check if we caught up to the tail */ | 1743 | /* check if we caught up to the tail */ |
1626 | reader = NULL; | 1744 | reader = NULL; |
@@ -1637,7 +1755,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1637 | cpu_buffer->reader_page->list.prev = reader->list.prev; | 1755 | cpu_buffer->reader_page->list.prev = reader->list.prev; |
1638 | 1756 | ||
1639 | local_set(&cpu_buffer->reader_page->write, 0); | 1757 | local_set(&cpu_buffer->reader_page->write, 0); |
1640 | local_set(&cpu_buffer->reader_page->commit, 0); | 1758 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
1641 | 1759 | ||
1642 | /* Make the reader page now replace the head */ | 1760 | /* Make the reader page now replace the head */ |
1643 | reader->list.prev->next = &cpu_buffer->reader_page->list; | 1761 | reader->list.prev->next = &cpu_buffer->reader_page->list; |
@@ -1659,7 +1777,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1659 | goto again; | 1777 | goto again; |
1660 | 1778 | ||
1661 | out: | 1779 | out: |
1662 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 1780 | __raw_spin_unlock(&cpu_buffer->lock); |
1781 | local_irq_restore(flags); | ||
1663 | 1782 | ||
1664 | return reader; | 1783 | return reader; |
1665 | } | 1784 | } |
@@ -1673,7 +1792,8 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) | |||
1673 | reader = rb_get_reader_page(cpu_buffer); | 1792 | reader = rb_get_reader_page(cpu_buffer); |
1674 | 1793 | ||
1675 | /* This function should not be called when buffer is empty */ | 1794 | /* This function should not be called when buffer is empty */ |
1676 | BUG_ON(!reader); | 1795 | if (RB_WARN_ON(cpu_buffer, !reader)) |
1796 | return; | ||
1677 | 1797 | ||
1678 | event = rb_reader_event(cpu_buffer); | 1798 | event = rb_reader_event(cpu_buffer); |
1679 | 1799 | ||
@@ -1700,7 +1820,9 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) | |||
1700 | * Check if we are at the end of the buffer. | 1820 | * Check if we are at the end of the buffer. |
1701 | */ | 1821 | */ |
1702 | if (iter->head >= rb_page_size(iter->head_page)) { | 1822 | if (iter->head >= rb_page_size(iter->head_page)) { |
1703 | BUG_ON(iter->head_page == cpu_buffer->commit_page); | 1823 | if (RB_WARN_ON(buffer, |
1824 | iter->head_page == cpu_buffer->commit_page)) | ||
1825 | return; | ||
1704 | rb_inc_iter(iter); | 1826 | rb_inc_iter(iter); |
1705 | return; | 1827 | return; |
1706 | } | 1828 | } |
@@ -1713,8 +1835,10 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) | |||
1713 | * This should not be called to advance the header if we are | 1835 | * This should not be called to advance the header if we are |
1714 | * at the tail of the buffer. | 1836 | * at the tail of the buffer. |
1715 | */ | 1837 | */ |
1716 | BUG_ON((iter->head_page == cpu_buffer->commit_page) && | 1838 | if (RB_WARN_ON(cpu_buffer, |
1717 | (iter->head + length > rb_commit_index(cpu_buffer))); | 1839 | (iter->head_page == cpu_buffer->commit_page) && |
1840 | (iter->head + length > rb_commit_index(cpu_buffer)))) | ||
1841 | return; | ||
1718 | 1842 | ||
1719 | rb_update_iter_read_stamp(iter, event); | 1843 | rb_update_iter_read_stamp(iter, event); |
1720 | 1844 | ||
@@ -1726,24 +1850,15 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) | |||
1726 | rb_advance_iter(iter); | 1850 | rb_advance_iter(iter); |
1727 | } | 1851 | } |
1728 | 1852 | ||
1729 | /** | 1853 | static struct ring_buffer_event * |
1730 | * ring_buffer_peek - peek at the next event to be read | 1854 | rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) |
1731 | * @buffer: The ring buffer to read | ||
1732 | * @cpu: The cpu to peak at | ||
1733 | * @ts: The timestamp counter of this event. | ||
1734 | * | ||
1735 | * This will return the event that will be read next, but does | ||
1736 | * not consume the data. | ||
1737 | */ | ||
1738 | struct ring_buffer_event * | ||
1739 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | ||
1740 | { | 1855 | { |
1741 | struct ring_buffer_per_cpu *cpu_buffer; | 1856 | struct ring_buffer_per_cpu *cpu_buffer; |
1742 | struct ring_buffer_event *event; | 1857 | struct ring_buffer_event *event; |
1743 | struct buffer_page *reader; | 1858 | struct buffer_page *reader; |
1744 | int nr_loops = 0; | 1859 | int nr_loops = 0; |
1745 | 1860 | ||
1746 | if (!cpu_isset(cpu, buffer->cpumask)) | 1861 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1747 | return NULL; | 1862 | return NULL; |
1748 | 1863 | ||
1749 | cpu_buffer = buffer->buffers[cpu]; | 1864 | cpu_buffer = buffer->buffers[cpu]; |
@@ -1757,10 +1872,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
1757 | * can have. Nesting 10 deep of interrupts is clearly | 1872 | * can have. Nesting 10 deep of interrupts is clearly |
1758 | * an anomaly. | 1873 | * an anomaly. |
1759 | */ | 1874 | */ |
1760 | if (unlikely(++nr_loops > 10)) { | 1875 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10)) |
1761 | RB_WARN_ON(cpu_buffer, 1); | ||
1762 | return NULL; | 1876 | return NULL; |
1763 | } | ||
1764 | 1877 | ||
1765 | reader = rb_get_reader_page(cpu_buffer); | 1878 | reader = rb_get_reader_page(cpu_buffer); |
1766 | if (!reader) | 1879 | if (!reader) |
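Here the existing ten-iteration sanity bound on the reader's retry loop is folded into the RB_WARN_ON() condition: a few restarts are normal (an interrupt can move the pages under the reader), but an unbounded loop indicates corruption, so warn and give up. A small illustrative sketch of that guard, with arbitrary names and numbers:

        #include <stdio.h>

        /*
         * Sketch of the loop guard folded into RB_WARN_ON() above: a handful
         * of restarts is legitimate, more than ten means something is broken,
         * so warn and give up instead of spinning forever.
         */
        static const char *find_event(int restarts_needed)
        {
                int nr_loops = 0;

         again:
                if (++nr_loops > 10) {
                        fprintf(stderr, "WARN: reader looped %d times\n", nr_loops);
                        return NULL;
                }
                if (nr_loops <= restarts_needed)
                        goto again;            /* pretend an irq moved the page */
                return "event";
        }

        int main(void)
        {
                const char *e;

                e = find_event(2);
                printf("%s\n", e ? e : "(gave up)");
                e = find_event(50);
                printf("%s\n", e ? e : "(gave up)");
                return 0;
        }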
@@ -1797,17 +1910,10 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
1797 | 1910 | ||
1798 | return NULL; | 1911 | return NULL; |
1799 | } | 1912 | } |
1913 | EXPORT_SYMBOL_GPL(ring_buffer_peek); | ||
1800 | 1914 | ||
1801 | /** | 1915 | static struct ring_buffer_event * |
1802 | * ring_buffer_iter_peek - peek at the next event to be read | 1916 | rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) |
1803 | * @iter: The ring buffer iterator | ||
1804 | * @ts: The timestamp counter of this event. | ||
1805 | * | ||
1806 | * This will return the event that will be read next, but does | ||
1807 | * not increment the iterator. | ||
1808 | */ | ||
1809 | struct ring_buffer_event * | ||
1810 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | ||
1811 | { | 1917 | { |
1812 | struct ring_buffer *buffer; | 1918 | struct ring_buffer *buffer; |
1813 | struct ring_buffer_per_cpu *cpu_buffer; | 1919 | struct ring_buffer_per_cpu *cpu_buffer; |
@@ -1829,10 +1935,8 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1829 | * can have. Nesting 10 deep of interrupts is clearly | 1935 | * can have. Nesting 10 deep of interrupts is clearly |
1830 | * an anomaly. | 1936 | * an anomaly. |
1831 | */ | 1937 | */ |
1832 | if (unlikely(++nr_loops > 10)) { | 1938 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10)) |
1833 | RB_WARN_ON(cpu_buffer, 1); | ||
1834 | return NULL; | 1939 | return NULL; |
1835 | } | ||
1836 | 1940 | ||
1837 | if (rb_per_cpu_empty(cpu_buffer)) | 1941 | if (rb_per_cpu_empty(cpu_buffer)) |
1838 | return NULL; | 1942 | return NULL; |
@@ -1867,6 +1971,52 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1867 | 1971 | ||
1868 | return NULL; | 1972 | return NULL; |
1869 | } | 1973 | } |
1974 | EXPORT_SYMBOL_GPL(ring_buffer_iter_peek); | ||
1975 | |||
1976 | /** | ||
1977 | * ring_buffer_peek - peek at the next event to be read | ||
1978 | * @buffer: The ring buffer to read | ||
1979 | * @cpu: The cpu to peek at | ||
1980 | * @ts: The timestamp counter of this event. | ||
1981 | * | ||
1982 | * This will return the event that will be read next, but does | ||
1983 | * not consume the data. | ||
1984 | */ | ||
1985 | struct ring_buffer_event * | ||
1986 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | ||
1987 | { | ||
1988 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | ||
1989 | struct ring_buffer_event *event; | ||
1990 | unsigned long flags; | ||
1991 | |||
1992 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
1993 | event = rb_buffer_peek(buffer, cpu, ts); | ||
1994 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1995 | |||
1996 | return event; | ||
1997 | } | ||
1998 | |||
1999 | /** | ||
2000 | * ring_buffer_iter_peek - peek at the next event to be read | ||
2001 | * @iter: The ring buffer iterator | ||
2002 | * @ts: The timestamp counter of this event. | ||
2003 | * | ||
2004 | * This will return the event that will be read next, but does | ||
2005 | * not increment the iterator. | ||
2006 | */ | ||
2007 | struct ring_buffer_event * | ||
2008 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | ||
2009 | { | ||
2010 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | ||
2011 | struct ring_buffer_event *event; | ||
2012 | unsigned long flags; | ||
2013 | |||
2014 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
2015 | event = rb_iter_peek(iter, ts); | ||
2016 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
2017 | |||
2018 | return event; | ||
2019 | } | ||
1870 | 2020 | ||
1871 | /** | 2021 | /** |
1872 | * ring_buffer_consume - return an event and consume it | 2022 | * ring_buffer_consume - return an event and consume it |
@@ -1879,21 +2029,27 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1879 | struct ring_buffer_event * | 2029 | struct ring_buffer_event * |
1880 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | 2030 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) |
1881 | { | 2031 | { |
1882 | struct ring_buffer_per_cpu *cpu_buffer; | 2032 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
1883 | struct ring_buffer_event *event; | 2033 | struct ring_buffer_event *event; |
2034 | unsigned long flags; | ||
1884 | 2035 | ||
1885 | if (!cpu_isset(cpu, buffer->cpumask)) | 2036 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1886 | return NULL; | 2037 | return NULL; |
1887 | 2038 | ||
1888 | event = ring_buffer_peek(buffer, cpu, ts); | 2039 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2040 | |||
2041 | event = rb_buffer_peek(buffer, cpu, ts); | ||
1889 | if (!event) | 2042 | if (!event) |
1890 | return NULL; | 2043 | goto out; |
1891 | 2044 | ||
1892 | cpu_buffer = buffer->buffers[cpu]; | ||
1893 | rb_advance_reader(cpu_buffer); | 2045 | rb_advance_reader(cpu_buffer); |
1894 | 2046 | ||
2047 | out: | ||
2048 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
2049 | |||
1895 | return event; | 2050 | return event; |
1896 | } | 2051 | } |
2052 | EXPORT_SYMBOL_GPL(ring_buffer_consume); | ||
1897 | 2053 | ||
1898 | /** | 2054 | /** |
1899 | * ring_buffer_read_start - start a non consuming read of the buffer | 2055 | * ring_buffer_read_start - start a non consuming read of the buffer |
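Across the last two hunks the exported peek routines are split into lock-free static workers (rb_buffer_peek(), rb_iter_peek()) plus thin exported wrappers that take the new reader_lock, and ring_buffer_consume() now calls the worker and rb_advance_reader() inside one critical section instead of going through the public peek. A userspace pthread sketch of that shape (toy names throughout, not the kernel API):

        #include <pthread.h>
        #include <stdio.h>

        /* A toy queue standing in for one per-cpu buffer. */
        struct toy_buffer {
                pthread_mutex_t reader_lock;   /* plays the role of reader_lock */
                int             items[8];
                unsigned        head, tail;
        };

        /* Worker: caller must already hold reader_lock (like rb_buffer_peek()). */
        static int *peek_locked(struct toy_buffer *b)
        {
                return b->head == b->tail ? NULL : &b->items[b->head % 8];
        }

        /* Exported-style wrapper: lock, delegate, unlock (like ring_buffer_peek()). */
        static int *toy_peek(struct toy_buffer *b)
        {
                int *it;

                pthread_mutex_lock(&b->reader_lock);
                it = peek_locked(b);
                pthread_mutex_unlock(&b->reader_lock);
                return it;
        }

        /* Consume reuses the worker so peek+advance happen under one lock hold. */
        static int toy_consume(struct toy_buffer *b, int *out)
        {
                int *it, ret = 0;

                pthread_mutex_lock(&b->reader_lock);
                it = peek_locked(b);
                if (it) {
                        *out = *it;
                        b->head++;             /* like rb_advance_reader() */
                        ret = 1;
                }
                pthread_mutex_unlock(&b->reader_lock);
                return ret;
        }

        int main(void)
        {
                struct toy_buffer b = { .reader_lock = PTHREAD_MUTEX_INITIALIZER };
                int v;

                b.items[b.tail++ % 8] = 42;
                printf("peek sees %d\n", toy_peek(&b) ? *toy_peek(&b) : -1);
                while (toy_consume(&b, &v))
                        printf("consumed %d\n", v);
                return 0;
        }

Factoring the worker out is what lets consume hold reader_lock across both the peek and the advance, so no other reader can slip in between them.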
@@ -1914,7 +2070,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) | |||
1914 | struct ring_buffer_iter *iter; | 2070 | struct ring_buffer_iter *iter; |
1915 | unsigned long flags; | 2071 | unsigned long flags; |
1916 | 2072 | ||
1917 | if (!cpu_isset(cpu, buffer->cpumask)) | 2073 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
1918 | return NULL; | 2074 | return NULL; |
1919 | 2075 | ||
1920 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); | 2076 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); |
@@ -1928,12 +2084,15 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) | |||
1928 | atomic_inc(&cpu_buffer->record_disabled); | 2084 | atomic_inc(&cpu_buffer->record_disabled); |
1929 | synchronize_sched(); | 2085 | synchronize_sched(); |
1930 | 2086 | ||
1931 | spin_lock_irqsave(&cpu_buffer->lock, flags); | 2087 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
1932 | ring_buffer_iter_reset(iter); | 2088 | __raw_spin_lock(&cpu_buffer->lock); |
1933 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 2089 | rb_iter_reset(iter); |
2090 | __raw_spin_unlock(&cpu_buffer->lock); | ||
2091 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1934 | 2092 | ||
1935 | return iter; | 2093 | return iter; |
1936 | } | 2094 | } |
2095 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); | ||
1937 | 2096 | ||
1938 | /** | 2097 | /** |
1939 | * ring_buffer_finish - finish reading the iterator of the buffer | 2098 | * ring_buffer_finish - finish reading the iterator of the buffer |
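ring_buffer_read_start() now nests the two locks in a fixed order: reader_lock is taken first (with interrupts disabled by the irqsave), then the raw per-cpu `lock`, and they are dropped in reverse; ring_buffer_reset_cpu() below follows the same order. A trivial userspace sketch of that ordering discipline, using plain mutexes in place of the spinlocks (hypothetical names):

        #include <pthread.h>
        #include <stdio.h>

        /*
         * reader_lock is always taken first, the per-cpu lock second, and
         * they are released in reverse order, so paths that need both locks
         * cannot deadlock against each other.
         */
        static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
        static pthread_mutex_t cpu_lock    = PTHREAD_MUTEX_INITIALIZER; /* inner */
        static int reader_pos;

        static void reset_reader(void)
        {
                pthread_mutex_lock(&reader_lock);      /* outer first          */
                pthread_mutex_lock(&cpu_lock);         /* then the inner lock  */

                reader_pos = 0;                        /* like rb_iter_reset() */

                pthread_mutex_unlock(&cpu_lock);       /* inner released first */
                pthread_mutex_unlock(&reader_lock);
        }

        int main(void)
        {
                reset_reader();
                printf("reader_pos reset to %d\n", reader_pos);
                return 0;
        }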
@@ -1950,6 +2109,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter) | |||
1950 | atomic_dec(&cpu_buffer->record_disabled); | 2109 | atomic_dec(&cpu_buffer->record_disabled); |
1951 | kfree(iter); | 2110 | kfree(iter); |
1952 | } | 2111 | } |
2112 | EXPORT_SYMBOL_GPL(ring_buffer_read_finish); | ||
1953 | 2113 | ||
1954 | /** | 2114 | /** |
1955 | * ring_buffer_read - read the next item in the ring buffer by the iterator | 2115 | * ring_buffer_read - read the next item in the ring buffer by the iterator |
@@ -1962,15 +2122,21 @@ struct ring_buffer_event * | |||
1962 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | 2122 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) |
1963 | { | 2123 | { |
1964 | struct ring_buffer_event *event; | 2124 | struct ring_buffer_event *event; |
2125 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | ||
2126 | unsigned long flags; | ||
1965 | 2127 | ||
1966 | event = ring_buffer_iter_peek(iter, ts); | 2128 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2129 | event = rb_iter_peek(iter, ts); | ||
1967 | if (!event) | 2130 | if (!event) |
1968 | return NULL; | 2131 | goto out; |
1969 | 2132 | ||
1970 | rb_advance_iter(iter); | 2133 | rb_advance_iter(iter); |
2134 | out: | ||
2135 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1971 | 2136 | ||
1972 | return event; | 2137 | return event; |
1973 | } | 2138 | } |
2139 | EXPORT_SYMBOL_GPL(ring_buffer_read); | ||
1974 | 2140 | ||
1975 | /** | 2141 | /** |
1976 | * ring_buffer_size - return the size of the ring buffer (in bytes) | 2142 | * ring_buffer_size - return the size of the ring buffer (in bytes) |
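ring_buffer_read() now wraps the iterator's peek-and-advance in reader_lock as well, completing the start/read/finish lifecycle: read_start pauses writers and snapshots the iterator, read walks events without consuming them, read_finish re-enables writers and frees the iterator. A compact userspace analogue of that lifecycle (the toy_* names are invented; the real functions also synchronize and lock as shown above):

        #include <stdio.h>
        #include <stdlib.h>

        struct toy_log {
                int     events[4];
                int     count;
                int     record_disabled;    /* like cpu_buffer->record_disabled */
        };

        struct toy_iter {
                struct toy_log  *log;
                int             pos;
        };

        static struct toy_iter *toy_read_start(struct toy_log *log)
        {
                struct toy_iter *iter = malloc(sizeof(*iter));

                if (!iter)
                        return NULL;
                log->record_disabled++;     /* writers back off while we read */
                iter->log = log;
                iter->pos = 0;              /* like rb_iter_reset()           */
                return iter;
        }

        static int toy_read(struct toy_iter *iter, int *out)
        {
                if (iter->pos >= iter->log->count)
                        return 0;
                *out = iter->log->events[iter->pos++];  /* peek + advance */
                return 1;
        }

        static void toy_read_finish(struct toy_iter *iter)
        {
                iter->log->record_disabled--;
                free(iter);
        }

        int main(void)
        {
                struct toy_log log = { .events = { 1, 2, 3 }, .count = 3 };
                struct toy_iter *iter = toy_read_start(&log);
                int v;

                while (iter && toy_read(iter, &v))
                        printf("event %d\n", v);
                if (iter)
                        toy_read_finish(iter);
                return 0;
        }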
@@ -1980,6 +2146,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer) | |||
1980 | { | 2146 | { |
1981 | return BUF_PAGE_SIZE * buffer->pages; | 2147 | return BUF_PAGE_SIZE * buffer->pages; |
1982 | } | 2148 | } |
2149 | EXPORT_SYMBOL_GPL(ring_buffer_size); | ||
1983 | 2150 | ||
1984 | static void | 2151 | static void |
1985 | rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | 2152 | rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) |
@@ -1987,7 +2154,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | |||
1987 | cpu_buffer->head_page | 2154 | cpu_buffer->head_page |
1988 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); | 2155 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); |
1989 | local_set(&cpu_buffer->head_page->write, 0); | 2156 | local_set(&cpu_buffer->head_page->write, 0); |
1990 | local_set(&cpu_buffer->head_page->commit, 0); | 2157 | local_set(&cpu_buffer->head_page->page->commit, 0); |
1991 | 2158 | ||
1992 | cpu_buffer->head_page->read = 0; | 2159 | cpu_buffer->head_page->read = 0; |
1993 | 2160 | ||
@@ -1996,7 +2163,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | |||
1996 | 2163 | ||
1997 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 2164 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); |
1998 | local_set(&cpu_buffer->reader_page->write, 0); | 2165 | local_set(&cpu_buffer->reader_page->write, 0); |
1999 | local_set(&cpu_buffer->reader_page->commit, 0); | 2166 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
2000 | cpu_buffer->reader_page->read = 0; | 2167 | cpu_buffer->reader_page->read = 0; |
2001 | 2168 | ||
2002 | cpu_buffer->overrun = 0; | 2169 | cpu_buffer->overrun = 0; |
@@ -2013,15 +2180,20 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
2013 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 2180 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
2014 | unsigned long flags; | 2181 | unsigned long flags; |
2015 | 2182 | ||
2016 | if (!cpu_isset(cpu, buffer->cpumask)) | 2183 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
2017 | return; | 2184 | return; |
2018 | 2185 | ||
2019 | spin_lock_irqsave(&cpu_buffer->lock, flags); | 2186 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2187 | |||
2188 | __raw_spin_lock(&cpu_buffer->lock); | ||
2020 | 2189 | ||
2021 | rb_reset_cpu(cpu_buffer); | 2190 | rb_reset_cpu(cpu_buffer); |
2022 | 2191 | ||
2023 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 2192 | __raw_spin_unlock(&cpu_buffer->lock); |
2193 | |||
2194 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
2024 | } | 2195 | } |
2196 | EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); | ||
2025 | 2197 | ||
2026 | /** | 2198 | /** |
2027 | * ring_buffer_reset - reset a ring buffer | 2199 | * ring_buffer_reset - reset a ring buffer |
@@ -2034,6 +2206,7 @@ void ring_buffer_reset(struct ring_buffer *buffer) | |||
2034 | for_each_buffer_cpu(buffer, cpu) | 2206 | for_each_buffer_cpu(buffer, cpu) |
2035 | ring_buffer_reset_cpu(buffer, cpu); | 2207 | ring_buffer_reset_cpu(buffer, cpu); |
2036 | } | 2208 | } |
2209 | EXPORT_SYMBOL_GPL(ring_buffer_reset); | ||
2037 | 2210 | ||
2038 | /** | 2211 | /** |
2039 | * ring_buffer_empty - is the ring buffer empty? | 2212 |
@@ -2052,6 +2225,7 @@ int ring_buffer_empty(struct ring_buffer *buffer) | |||
2052 | } | 2225 | } |
2053 | return 1; | 2226 | return 1; |
2054 | } | 2227 | } |
2228 | EXPORT_SYMBOL_GPL(ring_buffer_empty); | ||
2055 | 2229 | ||
2056 | /** | 2230 | /** |
2057 | * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? | 2231 | * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? |
@@ -2062,12 +2236,13 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) | |||
2062 | { | 2236 | { |
2063 | struct ring_buffer_per_cpu *cpu_buffer; | 2237 | struct ring_buffer_per_cpu *cpu_buffer; |
2064 | 2238 | ||
2065 | if (!cpu_isset(cpu, buffer->cpumask)) | 2239 | if (!cpumask_test_cpu(cpu, buffer->cpumask)) |
2066 | return 1; | 2240 | return 1; |
2067 | 2241 | ||
2068 | cpu_buffer = buffer->buffers[cpu]; | 2242 | cpu_buffer = buffer->buffers[cpu]; |
2069 | return rb_per_cpu_empty(cpu_buffer); | 2243 | return rb_per_cpu_empty(cpu_buffer); |
2070 | } | 2244 | } |
2245 | EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); | ||
2071 | 2246 | ||
2072 | /** | 2247 | /** |
2073 | * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers | 2248 | * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers |
@@ -2085,13 +2260,12 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
2085 | struct ring_buffer_per_cpu *cpu_buffer_a; | 2260 | struct ring_buffer_per_cpu *cpu_buffer_a; |
2086 | struct ring_buffer_per_cpu *cpu_buffer_b; | 2261 | struct ring_buffer_per_cpu *cpu_buffer_b; |
2087 | 2262 | ||
2088 | if (!cpu_isset(cpu, buffer_a->cpumask) || | 2263 | if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || |
2089 | !cpu_isset(cpu, buffer_b->cpumask)) | 2264 | !cpumask_test_cpu(cpu, buffer_b->cpumask)) |
2090 | return -EINVAL; | 2265 | return -EINVAL; |
2091 | 2266 | ||
2092 | /* At least make sure the two buffers are somewhat the same */ | 2267 | /* At least make sure the two buffers are somewhat the same */ |
2093 | if (buffer_a->size != buffer_b->size || | 2268 | if (buffer_a->pages != buffer_b->pages) |
2094 | buffer_a->pages != buffer_b->pages) | ||
2095 | return -EINVAL; | 2269 | return -EINVAL; |
2096 | 2270 | ||
2097 | cpu_buffer_a = buffer_a->buffers[cpu]; | 2271 | cpu_buffer_a = buffer_a->buffers[cpu]; |
@@ -2117,17 +2291,180 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
2117 | 2291 | ||
2118 | return 0; | 2292 | return 0; |
2119 | } | 2293 | } |
2294 | EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); | ||
2295 | |||
2296 | static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, | ||
2297 | struct buffer_data_page *bpage) | ||
2298 | { | ||
2299 | struct ring_buffer_event *event; | ||
2300 | unsigned long head; | ||
2301 | |||
2302 | __raw_spin_lock(&cpu_buffer->lock); | ||
2303 | for (head = 0; head < local_read(&bpage->commit); | ||
2304 | head += rb_event_length(event)) { | ||
2305 | |||
2306 | event = __rb_data_page_index(bpage, head); | ||
2307 | if (RB_WARN_ON(cpu_buffer, rb_null_event(event))) | ||
2308 | return; | ||
2309 | /* Only count data entries */ | ||
2310 | if (event->type != RINGBUF_TYPE_DATA) | ||
2311 | continue; | ||
2312 | cpu_buffer->entries--; | ||
2313 | } | ||
2314 | __raw_spin_unlock(&cpu_buffer->lock); | ||
2315 | } | ||
2316 | |||
2317 | /** | ||
2318 | * ring_buffer_alloc_read_page - allocate a page to read from buffer | ||
2319 | * @buffer: the buffer to allocate for. | ||
2320 | * | ||
2321 | * This function is used in conjunction with ring_buffer_read_page. | ||
2322 | * When reading a full page from the ring buffer, these functions | ||
2323 | * can be used to speed up the process. The calling function should | ||
2324 | * allocate a few pages first with this function. Then when it | ||
2325 | * needs to get pages from the ring buffer, it passes the result | ||
2326 | * of this function into ring_buffer_read_page, which will swap | ||
2327 | * the page that was allocated, with the read page of the buffer. | ||
2328 | * | ||
2329 | * Returns: | ||
2330 | * The page allocated, or NULL on error. | ||
2331 | */ | ||
2332 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) | ||
2333 | { | ||
2334 | unsigned long addr; | ||
2335 | struct buffer_data_page *bpage; | ||
2336 | |||
2337 | addr = __get_free_page(GFP_KERNEL); | ||
2338 | if (!addr) | ||
2339 | return NULL; | ||
2340 | |||
2341 | bpage = (void *)addr; | ||
2342 | |||
2343 | return bpage; | ||
2344 | } | ||
2345 | |||
2346 | /** | ||
2347 | * ring_buffer_free_read_page - free an allocated read page | ||
2349 | * @buffer: the buffer the page was allocated for | ||
2349 | * @data: the page to free | ||
2350 | * | ||
2351 | * Free a page allocated from ring_buffer_alloc_read_page. | ||
2352 | */ | ||
2353 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) | ||
2354 | { | ||
2355 | free_page((unsigned long)data); | ||
2356 | } | ||
2357 | |||
2358 | /** | ||
2359 | * ring_buffer_read_page - extract a page from the ring buffer | ||
2360 | * @buffer: buffer to extract from | ||
2361 | * @data_page: the page to use, allocated from ring_buffer_alloc_read_page | ||
2362 | * @cpu: the cpu of the buffer to extract | ||
2363 | * @full: should the extraction only happen when the page is full. | ||
2364 | * | ||
2365 | * This function will pull out a page from the ring buffer and consume it. | ||
2366 | * @data_page must be the address of the variable that was returned | ||
2367 | * from ring_buffer_alloc_read_page. This is because the page might be used | ||
2368 | * to swap with a page in the ring buffer. | ||
2369 | * | ||
2370 | * for example: | ||
2371 | * rpage = ring_buffer_alloc_read_page(buffer); | ||
2372 | * if (!rpage) | ||
2373 | * return error; | ||
2374 | * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0); | ||
2375 | * if (ret) | ||
2376 | * process_page(rpage); | ||
2377 | * | ||
2378 | * When @full is set, the function will not return true unless | ||
2379 | * the writer is off the reader page. | ||
2380 | * | ||
2381 | * Note: it is up to the calling functions to handle sleeps and wakeups. | ||
2382 | * The ring buffer can be used anywhere in the kernel and can not | ||
2383 | * blindly call wake_up. The layer that uses the ring buffer must be | ||
2384 | * responsible for that. | ||
2385 | * | ||
2386 | * Returns: | ||
2387 | * 1 if data has been transferred | ||
2388 | * 0 if no data has been transferred. | ||
2389 | */ | ||
2390 | int ring_buffer_read_page(struct ring_buffer *buffer, | ||
2391 | void **data_page, int cpu, int full) | ||
2392 | { | ||
2393 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | ||
2394 | struct ring_buffer_event *event; | ||
2395 | struct buffer_data_page *bpage; | ||
2396 | unsigned long flags; | ||
2397 | int ret = 0; | ||
2398 | |||
2399 | if (!data_page) | ||
2400 | return 0; | ||
2401 | |||
2402 | bpage = *data_page; | ||
2403 | if (!bpage) | ||
2404 | return 0; | ||
2405 | |||
2406 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
2407 | |||
2408 | /* | ||
2409 | * rb_buffer_peek will get the next ring buffer if | ||
2410 | * the current reader page is empty. | ||
2411 | */ | ||
2412 | event = rb_buffer_peek(buffer, cpu, NULL); | ||
2413 | if (!event) | ||
2414 | goto out; | ||
2415 | |||
2416 | /* check for data */ | ||
2417 | if (!local_read(&cpu_buffer->reader_page->page->commit)) | ||
2418 | goto out; | ||
2419 | /* | ||
2420 | * If the writer is already off of the read page, then simply | ||
2421 | * switch the read page with the given page. Otherwise | ||
2422 | * we need to copy the data from the reader to the writer. | ||
2423 | */ | ||
2424 | if (cpu_buffer->reader_page == cpu_buffer->commit_page) { | ||
2425 | unsigned int read = cpu_buffer->reader_page->read; | ||
2426 | |||
2427 | if (full) | ||
2428 | goto out; | ||
2429 | /* The writer is still on the reader page, we must copy */ | ||
2430 | bpage = cpu_buffer->reader_page->page; | ||
2431 | memcpy(bpage->data, | ||
2432 | cpu_buffer->reader_page->page->data + read, | ||
2433 | local_read(&bpage->commit) - read); | ||
2434 | |||
2435 | /* consume what was read */ | ||
2436 | cpu_buffer->reader_page += read; | ||
2437 | |||
2438 | } else { | ||
2439 | /* swap the pages */ | ||
2440 | rb_init_page(bpage); | ||
2441 | bpage = cpu_buffer->reader_page->page; | ||
2442 | cpu_buffer->reader_page->page = *data_page; | ||
2443 | cpu_buffer->reader_page->read = 0; | ||
2444 | *data_page = bpage; | ||
2445 | } | ||
2446 | ret = 1; | ||
2447 | |||
2448 | /* update the entry counter */ | ||
2449 | rb_remove_entries(cpu_buffer, bpage); | ||
2450 | out: | ||
2451 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
2452 | |||
2453 | return ret; | ||
2454 | } | ||
2120 | 2455 | ||
2121 | static ssize_t | 2456 | static ssize_t |
2122 | rb_simple_read(struct file *filp, char __user *ubuf, | 2457 | rb_simple_read(struct file *filp, char __user *ubuf, |
2123 | size_t cnt, loff_t *ppos) | 2458 | size_t cnt, loff_t *ppos) |
2124 | { | 2459 | { |
2125 | int *p = filp->private_data; | 2460 | long *p = filp->private_data; |
2126 | char buf[64]; | 2461 | char buf[64]; |
2127 | int r; | 2462 | int r; |
2128 | 2463 | ||
2129 | /* !ring_buffers_off == tracing_on */ | 2464 | if (test_bit(RB_BUFFERS_DISABLED_BIT, p)) |
2130 | r = sprintf(buf, "%d\n", !*p); | 2465 | r = sprintf(buf, "permanently disabled\n"); |
2466 | else | ||
2467 | r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p)); | ||
2131 | 2468 | ||
2132 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2469 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2133 | } | 2470 | } |
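ring_buffer_read_page(), added earlier in the hunk above, makes a page-at-a-time read either zero-copy or a memcpy: if the writer has already left the reader page, the caller's spare page (from ring_buffer_alloc_read_page()) is swapped in and the full page handed back; if the writer is still on it, only the committed bytes are copied. A userspace sketch of that swap-or-copy decision, with toy types standing in for buffer_data_page:

        #include <stdio.h>
        #include <string.h>

        struct page {
                char    data[64];
                size_t  commit;                 /* bytes of valid data */
        };

        struct toy_buffer {
                struct page *reader_page;       /* page the consumer reads from   */
                int          writer_on_it;      /* is the producer still writing? */
        };

        /*
         * Returns 1 and leaves *data_page pointing at a page of data, 0
         * otherwise.  *data_page comes from the caller, like the page
         * returned by ring_buffer_alloc_read_page().
         */
        static int toy_read_page(struct toy_buffer *buf, struct page **data_page)
        {
                struct page *spare = *data_page;

                if (!buf->reader_page->commit)
                        return 0;               /* nothing committed yet */

                if (buf->writer_on_it) {
                        /* Writer still on this page: copy what is committed. */
                        memcpy(spare->data, buf->reader_page->data,
                               buf->reader_page->commit);
                        spare->commit = buf->reader_page->commit;
                } else {
                        /* Writer moved on: swap pages instead of copying. */
                        *data_page = buf->reader_page;
                        spare->commit = 0;
                        buf->reader_page = spare;
                }
                return 1;
        }

        int main(void)
        {
                struct page a = { .commit = 0 }, b = { .commit = 0 };
                struct toy_buffer buf = { .reader_page = &a, .writer_on_it = 0 };
                struct page *rpage = &b;        /* caller-allocated spare page */

                memcpy(a.data, "payload", 7);
                a.commit = 7;

                if (toy_read_page(&buf, &rpage))
                        printf("got %zu bytes: %.*s\n", rpage->commit,
                               (int)rpage->commit, rpage->data);
                printf("buffer now writes into the old spare: %s\n",
                       buf.reader_page == &b ? "yes" : "no");
                return 0;
        }

After a successful extraction the kernel code also walks the handed-out page with rb_remove_entries() to keep the per-cpu entry count honest.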
@@ -2136,7 +2473,7 @@ static ssize_t | |||
2136 | rb_simple_write(struct file *filp, const char __user *ubuf, | 2473 | rb_simple_write(struct file *filp, const char __user *ubuf, |
2137 | size_t cnt, loff_t *ppos) | 2474 | size_t cnt, loff_t *ppos) |
2138 | { | 2475 | { |
2139 | int *p = filp->private_data; | 2476 | long *p = filp->private_data; |
2140 | char buf[64]; | 2477 | char buf[64]; |
2141 | long val; | 2478 | long val; |
2142 | int ret; | 2479 | int ret; |
@@ -2153,8 +2490,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf, | |||
2153 | if (ret < 0) | 2490 | if (ret < 0) |
2154 | return ret; | 2491 | return ret; |
2155 | 2492 | ||
2156 | /* !ring_buffers_off == tracing_on */ | 2493 | if (val) |
2157 | *p = !val; | 2494 | set_bit(RB_BUFFERS_ON_BIT, p); |
2495 | else | ||
2496 | clear_bit(RB_BUFFERS_ON_BIT, p); | ||
2158 | 2497 | ||
2159 | (*ppos)++; | 2498 | (*ppos)++; |
2160 | 2499 | ||
@@ -2176,7 +2515,7 @@ static __init int rb_init_debugfs(void) | |||
2176 | d_tracer = tracing_init_dentry(); | 2515 | d_tracer = tracing_init_dentry(); |
2177 | 2516 | ||
2178 | entry = debugfs_create_file("tracing_on", 0644, d_tracer, | 2517 | entry = debugfs_create_file("tracing_on", 0644, d_tracer, |
2179 | &ring_buffers_off, &rb_simple_fops); | 2518 | &ring_buffer_flags, &rb_simple_fops); |
2180 | if (!entry) | 2519 | if (!entry) |
2181 | pr_warning("Could not create debugfs 'tracing_on' entry\n"); | 2520 | pr_warning("Could not create debugfs 'tracing_on' entry\n"); |
2182 | 2521 | ||
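The debugfs `tracing_on` file now reads and writes bits of ring_buffer_flags instead of flipping a plain int, which is what makes the "permanently disabled" report in rb_simple_read() possible: writes may set or clear the ON bit, but once the DISABLED bit is latched nothing is recorded again. A toy sketch of that two-bit policy (names are illustrative, and plain bit operations stand in for set_bit/test_bit):

        #include <stdio.h>

        enum {
                TOY_ON          = 1 << 0,       /* RB_BUFFERS_ON_BIT       */
                TOY_DISABLED    = 1 << 1,       /* RB_BUFFERS_DISABLED_BIT */
        };

        static unsigned long toy_flags = TOY_ON;

        /* Recording needs the ON bit set and the DISABLED latch clear. */
        static int recording_allowed(void)
        {
                return (toy_flags & TOY_ON) && !(toy_flags & TOY_DISABLED);
        }

        int main(void)
        {
                printf("default: %d\n", recording_allowed());           /* 1 */

                toy_flags &= ~TOY_ON;           /* like writing 0 to tracing_on */
                printf("after off: %d\n", recording_allowed());         /* 0 */

                toy_flags |= TOY_ON;            /* like writing 1 to tracing_on */
                toy_flags |= TOY_DISABLED;      /* the permanent kill switch    */
                printf("after permanent disable: %d\n", recording_allowed()); /* 0 */
                return 0;
        }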