path: root/kernel/trace/ring_buffer.c
author	Steven Rostedt <srostedt@redhat.com>	2009-06-17 14:03:44 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-06-17 14:16:07 -0400
commit	0dcd4d6c3e5b17ccf88d41cb354bb4d57cb18cbf (patch)
tree	4798b2172902f8ce4dc81856ee1d858909147824 /kernel/trace/ring_buffer.c
parent	c6a9d7b55e2df63de012a9a285bf2a0bee8e4d59 (diff)
ring-buffer: remove useless compile check for buffer_page size
The original version of the ring buffer had a hack that mapped the page struct holding the pages of the buffer so that it also served as the structure the ring buffer used to keep the pages in a linked list. This overlap of the page struct was very dangerous, and that hack was removed a while ago.

A check remained to make sure the buffer_page never became bigger than the page struct, and it would fail the compile if it did. The check was only meaningful when we had the hack. Now that we have separately allocated descriptors for the buffer pages, we can remove this check.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
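For readers unfamiliar with the trick being removed: the check relied on calling a function that is declared but never defined anywhere, so the build only succeeds if the compiler can prove the call dead. Below is a minimal standalone sketch of the same idea, assuming it is compiled with optimization enabled (e.g. gcc -O2), since only the optimizer eliminates the dead call; the struct and function names are illustrative, not the kernel's.

#include <stdio.h>

struct small_desc { void *list; unsigned long flags; };  /* stand-in for buffer_page */
struct page_like  { unsigned long words[8]; };           /* stand-in for struct page */

/* Declared on purpose, defined nowhere. */
extern int size_check_failed(void);

int main(void)
{
	/*
	 * The condition is a compile-time constant.  When it is false,
	 * the optimizer drops the dead call, so the missing definition
	 * is never referenced.  If small_desc ever grew past page_like,
	 * the call would survive and linking would fail with an
	 * undefined reference to size_check_failed(), effectively a
	 * build-time assertion.
	 */
	if (sizeof(struct small_desc) > sizeof(struct page_like))
		size_check_failed();

	printf("descriptor: %zu bytes, page: %zu bytes\n",
	       sizeof(struct small_desc), sizeof(struct page_like));
	return 0;
}

These days BUILD_BUG_ON() is the usual way to express such a constraint in the kernel, failing at compile time rather than link time; this commit simply drops the check instead of converting it, since buffer_page no longer has to fit inside struct page.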
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	| 11 -----------
1 file changed, 0 insertions(+), 11 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6cf340e1a4a3..162da2305cbc 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -620,12 +620,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 	kfree(cpu_buffer);
 }
 
-/*
- * Causes compile errors if the struct buffer_page gets bigger
- * than the struct page.
- */
-extern int ring_buffer_page_too_big(void);
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu);
@@ -648,11 +642,6 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	int bsize;
 	int cpu;
 
-	/* Paranoid! Optimizes out when all is well */
-	if (sizeof(struct buffer_page) > sizeof(struct page))
-		ring_buffer_page_too_big();
-
-
 	/* keep it in its own cache line */
 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
 			 GFP_KERNEL);