author	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-15 22:42:40 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-15 22:42:40 -0500
commit	7c225c69f86c934e3be9be63ecde754e286838d7 (patch)
tree	ff2df419b0c4886b37407235f7d21215e4cf45e4	/kernel/trace/ring_buffer.c
parent	6363b3f3ac5be096d08c8c504128befa0c033529 (diff)
parent	1b7176aea0a924ac59c6a283129d3e8eb00aa915 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - a few misc bits

 - ocfs2 updates

 - almost all of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (131 commits)
  memory hotplug: fix comments when adding section
  mm: make alloc_node_mem_map a void call if we don't have CONFIG_FLAT_NODE_MEM_MAP
  mm: simplify nodemask printing
  mm,oom_reaper: remove pointless kthread_run() error check
  mm/page_ext.c: check if page_ext is not prepared
  writeback: remove unused function parameter
  mm: do not rely on preempt_count in print_vma_addr
  mm, sparse: do not swamp log with huge vmemmap allocation failures
  mm/hmm: remove redundant variable align_end
  mm/list_lru.c: mark expected switch fall-through
  mm/shmem.c: mark expected switch fall-through
  mm/page_alloc.c: broken deferred calculation
  mm: don't warn about allocations which stall for too long
  fs: fuse: account fuse_inode slab memory as reclaimable
  mm, page_alloc: fix potential false positive in __zone_watermark_ok
  mm: mlock: remove lru_add_drain_all()
  mm, sysctl: make NUMA stats configurable
  shmem: convert shmem_init_inodecache() to void
  Unify migrate_pages and move_pages access checks
  mm, pagevec: rename pagevec drained field
  ...
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	3
1 file changed, 0 insertions, 3 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 845f3805c73d..d57fede84b38 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -13,7 +13,6 @@
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <linux/kthread.h>	/* for self test */
-#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/mutex.h>
@@ -2055,7 +2054,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	}
 
 	event = __rb_page_index(tail_page, tail);
-	kmemcheck_annotate_bitfield(event, bitfield);
 
 	/* account for padding bytes */
 	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
@@ -2686,7 +2684,6 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	/* We reserved something on the buffer */
 
 	event = __rb_page_index(tail_page, tail);
-	kmemcheck_annotate_bitfield(event, bitfield);
 	rb_update_event(cpu_buffer, event, info);
 
 	local_inc(&tail_page->entries);
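
For context on the two removed calls: kmemcheck could not track the initialization state of individual bits, so structures containing bitfields were explicitly annotated, and those annotations go away together with kmemcheck itself in this merge. A minimal sketch of the pattern, assuming the pre-removal definitions of kmemcheck_bitfield_begin()/kmemcheck_bitfield_end() and kmemcheck_annotate_bitfield() from <linux/kmemcheck.h>; this is illustrative and not part of the diff above:

	/*
	 * Sketch of the annotation pattern being removed.  The begin/end
	 * markers delimit the byte range occupied by the bitfields, and
	 * kmemcheck_annotate_bitfield() marks that range as initialized so
	 * that writes to individual bitfield members do not trigger false
	 * positive "uninitialized memory" reports from kmemcheck.
	 */
	#include <linux/kmemcheck.h>
	#include <linux/types.h>

	struct ring_buffer_event {
		kmemcheck_bitfield_begin(bitfield);
		u32		type_len:5, time_delta:27;
		kmemcheck_bitfield_end(bitfield);

		u32		array[];
	};

	static void example_init(struct ring_buffer_event *event)
	{
		/*
		 * With kmemcheck gone, this call (and the begin/end markers
		 * above) is simply deleted, as in the two hunks shown.
		 */
		kmemcheck_annotate_bitfield(event, bitfield);
		event->type_len = 0;
		event->time_delta = 0;
	}

Since the removed lines only informed kmemcheck and generated no code in normal builds, deleting them does not change the behavior of rb_reset_tail() or __rb_reserve_next().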