path: root/drivers/oprofile/cpu_buffer.h
/**
 * @file cpu_buffer.h
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#ifndef OPROFILE_CPU_BUFFER_H
#define OPROFILE_CPU_BUFFER_H

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/ring_buffer.h>

struct task_struct;

int alloc_cpu_buffers(void);
void free_cpu_buffers(void);

void start_cpu_work(void);
void end_cpu_work(void);

/*
 * The CPU buffer is composed of entries of this type; the same entry
 * format is also used for context-switch notes.
 */
struct op_sample {
	unsigned long eip;
	unsigned long event;
};

struct op_entry {
	struct ring_buffer_event *event;
	struct op_sample *sample;
	unsigned long irq_flags;
};

struct oprofile_cpu_buffer {
	volatile unsigned long head_pos;
	volatile unsigned long tail_pos;
	unsigned long buffer_size;
	struct task_struct *last_task;
	int last_is_kernel;
	int tracing;
	unsigned long sample_received;
	unsigned long sample_lost_overflow;
	unsigned long backtrace_aborted;
	unsigned long sample_invalid_eip;
	int cpu;
	struct delayed_work work;
};

extern struct ring_buffer *op_ring_buffer_read;
extern struct ring_buffer *op_ring_buffer_write;
DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

/*
 * Resets the cpu buffer to a sane state.
 *
 * Set last_is_kernel and last_task to invalid values so that the next
 * sample collected re-initializes them with proper values.
 */
static inline void op_cpu_buffer_reset(int cpu)
{
	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);

	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_task = NULL;
}

/*
 * Reserve a slot for one op_sample in the per-cpu write buffer.  On
 * success entry->sample points at the reserved slot, which must be
 * released with op_cpu_buffer_write_commit().
 */
static inline int op_cpu_buffer_write_entry(struct op_entry *entry)
{
	entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
						sizeof(struct op_sample),
						&entry->irq_flags);
	if (!entry->event) {
		entry->sample = NULL;
		return -ENOMEM;
	}

	entry->sample = ring_buffer_event_data(entry->event);
	return 0;
}

/* Publish a sample previously reserved with op_cpu_buffer_write_entry(). */
static inline int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
					 entry->irq_flags);
}
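
/*
 * Example producer usage (an illustrative sketch, not part of this
 * header; pc and event stand in for real sample data):
 *
 *	struct op_entry entry;
 *
 *	if (!op_cpu_buffer_write_entry(&entry)) {
 *		entry.sample->eip = pc;
 *		entry.sample->event = event;
 *		op_cpu_buffer_write_commit(&entry);
 *	}
 */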

/*
 * Consume one sample for @cpu.  If the read buffer is empty, swap its
 * per-cpu buffer with the write buffer's and try again, making newly
 * produced samples visible.  Returns NULL when no sample is available.
 */
static inline struct op_sample *op_cpu_buffer_read_entry(int cpu)
{
	struct ring_buffer_event *e;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		return ring_buffer_event_data(e);
	if (ring_buffer_swap_cpu(op_ring_buffer_read,
				 op_ring_buffer_write,
				 cpu))
		return NULL;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		return ring_buffer_event_data(e);
	return NULL;
}

/* "acquire" as many cpu buffer slots as we can */
static inline unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
}
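
/*
 * Example consumer usage (an illustrative sketch; process_sample() is
 * a hypothetical callback, the real draining logic lives in
 * buffer_sync.c):
 *
 *	unsigned long avail = op_cpu_buffer_entries(cpu);
 *
 *	while (avail--) {
 *		struct op_sample *s = op_cpu_buffer_read_entry(cpu);
 *		if (!s)
 *			break;
 *		process_sample(s->eip, s->event);
 *	}
 */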

/* transient events for the CPU buffer -> event buffer */
#define CPU_IS_KERNEL	1
#define CPU_TRACE_BEGIN	2
#define IBS_FETCH_BEGIN	3
#define IBS_OP_BEGIN	4
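
/*
 * Example escape note (illustrative; assumes the ESCAPE_CODE
 * convention from event_buffer.h): a sample whose eip is ESCAPE_CODE
 * carries one of the codes above in its event field:
 *
 *	struct op_sample note = {
 *		.eip	= ESCAPE_CODE,
 *		.event	= CPU_TRACE_BEGIN,
 *	};
 */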

#endif /* OPROFILE_CPU_BUFFER_H */