author | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2010-11-11 02:33:43 -0500
committer | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2010-11-11 17:59:06 -0500
commit | 98ac0cd2bbe476d79ebf44139a6259cb8d0dc6be (patch)
tree | 90e8b9beff6e188c247ec20ab893fe4d2f09425f /litmus/sched_trace.c
parent | f599a587e1c7446a76d7d62ed7748f3c4435acd8 (diff)
Cleanup TRACE() implementation
Since the initial rebase from .24 to .32, the TRACE() implementation
was a hybrid between our old ring-buffer implementation and the new
generic kfifo API. This was a) ugly and b) not safe for TRACE()
invocations during early boot.

This patch rips out the old parts and replaces the actual buffer with
a static kfifo.

This also increases the TRACE() buffer size considerably. As we avoid
a dynamic allocation, a larger size is less problematic for debug
builds. This helps a bit with holes in the debug log if the
buffer-flushing task is starved.
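To make the resulting scheme concrete, here is a minimal, self-contained sketch (illustration only, not code from this commit) of the pattern the patch adopts: a statically declared kfifo filled under a raw spinlock, gated by an atomic reader count. All demo_* names are hypothetical; the patch itself uses debug_buffer, reader_cnt, and log_buffer_lock, as the diff below shows.

#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/* must be a power of two for DEFINE_KFIFO() */
#define DEMO_BUF_SIZE 4096

/* storage is reserved at compile time, so logging works even
 * before the kernel allocators are up */
static DEFINE_KFIFO(demo_buffer, char, DEMO_BUF_SIZE);
static DEFINE_RAW_SPINLOCK(demo_lock);
static atomic_t demo_readers = ATOMIC_INIT(0);

/* append a preformatted message; caller disables interrupts first,
 * mirroring how sched_trace_log_message() uses log_buffer_lock */
static void demo_log(const char *msg, size_t len)
{
	if (!atomic_read(&demo_readers))
		return; /* nobody listening: skip the buffer entirely */

	raw_spin_lock(&demo_lock);
	/* kfifo_in() copies at most the free space; anything beyond
	 * that is dropped, which appears as a hole in the log */
	kfifo_in(&demo_buffer, msg, len);
	raw_spin_unlock(&demo_lock);
}

Because the storage is static, there is no allocation failure path at open time and no kfifo_initialized() check on the hot path, which is what makes early-boot TRACE() invocations safe.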
Diffstat (limited to 'litmus/sched_trace.c')
-rw-r--r-- | litmus/sched_trace.c | 184
1 file changed, 29 insertions(+), 155 deletions(-)
diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c
index 8051b51a2022..9b03e0cfe82b 100644
--- a/litmus/sched_trace.c
+++ b/litmus/sched_trace.c
@@ -2,7 +2,7 @@
  * sched_trace.c -- record scheduling events to a byte stream.
  */
 #include <linux/spinlock.h>
-#include <linux/semaphore.h>
+#include <linux/mutex.h>
 
 #include <linux/fs.h>
 #include <linux/slab.h>
@@ -19,7 +19,7 @@
 #define SCHED_TRACE_NAME "litmus/log"
 
-/* Allocate a buffer of about 32k per CPU */
-#define LITMUS_TRACE_BUF_PAGES 8
+/* Allocate a buffer of about 256k per CPU */
+#define LITMUS_TRACE_BUF_PAGES 64
 #define LITMUS_TRACE_BUF_SIZE (PAGE_SIZE * LITMUS_TRACE_BUF_PAGES * NR_CPUS)
 
 /* Max length of one read from the buffer */
@@ -28,123 +28,13 @@
 /* Max length for one write --- from kernel --- to the buffer */
 #define MSG_SIZE 255
 
-/* Inner ring buffer structure */
-typedef struct {
-	rwlock_t del_lock;
 
-	/* the buffer */
-	struct kfifo kfifo;
-} ring_buffer_t;
+static DEFINE_MUTEX(reader_mutex);
+static atomic_t reader_cnt = ATOMIC_INIT(0);
+static DEFINE_KFIFO(debug_buffer, char, LITMUS_TRACE_BUF_SIZE);
 
-/* Main buffer structure */
-typedef struct {
-	ring_buffer_t buf;
-	atomic_t reader_cnt;
-	struct semaphore reader_mutex;
-} trace_buffer_t;
 
-
-/*
- * Inner buffer management functions
- */
-void rb_init(ring_buffer_t* buf)
-{
-	rwlock_init(&buf->del_lock);
-}
-
-int rb_alloc_buf(ring_buffer_t* buf, unsigned int size)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	write_lock_irqsave(&buf->del_lock, flags);
-
-	/* kfifo size must be a power of 2
-	 * atm kfifo alloc is automatically rounding the size
-	 */
-	ret = kfifo_alloc(&buf->kfifo, size, GFP_ATOMIC);
-
-	write_unlock_irqrestore(&buf->del_lock, flags);
-
-	if(ret < 0)
-		printk(KERN_ERR "kfifo_alloc failed\n");
-
-	return ret;
-}
-
-int rb_free_buf(ring_buffer_t* buf)
-{
-	unsigned long flags;
-
-	write_lock_irqsave(&buf->del_lock, flags);
-
-	BUG_ON(!kfifo_initialized(&buf->kfifo));
-	kfifo_free(&buf->kfifo);
-
-	write_unlock_irqrestore(&buf->del_lock, flags);
-
-	return 0;
-}
-
-/*
- * Assumption: concurrent writes are serialized externally
- *
- * Will only succeed if there is enough space for all len bytes.
- */
-int rb_put(ring_buffer_t* buf, char* mem, size_t len)
-{
-	unsigned long flags;
-	int error = 0;
-
-	read_lock_irqsave(&buf->del_lock, flags);
-
-	if (!kfifo_initialized(&buf->kfifo)) {
-		error = -ENODEV;
-		goto out;
-	}
-
-	if((kfifo_in(&buf->kfifo, mem, len)) < len) {
-		error = -ENOMEM;
-		goto out;
-	}
-
- out:
-	read_unlock_irqrestore(&buf->del_lock, flags);
-	return error;
-}
-
-/* Assumption: concurrent reads are serialized externally */
-int rb_get(ring_buffer_t* buf, char* mem, size_t len)
-{
-	unsigned long flags;
-	int error = 0;
-
-	read_lock_irqsave(&buf->del_lock, flags);
-	if (!kfifo_initialized(&buf->kfifo)) {
-		error = -ENODEV;
-		goto out;
-	}
-
-	error = kfifo_out(&buf->kfifo, (unsigned char*)mem, len);
-
- out:
-	read_unlock_irqrestore(&buf->del_lock, flags);
-	return error;
-}
-
-/*
- * Device Driver management
- */
 static DEFINE_RAW_SPINLOCK(log_buffer_lock);
-static trace_buffer_t log_buffer;
-
-static void init_log_buffer(void)
-{
-	rb_init(&log_buffer.buf);
-	atomic_set(&log_buffer.reader_cnt,0);
-	init_MUTEX(&log_buffer.reader_mutex);
-}
-
 static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer);
 
 /*
@@ -163,6 +53,10 @@ void sched_trace_log_message(const char* fmt, ...)
 	size_t len;
 	char* buf;
 
+	if (!atomic_read(&reader_cnt))
+		/* early exit if nobody is listening */
+		return;
+
 	va_start(args, fmt);
 	local_irq_save(flags);
 
@@ -171,32 +65,33 @@ void sched_trace_log_message(const char* fmt, ...)
 	len = vscnprintf(buf, MSG_SIZE, fmt, args);
 
 	raw_spin_lock(&log_buffer_lock);
-	/* Don't copy the trailing null byte, we don't want null bytes
-	 * in a text file.
+	/* Don't copy the trailing null byte, we don't want null bytes in a
+	 * text file.
 	 */
-	rb_put(&log_buffer.buf, buf, len);
+	kfifo_in(&debug_buffer, buf, len);
 	raw_spin_unlock(&log_buffer_lock);
 
 	local_irq_restore(flags);
 	va_end(args);
 }
 
+
 /*
  * log_read - Read the trace buffer
  *
  * This function is called as a file operation from userspace.
  * Readers can sleep. Access is serialized through reader_mutex
  */
-static ssize_t log_read(struct file *filp, char __user *to, size_t len,
-		loff_t *f_pos)
+static ssize_t log_read(struct file *filp,
+			char __user *to, size_t len,
+			loff_t *f_pos)
 {
 	/* we ignore f_pos, this is strictly sequential */
 
 	ssize_t error = -EINVAL;
 	char* mem;
-	trace_buffer_t *tbuf = filp->private_data;
 
-	if (down_interruptible(&tbuf->reader_mutex)) {
+	if (mutex_lock_interruptible(&reader_mutex)) {
 		error = -ERESTARTSYS;
 		goto out;
 	}
@@ -210,14 +105,14 @@ static ssize_t log_read(struct file *filp, char __user *to, size_t len,
 		goto out_unlock;
 	}
 
-	error = rb_get(&tbuf->buf, mem, len);
+	error = kfifo_out(&debug_buffer, mem, len);
 	while (!error) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_timeout(110);
 		if (signal_pending(current))
 			error = -ERESTARTSYS;
 		else
-			error = rb_get(&tbuf->buf, mem, len);
+			error = kfifo_out(&debug_buffer, mem, len);
 	}
 
 	if (error > 0 && copy_to_user(to, mem, error))
@@ -225,7 +120,7 @@ static ssize_t log_read(struct file *filp, char __user *to, size_t len,
 
 	kfree(mem);
  out_unlock:
-	up(&tbuf->reader_mutex);
+	mutex_unlock(&reader_mutex);
  out:
 	return error;
 }
@@ -243,36 +138,23 @@ extern int trace_recurse;
 static int log_open(struct inode *in, struct file *filp)
 {
 	int error = -EINVAL;
-	trace_buffer_t* tbuf;
-
-	tbuf = &log_buffer;
 
-	if (down_interruptible(&tbuf->reader_mutex)) {
+	if (mutex_lock_interruptible(&reader_mutex)) {
 		error = -ERESTARTSYS;
 		goto out;
 	}
 
-	/* first open must allocate buffers */
-	if (atomic_inc_return(&tbuf->reader_cnt) == 1) {
-		if ((error = rb_alloc_buf(&tbuf->buf, LITMUS_TRACE_BUF_SIZE)))
-		{
-			atomic_dec(&tbuf->reader_cnt);
-			goto out_unlock;
-		}
-	}
-
+	atomic_inc(&reader_cnt);
 	error = 0;
-	filp->private_data = tbuf;
 
 	printk(KERN_DEBUG
 	       "sched_trace kfifo with buffer starting at: 0x%p\n",
-	       (tbuf->buf.kfifo).buf);
+	       debug_buffer.buf);
 
 	/* override printk() */
 	trace_override++;
 
- out_unlock:
-	up(&tbuf->reader_mutex);
+	mutex_unlock(&reader_mutex);
  out:
 	return error;
 }
@@ -280,26 +162,20 @@ static int log_open(struct inode *in, struct file *filp)
 static int log_release(struct inode *in, struct file *filp)
 {
 	int error = -EINVAL;
-	trace_buffer_t* tbuf = filp->private_data;
-
-	BUG_ON(!filp->private_data);
 
-	if (down_interruptible(&tbuf->reader_mutex)) {
+	if (mutex_lock_interruptible(&reader_mutex)) {
 		error = -ERESTARTSYS;
 		goto out;
 	}
 
-	/* last release must deallocate buffers */
-	if (atomic_dec_return(&tbuf->reader_cnt) == 0) {
-		error = rb_free_buf(&tbuf->buf);
-	}
+	atomic_dec(&reader_cnt);
 
 	/* release printk() overriding */
 	trace_override--;
 
 	printk(KERN_DEBUG "sched_trace kfifo released\n");
 
-	up(&tbuf->reader_mutex);
+	mutex_unlock(&reader_mutex);
  out:
 	return error;
 }
@@ -333,7 +209,7 @@ void dump_trace_buffer(int max)
 	/* potential, but very unlikely, race... */
 	trace_recurse = 1;
 	while ((max == 0 || count++ < max) &&
-	       (len = rb_get(&log_buffer.buf, line, sizeof(line) - 1)) > 0) {
+	       (len = kfifo_out(&debug_buffer, line, sizeof(line) - 1)) > 0) {
 		line[len] = '\0';
 		printk("%s", line);
 	}
@@ -355,7 +231,6 @@ static struct sysrq_key_op sysrq_dump_trace_buffer_op = {
 static int __init init_sched_trace(void)
 {
 	printk("Initializing TRACE() device\n");
-	init_log_buffer();
 
 #ifdef CONFIG_MAGIC_SYSRQ
 	/* offer some debugging help */
@@ -365,7 +240,6 @@ static int __init init_sched_trace(void)
 	printk("Could not register dump-trace-buffer(Y) magic sysrq.\n");
 #endif
 
-
 	return misc_register(&litmus_log_dev);
 }
 
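For completeness, a hypothetical userspace consumer of the stream: the driver registers a misc device named "litmus/log" (SCHED_TRACE_NAME), so with standard udev naming it would appear as /dev/litmus/log. Since log_read() ignores f_pos and sleeps in 110-jiffy steps until data arrives, a plain read(2) loop tails the log. The device path is an assumption, not stated in the commit.

/* sketch: tail the TRACE() stream from userspace; the /dev path
 * assumes standard udev naming of the "litmus/log" misc device */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/litmus/log", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/litmus/log");
		return 1;
	}
	/* read() blocks until the kernel-side kfifo has bytes */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}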