diff options
author | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2007-04-12 15:45:00 -0400 |
---|---|---|
committer | Bjoern B. Brandenburg <bbb@cs.unc.edu> | 2007-04-12 15:45:00 -0400 |
commit | 58754cad26f958527ddfcb16da870724b4f5a561 (patch) | |
tree | 01e21b734fed38f9af42a10e39ae7ef30cc917bc /kernel | |
parent | 4300134c74385b82710672cb25f604ade97f334d (diff) |
Make TRACING more robust in the early boot phase.
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched_trace.c | 38 |
1 file changed, 25 insertions, 13 deletions
diff --git a/kernel/sched_trace.c b/kernel/sched_trace.c index fa7bad33a5..dbc99bb026 100644 --- a/kernel/sched_trace.c +++ b/kernel/sched_trace.c | |||
@@ -35,15 +35,18 @@ typedef struct { | |||
35 | 35 | ||
36 | } ring_buffer_t; | 36 | } ring_buffer_t; |
37 | 37 | ||
38 | #define EMPTY_RING_BUFFER { \ | ||
39 | .lock = SPIN_LOCK_UNLOCKED, \ | ||
40 | .del_lock = RW_LOCK_UNLOCKED, \ | ||
41 | .buf = NULL, \ | ||
42 | .end = NULL, \ | ||
43 | .writep = NULL, \ | ||
44 | .readp = NULL \ | ||
45 | } | ||
38 | 46 | ||
39 | void rb_init(ring_buffer_t* buf) | 47 | void rb_init(ring_buffer_t* buf) |
40 | { | 48 | { |
41 | buf->lock = SPIN_LOCK_UNLOCKED; | 49 | *buf = (ring_buffer_t) EMPTY_RING_BUFFER; |
42 | buf->del_lock = RW_LOCK_UNLOCKED; | ||
43 | buf->buf = NULL; | ||
44 | buf->end = NULL; | ||
45 | buf->writep = NULL; | ||
46 | buf->readp = NULL; | ||
47 | } | 50 | } |
48 | 51 | ||
49 | int rb_alloc_buf(ring_buffer_t* buf, unsigned long order) | 52 | int rb_alloc_buf(ring_buffer_t* buf, unsigned long order) |
@@ -226,6 +229,7 @@ int rb_get(ring_buffer_t* buf, char* mem, size_t len) | |||
226 | /******************************************************************************/ | 229 | /******************************************************************************/ |
227 | 230 | ||
228 | 231 | ||
232 | |||
229 | /* Allocate a buffer of about 1 MB per CPU. | 233 | /* Allocate a buffer of about 1 MB per CPU. |
230 | * | 234 | * |
231 | */ | 235 | */ |
@@ -237,10 +241,17 @@ typedef struct { | |||
237 | struct semaphore reader_mutex; | 241 | struct semaphore reader_mutex; |
238 | } trace_buffer_t; | 242 | } trace_buffer_t; |
239 | 243 | ||
244 | |||
245 | |||
246 | /* This does not initialize the semaphore!! */ | ||
247 | |||
248 | #define EMPTY_TRACE_BUFFER \ | ||
249 | { .buf = EMPTY_RING_BUFFER, .reader_cnt = ATOMIC_INIT(0)} | ||
250 | |||
240 | static DEFINE_PER_CPU(trace_buffer_t, trace_buffer); | 251 | static DEFINE_PER_CPU(trace_buffer_t, trace_buffer); |
241 | 252 | ||
242 | static queuelock_t log_buffer_lock; | 253 | static spinlock_t log_buffer_lock = SPIN_LOCK_UNLOCKED; |
243 | static trace_buffer_t log_buffer; | 254 | static trace_buffer_t log_buffer = EMPTY_TRACE_BUFFER; |
244 | 255 | ||
245 | static void init_buffers(void) | 256 | static void init_buffers(void) |
246 | { | 257 | { |
@@ -251,9 +262,10 @@ static void init_buffers(void) | |||
251 | init_MUTEX(&per_cpu(trace_buffer, i).reader_mutex); | 262 | init_MUTEX(&per_cpu(trace_buffer, i).reader_mutex); |
252 | atomic_set(&per_cpu(trace_buffer, i).reader_cnt, 0); | 263 | atomic_set(&per_cpu(trace_buffer, i).reader_cnt, 0); |
253 | } | 264 | } |
254 | rb_init(&log_buffer.buf); | 265 | /* only initialize the mutex, the rest was initialized as part |
266 | * of the static initialization macro | ||
267 | */ | ||
255 | init_MUTEX(&log_buffer.reader_mutex); | 268 | init_MUTEX(&log_buffer.reader_mutex); |
256 | atomic_set(&log_buffer.reader_cnt, 0); | ||
257 | } | 269 | } |
258 | 270 | ||
259 | static int trace_release(struct inode *in, struct file *filp) | 271 | static int trace_release(struct inode *in, struct file *filp) |
@@ -465,7 +477,7 @@ static int __init init_sched_trace(void) | |||
465 | int error1 = 0, error2 = 0; | 477 | int error1 = 0, error2 = 0; |
466 | 478 | ||
467 | printk("Initializing scheduler trace device\n"); | 479 | printk("Initializing scheduler trace device\n"); |
468 | queue_lock_init(&log_buffer_lock); | 480 | //queue_lock_init(&log_buffer_lock); |
469 | init_buffers(); | 481 | init_buffers(); |
470 | 482 | ||
471 | error1 = register_buffer_dev("schedtrace", &trace_fops, | 483 | error1 = register_buffer_dev("schedtrace", &trace_fops, |
@@ -512,12 +524,12 @@ void sched_trace_log_message(const char* fmt, ...) | |||
512 | buf = __get_cpu_var(fmt_buffer); | 524 | buf = __get_cpu_var(fmt_buffer); |
513 | len = vscnprintf(buf, MSG_SIZE, fmt, args); | 525 | len = vscnprintf(buf, MSG_SIZE, fmt, args); |
514 | 526 | ||
515 | queue_lock(&log_buffer_lock); | 527 | spin_lock(&log_buffer_lock); |
516 | /* Don't copy the trailing null byte, we don't want null bytes | 528 | /* Don't copy the trailing null byte, we don't want null bytes |
517 | * in a text file. | 529 | * in a text file. |
518 | */ | 530 | */ |
519 | rb_put(&log_buffer.buf, buf, len); | 531 | rb_put(&log_buffer.buf, buf, len); |
520 | queue_unlock(&log_buffer_lock); | 532 | spin_unlock(&log_buffer_lock); |
521 | 533 | ||
522 | local_irq_restore(flags); | 534 | local_irq_restore(flags); |
523 | va_end(args); | 535 | va_end(args); |