author    David Rientjes <rientjes@google.com>    2009-09-09 09:02:33 -0400
committer Robert Richter <robert.richter@amd.com> 2009-10-09 12:02:01 -0400
commit    066b3aa8454bee3cdc665d86b5de812d8d0513b3
tree      5005ac2eba0190371c21d88af286ecff267a229c
parent    374576a8b6f865022c0fd1ca62396889b23d66dd
oprofile: fix race condition in event_buffer free
Looking at the 2.6.31-rc9 code, it appears there is a race condition in the
event_buffer cleanup code path (shutdown). This could lead to a kernel panic,
as some CPUs may still be operating on the event buffer AFTER it has been
freed. The attached patch solves the problem and makes sure CPUs check whether
the buffer is NULL before they access it, as some may have been spinning on
the mutex while the buffer was being freed. The race may happen if the buffer
is freed during pending reads, but it is not clear why there would be races in
add_event_entry(), since all workqueues and handlers are canceled or flushed
before the event buffer is freed.

Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Robert Richter <robert.richter@amd.com>
 drivers/oprofile/event_buffer.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)
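For context, the locking pattern the patch applies can be illustrated outside
the kernel. The sketch below is a simplified userspace analogue, not the
kernel code: pthreads and malloc/free stand in for buffer_mutex and vfree, and
the *_sketch names are hypothetical. The teardown path clears the pointer
under the same mutex that readers take, and every reader re-checks the pointer
after acquiring the lock, so no CPU can touch the buffer after it is freed.

/* Userspace sketch of the free/read synchronization (illustrative only). */
#include <pthread.h>
#include <stdlib.h>

static unsigned long *event_buffer;
static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;

static void free_event_buffer_sketch(void)
{
	pthread_mutex_lock(&buffer_mutex);
	free(event_buffer);
	event_buffer = NULL;	/* readers now see NULL, never a stale pointer */
	pthread_mutex_unlock(&buffer_mutex);
}

static int read_event_sketch(unsigned long *out)
{
	int ret = -1;

	pthread_mutex_lock(&buffer_mutex);
	if (!event_buffer)	/* buffer may have been freed while we waited */
		goto out;
	*out = event_buffer[0];
	ret = 0;
out:
	pthread_mutex_unlock(&buffer_mutex);
	return ret;
}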
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 2b7ae366ceb1..c38adb389064 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -41,6 +41,12 @@ static atomic_t buffer_ready = ATOMIC_INIT(0);
  */
 void add_event_entry(unsigned long value)
 {
+	/*
+	 * catch potential error
+	 */
+	if (!event_buffer)
+		return;
+
 	if (buffer_pos == buffer_size) {
 		atomic_inc(&oprofile_stats.event_lost_overflow);
 		return;
@@ -92,9 +98,10 @@ out:
 
 void free_event_buffer(void)
 {
+	mutex_lock(&buffer_mutex);
 	vfree(event_buffer);
-
 	event_buffer = NULL;
+	mutex_unlock(&buffer_mutex);
 }
 
 
@@ -167,6 +174,11 @@ static ssize_t event_buffer_read(struct file *file, char __user *buf,
 
 	mutex_lock(&buffer_mutex);
 
+	if (!event_buffer) {
+		retval = -EINTR;
+		goto out;
+	}
+
 	atomic_set(&buffer_ready, 0);
 
 	retval = -EFAULT;