author		Bjorn Steinbrink <B.Steinbrink@gmx.de>		2007-06-01 03:47:15 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-06-01 11:18:30 -0400
commit		9fcc15ec3c1c287a781a4620e52522b6186f26f6 (patch)
tree		86a0214fc404c0ca6e0ca283c83f22bd15aa2eb4 /kernel/time/timer_stats.c
parent		c79d9c9e9ace5eeae54f484f62fbd86bf27a344a (diff)
timer statistics: fix race
Fix two races in the timer stats lookup code: one by ensuring that the initialization of a new entry is complete before that entry is inserted into the hash table, the other by also clearing the hash table when the entries array is reset, so that no stale, "pre-inserted" entries survive.

Thanks to Eric Dumazet for reminding me of the memory barriers.

Signed-off-by: Bjorn Steinbrink <B.Steinbrink@gmx.de>
Signed-off-by: Ian Kumlien <pomac@vapor.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
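For context on the first race: tstat_lookup() scans the hash chain before taking table_lock, so a new entry must be fully initialized before it becomes reachable from the chain. Below is a minimal userspace sketch of that ordered-publication idiom, with a C11 fence standing in for the kernel's smp_mb(); the struct layout and the helper name publish_entry are illustrative assumptions, not the kernel code:

#include <stdatomic.h>
#include <string.h>

#define COMM_LEN 16	/* stands in for TASK_COMM_LEN */

struct entry {
	void		*timer;
	unsigned long	count;
	char		comm[COMM_LEN];
	struct entry	*next;
};

/* Fully initialize 'curr' from a template, then make it reachable. */
static void publish_entry(struct entry *_Atomic *head, struct entry *curr,
			  const struct entry *tmpl, const char *comm)
{
	*curr = *tmpl;
	curr->count = 0;
	curr->next = NULL;		/* must be set before linking in */
	memcpy(curr->comm, comm, COMM_LEN);

	/* Full barrier: every store above is ordered before the store
	 * that publishes the entry -- the role smp_mb() plays in the
	 * patch.  Without it, a lockless reader walking the chain could
	 * find the entry with uninitialized fields. */
	atomic_thread_fence(memory_order_seq_cst);

	atomic_store_explicit(head, curr, memory_order_relaxed);
}

On the consumer side, a chain walker relies on the data dependency of following the pointer it just loaded; only the writer needs an explicit full barrier here.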
Diffstat (limited to 'kernel/time/timer_stats.c')
-rw-r--r--	kernel/time/timer_stats.c	37
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 868f1bceb07f..fa3d380ca8c0 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -117,21 +117,6 @@ static struct entry entries[MAX_ENTRIES];
 
 static atomic_t overflow_count;
 
-static void reset_entries(void)
-{
-	nr_entries = 0;
-	memset(entries, 0, sizeof(entries));
-	atomic_set(&overflow_count, 0);
-}
-
-static struct entry *alloc_entry(void)
-{
-	if (nr_entries >= MAX_ENTRIES)
-		return NULL;
-
-	return entries + nr_entries++;
-}
-
 /*
  * The entries are in a hash-table, for fast lookup:
  */
@@ -149,6 +134,22 @@ static struct entry *alloc_entry(void)
 
 static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly;
 
+static void reset_entries(void)
+{
+	nr_entries = 0;
+	memset(entries, 0, sizeof(entries));
+	memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
+	atomic_set(&overflow_count, 0);
+}
+
+static struct entry *alloc_entry(void)
+{
+	if (nr_entries >= MAX_ENTRIES)
+		return NULL;
+
+	return entries + nr_entries++;
+}
+
 static int match_entries(struct entry *entry1, struct entry *entry2)
 {
 	return entry1->timer == entry2->timer &&
@@ -202,12 +203,15 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
 	if (curr) {
 		*curr = *entry;
 		curr->count = 0;
+		curr->next = NULL;
 		memcpy(curr->comm, comm, TASK_COMM_LEN);
+
+		smp_mb(); /* Ensure that curr is initialized before insert */
+
 		if (prev)
 			prev->next = curr;
 		else
 			*head = curr;
-		curr->next = NULL;
 	}
  out_unlock:
 	spin_unlock(&table_lock);
@@ -360,6 +364,7 @@ static ssize_t tstats_write(struct file *file, const char __user *buf,
 	if (!active) {
 		reset_entries();
 		time_start = ktime_get();
+		smp_mb();
 		active = 1;
 	}
 	break;
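The other two hunks work together: reset_entries() moves below the tstat_hash_table declaration so it can also clear the hash buckets, and the smp_mb() in tstats_write() makes that wiping visible before any CPU can observe active == 1. A hedged userspace analogue of this flag pairing (the function names and the reader side are assumptions for illustration; in the kernel the reader is the timer accounting path):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int active;

static void start_collection(void (*reset_entries)(void))
{
	reset_entries();	/* clear entries[] and the hash table */

	/* Release fence: the reset stores above are ordered before the
	 * flag store below -- the analogue of the patch's smp_mb(). */
	atomic_thread_fence(memory_order_release);

	atomic_store_explicit(&active, 1, memory_order_relaxed);
}

static bool collection_active(void)
{
	bool on = atomic_load_explicit(&active, memory_order_relaxed);

	/* Pairs with the release fence above: a reader that sees the
	 * flag set also sees the completed reset. */
	atomic_thread_fence(memory_order_acquire);
	return on;
}

Without the barrier, the store to active could become visible ahead of the memsets, letting another CPU start accounting into a table that is still being cleared.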