author     Linus Torvalds <torvalds@linux-foundation.org>  2009-10-14 18:25:35 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-10-14 18:25:35 -0400
commit     ee67e6cbe1121da1ae4eceb7b2bcb535c5cbf65e
tree       8ceefe56b6f325a4b8dbf0ee2dcda0d9216a52a3
parent     220a6258b1bac1f0b050a99aa8233330d6c8c416
parent     c7cedb125ba20cc531671dc667ad704baa667d97
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
oprofile: warn on freeing event buffer too early
oprofile: fix race condition in event_buffer free
lockdep: Use cpu_clock() for lockstat
 drivers/oprofile/event_buffer.c | 35 +++++++++++++++++++++++-----------
 kernel/lockdep.c                | 20 ++++++++++++--------
 2 files changed, 38 insertions(+), 17 deletions(-)
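The two oprofile changes close a window in which add_event_entry() or a pending read() could touch event_buffer after free_event_buffer() had run. The sketch below is a minimal user-space analogue of that fix, not the kernel code: a pthread mutex stands in for buffer_mutex, and the shape matches the patch — free and NULL the pointer under the mutex, and have every user re-check the pointer after taking it.

/*
 * User-space analogue of the event-buffer race fix (assumed names;
 * pthread_mutex_t stands in for the kernel's buffer_mutex).
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long *event_buffer;

/* Reader side: re-check the pointer under the mutex, as the patch does
 * in event_buffer_read(), and bail out if the buffer is already gone. */
static int buffer_read(unsigned long *out)
{
	int ret = -1;

	pthread_mutex_lock(&buffer_mutex);
	if (event_buffer) {
		*out = event_buffer[0];
		ret = 0;
	}
	pthread_mutex_unlock(&buffer_mutex);
	return ret;
}

/* Free side: freeing and clearing the pointer under the same mutex
 * closes the race window, mirroring the new free_event_buffer(). */
static void buffer_free(void)
{
	pthread_mutex_lock(&buffer_mutex);
	free(event_buffer);
	event_buffer = NULL;
	pthread_mutex_unlock(&buffer_mutex);
}

int main(void)
{
	unsigned long v;

	event_buffer = calloc(16, sizeof(*event_buffer));
	if (!event_buffer)
		return 1;
	event_buffer[0] = 42;

	if (buffer_read(&v) == 0)
		printf("read %lu\n", v);
	buffer_free();
	if (buffer_read(&v) != 0)
		printf("read after free correctly refused\n");
	return 0;
}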
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 2b7ae366ceb1..5df60a6b6776 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -35,12 +35,23 @@ static size_t buffer_pos;
 /* atomic_t because wait_event checks it outside of buffer_mutex */
 static atomic_t buffer_ready = ATOMIC_INIT(0);
 
-/* Add an entry to the event buffer. When we
- * get near to the end we wake up the process
- * sleeping on the read() of the file.
+/*
+ * Add an entry to the event buffer. When we get near to the end we
+ * wake up the process sleeping on the read() of the file. To protect
+ * the event_buffer this function may only be called when buffer_mutex
+ * is set.
  */
 void add_event_entry(unsigned long value)
 {
+	/*
+	 * This shouldn't happen since all workqueues or handlers are
+	 * canceled or flushed before the event buffer is freed.
+	 */
+	if (!event_buffer) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	if (buffer_pos == buffer_size) {
 		atomic_inc(&oprofile_stats.event_lost_overflow);
 		return;
@@ -69,7 +80,6 @@ void wake_up_buffer_waiter(void)
 
 int alloc_event_buffer(void)
 {
-	int err = -ENOMEM;
 	unsigned long flags;
 
 	spin_lock_irqsave(&oprofilefs_lock, flags);
@@ -80,21 +90,22 @@ int alloc_event_buffer(void)
 	if (buffer_watershed >= buffer_size)
 		return -EINVAL;
 
+	buffer_pos = 0;
 	event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
 	if (!event_buffer)
-		goto out;
+		return -ENOMEM;
 
-	err = 0;
-out:
-	return err;
+	return 0;
 }
 
 
 void free_event_buffer(void)
 {
+	mutex_lock(&buffer_mutex);
 	vfree(event_buffer);
-
+	buffer_pos = 0;
 	event_buffer = NULL;
+	mutex_unlock(&buffer_mutex);
 }
 
 
@@ -167,6 +178,12 @@ static ssize_t event_buffer_read(struct file *file, char __user *buf,
 
 	mutex_lock(&buffer_mutex);
 
+	/* May happen if the buffer is freed during pending reads. */
+	if (!event_buffer) {
+		retval = -EINTR;
+		goto out;
+	}
+
 	atomic_set(&buffer_ready, 0);
 
 	retval = -EFAULT;
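The add_event_entry() hunk above pairs the same NULL check with a warn-once: a call after the buffer is freed indicates a teardown-ordering bug, so it is reported once and the event dropped. A rough user-space sketch of that guard, with a static flag standing in for the kernel's WARN_ON_ONCE (illustrative names only):

#include <stdio.h>

static unsigned long *event_buffer;

static void add_event_entry(unsigned long value)
{
	static int warned;

	/* A call after the buffer is gone means a workqueue or handler
	 * outlived it: report the bug once, then drop the event. */
	if (!event_buffer) {
		if (!warned) {
			warned = 1;
			fprintf(stderr, "add_event_entry: buffer already freed\n");
		}
		return;
	}
	event_buffer[0] = value;	/* simplified; the real code appends */
}

int main(void)
{
	add_event_entry(1);	/* buffer never allocated: warns once */
	add_event_entry(2);	/* silently dropped */
	return 0;
}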
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3815ac1d58b2..9af56723c096 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
+static inline u64 lockstat_clock(void)
+{
+	return cpu_clock(smp_processor_id());
+}
+
 static int lock_point(unsigned long points[], unsigned long ip)
 {
 	int i;
@@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip)
 	return i;
 }
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
 	if (time > lt->max)
 		lt->max = time;
@@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats)
 static void lock_release_holdtime(struct held_lock *hlock)
 {
 	struct lock_class_stats *stats;
-	s64 holdtime;
+	u64 holdtime;
 
 	if (!lock_stat)
 		return;
 
-	holdtime = sched_clock() - hlock->holdtime_stamp;
+	holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
 	stats = get_lock_stats(hlock_class(hlock));
 	if (hlock->read)
@@ -2792,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
 	hlock->waittime_stamp = 0;
-	hlock->holdtime_stamp = sched_clock();
+	hlock->holdtime_stamp = lockstat_clock();
 #endif
 
 	if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3322,7 +3327,7 @@ found_it:
 	if (hlock->instance != lock)
 		return;
 
-	hlock->waittime_stamp = sched_clock();
+	hlock->waittime_stamp = lockstat_clock();
 
 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
 	contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3345,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	struct held_lock *hlock, *prev_hlock;
 	struct lock_class_stats *stats;
 	unsigned int depth;
-	u64 now;
-	s64 waittime = 0;
+	u64 now, waittime = 0;
 	int i, cpu;
 
 	depth = curr->lockdep_depth;
@@ -3374,7 +3378,7 @@ found_it:
 
 	cpu = smp_processor_id();
 	if (hlock->waittime_stamp) {
-		now = sched_clock();
+		now = lockstat_clock();
 		waittime = now - hlock->waittime_stamp;
 		hlock->holdtime_stamp = now;
 	}
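The lockdep change routes every lockstat timestamp through a single lockstat_clock() helper (cpu_clock() rather than raw sched_clock()), and the delta types move from s64 to u64 to match. Below is a hedged user-space sketch of the same pattern, with clock_gettime(CLOCK_MONOTONIC) as an assumed stand-in for the kernel clock; centralizing the clock in one helper means a different time source needs changing in exactly one place.

/*
 * Sketch of the lockstat timing pattern (assumption: CLOCK_MONOTONIC
 * stands in for cpu_clock(); struct lock_time mirrors the kernel's).
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static inline uint64_t lockstat_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

struct lock_time {
	uint64_t min, max, total;
	unsigned long nr;
};

/* Same shape as the kernel's lock_time_inc(): track min/max/total
 * over unsigned 64-bit deltas. */
static void lock_time_inc(struct lock_time *lt, uint64_t time)
{
	if (time > lt->max)
		lt->max = time;
	if (time < lt->min || !lt->nr)
		lt->min = time;
	lt->total += time;
	lt->nr++;
}

int main(void)
{
	struct lock_time lt = { 0 };
	uint64_t stamp = lockstat_clock();

	/* ... critical section would run here ... */
	lock_time_inc(&lt, lockstat_clock() - stamp);
	printf("samples=%lu total=%llu ns\n", lt.nr,
	       (unsigned long long)lt.total);
	return 0;
}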