diff options
Diffstat (limited to 'kernel/time/timer_stats.c')
| -rw-r--r-- | kernel/time/timer_stats.c | 18 |
1 files changed, 10 insertions, 8 deletions
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index ee5681f8d7ec..2f3b585b8d7d 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock);
| 86 | /* | 86 | /* |
| 87 | * Per-CPU lookup locks for fast hash lookup: | 87 | * Per-CPU lookup locks for fast hash lookup: |
| 88 | */ | 88 | */ |
| 89 | static DEFINE_PER_CPU(spinlock_t, lookup_lock); | 89 | static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock); |
| 90 | 90 | ||
| 91 | /* | 91 | /* |
| 92 | * Mutex to serialize state changes with show-stats activities: | 92 | * Mutex to serialize state changes with show-stats activities: |
@@ -238,14 +238,14 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
| 238 | /* | 238 | /* |
| 239 | * It doesnt matter which lock we take: | 239 | * It doesnt matter which lock we take: |
| 240 | */ | 240 | */ |
| 241 | spinlock_t *lock; | 241 | raw_spinlock_t *lock; |
| 242 | struct entry *entry, input; | 242 | struct entry *entry, input; |
| 243 | unsigned long flags; | 243 | unsigned long flags; |
| 244 | 244 | ||
| 245 | if (likely(!timer_stats_active)) | 245 | if (likely(!timer_stats_active)) |
| 246 | return; | 246 | return; |
| 247 | 247 | ||
| 248 | lock = &per_cpu(lookup_lock, raw_smp_processor_id()); | 248 | lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id()); |
| 249 | 249 | ||
| 250 | input.timer = timer; | 250 | input.timer = timer; |
| 251 | input.start_func = startf; | 251 | input.start_func = startf; |
@@ -253,7 +253,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
| 253 | input.pid = pid; | 253 | input.pid = pid; |
| 254 | input.timer_flag = timer_flag; | 254 | input.timer_flag = timer_flag; |
| 255 | 255 | ||
| 256 | spin_lock_irqsave(lock, flags); | 256 | raw_spin_lock_irqsave(lock, flags); |
| 257 | if (!timer_stats_active) | 257 | if (!timer_stats_active) |
| 258 | goto out_unlock; | 258 | goto out_unlock; |
| 259 | 259 | ||
@@ -264,7 +264,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
| 264 | atomic_inc(&overflow_count); | 264 | atomic_inc(&overflow_count); |
| 265 | 265 | ||
| 266 | out_unlock: | 266 | out_unlock: |
| 267 | spin_unlock_irqrestore(lock, flags); | 267 | raw_spin_unlock_irqrestore(lock, flags); |
| 268 | } | 268 | } |
| 269 | 269 | ||
| 270 | static void print_name_offset(struct seq_file *m, unsigned long addr) | 270 | static void print_name_offset(struct seq_file *m, unsigned long addr) |
@@ -348,9 +348,11 @@ static void sync_access(void)
| 348 | int cpu; | 348 | int cpu; |
| 349 | 349 | ||
| 350 | for_each_online_cpu(cpu) { | 350 | for_each_online_cpu(cpu) { |
| 351 | spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags); | 351 | raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); |
| 352 | |||
| 353 | raw_spin_lock_irqsave(lock, flags); | ||
| 352 | /* nothing */ | 354 | /* nothing */ |
| 353 | spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags); | 355 | raw_spin_unlock_irqrestore(lock, flags); |
| 354 | } | 356 | } |
| 355 | } | 357 | } |
| 356 | 358 | ||
@@ -408,7 +410,7 @@ void __init init_timer_stats(void)
| 408 | int cpu; | 410 | int cpu; |
| 409 | 411 | ||
| 410 | for_each_possible_cpu(cpu) | 412 | for_each_possible_cpu(cpu) |
| 411 | spin_lock_init(&per_cpu(lookup_lock, cpu)); | 413 | raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); |
| 412 | } | 414 | } |
| 413 | 415 | ||
| 414 | static int __init init_tstats_procfs(void) | 416 | static int __init init_tstats_procfs(void) |
