Diffstat (limited to 'kernel')

 kernel/lockdep.c                     |  11
 kernel/module.c                      | 150
 kernel/rcutorture.c                  |   8
 kernel/sched.c                       |   8
 kernel/softirq.c                     |   4
 kernel/softlockup.c                  |  54
 kernel/time/timer_stats.c            |  11
 kernel/trace/trace.c                 |  12
 kernel/trace/trace.h                 |   2
 kernel/trace/trace_functions_graph.c |   4
 kernel/trace/trace_hw_branches.c     |  51

 11 files changed, 84 insertions(+), 231 deletions(-)
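Two patterns recur throughout the hunks below: per-CPU variables get more specific names (lock_stats becomes cpu_lock_stats, tracer becomes hwb_tracer, and so on), presumably because per-CPU symbols share one kernel-wide namespace and the old names collided with other identifiers (kernel/lockdep.c, for instance, also has a function named lock_stats()); and open-coded __get_cpu_var() arithmetic is replaced by __this_cpu_*() operations. A minimal sketch of the definition/access model these changes rest on, using an illustrative variable name that does not come from the patch:

#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_hits);  /* one copy per possible CPU */

static unsigned long demo_total(void)
{
        unsigned long sum = 0;
        int cpu;

        /* per_cpu(var, cpu) reaches a specific CPU's copy by symbol name,
         * which is why every per-CPU variable needs a unique identifier. */
        for_each_possible_cpu(cpu)
                sum += per_cpu(demo_hits, cpu);
        return sum;
}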
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 4f8df01dbe51..429540c70d3f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -140,7 +140,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 }
 
 #ifdef CONFIG_LOCK_STAT
-static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
+                      cpu_lock_stats);
 
 static inline u64 lockstat_clock(void)
 {
@@ -198,7 +199,7 @@ struct lock_class_stats lock_stats(struct lock_class *class)
         memset(&stats, 0, sizeof(struct lock_class_stats));
         for_each_possible_cpu(cpu) {
                 struct lock_class_stats *pcs =
-                        &per_cpu(lock_stats, cpu)[class - lock_classes];
+                        &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
                 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                         stats.contention_point[i] += pcs->contention_point[i];
@@ -225,7 +226,7 @@ void clear_lock_stats(struct lock_class *class)
 
         for_each_possible_cpu(cpu) {
                 struct lock_class_stats *cpu_stats =
-                        &per_cpu(lock_stats, cpu)[class - lock_classes];
+                        &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
                 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
         }
@@ -235,12 +236,12 @@ void clear_lock_stats(struct lock_class *class)
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 {
-        return &get_cpu_var(lock_stats)[class - lock_classes];
+        return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
 }
 
 static void put_lock_stats(struct lock_class_stats *stats)
 {
-        put_cpu_var(lock_stats);
+        put_cpu_var(cpu_lock_stats);
 }
 
 static void lock_release_holdtime(struct held_lock *hlock)
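The get_lock_stats()/put_lock_stats() pair above only changes which symbol it names, but the pairing itself is worth spelling out: get_cpu_var() disables preemption before returning the current CPU's copy, and put_cpu_var() re-enables it. A hedged sketch of that contract, with illustrative names rather than the lockdep ones:

#include <linux/percpu.h>

struct demo_stats {
        unsigned long hold_time;
};

static DEFINE_PER_CPU(struct demo_stats, demo_cpu_stats);

static struct demo_stats *get_demo_stats(void)
{
        /* get_cpu_var() disables preemption so the caller stays on this CPU */
        return &get_cpu_var(demo_cpu_stats);
}

static void put_demo_stats(void)
{
        /* must balance get_cpu_var(); re-enables preemption */
        put_cpu_var(demo_cpu_stats);
}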
diff --git a/kernel/module.c b/kernel/module.c
index 5842a71cf052..12afc5a3ddd3 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -370,8 +370,6 @@ EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
 
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
 static void *percpu_modalloc(unsigned long size, unsigned long align,
                              const char *name)
 {
@@ -395,154 +393,6 @@ static void percpu_modfree(void *freeme)
         free_percpu(freeme);
 }
 
-#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-/* Number of blocks used and allocated. */
-static unsigned int pcpu_num_used, pcpu_num_allocated;
-/* Size of each block.  -ve means used. */
-static int *pcpu_size;
-
-static int split_block(unsigned int i, unsigned short size)
-{
-        /* Reallocation required? */
-        if (pcpu_num_used + 1 > pcpu_num_allocated) {
-                int *new;
-
-                new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2,
-                               GFP_KERNEL);
-                if (!new)
-                        return 0;
-
-                pcpu_num_allocated *= 2;
-                pcpu_size = new;
-        }
-
-        /* Insert a new subblock */
-        memmove(&pcpu_size[i+1], &pcpu_size[i],
-                sizeof(pcpu_size[0]) * (pcpu_num_used - i));
-        pcpu_num_used++;
-
-        pcpu_size[i+1] -= size;
-        pcpu_size[i] = size;
-        return 1;
-}
-
-static inline unsigned int block_size(int val)
-{
-        if (val < 0)
-                return -val;
-        return val;
-}
-
-static void *percpu_modalloc(unsigned long size, unsigned long align,
-                             const char *name)
-{
-        unsigned long extra;
-        unsigned int i;
-        void *ptr;
-        int cpu;
-
-        if (align > PAGE_SIZE) {
-                printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
-                       name, align, PAGE_SIZE);
-                align = PAGE_SIZE;
-        }
-
-        ptr = __per_cpu_start;
-        for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-                /* Extra for alignment requirement. */
-                extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
-                BUG_ON(i == 0 && extra != 0);
-
-                if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
-                        continue;
-
-                /* Transfer extra to previous block. */
-                if (pcpu_size[i-1] < 0)
-                        pcpu_size[i-1] -= extra;
-                else
-                        pcpu_size[i-1] += extra;
-                pcpu_size[i] -= extra;
-                ptr += extra;
-
-                /* Split block if warranted */
-                if (pcpu_size[i] - size > sizeof(unsigned long))
-                        if (!split_block(i, size))
-                                return NULL;
-
-                /* add the per-cpu scanning areas */
-                for_each_possible_cpu(cpu)
-                        kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
-                                       GFP_KERNEL);
-
-                /* Mark allocated */
-                pcpu_size[i] = -pcpu_size[i];
-                return ptr;
-        }
-
-        printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
-               size);
-        return NULL;
-}
-
-static void percpu_modfree(void *freeme)
-{
-        unsigned int i;
-        void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
-        int cpu;
-
-        /* First entry is core kernel percpu data. */
-        for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-                if (ptr == freeme) {
-                        pcpu_size[i] = -pcpu_size[i];
-                        goto free;
-                }
-        }
-        BUG();
-
- free:
-        /* remove the per-cpu scanning areas */
-        for_each_possible_cpu(cpu)
-                kmemleak_free(freeme + per_cpu_offset(cpu));
-
-        /* Merge with previous? */
-        if (pcpu_size[i-1] >= 0) {
-                pcpu_size[i-1] += pcpu_size[i];
-                pcpu_num_used--;
-                memmove(&pcpu_size[i], &pcpu_size[i+1],
-                        (pcpu_num_used - i) * sizeof(pcpu_size[0]));
-                i--;
-        }
-        /* Merge with next? */
-        if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
-                pcpu_size[i] += pcpu_size[i+1];
-                pcpu_num_used--;
-                memmove(&pcpu_size[i+1], &pcpu_size[i+2],
-                        (pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
-        }
-}
-
-static int percpu_modinit(void)
-{
-        pcpu_num_used = 2;
-        pcpu_num_allocated = 2;
-        pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
-                            GFP_KERNEL);
-        /* Static in-kernel percpu data (used). */
-        pcpu_size[0] = -(__per_cpu_end-__per_cpu_start);
-        /* Free room. */
-        pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
-        if (pcpu_size[1] < 0) {
-                printk(KERN_ERR "No per-cpu room for modules.\n");
-                pcpu_num_used = 1;
-        }
-
-        return 0;
-}
-__initcall(percpu_modinit);
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
 static unsigned int find_pcpusec(Elf_Ehdr *hdr,
                                  Elf_Shdr *sechdrs,
                                  const char *secstrings)
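With the CONFIG_HAVE_LEGACY_PER_CPU_AREA branch removed, the surviving percpu_modalloc()/percpu_modfree() path (only its opening and closing lines are visible in the hunk context) is the one built on the dynamic per-cpu allocator; the retained percpu_modfree() visibly calls free_percpu(). A rough sketch of that allocator's interface, offered as an assumption about what the retained code uses rather than a quote from it:

#include <linux/percpu.h>

/* Illustrative wrappers, not the kernel's own definitions. */
static void __percpu *demo_percpu_alloc(size_t size, size_t align)
{
        /* __alloc_percpu() reserves one chunk per possible CPU; individual
         * copies are then reached through per_cpu_ptr(ptr, cpu). */
        return __alloc_percpu(size, align);
}

static void demo_percpu_free(void __percpu *freeme)
{
        free_percpu(freeme);    /* releases all per-CPU copies at once */
}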
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index a621a67ef4e3..9bb52177af02 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -763,13 +763,13 @@ static void rcu_torture_timer(unsigned long unused)
                 /* Should not happen, but... */
                 pipe_count = RCU_TORTURE_PIPE_LEN;
         }
-        ++__get_cpu_var(rcu_torture_count)[pipe_count];
+        __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
         completed = cur_ops->completed() - completed;
         if (completed > RCU_TORTURE_PIPE_LEN) {
                 /* Should not happen, but... */
                 completed = RCU_TORTURE_PIPE_LEN;
         }
-        ++__get_cpu_var(rcu_torture_batch)[completed];
+        __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
         preempt_enable();
         cur_ops->readunlock(idx);
 }
@@ -818,13 +818,13 @@ rcu_torture_reader(void *arg)
                 /* Should not happen, but... */
                 pipe_count = RCU_TORTURE_PIPE_LEN;
         }
-        ++__get_cpu_var(rcu_torture_count)[pipe_count];
+        __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
         completed = cur_ops->completed() - completed;
         if (completed > RCU_TORTURE_PIPE_LEN) {
                 /* Should not happen, but... */
                 completed = RCU_TORTURE_PIPE_LEN;
         }
-        ++__get_cpu_var(rcu_torture_batch)[completed];
+        __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
         preempt_enable();
         cur_ops->readunlock(idx);
         schedule();
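The rcutorture counters are per-CPU arrays, and this conversion shows that this_cpu operations accept an element of such an array, not only a scalar. The per_cpu_var() wrapper appears to be a transitional macro from the period when per-CPU symbols still carried a per_cpu__ prefix; later kernels name the variable directly. A hedged sketch of the pattern with made-up names:

#include <linux/percpu.h>
#include <linux/preempt.h>

#define DEMO_PIPE_LEN 10

static DEFINE_PER_CPU(long [DEMO_PIPE_LEN + 1], demo_pipe_count);

static void demo_record(int stage)
{
        if (stage > DEMO_PIPE_LEN)
                stage = DEMO_PIPE_LEN;          /* clamp, as the torture test does */

        preempt_disable();
        /* old style:  ++__get_cpu_var(demo_pipe_count)[stage];
         * new style: the __this_cpu op locates the CPU-local copy itself. */
        __this_cpu_inc(demo_pipe_count[stage]);
        preempt_enable();
}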
diff --git a/kernel/sched.c b/kernel/sched.c
index ff39cadf621e..fd05861b2111 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
@@ -8286,14 +8286,14 @@ enum s_alloc {
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
 
 static int
 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
                  struct sched_group **sg, struct cpumask *unused)
 {
         if (sg)
-                *sg = &per_cpu(sched_group_cpus, cpu).sg;
+                *sg = &per_cpu(sched_groups, cpu).sg;
         return cpu;
 }
 #endif /* CONFIG_SCHED_SMT */
@@ -9583,7 +9583,7 @@ void __init sched_init(void)
 #elif defined CONFIG_USER_SCHED
                 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
                 init_tg_rt_entry(&init_task_group,
-                                &per_cpu(init_rt_rq, i),
+                                &per_cpu(init_rt_rq_var, i),
                                 &per_cpu(init_sched_rt_entity, i), i, 1,
                                 root_task_group.rt_se[i]);
 #endif
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 21939d9e830e..a09502e2ef75 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -697,7 +697,7 @@ void __init softirq_init(void)
         open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
 
-static int ksoftirqd(void * __bind_cpu)
+static int run_ksoftirqd(void * __bind_cpu)
 {
         set_current_state(TASK_INTERRUPTIBLE);
 
@@ -810,7 +810,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
         switch (action) {
         case CPU_UP_PREPARE:
         case CPU_UP_PREPARE_FROZEN:
-                p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
+                p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                 if (IS_ERR(p)) {
                         printk("ksoftirqd for %i failed\n", hotcpu);
                         return NOTIFY_BAD;
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 81324d12eb35..d22579087e27 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -22,9 +22,9 @@
 
 static DEFINE_SPINLOCK(print_lock);
 
-static DEFINE_PER_CPU(unsigned long, touch_timestamp);
-static DEFINE_PER_CPU(unsigned long, print_timestamp);
-static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
+static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
+static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
+static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
 
 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;
@@ -70,12 +70,12 @@ static void __touch_softlockup_watchdog(void)
 {
         int this_cpu = raw_smp_processor_id();
 
-        __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
+        __raw_get_cpu_var(softlockup_touch_ts) = get_timestamp(this_cpu);
 }
 
 void touch_softlockup_watchdog(void)
 {
-        __raw_get_cpu_var(touch_timestamp) = 0;
+        __raw_get_cpu_var(softlockup_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -85,7 +85,7 @@ void touch_all_softlockup_watchdogs(void)
 
         /* Cause each CPU to re-update its timestamp rather than complain */
         for_each_online_cpu(cpu)
-                per_cpu(touch_timestamp, cpu) = 0;
+                per_cpu(softlockup_touch_ts, cpu) = 0;
 }
 EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
 
@@ -104,28 +104,28 @@ int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 void softlockup_tick(void)
 {
         int this_cpu = smp_processor_id();
-        unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
-        unsigned long print_timestamp;
+        unsigned long touch_ts = per_cpu(softlockup_touch_ts, this_cpu);
+        unsigned long print_ts;
         struct pt_regs *regs = get_irq_regs();
         unsigned long now;
 
         /* Is detection switched off? */
-        if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
+        if (!per_cpu(softlockup_watchdog, this_cpu) || softlockup_thresh <= 0) {
                 /* Be sure we don't false trigger if switched back on */
-                if (touch_timestamp)
-                        per_cpu(touch_timestamp, this_cpu) = 0;
+                if (touch_ts)
+                        per_cpu(softlockup_touch_ts, this_cpu) = 0;
                 return;
         }
 
-        if (touch_timestamp == 0) {
+        if (touch_ts == 0) {
                 __touch_softlockup_watchdog();
                 return;
         }
 
-        print_timestamp = per_cpu(print_timestamp, this_cpu);
+        print_ts = per_cpu(softlockup_print_ts, this_cpu);
 
         /* report at most once a second */
-        if (print_timestamp == touch_timestamp || did_panic)
+        if (print_ts == touch_ts || did_panic)
                 return;
 
         /* do not print during early bootup: */
@@ -140,18 +140,18 @@ void softlockup_tick(void)
          * Wake up the high-prio watchdog task twice per
          * threshold timespan.
          */
-        if (now > touch_timestamp + softlockup_thresh/2)
-                wake_up_process(per_cpu(watchdog_task, this_cpu));
+        if (now > touch_ts + softlockup_thresh/2)
+                wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
 
         /* Warn about unreasonable delays: */
-        if (now <= (touch_timestamp + softlockup_thresh))
+        if (now <= (touch_ts + softlockup_thresh))
                 return;
 
-        per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+        per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
 
         spin_lock(&print_lock);
         printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
-                this_cpu, now - touch_timestamp,
+                this_cpu, now - touch_ts,
                 current->comm, task_pid_nr(current));
         print_modules();
         print_irqtrace_events(current);
@@ -209,32 +209,32 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
         switch (action) {
         case CPU_UP_PREPARE:
         case CPU_UP_PREPARE_FROZEN:
-                BUG_ON(per_cpu(watchdog_task, hotcpu));
+                BUG_ON(per_cpu(softlockup_watchdog, hotcpu));
                 p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
                 if (IS_ERR(p)) {
                         printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
                         return NOTIFY_BAD;
                 }
-                per_cpu(touch_timestamp, hotcpu) = 0;
-                per_cpu(watchdog_task, hotcpu) = p;
+                per_cpu(softlockup_touch_ts, hotcpu) = 0;
+                per_cpu(softlockup_watchdog, hotcpu) = p;
                 kthread_bind(p, hotcpu);
                 break;
         case CPU_ONLINE:
         case CPU_ONLINE_FROZEN:
-                wake_up_process(per_cpu(watchdog_task, hotcpu));
+                wake_up_process(per_cpu(softlockup_watchdog, hotcpu));
                 break;
 #ifdef CONFIG_HOTPLUG_CPU
         case CPU_UP_CANCELED:
         case CPU_UP_CANCELED_FROZEN:
-                if (!per_cpu(watchdog_task, hotcpu))
+                if (!per_cpu(softlockup_watchdog, hotcpu))
                         break;
                 /* Unbind so it can run.  Fall thru. */
-                kthread_bind(per_cpu(watchdog_task, hotcpu),
+                kthread_bind(per_cpu(softlockup_watchdog, hotcpu),
                              cpumask_any(cpu_online_mask));
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
-                p = per_cpu(watchdog_task, hotcpu);
-                per_cpu(watchdog_task, hotcpu) = NULL;
+                p = per_cpu(softlockup_watchdog, hotcpu);
+                per_cpu(softlockup_watchdog, hotcpu) = NULL;
                 kthread_stop(p);
                 break;
 #endif /* CONFIG_HOTPLUG_CPU */
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index ee5681f8d7ec..63b117e9eba1 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock);
 /*
  * Per-CPU lookup locks for fast hash lookup:
  */
-static DEFINE_PER_CPU(spinlock_t, lookup_lock);
+static DEFINE_PER_CPU(spinlock_t, tstats_lookup_lock);
 
 /*
  * Mutex to serialize state changes with show-stats activities:
@@ -245,7 +245,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
         if (likely(!timer_stats_active))
                 return;
 
-        lock = &per_cpu(lookup_lock, raw_smp_processor_id());
+        lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id());
 
         input.timer = timer;
         input.start_func = startf;
@@ -348,9 +348,10 @@ static void sync_access(void)
         int cpu;
 
         for_each_online_cpu(cpu) {
-                spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
+                spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
+                spin_lock_irqsave(lock, flags);
                 /* nothing */
-                spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
+                spin_unlock_irqrestore(lock, flags);
         }
 }
 
@@ -408,7 +409,7 @@ void __init init_timer_stats(void)
         int cpu;
 
         for_each_possible_cpu(cpu)
-                spin_lock_init(&per_cpu(lookup_lock, cpu));
+                spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
 }
 
 static int __init init_tstats_procfs(void)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 88bd9ae2a9ed..c82dfd92fdfd 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
         preempt_disable();
-        local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+        __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-        local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+        __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
         preempt_enable();
 }
 
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
         struct ftrace_entry *entry;
 
         /* If we are reading the ring buffer, don't trace */
-        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+        if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
                 return;
 
         event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -4454,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
         /* Allocate the first page for all buffers */
         for_each_tracing_cpu(i) {
                 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-                max_tr.data[i] = &per_cpu(max_data, i);
+                max_tr.data[i] = &per_cpu(max_tr_data, i);
         }
 
         trace_init_cmdlines();
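Beyond the rename of max_data, the notable change in trace.c is the type of ftrace_cpu_disabled: a local_t manipulated with local_inc/local_dec/local_read becomes a plain int manipulated with __this_cpu operations. That looks safe to assume because the counter is only touched by its owning CPU inside the preempt_disable() section the surrounding code already provides. A sketch of the resulting pattern, with an illustrative name:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, demo_disabled);      /* was a local_t before the conversion */

static void demo_disable(void)
{
        preempt_disable();                      /* pin to this CPU first */
        __this_cpu_inc(demo_disabled);          /* was local_inc(&__get_cpu_var(...)) */
}

static void demo_enable(void)
{
        __this_cpu_dec(demo_disabled);          /* was local_dec(&__get_cpu_var(...)) */
        preempt_enable();
}

static int demo_is_disabled(void)
{
        return __this_cpu_read(demo_disabled);  /* was local_read(&__get_cpu_var(...)) */
}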
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 7fa33cab6962..a52bed2eedd8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -443,7 +443,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
 
 extern int ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
+DECLARE_PER_CPU(int, ftrace_cpu_disabled);
 
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a43d009c561a..b1342c5d37cf 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -187,7 +187,7 @@ static int __trace_graph_entry(struct trace_array *tr,
         struct ring_buffer *buffer = tr->buffer;
         struct ftrace_graph_ent_entry *entry;
 
-        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+        if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
                 return 0;
 
         event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -251,7 +251,7 @@ static void __trace_graph_return(struct trace_array *tr,
         struct ring_buffer *buffer = tr->buffer;
         struct ftrace_graph_ret_entry *entry;
 
-        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+        if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
                 return;
 
         event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 69543a905cd5..7b97000745f5 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -20,10 +20,10 @@
 
 #define BTS_BUFFER_SIZE (1 << 13)
 
-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);
 
-#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())
 
 static int trace_hw_branches_enabled __read_mostly;
 static int trace_hw_branches_suspended __read_mostly;
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;
 
 static void bts_trace_init_cpu(int cpu)
 {
-        per_cpu(tracer, cpu) =
-                ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
-                                   NULL, (size_t)-1, BTS_KERNEL);
+        per_cpu(hwb_tracer, cpu) =
+                ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+                                   BTS_BUFFER_SIZE, NULL, (size_t)-1,
+                                   BTS_KERNEL);
 
-        if (IS_ERR(per_cpu(tracer, cpu)))
-                per_cpu(tracer, cpu) = NULL;
+        if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+                per_cpu(hwb_tracer, cpu) = NULL;
 }
 
 static int bts_trace_init(struct trace_array *tr)
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
         for_each_online_cpu(cpu) {
                 bts_trace_init_cpu(cpu);
 
-                if (likely(per_cpu(tracer, cpu)))
+                if (likely(per_cpu(hwb_tracer, cpu)))
                         trace_hw_branches_enabled = 1;
         }
         trace_hw_branches_suspended = 0;
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)
 
         get_online_cpus();
         for_each_online_cpu(cpu) {
-                if (likely(per_cpu(tracer, cpu))) {
-                        ds_release_bts(per_cpu(tracer, cpu));
-                        per_cpu(tracer, cpu) = NULL;
+                if (likely(per_cpu(hwb_tracer, cpu))) {
+                        ds_release_bts(per_cpu(hwb_tracer, cpu));
+                        per_cpu(hwb_tracer, cpu) = NULL;
                 }
         }
         trace_hw_branches_enabled = 0;
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)
 
         get_online_cpus();
         for_each_online_cpu(cpu)
-                if (likely(per_cpu(tracer, cpu)))
-                        ds_resume_bts(per_cpu(tracer, cpu));
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        ds_resume_bts(per_cpu(hwb_tracer, cpu));
         trace_hw_branches_suspended = 0;
         put_online_cpus();
 }
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)
 
         get_online_cpus();
         for_each_online_cpu(cpu)
-                if (likely(per_cpu(tracer, cpu)))
-                        ds_suspend_bts(per_cpu(tracer, cpu));
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        ds_suspend_bts(per_cpu(hwb_tracer, cpu));
         trace_hw_branches_suspended = 1;
         put_online_cpus();
 }
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
                 bts_trace_init_cpu(cpu);
 
                 if (trace_hw_branches_suspended &&
-                    likely(per_cpu(tracer, cpu)))
-                        ds_suspend_bts(per_cpu(tracer, cpu));
+                    likely(per_cpu(hwb_tracer, cpu)))
+                        ds_suspend_bts(per_cpu(hwb_tracer, cpu));
                 }
                 break;
 
         case CPU_DOWN_PREPARE:
                 /* The notification is sent with interrupts enabled. */
-                if (likely(per_cpu(tracer, cpu))) {
-                        ds_release_bts(per_cpu(tracer, cpu));
-                        per_cpu(tracer, cpu) = NULL;
+                if (likely(per_cpu(hwb_tracer, cpu))) {
+                        ds_release_bts(per_cpu(hwb_tracer, cpu));
+                        per_cpu(hwb_tracer, cpu) = NULL;
                 }
         }
 
@@ -258,8 +259,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 
         get_online_cpus();
         for_each_online_cpu(cpu)
-                if (likely(per_cpu(tracer, cpu)))
-                        ds_suspend_bts(per_cpu(tracer, cpu));
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        ds_suspend_bts(per_cpu(hwb_tracer, cpu));
         /*
          * We need to collect the trace on the respective cpu since ftrace
          * implicitly adds the record for the current cpu.
@@ -268,8 +269,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
         on_each_cpu(trace_bts_cpu, iter->tr, 1);
 
         for_each_online_cpu(cpu)
-                if (likely(per_cpu(tracer, cpu)))
-                        ds_resume_bts(per_cpu(tracer, cpu));
+                if (likely(per_cpu(hwb_tracer, cpu)))
+                        ds_resume_bts(per_cpu(hwb_tracer, cpu));
         put_online_cpus();
 }
 
