-rw-r--r--  arch/x86/include/asm/alternative.h         |  1
-rw-r--r--  arch/x86/include/asm/i387.h                | 24
-rw-r--r--  arch/x86/include/asm/mce.h                 |  3
-rw-r--r--  arch/x86/include/asm/msr-index.h           | 12
-rw-r--r--  arch/x86/include/asm/paravirt.h            |  2
-rw-r--r--  arch/x86/kernel/alternative.c              |  3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c   | 40
-rw-r--r--  arch/x86/kernel/smpboot.c                  | 14
-rw-r--r--  include/linux/interrupt.h                  |  6
-rw-r--r--  kernel/cpu.c                               | 11
-rw-r--r--  kernel/lockdep_proc.c                      | 16
11 files changed, 117 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 4a2adaa9aefc..13009d1af99a 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -66,6 +66,7 @@ extern void alternatives_smp_module_add(struct module *mod, char *name,
 extern void alternatives_smp_module_del(struct module *mod);
 extern void alternatives_smp_switch(int smp);
 extern int alternatives_text_reserved(void *start, void *end);
+extern bool skip_smp_alternatives;
 #else
 static inline void alternatives_smp_module_add(struct module *mod, char *name,
                                                void *locks, void *locks_end,
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 4aa2bb3b242a..ef328901c802 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -93,6 +93,17 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
         int err;
 
         /* See comment in fxsave() below. */
+#ifdef CONFIG_AS_FXSAVEQ
+        asm volatile("1: fxrstorq %[fx]\n\t"
+                     "2:\n"
+                     ".section .fixup,\"ax\"\n"
+                     "3: movl $-1,%[err]\n"
+                     "   jmp 2b\n"
+                     ".previous\n"
+                     _ASM_EXTABLE(1b, 3b)
+                     : [err] "=r" (err)
+                     : [fx] "m" (*fx), "0" (0));
+#else
         asm volatile("1: rex64/fxrstor (%[fx])\n\t"
                      "2:\n"
                      ".section .fixup,\"ax\"\n"
@@ -102,6 +113,7 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
                      _ASM_EXTABLE(1b, 3b)
                      : [err] "=r" (err)
                      : [fx] "R" (fx), "m" (*fx), "0" (0));
+#endif
         return err;
 }
 
@@ -119,6 +131,17 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
                 return -EFAULT;
 
         /* See comment in fxsave() below. */
+#ifdef CONFIG_AS_FXSAVEQ
+        asm volatile("1: fxsaveq %[fx]\n\t"
+                     "2:\n"
+                     ".section .fixup,\"ax\"\n"
+                     "3: movl $-1,%[err]\n"
+                     "   jmp 2b\n"
+                     ".previous\n"
+                     _ASM_EXTABLE(1b, 3b)
+                     : [err] "=r" (err), [fx] "=m" (*fx)
+                     : "0" (0));
+#else
         asm volatile("1: rex64/fxsave (%[fx])\n\t"
                      "2:\n"
                      ".section .fixup,\"ax\"\n"
@@ -128,6 +151,7 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
                      _ASM_EXTABLE(1b, 3b)
                      : [err] "=r" (err), "=m" (*fx)
                      : [fx] "R" (fx), "0" (0));
+#endif
         if (unlikely(err) &&
                 __clear_user(fx, sizeof(struct i387_fxsave_struct)))
                 err = -EFAULT;
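
Whichever branch is compiled in (the fxsaveq/fxrstorq mnemonics when the assembler supports them, the rex64-prefixed forms otherwise), the two helpers keep the same error contract: 0 on success, a negative value when the exception fixup fires. A hypothetical caller, only to illustrate that contract; the wrapper name is made up and not part of this patch:

    /* Illustrative sketch: save the FPU/SSE state to a user buffer using
     * the helper patched above. fxsave_user() clears the buffer itself if
     * the store faults, so the caller only has to propagate the error. */
    static int copy_fpu_to_user(struct i387_fxsave_struct __user *buf)
    {
            int err = fxsave_user(buf);     /* fxsaveq or rex64/fxsave */

            if (err)
                    return err;             /* -1 or -EFAULT from the fixup path */
            return 0;
    }
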
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index c62c13cb9788..eb16e94ae04f 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -223,6 +223,9 @@ void intel_init_thermal(struct cpuinfo_x86 *c);
 
 void mce_log_therm_throt_event(__u64 status);
 
+/* Interrupt Handler for core thermal thresholds */
+extern int (*platform_thermal_notify)(__u64 msr_val);
+
 #ifdef CONFIG_X86_THERMAL_VECTOR
 extern void mcheck_intel_therm_init(void);
 #else
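
Note that platform_thermal_notify is a bare function pointer rather than a notifier chain, so only one platform driver can hook it at a time. A minimal sketch of how such a driver might wire itself up; the handler and init/exit names are made up for illustration, and the real consumer of this hook is not part of this diff:

    /* Hypothetical consumer: called from the thermal interrupt with the raw
     * IA32_THERM_STATUS value, so it runs in interrupt context and must not
     * sleep. */
    static int my_core_threshold_event(__u64 msr_val)
    {
            pr_info("core thermal threshold crossed, THERM_STATUS=0x%llx\n",
                    msr_val);
            return 0;
    }

    static int __init my_thermal_init(void)
    {
            platform_thermal_notify = my_core_threshold_event;
            return 0;
    }

    static void __exit my_thermal_exit(void)
    {
            platform_thermal_notify = NULL;     /* stop receiving callbacks */
    }
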
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 86030f63ba02..4d0dfa0d998e 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -257,6 +257,18 @@
 #define PACKAGE_THERM_INT_LOW_ENABLE           (1 << 1)
 #define PACKAGE_THERM_INT_PLN_ENABLE           (1 << 24)
 
+/* Thermal Thresholds Support */
+#define THERM_INT_THRESHOLD0_ENABLE            (1 << 15)
+#define THERM_SHIFT_THRESHOLD0                 8
+#define THERM_MASK_THRESHOLD0                  (0x7f << THERM_SHIFT_THRESHOLD0)
+#define THERM_INT_THRESHOLD1_ENABLE            (1 << 23)
+#define THERM_SHIFT_THRESHOLD1                 16
+#define THERM_MASK_THRESHOLD1                  (0x7f << THERM_SHIFT_THRESHOLD1)
+#define THERM_STATUS_THRESHOLD0                (1 << 6)
+#define THERM_LOG_THRESHOLD0                   (1 << 7)
+#define THERM_STATUS_THRESHOLD1                (1 << 8)
+#define THERM_LOG_THRESHOLD1                   (1 << 9)
+
 /* MISC_ENABLE bits: architectural */
 #define MSR_IA32_MISC_ENABLE_FAST_STRING       (1ULL << 0)
 #define MSR_IA32_MISC_ENABLE_TCC               (1ULL << 1)
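
These constants describe fields in the existing MSR_IA32_THERM_INTERRUPT and MSR_IA32_THERM_STATUS registers: the THRESHOLD*_ENABLE and MASK/SHIFT values program the two temperature thresholds and their interrupt enables, while the STATUS/LOG bits report threshold crossings. A hedged sketch of how a driver could arm threshold 0 using the new masks; the helper name and the meaning given to 'offset' (degrees relative to the TCC activation point, per the SDM) are assumptions, not part of this patch:

    /* Illustrative only: program core thermal threshold 0 and enable its
     * interrupt. Must run on the CPU whose MSR is being written (e.g. via
     * smp_call_function_single()). */
    static void arm_thermal_threshold0(int offset)
    {
            u64 val;

            rdmsrl(MSR_IA32_THERM_INTERRUPT, val);
            val &= ~THERM_MASK_THRESHOLD0;
            val |= (offset << THERM_SHIFT_THRESHOLD0) & THERM_MASK_THRESHOLD0;
            val |= THERM_INT_THRESHOLD0_ENABLE;
            wrmsrl(MSR_IA32_THERM_INTERRUPT, val);
    }
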
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ef9975812c77..7709c12431b8 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -112,7 +112,7 @@ static inline void arch_safe_halt(void)
 
 static inline void halt(void)
 {
-        PVOP_VCALL0(pv_irq_ops.safe_halt);
+        PVOP_VCALL0(pv_irq_ops.halt);
 }
 
 static inline void wbinvd(void)
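
The one-word fix matters more than it looks: on native hardware pv_irq_ops.halt backs onto a plain hlt, while pv_irq_ops.safe_halt enables interrupts first, so the buggy halt() silently re-enabled interrupts before halting. Roughly, the two native backends look like this (a sketch from memory of the irqflags helpers, not part of this diff):

    static inline void native_halt(void)
    {
            asm volatile("hlt" : : : "memory");     /* halt with IRQ state untouched */
    }

    static inline void native_safe_halt(void)
    {
            /* enable interrupts, then halt: wake on the next interrupt */
            asm volatile("sti; hlt" : : : "memory");
    }
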
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 553d0b0d639b..123608531c8f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -353,6 +353,7 @@ void __init_or_module alternatives_smp_module_del(struct module *mod)
         mutex_unlock(&smp_alt);
 }
 
+bool skip_smp_alternatives;
 void alternatives_smp_switch(int smp)
 {
         struct smp_alt_module *mod;
@@ -368,7 +369,7 @@ void alternatives_smp_switch(int smp)
         printk("lockdep: fixing up alternatives.\n");
 #endif
 
-        if (noreplace_smp || smp_alt_once)
+        if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
                 return;
         BUG_ON(!smp && (num_online_cpus() > 1));
 
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 4b683267eca5..e12246ff5aa6 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -53,8 +53,13 @@ struct thermal_state {
         struct _thermal_state core_power_limit;
         struct _thermal_state package_throttle;
         struct _thermal_state package_power_limit;
+        struct _thermal_state core_thresh0;
+        struct _thermal_state core_thresh1;
 };
 
+/* Callback to handle core threshold interrupts */
+int (*platform_thermal_notify)(__u64 msr_val);
+
 static DEFINE_PER_CPU(struct thermal_state, thermal_state);
 
 static atomic_t therm_throt_en = ATOMIC_INIT(0);
@@ -200,6 +205,22 @@ static int therm_throt_process(bool new_event, int event, int level)
         return 0;
 }
 
+static int thresh_event_valid(int event)
+{
+        struct _thermal_state *state;
+        unsigned int this_cpu = smp_processor_id();
+        struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
+        u64 now = get_jiffies_64();
+
+        state = (event == 0) ? &pstate->core_thresh0 : &pstate->core_thresh1;
+
+        if (time_before64(now, state->next_check))
+                return 0;
+
+        state->next_check = now + CHECK_INTERVAL;
+        return 1;
+}
+
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
 static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
@@ -313,6 +334,22 @@ device_initcall(thermal_throttle_init_device);
 #define PACKAGE_THROTTLED      ((__u64)2 << 62)
 #define PACKAGE_POWER_LIMIT    ((__u64)3 << 62)
 
+static void notify_thresholds(__u64 msr_val)
+{
+        /* check whether the interrupt handler is defined;
+         * otherwise simply return
+         */
+        if (!platform_thermal_notify)
+                return;
+
+        /* lower threshold reached */
+        if ((msr_val & THERM_LOG_THRESHOLD0) && thresh_event_valid(0))
+                platform_thermal_notify(msr_val);
+        /* higher threshold reached */
+        if ((msr_val & THERM_LOG_THRESHOLD1) && thresh_event_valid(1))
+                platform_thermal_notify(msr_val);
+}
+
 /* Thermal transition interrupt handler */
 static void intel_thermal_interrupt(void)
 {
@@ -321,6 +358,9 @@ static void intel_thermal_interrupt(void)
 
         rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
 
+        /* Check for violation of core thermal thresholds*/
+        notify_thresholds(msr_val);
+
         if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
                                 THERMAL_THROTTLING_EVENT,
                                 CORE_LEVEL) != 0)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 68f61ac632e1..ee886fe10ef4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1161,6 +1161,20 @@ out:
         preempt_enable();
 }
 
+void arch_disable_nonboot_cpus_begin(void)
+{
+        /*
+         * Avoid the smp alternatives switch during the disable_nonboot_cpus().
+         * In the suspend path, we will be back in the SMP mode shortly anyways.
+         */
+        skip_smp_alternatives = true;
+}
+
+void arch_disable_nonboot_cpus_end(void)
+{
+        skip_smp_alternatives = false;
+}
+
 void arch_enable_nonboot_cpus_begin(void)
 {
         set_mtrr_aps_delayed_init();
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 79d0c4f6d071..55e0d4253e49 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -114,15 +114,15 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
 struct irqaction {
         irq_handler_t handler;
         unsigned long flags;
-        const char *name;
         void *dev_id;
         struct irqaction *next;
         int irq;
-        struct proc_dir_entry *dir;
         irq_handler_t thread_fn;
         struct task_struct *thread;
         unsigned long thread_flags;
-};
+        const char *name;
+        struct proc_dir_entry *dir;
+} ____cacheline_internodealigned_in_smp;
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
 
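
The reorder keeps the members touched on every interrupt (handler, dev_id, next, irq and the threaded-IRQ fields) at the front and pushes the rarely read name and dir to the tail, while ____cacheline_internodealigned_in_smp pads the struct so adjacent irqactions do not share a cache line across nodes. A hedged compile-time check one could add to confirm the hot fields fit in the first cache line on a given configuration (the helper is illustrative, not part of the patch, and assumes a 64-bit build with 64-byte lines):

    #include <linux/kernel.h>
    #include <linux/cache.h>
    #include <linux/interrupt.h>
    #include <linux/stddef.h>

    /* Illustrative only: fail the build if the fast-path members of
     * struct irqaction spill past the first cache line. */
    static inline void irqaction_layout_check(void)
    {
            BUILD_BUG_ON(offsetof(struct irqaction, thread_flags) +
                         sizeof(unsigned long) > L1_CACHE_BYTES);
    }
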
diff --git a/kernel/cpu.c b/kernel/cpu.c
index cb7a1efa9c2b..156cc5556140 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -384,6 +384,14 @@ out:
 #ifdef CONFIG_PM_SLEEP_SMP
 static cpumask_var_t frozen_cpus;
 
+void __weak arch_disable_nonboot_cpus_begin(void)
+{
+}
+
+void __weak arch_disable_nonboot_cpus_end(void)
+{
+}
+
 int disable_nonboot_cpus(void)
 {
         int cpu, first_cpu, error = 0;
@@ -395,6 +403,7 @@ int disable_nonboot_cpus(void)
          * with the userspace trying to use the CPU hotplug at the same time
          */
         cpumask_clear(frozen_cpus);
+        arch_disable_nonboot_cpus_begin();
 
         printk("Disabling non-boot CPUs ...\n");
         for_each_online_cpu(cpu) {
@@ -410,6 +419,8 @@ int disable_nonboot_cpus(void)
                 }
         }
 
+        arch_disable_nonboot_cpus_end();
+
         if (!error) {
                 BUG_ON(num_online_cpus() > 1);
                 /* Make sure the CPUs won't be enabled by someone else */
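
The empty __weak bodies in kernel/cpu.c are only the generic fallback: any architecture that provides a non-weak symbol of the same name, as x86 now does in smpboot.c, replaces them at link time without any registration call. A minimal standalone sketch of that pattern with made-up names; the two definitions live in separate compilation units of the same image:

    /* kernel/weak_demo.c (hypothetical): generic no-op default. */
    void __weak arch_quiesce_begin(void)
    {
            /* nothing to do by default */
    }

    /* arch/x86/strong_demo.c (hypothetical): the strong definition wins at
     * link time, so generic callers pick up the arch behaviour for free. */
    void arch_quiesce_begin(void)
    {
            /* arch-specific preparation, e.g. setting a flag such as
             * skip_smp_alternatives */
    }
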
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 59b76c8ce9d7..1969d2fc4b36 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -494,7 +494,6 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
         namelen += 2;
 
         for (i = 0; i < LOCKSTAT_POINTS; i++) {
-                char sym[KSYM_SYMBOL_LEN];
                 char ip[32];
 
                 if (class->contention_point[i] == 0)
@@ -503,15 +502,13 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
                 if (!i)
                         seq_line(m, '-', 40-namelen, namelen);
 
-                sprint_symbol(sym, class->contention_point[i]);
                 snprintf(ip, sizeof(ip), "[<%p>]",
                                 (void *)class->contention_point[i]);
-                seq_printf(m, "%40s %14lu %29s %s\n", name,
-                                stats->contention_point[i],
-                                ip, sym);
+                seq_printf(m, "%40s %14lu %29s %pS\n",
+                                name, stats->contention_point[i],
+                                ip, (void *)class->contention_point[i]);
         }
         for (i = 0; i < LOCKSTAT_POINTS; i++) {
-                char sym[KSYM_SYMBOL_LEN];
                 char ip[32];
 
                 if (class->contending_point[i] == 0)
@@ -520,12 +517,11 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
                 if (!i)
                         seq_line(m, '-', 40-namelen, namelen);
 
-                sprint_symbol(sym, class->contending_point[i]);
                 snprintf(ip, sizeof(ip), "[<%p>]",
                                 (void *)class->contending_point[i]);
-                seq_printf(m, "%40s %14lu %29s %s\n", name,
-                                stats->contending_point[i],
-                                ip, sym);
+                seq_printf(m, "%40s %14lu %29s %pS\n",
+                                name, stats->contending_point[i],
+                                ip, (void *)class->contending_point[i]);
         }
         if (i) {
                 seq_puts(m, "\n");
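
The lockdep change drops the on-stack buffer and the separate sprint_symbol() call in favour of the %pS printk extension, which resolves a code address to symbol+offset inside vsnprintf() itself. A small illustration of the same conversion outside lockdep; the helper name and its arguments are made up:

    #include <linux/kallsyms.h>
    #include <linux/seq_file.h>

    /* Illustrative helper: print one code address as symbol+offset. */
    static void show_point(struct seq_file *m, unsigned long addr)
    {
            /* Old style: resolve by hand into a stack buffer. */
            char sym[KSYM_SYMBOL_LEN];

            sprint_symbol(sym, addr);
            seq_printf(m, "old: %s\n", sym);

            /* New style: %pS does the lookup at print time, no buffer needed. */
            seq_printf(m, "new: %pS\n", (void *)addr);
    }
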
