author     Chris J Arges <arges@linux.vnet.ibm.com>    2008-10-15 12:03:39 -0400
committer  Robert Richter <robert.richter@amd.com>     2008-10-17 04:38:58 -0400
commit     4bd9b9dc97e344670e9e5762399a07dcd5f15311
tree       47bafd5ea9e5fd68738ab6788fee6d7f21815303 /drivers
parent     0f019cc477b494dfc472f2a98eb64d02d4937741
oprofile: hotplug cpu fix
This patch addresses problems when hotplugging cpus while
profiling. Instead of allocating buffers only for the online cpus,
buffers are allocated for all possible cpus, which allows cpus to be
onlined during operation. If a cpu is offlined before profiling is
shut down, wq_sync_buffer() checks for this condition, cancels the
pending work, and does not sync that buffer.
Signed-off-by: Chris J Arges <arges@linux.vnet.ibm.com>
Signed-off-by: Robert Richter <robert.richter@amd.com>
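
For orientation, the sketch below shows how wq_sync_buffer() reads once this
change is applied. Only the cpu_online() check and the cancel_delayed_work()
call come from this commit (see the third hunk in the diff below); the
container_of() setup and the work_enabled / DEFAULT_TIMER_EXPIRE rescheduling
tail are assumptions about the surrounding driver code of this era, included
only to make the control flow visible.

static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);

	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
			smp_processor_id(), b->cpu);

		/* The cpu was offlined before profiling shut down:
		 * drop the pending work instead of syncing its buffer. */
		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* assumed tail: re-arm the delayed work unless shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}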
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/oprofile/cpu_buffer.c	9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 5a178065cfa0..67bcc1c95e60 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -39,7 +39,7 @@ void free_cpu_buffers(void)
 {
 	int i;
 
-	for_each_online_cpu(i) {
+	for_each_possible_cpu(i) {
 		vfree(per_cpu(cpu_buffer, i).buffer);
 		per_cpu(cpu_buffer, i).buffer = NULL;
 	}
@@ -51,7 +51,7 @@ int alloc_cpu_buffers(void)
 
 	unsigned long buffer_size = fs_cpu_buffer_size;
 
-	for_each_online_cpu(i) {
+	for_each_possible_cpu(i) {
 		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
 		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
@@ -350,6 +350,11 @@ static void wq_sync_buffer(struct work_struct *work)
 	if (b->cpu != smp_processor_id()) {
 		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
 			smp_processor_id(), b->cpu);
+
+		if (!cpu_online(b->cpu)) {
+			cancel_delayed_work(&b->work);
+			return;
+		}
 	}
 	sync_buffer(b->cpu);
 