author		Linus Torvalds <torvalds@linux-foundation.org>	2010-05-18 11:18:07 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-18 11:18:07 -0400
commit		3aaf51ace5975050ab43c7d4d7e439e0ae7d13d7 (patch)
tree		3ceb741d8b78c6dc78be3fd2e4f8aac443044787 /drivers
parent		f262af3d08d3fffc4e11277d3a177b2d67ea2aba (diff)
parent		cc49b092d308f8ea8634134b0d95d831a88a674b (diff)
Merge branch 'oprofile-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'oprofile-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (24 commits)
  oprofile/x86: make AMD IBS hotplug capable
  oprofile/x86: notify cpus only when daemon is running
  oprofile/x86: reordering some functions
  oprofile/x86: stop disabled counters in nmi handler
  oprofile/x86: protect cpu hotplug sections
  oprofile/x86: remove CONFIG_SMP macros
  oprofile/x86: fix uninitialized counter usage during cpu hotplug
  oprofile/x86: remove duplicate IBS capability check
  oprofile/x86: move IBS code
  oprofile/x86: return -EBUSY if counters are already reserved
  oprofile/x86: moving shutdown functions
  oprofile/x86: reserve counter msrs pairwise
  oprofile/x86: rework error handler in nmi_setup()
  oprofile: update file list in MAINTAINERS file
  oprofile: protect from not being in an IRQ context
  oprofile: remove double ring buffering
  ring-buffer: Add lost event count to end of sub buffer
  tracing: Show the lost events in the trace_pipe output
  ring-buffer: Add place holder recording of dropped events
  tracing: Fix compile error in module tracepoints when MODULE_UNLOAD not set
  ...
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/oprofile/cpu_buffer.c	75
-rw-r--r--	drivers/oprofile/oprof.c	12
-rw-r--r--	drivers/oprofile/oprof.h	3
-rw-r--r--	drivers/oprofile/timer_int.c	78
4 files changed, 102 insertions, 66 deletions
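
For orientation before the diff: the timer_int.c rewrite below replaces the global timer hook with one pinned hrtimer per CPU, started on every online CPU and torn down per CPU. What follows is a minimal, self-contained sketch of that per-CPU hrtimer pattern, assuming the ~2.6.34-era APIs that appear in the diff; the module and symbol names (sample_*) are illustrative only and are not part of this merge.

/*
 * Sketch only: a toy module that arms one pinned, periodically re-armed
 * hrtimer per online CPU, mirroring the pattern in the new timer_int.c.
 * All identifiers here are illustrative; the real driver additionally
 * registers a CPU hotplug notifier (see the diff below).
 */
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>	/* TICK_NSEC */

static DEFINE_PER_CPU(struct hrtimer, sample_hrtimer);
static DEFINE_PER_CPU(unsigned long, sample_ticks);

static enum hrtimer_restart sample_tick(struct hrtimer *t)
{
	__get_cpu_var(sample_ticks)++;			/* stand-in for taking a sample */
	hrtimer_forward_now(t, ns_to_ktime(TICK_NSEC));	/* re-arm one tick ahead */
	return HRTIMER_RESTART;
}

static void sample_start_this_cpu(void *unused)		/* runs on each CPU via IPI */
{
	struct hrtimer *t = &__get_cpu_var(sample_hrtimer);

	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->function = sample_tick;
	hrtimer_start(t, ns_to_ktime(TICK_NSEC), HRTIMER_MODE_REL_PINNED);
}

static int __init sample_init(void)
{
	on_each_cpu(sample_start_this_cpu, NULL, 1);	/* arm a timer per online CPU */
	return 0;
}

static void __exit sample_exit(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		hrtimer_cancel(&per_cpu(sample_hrtimer, cpu));
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");
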
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 166b67ea622f..219f79e2210a 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -30,23 +30,7 @@
 
 #define OP_BUFFER_FLAGS	0
 
-/*
- * Read and write access is using spin locking. Thus, writing to the
- * buffer by NMI handler (x86) could occur also during critical
- * sections when reading the buffer. To avoid this, there are 2
- * buffers for independent read and write access. Read access is in
- * process context only, write access only in the NMI handler. If the
- * read buffer runs empty, both buffers are swapped atomically. There
- * is potentially a small window during swapping where the buffers are
- * disabled and samples could be lost.
- *
- * Using 2 buffers is a little bit overhead, but the solution is clear
- * and does not require changes in the ring buffer implementation. It
- * can be changed to a single buffer solution when the ring buffer
- * access is implemented as non-locking atomic code.
- */
-static struct ring_buffer *op_ring_buffer_read;
-static struct ring_buffer *op_ring_buffer_write;
+static struct ring_buffer *op_ring_buffer;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
@@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)
 
 void free_cpu_buffers(void)
 {
-	if (op_ring_buffer_read)
-		ring_buffer_free(op_ring_buffer_read);
-	op_ring_buffer_read = NULL;
-	if (op_ring_buffer_write)
-		ring_buffer_free(op_ring_buffer_write);
-	op_ring_buffer_write = NULL;
+	if (op_ring_buffer)
+		ring_buffer_free(op_ring_buffer);
+	op_ring_buffer = NULL;
 }
 
 #define RB_EVENT_HDR_SIZE 4
@@ -86,11 +67,8 @@ int alloc_cpu_buffers(void)
 	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
 						 RB_EVENT_HDR_SIZE);
 
-	op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
-	if (!op_ring_buffer_read)
-		goto fail;
-	op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
-	if (!op_ring_buffer_write)
+	op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
+	if (!op_ring_buffer)
 		goto fail;
 
 	for_each_possible_cpu(i) {
@@ -162,16 +140,11 @@ struct op_sample
 *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
 {
 	entry->event = ring_buffer_lock_reserve
-		(op_ring_buffer_write, sizeof(struct op_sample) +
+		(op_ring_buffer, sizeof(struct op_sample) +
 		 size * sizeof(entry->sample->data[0]));
-	if (entry->event)
-		entry->sample = ring_buffer_event_data(entry->event);
-	else
-		entry->sample = NULL;
-
-	if (!entry->sample)
+	if (!entry->event)
 		return NULL;
-
+	entry->sample = ring_buffer_event_data(entry->event);
 	entry->size = size;
 	entry->data = entry->sample->data;
 
@@ -180,25 +153,16 @@ struct op_sample
 
 int op_cpu_buffer_write_commit(struct op_entry *entry)
 {
-	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
+	return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
 }
 
 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
 {
 	struct ring_buffer_event *e;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		goto event;
-	if (ring_buffer_swap_cpu(op_ring_buffer_read,
-				 op_ring_buffer_write,
-				 cpu))
+	e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
+	if (!e)
 		return NULL;
-	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
-	if (e)
-		goto event;
-	return NULL;
 
-event:
 	entry->event = e;
 	entry->sample = ring_buffer_event_data(e);
 	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
@@ -209,8 +173,7 @@ event:
 
 unsigned long op_cpu_buffer_entries(int cpu)
 {
-	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
-		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
+	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
 }
 
 static int
@@ -356,8 +319,16 @@ void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
 
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 {
-	int is_kernel = !user_mode(regs);
-	unsigned long pc = profile_pc(regs);
+	int is_kernel;
+	unsigned long pc;
+
+	if (likely(regs)) {
+		is_kernel = !user_mode(regs);
+		pc = profile_pc(regs);
+	} else {
+		is_kernel = 0;    /* This value will not be used */
+		pc = ESCAPE_CODE; /* as this causes an early return. */
+	}
 
 	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
 }
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dc8a0428260d..b336cd9ee7a1 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -253,22 +253,26 @@ static int __init oprofile_init(void)
 	int err;
 
 	err = oprofile_arch_init(&oprofile_ops);
-
 	if (err < 0 || timer) {
 		printk(KERN_INFO "oprofile: using timer interrupt.\n");
-		oprofile_timer_init(&oprofile_ops);
+		err = oprofile_timer_init(&oprofile_ops);
+		if (err)
+			goto out_arch;
 	}
-
 	err = oprofilefs_register();
 	if (err)
-		oprofile_arch_exit();
+		goto out_arch;
+	return 0;
 
+out_arch:
+	oprofile_arch_exit();
 	return err;
 }
 
 
 static void __exit oprofile_exit(void)
 {
+	oprofile_timer_exit();
 	oprofilefs_unregister();
 	oprofile_arch_exit();
 }
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index cb92f5c98c1a..47e12cb4ee8b 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -34,7 +34,8 @@ struct super_block;
 struct dentry;
 
 void oprofile_create_files(struct super_block *sb, struct dentry *root);
-void oprofile_timer_init(struct oprofile_operations *ops);
+int oprofile_timer_init(struct oprofile_operations *ops);
+void oprofile_timer_exit(void);
 
 int oprofile_set_backtrace(unsigned long depth);
 int oprofile_set_timeout(unsigned long time);
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 333f915568c7..dc0ae4d14dff 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -13,34 +13,94 @@
 #include <linux/oprofile.h>
 #include <linux/profile.h>
 #include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/hrtimer.h>
+#include <asm/irq_regs.h>
 #include <asm/ptrace.h>
 
 #include "oprof.h"
 
-static int timer_notify(struct pt_regs *regs)
+static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);
+
+static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
+{
+	oprofile_add_sample(get_irq_regs(), 0);
+	hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC));
+	return HRTIMER_RESTART;
+}
+
+static void __oprofile_hrtimer_start(void *unused)
+{
+	struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);
+
+	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtimer->function = oprofile_hrtimer_notify;
+
+	hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
+		      HRTIMER_MODE_REL_PINNED);
+}
+
+static int oprofile_hrtimer_start(void)
 {
-	oprofile_add_sample(regs, 0);
+	on_each_cpu(__oprofile_hrtimer_start, NULL, 1);
 	return 0;
 }
 
-static int timer_start(void)
+static void __oprofile_hrtimer_stop(int cpu)
 {
-	return register_timer_hook(timer_notify);
+	struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu);
+
+	hrtimer_cancel(hrtimer);
 }
 
+static void oprofile_hrtimer_stop(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		__oprofile_hrtimer_stop(cpu);
+}
 
-static void timer_stop(void)
+static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
+					 unsigned long action, void *hcpu)
 {
-	unregister_timer_hook(timer_notify);
+	long cpu = (long) hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		smp_call_function_single(cpu, __oprofile_hrtimer_start,
+					 NULL, 1);
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		__oprofile_hrtimer_stop(cpu);
+		break;
+	}
+	return NOTIFY_OK;
 }
 
+static struct notifier_block __refdata oprofile_cpu_notifier = {
+	.notifier_call = oprofile_cpu_notify,
+};
 
-void __init oprofile_timer_init(struct oprofile_operations *ops)
+int __init oprofile_timer_init(struct oprofile_operations *ops)
 {
+	int rc;
+
+	rc = register_hotcpu_notifier(&oprofile_cpu_notifier);
+	if (rc)
+		return rc;
 	ops->create_files = NULL;
 	ops->setup = NULL;
 	ops->shutdown = NULL;
-	ops->start = timer_start;
-	ops->stop = timer_stop;
+	ops->start = oprofile_hrtimer_start;
+	ops->stop = oprofile_hrtimer_stop;
 	ops->cpu_type = "timer";
+	return 0;
+}
+
+void __exit oprofile_timer_exit(void)
+{
+	unregister_hotcpu_notifier(&oprofile_cpu_notifier);
 }