diff options
| author | Markus Metzger <markus.t.metzger@intel.com> | 2009-03-13 05:48:52 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-03-13 06:57:21 -0400 |
| commit | ba9372a8f306c4e53a5f61dcbcd6c1e4a8c2e9ac (patch) | |
| tree | 665e2a61bc5830de12fe18aa04a3be0a298d6a49 /kernel/trace | |
| parent | b8e47195451c5d3f62620b2b1b5928669afd56eb (diff) | |
x86, hw-branch-tracer: keep resources on stop
Distinguish init/reset from start/stop:
init/reset will allocate and release BTS tracing resources;
start/stop will suspend and resume BTS tracing.
Return an error from init() if tracing cannot be enabled on any cpu.
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
LKML-Reference: <20090313104852.A30168@sedona.ch.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
| -rw-r--r-- | kernel/trace/trace_hw_branches.c | 119 |
1 file changed, 85 insertions(+), 34 deletions(-)
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c index 7bfdf4c2347f..a99a04c5e9cd 100644 --- a/kernel/trace/trace_hw_branches.c +++ b/kernel/trace/trace_hw_branches.c | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | #include "trace_output.h" | 19 | #include "trace_output.h" |
| 20 | 20 | ||
| 21 | 21 | ||
| 22 | #define SIZEOF_BTS (1 << 13) | 22 | #define BTS_BUFFER_SIZE (1 << 13) |
| 23 | 23 | ||
| 24 | /* | 24 | /* |
| 25 | * The tracer lock protects the below per-cpu tracer array. | 25 | * The tracer lock protects the below per-cpu tracer array. |
| @@ -33,53 +33,68 @@ | |||
| 33 | */ | 33 | */ |
| 34 | static DEFINE_SPINLOCK(bts_tracer_lock); | 34 | static DEFINE_SPINLOCK(bts_tracer_lock); |
| 35 | static DEFINE_PER_CPU(struct bts_tracer *, tracer); | 35 | static DEFINE_PER_CPU(struct bts_tracer *, tracer); |
| 36 | static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer); | 36 | static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer); |
| 37 | 37 | ||
| 38 | #define this_tracer per_cpu(tracer, smp_processor_id()) | 38 | #define this_tracer per_cpu(tracer, smp_processor_id()) |
| 39 | #define this_buffer per_cpu(buffer, smp_processor_id()) | 39 | #define this_buffer per_cpu(buffer, smp_processor_id()) |
| 40 | 40 | ||
| 41 | static int __read_mostly trace_hw_branches_enabled; | 41 | static int trace_hw_branches_enabled __read_mostly; |
| 42 | static int trace_hw_branches_suspended __read_mostly; | ||
| 42 | static struct trace_array *hw_branch_trace __read_mostly; | 43 | static struct trace_array *hw_branch_trace __read_mostly; |
| 43 | 44 | ||
| 44 | 45 | ||
| 45 | /* | 46 | /* |
| 46 | * Start tracing on the current cpu. | 47 | * Initialize the tracer for the current cpu. |
| 47 | * The argument is ignored. | 48 | * The argument is ignored. |
| 48 | * | 49 | * |
| 49 | * pre: bts_tracer_lock must be locked. | 50 | * pre: bts_tracer_lock must be locked. |
| 50 | */ | 51 | */ |
| 51 | static void bts_trace_start_cpu(void *arg) | 52 | static void bts_trace_init_cpu(void *arg) |
| 52 | { | 53 | { |
| 53 | if (this_tracer) | 54 | if (this_tracer) |
| 54 | ds_release_bts(this_tracer); | 55 | ds_release_bts(this_tracer); |
| 55 | 56 | ||
| 56 | this_tracer = | 57 | this_tracer = ds_request_bts(NULL, this_buffer, BTS_BUFFER_SIZE, |
| 57 | ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS, | 58 | NULL, (size_t)-1, BTS_KERNEL); |
| 58 | /* ovfl = */ NULL, /* th = */ (size_t)-1, | ||
| 59 | BTS_KERNEL); | ||
| 60 | if (IS_ERR(this_tracer)) { | 59 | if (IS_ERR(this_tracer)) { |
| 61 | this_tracer = NULL; | 60 | this_tracer = NULL; |
| 62 | return; | 61 | return; |
| 63 | } | 62 | } |
| 64 | } | 63 | } |
| 65 | 64 | ||
| 66 | static void bts_trace_start(struct trace_array *tr) | 65 | static int bts_trace_init(struct trace_array *tr) |
| 67 | { | 66 | { |
| 67 | int cpu, avail; | ||
| 68 | |||
| 68 | spin_lock(&bts_tracer_lock); | 69 | spin_lock(&bts_tracer_lock); |
| 69 | 70 | ||
| 70 | on_each_cpu(bts_trace_start_cpu, NULL, 1); | 71 | hw_branch_trace = tr; |
| 71 | trace_hw_branches_enabled = 1; | 72 | |
| 73 | on_each_cpu(bts_trace_init_cpu, NULL, 1); | ||
| 74 | |||
| 75 | /* Check on how many cpus we could enable tracing */ | ||
| 76 | avail = 0; | ||
| 77 | for_each_online_cpu(cpu) | ||
| 78 | if (per_cpu(tracer, cpu)) | ||
| 79 | avail++; | ||
| 80 | |||
| 81 | trace_hw_branches_enabled = (avail ? 1 : 0); | ||
| 82 | trace_hw_branches_suspended = 0; | ||
| 72 | 83 | ||
| 73 | spin_unlock(&bts_tracer_lock); | 84 | spin_unlock(&bts_tracer_lock); |
| 85 | |||
| 86 | |||
| 87 | /* If we could not enable tracing on a single cpu, we fail. */ | ||
| 88 | return avail ? 0 : -EOPNOTSUPP; | ||
| 74 | } | 89 | } |
| 75 | 90 | ||
| 76 | /* | 91 | /* |
| 77 | * Stop tracing on the current cpu. | 92 | * Release the tracer for the current cpu. |
| 78 | * The argument is ignored. | 93 | * The argument is ignored. |
| 79 | * | 94 | * |
| 80 | * pre: bts_tracer_lock must be locked. | 95 | * pre: bts_tracer_lock must be locked. |
| 81 | */ | 96 | */ |
| 82 | static void bts_trace_stop_cpu(void *arg) | 97 | static void bts_trace_release_cpu(void *arg) |
| 83 | { | 98 | { |
| 84 | if (this_tracer) { | 99 | if (this_tracer) { |
| 85 | ds_release_bts(this_tracer); | 100 | ds_release_bts(this_tracer); |
| @@ -87,12 +102,57 @@ static void bts_trace_stop_cpu(void *arg) | |||
| 87 | } | 102 | } |
| 88 | } | 103 | } |
| 89 | 104 | ||
| 90 | static void bts_trace_stop(struct trace_array *tr) | 105 | static void bts_trace_reset(struct trace_array *tr) |
| 91 | { | 106 | { |
| 92 | spin_lock(&bts_tracer_lock); | 107 | spin_lock(&bts_tracer_lock); |
| 93 | 108 | ||
| 109 | on_each_cpu(bts_trace_release_cpu, NULL, 1); | ||
| 94 | trace_hw_branches_enabled = 0; | 110 | trace_hw_branches_enabled = 0; |
| 95 | on_each_cpu(bts_trace_stop_cpu, NULL, 1); | 111 | trace_hw_branches_suspended = 0; |
| 112 | |||
| 113 | spin_unlock(&bts_tracer_lock); | ||
| 114 | } | ||
| 115 | |||
| 116 | /* | ||
| 117 | * Resume tracing on the current cpu. | ||
| 118 | * The argument is ignored. | ||
| 119 | * | ||
| 120 | * pre: bts_tracer_lock must be locked. | ||
| 121 | */ | ||
| 122 | static void bts_trace_resume_cpu(void *arg) | ||
| 123 | { | ||
| 124 | if (this_tracer) | ||
| 125 | ds_resume_bts(this_tracer); | ||
| 126 | } | ||
| 127 | |||
| 128 | static void bts_trace_start(struct trace_array *tr) | ||
| 129 | { | ||
| 130 | spin_lock(&bts_tracer_lock); | ||
| 131 | |||
| 132 | on_each_cpu(bts_trace_resume_cpu, NULL, 1); | ||
| 133 | trace_hw_branches_suspended = 0; | ||
| 134 | |||
| 135 | spin_unlock(&bts_tracer_lock); | ||
| 136 | } | ||
| 137 | |||
| 138 | /* | ||
| 139 | * Suspend tracing on the current cpu. | ||
| 140 | * The argument is ignored. | ||
| 141 | * | ||
| 142 | * pre: bts_tracer_lock must be locked. | ||
| 143 | */ | ||
| 144 | static void bts_trace_suspend_cpu(void *arg) | ||
| 145 | { | ||
| 146 | if (this_tracer) | ||
| 147 | ds_suspend_bts(this_tracer); | ||
| 148 | } | ||
| 149 | |||
| 150 | static void bts_trace_stop(struct trace_array *tr) | ||
| 151 | { | ||
| 152 | spin_lock(&bts_tracer_lock); | ||
| 153 | |||
| 154 | on_each_cpu(bts_trace_suspend_cpu, NULL, 1); | ||
| 155 | trace_hw_branches_suspended = 1; | ||
| 96 | 156 | ||
| 97 | spin_unlock(&bts_tracer_lock); | 157 | spin_unlock(&bts_tracer_lock); |
| 98 | } | 158 | } |
| @@ -110,10 +170,14 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb, | |||
| 110 | switch (action) { | 170 | switch (action) { |
| 111 | case CPU_ONLINE: | 171 | case CPU_ONLINE: |
| 112 | case CPU_DOWN_FAILED: | 172 | case CPU_DOWN_FAILED: |
| 113 | smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); | 173 | smp_call_function_single(cpu, bts_trace_init_cpu, NULL, 1); |
| 174 | |||
| 175 | if (trace_hw_branches_suspended) | ||
| 176 | smp_call_function_single(cpu, bts_trace_suspend_cpu, | ||
| 177 | NULL, 1); | ||
| 114 | break; | 178 | break; |
| 115 | case CPU_DOWN_PREPARE: | 179 | case CPU_DOWN_PREPARE: |
| 116 | smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); | 180 | smp_call_function_single(cpu, bts_trace_release_cpu, NULL, 1); |
| 117 | break; | 181 | break; |
| 118 | } | 182 | } |
| 119 | 183 | ||
| @@ -126,20 +190,6 @@ static struct notifier_block bts_hotcpu_notifier __cpuinitdata = { | |||
| 126 | .notifier_call = bts_hotcpu_handler | 190 | .notifier_call = bts_hotcpu_handler |
| 127 | }; | 191 | }; |
| 128 | 192 | ||
| 129 | static int bts_trace_init(struct trace_array *tr) | ||
| 130 | { | ||
| 131 | hw_branch_trace = tr; | ||
| 132 | |||
| 133 | bts_trace_start(tr); | ||
| 134 | |||
| 135 | return 0; | ||
| 136 | } | ||
| 137 | |||
| 138 | static void bts_trace_reset(struct trace_array *tr) | ||
| 139 | { | ||
| 140 | bts_trace_stop(tr); | ||
| 141 | } | ||
| 142 | |||
| 143 | static void bts_trace_print_header(struct seq_file *m) | 193 | static void bts_trace_print_header(struct seq_file *m) |
| 144 | { | 194 | { |
| 145 | seq_puts(m, "# CPU# TO <- FROM\n"); | 195 | seq_puts(m, "# CPU# TO <- FROM\n"); |
| @@ -228,7 +278,7 @@ static void trace_bts_at(const struct bts_trace *trace, void *at) | |||
| 228 | */ | 278 | */ |
| 229 | static void trace_bts_cpu(void *arg) | 279 | static void trace_bts_cpu(void *arg) |
| 230 | { | 280 | { |
| 231 | struct trace_array *tr = (struct trace_array *) arg; | 281 | struct trace_array *tr = (struct trace_array *)arg; |
| 232 | const struct bts_trace *trace; | 282 | const struct bts_trace *trace; |
| 233 | unsigned char *at; | 283 | unsigned char *at; |
| 234 | 284 | ||
| @@ -276,7 +326,8 @@ void trace_hw_branch_oops(void) | |||
| 276 | { | 326 | { |
| 277 | spin_lock(&bts_tracer_lock); | 327 | spin_lock(&bts_tracer_lock); |
| 278 | 328 | ||
| 279 | trace_bts_cpu(hw_branch_trace); | 329 | if (trace_hw_branches_enabled) |
| 330 | trace_bts_cpu(hw_branch_trace); | ||
| 280 | 331 | ||
| 281 | spin_unlock(&bts_tracer_lock); | 332 | spin_unlock(&bts_tracer_lock); |
| 282 | } | 333 | } |
