author		Markus Metzger <markus.t.metzger@intel.com>	2008-01-30 07:31:09 -0500
committer	Ingo Molnar <mingo@elte.hu>			2008-01-30 07:31:09 -0500
commit		eee3af4a2c83a97fff107ddc445d9df6fded9ce4 (patch)
tree		a7e9179b82b4df9e4cf6e810c54309324589395b
parent		7796931f542518092d1fd2fb7cf1f1d96e0cd4b5 (diff)

x86, ptrace: support for branch trace store (BTS)

Resend using different mail client

Changes to the last version:
- split implementation into two layers: ds/bts and ptrace
- renamed TIF's
- save/restore ds save area msr in __switch_to_xtra()
- make block-stepping only look at BTF bit

Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/kernel/Makefile_32          1
-rw-r--r--  arch/x86/kernel/Makefile_64          1
-rw-r--r--  arch/x86/kernel/cpu/intel.c          5
-rw-r--r--  arch/x86/kernel/ds.c               429
-rw-r--r--  arch/x86/kernel/process_32.c        19
-rw-r--r--  arch/x86/kernel/process_64.c        26
-rw-r--r--  arch/x86/kernel/ptrace.c           212
-rw-r--r--  arch/x86/kernel/setup_64.c           5
-rw-r--r--  arch/x86/kernel/step.c              18
-rw-r--r--  include/asm-x86/ds.h                65
-rw-r--r--  include/asm-x86/processor_32.h       3
-rw-r--r--  include/asm-x86/processor_64.h       3
-rw-r--r--  include/asm-x86/ptrace-abi.h        52
-rw-r--r--  include/asm-x86/ptrace.h            11
-rw-r--r--  include/asm-x86/thread_info_32.h    12
-rw-r--r--  include/asm-x86/thread_info_64.h     9
16 files changed, 859 insertions, 12 deletions
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index b2d7aea4c82d..cc2651bcc07f 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -11,6 +11,7 @@ obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
11 quirks.o i8237.o topology.o alternative.o i8253.o tsc_32.o io_delay.o rtc.o
12
13obj-y += ptrace.o
14obj-y += ds.o
15obj-y += tls.o
16obj-y += step.o
17obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
index 19af64e1a3fc..2ec96acf6486 100644
--- a/arch/x86/kernel/Makefile_64
+++ b/arch/x86/kernel/Makefile_64
@@ -13,6 +13,7 @@ obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
13 i8253.o io_delay.o rtc.o
14
15obj-y += ptrace.o
16obj-y += ds.o
17obj-y += step.o
18
19obj-$(CONFIG_IA32_EMULATION) += tls.o
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 867ff94579be..e4b7e73e9024 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -11,6 +11,8 @@
11#include <asm/pgtable.h>
12#include <asm/msr.h>
13#include <asm/uaccess.h>
14#include <asm/ptrace.h>
15#include <asm/ds.h>
16
17#include "cpu.h"
18
@@ -219,6 +221,9 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
221 if (!(l1 & (1<<12)))
222 set_bit(X86_FEATURE_PEBS, c->x86_capability);
223 }
224
225 if (cpu_has_bts)
226 ds_init_intel(c);
227}
228
229static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
new file mode 100644
index 000000000000..996a7c4f5963
--- /dev/null
+++ b/arch/x86/kernel/ds.c
@@ -0,0 +1,429 @@
1/*
2 * Debug Store support
3 *
4 * This provides a low-level interface to the hardware's Debug Store
5 * feature that is used for last branch recording (LBR) and
6 * precise-event based sampling (PEBS).
7 *
8 * Different architectures use a different DS layout/pointer size.
9 * The below functions therefore work on a void*.
10 *
11 *
12 * Since there is no user for PEBS yet, only LBR (or branch
13 * trace store, BTS) is supported.
14 *
15 *
16 * Copyright (C) 2007 Intel Corporation.
17 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
18 */
19
20#include <asm/ds.h>
21
22#include <linux/errno.h>
23#include <linux/string.h>
24#include <linux/slab.h>
25
26
27/*
28 * Debug Store (DS) save area configuration (see Intel 64 and IA-32
29 * Architectures Software Developer's Manual, section 18.5)
30 *
31 * The DS configuration consists of the following fields; different
32 * architectures vary in the size of those fields.
33 * - double-word aligned base linear address of the BTS buffer
34 * - write pointer into the BTS buffer
35 * - end linear address of the BTS buffer (one byte beyond the end of
36 * the buffer)
37 * - interrupt pointer into BTS buffer
38 * (interrupt occurs when write pointer passes interrupt pointer)
39 * - double-word aligned base linear address of the PEBS buffer
40 * - write pointer into the PEBS buffer
41 * - end linear address of the PEBS buffer (one byte beyond the end of
42 * the buffer)
43 * - interrupt pointer into PEBS buffer
44 * (interrupt occurs when write pointer passes interrupt pointer)
45 * - value to which counter is reset following counter overflow
46 *
47 * On later architectures, the last branch recording hardware uses
48 * 64bit pointers even in 32bit mode.
49 *
50 *
51 * Branch Trace Store (BTS) records store information about control
52 * flow changes. They at least provide the following information:
53 * - source linear address
54 * - destination linear address
55 *
56 * Netburst supported a branch-prediction bit that was dropped in later
57 * architectures. We do not support it.
58 *
59 *
60 * In order to abstract from the actual DS and BTS layout, we describe
61 * the access to the relevant fields.
62 * Thanks to Andi Kleen for proposing this design.
63 *
64 * The implementation, however, is not as general as it might seem. In
65 * order to stay somewhat simple and efficient, we assume an
66 * underlying unsigned type (mostly a pointer type) and we expect the
67 * field to be at least as big as that type.
68 */
69
70/*
71 * A special from_ip address to indicate that the BTS record is an
72 * info record that needs to be interpreted or skipped.
73 */
74#define BTS_ESCAPE_ADDRESS (-1)
75
76/*
77 * A field access descriptor
78 */
79struct access_desc {
80 unsigned char offset;
81 unsigned char size;
82};
83
84/*
85 * The configuration for a particular DS/BTS hardware implementation.
86 */
87struct ds_configuration {
88 /* the DS configuration */
89 unsigned char sizeof_ds;
90 struct access_desc bts_buffer_base;
91 struct access_desc bts_index;
92 struct access_desc bts_absolute_maximum;
93 struct access_desc bts_interrupt_threshold;
94 /* the BTS configuration */
95 unsigned char sizeof_bts;
96 struct access_desc from_ip;
97 struct access_desc to_ip;
98 /* BTS variants used to store additional information like
99 timestamps */
100 struct access_desc info_type;
101 struct access_desc info_data;
102 unsigned long debugctl_mask;
103};
104
105/*
106 * The global configuration used by the below accessor functions
107 */
108static struct ds_configuration ds_cfg;
109
110/*
111 * Accessor functions for some DS and BTS fields using the above
112 * global ds_cfg.
113 */
114static inline void *get_bts_buffer_base(char *base)
115{
116 return *(void **)(base + ds_cfg.bts_buffer_base.offset);
117}
118static inline void set_bts_buffer_base(char *base, void *value)
119{
120 (*(void **)(base + ds_cfg.bts_buffer_base.offset)) = value;
121}
122static inline void *get_bts_index(char *base)
123{
124 return *(void **)(base + ds_cfg.bts_index.offset);
125}
126static inline void set_bts_index(char *base, void *value)
127{
128 (*(void **)(base + ds_cfg.bts_index.offset)) = value;
129}
130static inline void *get_bts_absolute_maximum(char *base)
131{
132 return *(void **)(base + ds_cfg.bts_absolute_maximum.offset);
133}
134static inline void set_bts_absolute_maximum(char *base, void *value)
135{
136 (*(void **)(base + ds_cfg.bts_absolute_maximum.offset)) = value;
137}
138static inline void *get_bts_interrupt_threshold(char *base)
139{
140 return *(void **)(base + ds_cfg.bts_interrupt_threshold.offset);
141}
142static inline void set_bts_interrupt_threshold(char *base, void *value)
143{
144 (*(void **)(base + ds_cfg.bts_interrupt_threshold.offset)) = value;
145}
146static inline long get_from_ip(char *base)
147{
148 return *(long *)(base + ds_cfg.from_ip.offset);
149}
150static inline void set_from_ip(char *base, long value)
151{
152 (*(long *)(base + ds_cfg.from_ip.offset)) = value;
153}
154static inline long get_to_ip(char *base)
155{
156 return *(long *)(base + ds_cfg.to_ip.offset);
157}
158static inline void set_to_ip(char *base, long value)
159{
160 (*(long *)(base + ds_cfg.to_ip.offset)) = value;
161}
162static inline unsigned char get_info_type(char *base)
163{
164 return *(unsigned char *)(base + ds_cfg.info_type.offset);
165}
166static inline void set_info_type(char *base, unsigned char value)
167{
168 (*(unsigned char *)(base + ds_cfg.info_type.offset)) = value;
169}
170/*
171 * The info data might overlap with the info type on some architectures.
172 * We therefore read and write the exact number of bytes.
173 */
174static inline unsigned long long get_info_data(char *base)
175{
176 unsigned long long value = 0;
177 memcpy(&value,
178 base + ds_cfg.info_data.offset,
179 ds_cfg.info_data.size);
180 return value;
181}
182static inline void set_info_data(char *base, unsigned long long value)
183{
184 memcpy(base + ds_cfg.info_data.offset,
185 &value,
186 ds_cfg.info_data.size);
187}
188
189
190int ds_allocate(void **dsp, size_t bts_size_in_records)
191{
192 size_t bts_size_in_bytes = 0;
193 void *bts = 0;
194 void *ds = 0;
195
196 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
197 return -EOPNOTSUPP;
198
199 if (bts_size_in_records < 0)
200 return -EINVAL;
201
202 bts_size_in_bytes =
203 bts_size_in_records * ds_cfg.sizeof_bts;
204
205 if (bts_size_in_bytes <= 0)
206 return -EINVAL;
207
208 bts = kzalloc(bts_size_in_bytes, GFP_KERNEL);
209
210 if (!bts)
211 return -ENOMEM;
212
213 ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL);
214
215 if (!ds) {
216 kfree(bts);
217 return -ENOMEM;
218 }
219
220 set_bts_buffer_base(ds, bts);
221 set_bts_index(ds, bts);
222 set_bts_absolute_maximum(ds, bts + bts_size_in_bytes);
223 set_bts_interrupt_threshold(ds, bts + bts_size_in_bytes + 1);
224
225 *dsp = ds;
226 return 0;
227}
228
229int ds_free(void **dsp)
230{
231 if (*dsp)
232 kfree(get_bts_buffer_base(*dsp));
233 kfree(*dsp);
234 *dsp = 0;
235
236 return 0;
237}
238
239int ds_get_bts_size(void *ds)
240{
241 size_t size_in_bytes;
242
243 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
244 return -EOPNOTSUPP;
245
246 size_in_bytes =
247 get_bts_absolute_maximum(ds) -
248 get_bts_buffer_base(ds);
249
250 return size_in_bytes / ds_cfg.sizeof_bts;
251}
252
253int ds_get_bts_index(void *ds)
254{
255 size_t index_offset_in_bytes;
256
257 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
258 return -EOPNOTSUPP;
259
260 index_offset_in_bytes =
261 get_bts_index(ds) -
262 get_bts_buffer_base(ds);
263
264 return index_offset_in_bytes / ds_cfg.sizeof_bts;
265}
266
267int ds_read_bts(void *ds, size_t index, struct bts_struct *out)
268{
269 void *bts;
270
271 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
272 return -EOPNOTSUPP;
273
274 if (index < 0)
275 return -EINVAL;
276
277 if (index >= ds_get_bts_size(ds))
278 return -EINVAL;
279
280 bts = get_bts_buffer_base(ds);
281 bts = (char *)bts + (index * ds_cfg.sizeof_bts);
282
283 memset(out, 0, sizeof(*out));
284 if (get_from_ip(bts) == BTS_ESCAPE_ADDRESS) {
285 out->qualifier = get_info_type(bts);
286 out->variant.timestamp = get_info_data(bts);
287 } else {
288 out->qualifier = BTS_BRANCH;
289 out->variant.lbr.from_ip = get_from_ip(bts);
290 out->variant.lbr.to_ip = get_to_ip(bts);
291 }
292
293 return 0;
294}
295
296int ds_write_bts(void *ds, const struct bts_struct *in)
297{
298 void *bts;
299
300 if (!ds_cfg.sizeof_ds || !ds_cfg.sizeof_bts)
301 return -EOPNOTSUPP;
302
303 if (ds_get_bts_size(ds) <= 0)
304 return -ENXIO;
305
306 bts = get_bts_index(ds);
307
308 memset(bts, 0, ds_cfg.sizeof_bts);
309 switch (in->qualifier) {
310 case BTS_INVALID:
311 break;
312
313 case BTS_BRANCH:
314 set_from_ip(bts, in->variant.lbr.from_ip);
315 set_to_ip(bts, in->variant.lbr.to_ip);
316 break;
317
318 case BTS_TASK_ARRIVES:
319 case BTS_TASK_DEPARTS:
320 set_from_ip(bts, BTS_ESCAPE_ADDRESS);
321 set_info_type(bts, in->qualifier);
322 set_info_data(bts, in->variant.timestamp);
323 break;
324
325 default:
326 return -EINVAL;
327 }
328
329 bts = (char *)bts + ds_cfg.sizeof_bts;
330 if (bts >= get_bts_absolute_maximum(ds))
331 bts = get_bts_buffer_base(ds);
332 set_bts_index(ds, bts);
333
334 return 0;
335}
336
337unsigned long ds_debugctl_mask(void)
338{
339 return ds_cfg.debugctl_mask;
340}
341
342#ifdef __i386__
343static const struct ds_configuration ds_cfg_netburst = {
344 .sizeof_ds = 9 * 4,
345 .bts_buffer_base = { 0, 4 },
346 .bts_index = { 4, 4 },
347 .bts_absolute_maximum = { 8, 4 },
348 .bts_interrupt_threshold = { 12, 4 },
349 .sizeof_bts = 3 * 4,
350 .from_ip = { 0, 4 },
351 .to_ip = { 4, 4 },
352 .info_type = { 4, 1 },
353 .info_data = { 5, 7 },
354 .debugctl_mask = (1<<2)|(1<<3)
355};
356
357static const struct ds_configuration ds_cfg_pentium_m = {
358 .sizeof_ds = 9 * 4,
359 .bts_buffer_base = { 0, 4 },
360 .bts_index = { 4, 4 },
361 .bts_absolute_maximum = { 8, 4 },
362 .bts_interrupt_threshold = { 12, 4 },
363 .sizeof_bts = 3 * 4,
364 .from_ip = { 0, 4 },
365 .to_ip = { 4, 4 },
366 .info_type = { 4, 1 },
367 .info_data = { 5, 7 },
368 .debugctl_mask = (1<<6)|(1<<7)
369};
370#endif /* __i386__ */
371
372static const struct ds_configuration ds_cfg_core2 = {
373 .sizeof_ds = 9 * 8,
374 .bts_buffer_base = { 0, 8 },
375 .bts_index = { 8, 8 },
376 .bts_absolute_maximum = { 16, 8 },
377 .bts_interrupt_threshold = { 24, 8 },
378 .sizeof_bts = 3 * 8,
379 .from_ip = { 0, 8 },
380 .to_ip = { 8, 8 },
381 .info_type = { 8, 1 },
382 .info_data = { 9, 7 },
383 .debugctl_mask = (1<<6)|(1<<7)|(1<<9)
384};
385
386static inline void
387ds_configure(const struct ds_configuration *cfg)
388{
389 ds_cfg = *cfg;
390}
391
392void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
393{
394 switch (c->x86) {
395 case 0x6:
396 switch (c->x86_model) {
397#ifdef __i386__
398 case 0xD:
399 case 0xE: /* Pentium M */
400 ds_configure(&ds_cfg_pentium_m);
401 break;
402#endif /* __i386__ */
403 case 0xF: /* Core2 */
404 ds_configure(&ds_cfg_core2);
405 break;
406 default:
407 /* sorry, don't know about them */
408 break;
409 }
410 break;
411 case 0xF:
412 switch (c->x86_model) {
413#ifdef __i386__
414 case 0x0:
415 case 0x1:
416 case 0x2: /* Netburst */
417 ds_configure(&ds_cfg_netburst);
418 break;
419#endif /* __i386__ */
420 default:
421 /* sorry, don't know about them */
422 break;
423 }
424 break;
425 default:
426 /* sorry, don't know about them */
427 break;
428 }
429}
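
The per-model tables above (ds_cfg_netburst, ds_cfg_pentium_m, ds_cfg_core2) are what keep ds.c independent of the DS layout: every accessor adds a format-specific offset to the raw save area or record and reinterprets the bytes found there. The standalone sketch below models that offset/size-descriptor idea outside the kernel, using the from/to offsets of ds_cfg_core2; the names and the main() harness are illustrative only and are not part of the patch.

/* Illustration only: the offset/size-descriptor access pattern from ds.c,
 * modelled in plain user-space C. Offsets follow ds_cfg_core2 (8-byte fields). */
#include <stdio.h>
#include <string.h>

struct access_desc { unsigned char offset, size; };

static const struct access_desc from_ip_desc = { 0, 8 };
static const struct access_desc to_ip_desc   = { 8, 8 };

/* Copy exactly desc.size bytes out of a raw record, as get_info_data() does. */
static unsigned long long read_field(const char *rec, struct access_desc desc)
{
	unsigned long long value = 0;
	memcpy(&value, rec + desc.offset, desc.size);
	return value;
}

int main(void)
{
	char record[3 * 8] = { 0 };	/* one BTS record, sizeof_bts = 3 * 8 */
	unsigned long long from = 0x400100, to = 0x400200;

	memcpy(record + from_ip_desc.offset, &from, from_ip_desc.size);
	memcpy(record + to_ip_desc.offset, &to, to_ip_desc.size);

	printf("branch %#llx -> %#llx\n",
	       read_field(record, from_ip_desc),
	       read_field(record, to_ip_desc));
	return 0;
}

The kernel code itself uses pointer casts for the pointer-sized fields and only falls back to memcpy for info_data, which does not fill a whole field.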
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 5350763a2d03..2b9db9371060 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -614,11 +614,21 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
614 struct tss_struct *tss)
615{
616 struct thread_struct *prev, *next;
617 unsigned long debugctl;
618
619 prev = &prev_p->thread;
620 next = &next_p->thread;
621
-621 if (next->debugctlmsr != prev->debugctlmsr)
+622 debugctl = prev->debugctlmsr;
623 if (next->ds_area_msr != prev->ds_area_msr) {
624 /* we clear debugctl to make sure DS
625 * is not in use when we change it */
626 debugctl = 0;
627 wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
628 wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
629 }
630
631 if (next->debugctlmsr != debugctl)
632 wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);
633
634 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
@@ -642,6 +652,13 @@ __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
652 }
653#endif
654
655 if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
656 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
657
658 if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
659 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
660
661
662 if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
663 /*
664 * Disable the bitmap via an invalid offset. We still cache
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 057b5442ffda..843bf0c978a4 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -568,11 +568,21 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
568 struct tss_struct *tss)
569{
570 struct thread_struct *prev, *next;
571 unsigned long debugctl;
572
573 prev = &prev_p->thread,
574 next = &next_p->thread;
575
-575 if (next->debugctlmsr != prev->debugctlmsr)
+576 debugctl = prev->debugctlmsr;
577 if (next->ds_area_msr != prev->ds_area_msr) {
578 /* we clear debugctl to make sure DS
579 * is not in use when we change it */
580 debugctl = 0;
581 wrmsrl(MSR_IA32_DEBUGCTLMSR, 0);
582 wrmsrl(MSR_IA32_DS_AREA, next->ds_area_msr);
583 }
584
585 if (next->debugctlmsr != debugctl)
586 wrmsrl(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr);
587
588 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
@@ -598,6 +608,16 @@ static inline void __switch_to_xtra(struct task_struct *prev_p,
608 */
609 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
610 }
611
612 /*
613 * Last branch recording reconfiguration of trace hardware and
614 * disentangling of trace data per task.
615 */
616 if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
617 ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);
618
619 if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
620 ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
621}
602 622
603/* 623/*
@@ -701,8 +721,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
721 /*
722 * Now maybe reload the debug registers and handle I/O bitmaps
723 */
-704 if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
-705 || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
+724 if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
+725 task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
726 __switch_to_xtra(prev_p, next_p, tss);
727
728 /* If the task has used fpu the last 5 timeslices, just do a full
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 3399c1be79b8..8d0dd8b5effe 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -2,6 +2,9 @@
2/*
3 * Pentium III FXSR, SSE support
4 * Gareth Hughes <gareth@valinux.com>, May 2000
5 *
6 * BTS tracing
7 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
8 */
9
10#include <linux/kernel.h>
@@ -26,6 +29,14 @@
29#include <asm/desc.h>
30#include <asm/prctl.h>
31#include <asm/proto.h>
32#include <asm/ds.h>
33
34
35/*
36 * The maximal size of a BTS buffer per traced task in number of BTS
37 * records.
38 */
39#define PTRACE_BTS_BUFFER_MAX 4000
40
41/*
42 * does not yet catch signals sent when the child dies.
@@ -455,6 +466,165 @@ static int ptrace_set_debugreg(struct task_struct *child,
466 return 0;
467}
468
469static int ptrace_bts_max_buffer_size(void)
470{
471 return PTRACE_BTS_BUFFER_MAX;
472}
473
474static int ptrace_bts_get_buffer_size(struct task_struct *child)
475{
476 if (!child->thread.ds_area_msr)
477 return -ENXIO;
478
479 return ds_get_bts_size((void *)child->thread.ds_area_msr);
480}
481
482static int ptrace_bts_get_index(struct task_struct *child)
483{
484 if (!child->thread.ds_area_msr)
485 return -ENXIO;
486
487 return ds_get_bts_index((void *)child->thread.ds_area_msr);
488}
489
490static int ptrace_bts_read_record(struct task_struct *child,
491 long index,
492 struct bts_struct __user *out)
493{
494 struct bts_struct ret;
495 int retval;
496
497 if (!child->thread.ds_area_msr)
498 return -ENXIO;
499
500 retval = ds_read_bts((void *)child->thread.ds_area_msr,
501 index, &ret);
502 if (retval)
503 return retval;
504
505 if (copy_to_user(out, &ret, sizeof(ret)))
506 return -EFAULT;
507
508 return sizeof(ret);
509}
510
511static int ptrace_bts_write_record(struct task_struct *child,
512 const struct bts_struct *in)
513{
514 int retval;
515
516 if (!child->thread.ds_area_msr)
517 return -ENXIO;
518
519 retval = ds_write_bts((void *)child->thread.ds_area_msr, in);
520 if (retval)
521 return retval;
522
523 return sizeof(*in);
524}
525
526static int ptrace_bts_config(struct task_struct *child,
527 unsigned long options)
528{
529 unsigned long debugctl_mask = ds_debugctl_mask();
530 int retval;
531
532 retval = ptrace_bts_get_buffer_size(child);
533 if (retval < 0)
534 return retval;
535 if (retval == 0)
536 return -ENXIO;
537
538 if (options & PTRACE_BTS_O_TRACE_TASK) {
539 child->thread.debugctlmsr |= debugctl_mask;
540 set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
541 } else {
542 /* there is no way for us to check whether we 'own'
543 * the respective bits in the DEBUGCTL MSR, we're
544 * about to clear */
545 child->thread.debugctlmsr &= ~debugctl_mask;
546
547 if (!child->thread.debugctlmsr)
548 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
549 }
550
551 if (options & PTRACE_BTS_O_TIMESTAMPS)
552 set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
553 else
554 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
555
556 return 0;
557}
558
559static int ptrace_bts_status(struct task_struct *child)
560{
561 unsigned long debugctl_mask = ds_debugctl_mask();
562 int retval, status = 0;
563
564 retval = ptrace_bts_get_buffer_size(child);
565 if (retval < 0)
566 return retval;
567 if (retval == 0)
568 return -ENXIO;
569
570 if (ptrace_bts_get_buffer_size(child) <= 0)
571 return -ENXIO;
572
573 if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
574 child->thread.debugctlmsr & debugctl_mask)
575 status |= PTRACE_BTS_O_TRACE_TASK;
576 if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
577 status |= PTRACE_BTS_O_TIMESTAMPS;
578
579 return status;
580}
581
582static int ptrace_bts_allocate_bts(struct task_struct *child,
583 int size_in_records)
584{
585 int retval = 0;
586 void *ds;
587
588 if (size_in_records < 0)
589 return -EINVAL;
590
591 if (size_in_records > ptrace_bts_max_buffer_size())
592 return -EINVAL;
593
594 if (size_in_records == 0) {
595 ptrace_bts_config(child, /* options = */ 0);
596 } else {
597 retval = ds_allocate(&ds, size_in_records);
598 if (retval)
599 return retval;
600 }
601
602 if (child->thread.ds_area_msr)
603 ds_free((void **)&child->thread.ds_area_msr);
604
605 child->thread.ds_area_msr = (unsigned long)ds;
606 if (child->thread.ds_area_msr)
607 set_tsk_thread_flag(child, TIF_DS_AREA_MSR);
608 else
609 clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
610
611 return retval;
612}
613
614void ptrace_bts_take_timestamp(struct task_struct *tsk,
615 enum bts_qualifier qualifier)
616{
617 struct bts_struct rec = {
618 .qualifier = qualifier,
619 .variant.timestamp = sched_clock()
620 };
621
622 if (ptrace_bts_get_buffer_size(tsk) <= 0)
623 return;
624
625 ptrace_bts_write_record(tsk, &rec);
626}
627
628/*
629 * Called by kernel/ptrace.c when detaching..
630 *
@@ -466,6 +636,11 @@ void ptrace_disable(struct task_struct *child)
636#ifdef TIF_SYSCALL_EMU
637 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
638#endif
639 ptrace_bts_config(child, /* options = */ 0);
640 if (child->thread.ds_area_msr) {
641 ds_free((void **)&child->thread.ds_area_msr);
642 clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
643 }
644}
645
646long arch_ptrace(struct task_struct *child, long request, long addr, long data)
@@ -626,6 +801,36 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
801 break;
802#endif
803
804 case PTRACE_BTS_MAX_BUFFER_SIZE:
805 ret = ptrace_bts_max_buffer_size();
806 break;
807
808 case PTRACE_BTS_ALLOCATE_BUFFER:
809 ret = ptrace_bts_allocate_bts(child, data);
810 break;
811
812 case PTRACE_BTS_GET_BUFFER_SIZE:
813 ret = ptrace_bts_get_buffer_size(child);
814 break;
815
816 case PTRACE_BTS_GET_INDEX:
817 ret = ptrace_bts_get_index(child);
818 break;
819
820 case PTRACE_BTS_READ_RECORD:
821 ret = ptrace_bts_read_record
822 (child, data,
823 (struct bts_struct __user *) addr);
824 break;
825
826 case PTRACE_BTS_CONFIG:
827 ret = ptrace_bts_config(child, data);
828 break;
829
830 case PTRACE_BTS_STATUS:
831 ret = ptrace_bts_status(child);
832 break;
833
834 default:
835 ret = ptrace_request(child, request, addr, data);
836 break;
@@ -809,6 +1014,13 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
1014 case PTRACE_SETOPTIONS:
1015 case PTRACE_SET_THREAD_AREA:
1016 case PTRACE_GET_THREAD_AREA:
1017 case PTRACE_BTS_MAX_BUFFER_SIZE:
1018 case PTRACE_BTS_ALLOCATE_BUFFER:
1019 case PTRACE_BTS_GET_BUFFER_SIZE:
1020 case PTRACE_BTS_GET_INDEX:
1021 case PTRACE_BTS_READ_RECORD:
1022 case PTRACE_BTS_CONFIG:
1023 case PTRACE_BTS_STATUS:
1024 return sys_ptrace(request, pid, addr, data);
1025
1026 default:
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index ce4d6b52ce36..f2b131ef844e 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -60,6 +60,7 @@
60#include <asm/dmi.h>
61#include <asm/cacheflush.h>
62#include <asm/mce.h>
63#include <asm/ds.h>
64
65/*
66 * Machine setup..
@@ -823,6 +824,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
824 set_cpu_cap(c, X86_FEATURE_PEBS);
825 }
826
827
828 if (cpu_has_bts)
829 ds_init_intel(c);
830
831 n = c->extended_cpuid_level;
832 if (n >= 0x80000008) {
833 unsigned eax = cpuid_eax(0x80000008);
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index f55c003f5b63..21ea22fda5fc 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -169,9 +169,14 @@ static void enable_step(struct task_struct *child, bool block)
169 */
170 if (enable_single_step(child) && block) {
171 set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
-172 write_debugctlmsr(child, DEBUGCTLMSR_BTF);
-173 } else if (test_and_clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR)) {
-174 write_debugctlmsr(child, 0);
+172 write_debugctlmsr(child,
+173 child->thread.debugctlmsr | DEBUGCTLMSR_BTF);
+174 } else {
175 write_debugctlmsr(child,
176 child->thread.debugctlmsr & ~TIF_DEBUGCTLMSR);
177
178 if (!child->thread.debugctlmsr)
179 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
180 }
181}
182
@@ -190,8 +195,11 @@ void user_disable_single_step(struct task_struct *child)
195 /*
196 * Make sure block stepping (BTF) is disabled.
197 */
-193 if (test_and_clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR))
-194 write_debugctlmsr(child, 0);
+198 write_debugctlmsr(child,
+199 child->thread.debugctlmsr & ~TIF_DEBUGCTLMSR);
200
201 if (!child->thread.debugctlmsr)
202 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
203
204 /* Always clear TIF_SINGLESTEP... */
205 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
diff --git a/include/asm-x86/ds.h b/include/asm-x86/ds.h
new file mode 100644
index 000000000000..edd8467740a6
--- /dev/null
+++ b/include/asm-x86/ds.h
@@ -0,0 +1,65 @@
1/*
2 * Debug Store (DS) support
3 *
4 * This provides a low-level interface to the hardware's Debug Store
5 * feature that is used for last branch recording (LBR) and
6 * precise-event based sampling (PEBS).
7 *
8 * Different architectures use a different DS layout/pointer size.
9 * The below functions therefore work on a void*.
10 *
11 *
12 * Since there is no user for PEBS yet, only LBR (or branch
13 * trace store, BTS) is supported.
14 *
15 *
16 * Copyright (C) 2007 Intel Corporation.
17 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
18 */
19
20#ifndef _ASM_X86_DS_H
21#define _ASM_X86_DS_H
22
23#include <linux/types.h>
24#include <linux/init.h>
25
26struct cpuinfo_x86;
27
28
29/* a branch trace record entry
30 *
31 * In order to unify the interface between various processor versions,
32 * we use the below data structure for all processors.
33 */
34enum bts_qualifier {
35 BTS_INVALID = 0,
36 BTS_BRANCH,
37 BTS_TASK_ARRIVES,
38 BTS_TASK_DEPARTS
39};
40
41struct bts_struct {
42 enum bts_qualifier qualifier;
43 union {
44 /* BTS_BRANCH */
45 struct {
46 long from_ip;
47 long to_ip;
48 } lbr;
49 /* BTS_TASK_ARRIVES or
50 BTS_TASK_DEPARTS */
51 unsigned long long timestamp;
52 } variant;
53};
54
55
56extern int ds_allocate(void **, size_t);
57extern int ds_free(void **);
58extern int ds_get_bts_size(void *);
59extern int ds_get_bts_index(void *);
60extern int ds_read_bts(void *, size_t, struct bts_struct *);
61extern int ds_write_bts(void *, const struct bts_struct *);
62extern unsigned long ds_debugctl_mask(void);
63extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *c);
64
65#endif /* _ASM_X86_DS_H */
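
The header above is the complete in-kernel contract between the DS layer and its only user in this patch, the ptrace code. A rough sketch of how a kernel-side caller is expected to chain the calls (error handling trimmed; the 256-record size is an arbitrary example, not a value taken from the patch):

/* Sketch of the intended in-kernel call sequence; illustration, not patch code. */
#include <asm/ds.h>

static int bts_roundtrip_example(void)
{
	void *ds = NULL;
	struct bts_struct in = {
		.qualifier = BTS_TASK_ARRIVES,
		.variant.timestamp = 12345ULL,	/* example timestamp */
	};
	struct bts_struct out;
	int err;

	err = ds_allocate(&ds, 256);	/* buffer for 256 BTS records */
	if (err)
		return err;		/* -EOPNOTSUPP without DS/BTS support */

	ds_write_bts(ds, &in);		/* append one record at the index */
	err = ds_read_bts(ds, 0, &out);	/* read record 0 back */

	ds_free(&ds);
	return err;
}

In the patch itself the pointer handed back by ds_allocate() is stored in thread.ds_area_msr and written to MSR_IA32_DS_AREA on context switch, so the hardware, rather than the kernel, fills in the branch records.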
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
index 0d83da198127..9c0ab7f26bd9 100644
--- a/include/asm-x86/processor_32.h
+++ b/include/asm-x86/processor_32.h
@@ -360,6 +360,9 @@ struct thread_struct {
360 unsigned long io_bitmap_max;
361/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
362 unsigned long debugctlmsr;
363/* Debug Store - if not 0 points to a DS Save Area configuration;
364 * goes into MSR_IA32_DS_AREA */
365 unsigned long ds_area_msr;
366};
367
368#define INIT_THREAD { \
diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h
index 0780f3e3fdfe..7b7f8a142e20 100644
--- a/include/asm-x86/processor_64.h
+++ b/include/asm-x86/processor_64.h
@@ -240,6 +240,9 @@ struct thread_struct {
240 unsigned io_bitmap_max;
241/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
242 unsigned long debugctlmsr;
243/* Debug Store - if not 0 points to a DS Save Area configuration;
244 * goes into MSR_IA32_DS_AREA */
245 unsigned long ds_area_msr;
246/* cached TLS descriptors. */
247 u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
248} __attribute__((aligned(16)));
diff --git a/include/asm-x86/ptrace-abi.h b/include/asm-x86/ptrace-abi.h
index adce6b51df2e..6fadc5214e14 100644
--- a/include/asm-x86/ptrace-abi.h
+++ b/include/asm-x86/ptrace-abi.h
@@ -80,4 +80,56 @@
80
81#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */
82
83/* Return maximal BTS buffer size in number of records,
84 if successful; -1, otherwise.
85 EOPNOTSUPP...processor does not support bts tracing */
86#define PTRACE_BTS_MAX_BUFFER_SIZE 40
87
88/* Allocate new bts buffer (free old one, if exists) of size DATA bts records;
89 parameter ADDR is ignored.
90 Return 0, if successful; -1, otherwise.
91 EOPNOTSUPP...processor does not support bts tracing
92 EINVAL.......invalid size in records
93 ENOMEM.......out of memory */
94#define PTRACE_BTS_ALLOCATE_BUFFER 41
95
96/* Return the size of the bts buffer in number of bts records,
97 if successful; -1, otherwise.
98 EOPNOTSUPP...processor does not support bts tracing
99 ENXIO........no buffer allocated */
100#define PTRACE_BTS_GET_BUFFER_SIZE 42
101
102/* Return the index of the next bts record to be written,
103 if successful; -1, otherwise.
104 EOPNOTSUPP...processor does not support bts tracing
105 ENXIO........no buffer allocated
106 After the first wrap-around, this is the start of the circular bts buffer.
107#define PTRACE_BTS_GET_INDEX 43
108
109/* Read the DATA'th bts record into a bts_struct buffer provided in ADDR.
110 Return 0, if successful; -1, otherwise
111 EOPNOTSUPP...processor does not support bts tracing
112 ENXIO........no buffer allocated
113 EINVAL.......invalid index */
114#define PTRACE_BTS_READ_RECORD 44
115
116/* Configure last branch trace; the configuration is given as a bit-mask of
117 PTRACE_BTS_O_* options in DATA; parameter ADDR is ignored.
118 Return 0, if successful; -1, otherwise
119 EOPNOTSUPP...processor does not support bts tracing
120 ENXIO........no buffer allocated */
121#define PTRACE_BTS_CONFIG 45
122
123/* Return the configuration as bit-mask of PTRACE_BTS_O_* options
124 if successful; -1, otherwise.
125 EOPNOTSUPP...processor does not support bts tracing
126 ENXIO........no buffer allocated */
127#define PTRACE_BTS_STATUS 46
128
129/* Trace configuration options */
130/* Collect last branch trace */
131#define PTRACE_BTS_O_TRACE_TASK 0x1
132/* Take timestamps when the task arrives and departs */
133#define PTRACE_BTS_O_TIMESTAMPS 0x2
134
135#endif
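
Taken together, the request codes above are the whole user-visible interface. The sketch below shows how a tracer might drive it against an already-attached, stopped child; it is an illustration of the documented ADDR/DATA conventions, and the local bts_record definition is an assumed mirror of the kernel's struct bts_struct from asm/ds.h, not a header the patch installs for user space.

/* Illustration only: reading branch trace from a traced child. */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#define PTRACE_BTS_ALLOCATE_BUFFER	41
#define PTRACE_BTS_GET_INDEX		43
#define PTRACE_BTS_READ_RECORD		44
#define PTRACE_BTS_CONFIG		45
#define PTRACE_BTS_O_TRACE_TASK		0x1

/* Assumed user-space mirror of the kernel's struct bts_struct. */
struct bts_record {
	int qualifier;				/* 1 == BTS_BRANCH */
	union {
		struct { long from_ip, to_ip; } lbr;
		unsigned long long timestamp;
	} variant;
};

static void dump_branch_trace(pid_t child)
{
	struct bts_record rec;
	long i, next;

	ptrace(PTRACE_BTS_ALLOCATE_BUFFER, child, NULL, (void *)1000);
	ptrace(PTRACE_BTS_CONFIG, child, NULL, (void *)PTRACE_BTS_O_TRACE_TASK);

	/* ... resume the child, let it run, stop it again ... */

	next = ptrace(PTRACE_BTS_GET_INDEX, child, NULL, NULL);
	for (i = 0; i < next; i++) {
		if (ptrace(PTRACE_BTS_READ_RECORD, child, &rec, (void *)i) < 0)
			break;
		if (rec.qualifier == 1)		/* plain branch record */
			printf("%#lx -> %#lx\n",
			       rec.variant.lbr.from_ip, rec.variant.lbr.to_ip);
	}
}

Note that PTRACE_BTS_GET_INDEX returns the next write position, so reading 0..index-1 only covers the records written before the first wrap-around of the circular buffer.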
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 9228870f6157..a9a1bab1451a 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -4,8 +4,19 @@
4#include <linux/compiler.h> /* For __user */
5#include <asm/ptrace-abi.h>
6
7
8#ifndef __ASSEMBLY__
9
10#ifdef __KERNEL__
11
12#include <asm/ds.h>
13
14struct task_struct;
15extern void ptrace_bts_take_timestamp(struct task_struct *, enum bts_qualifier);
16
17#endif /* __KERNEL__ */
18
19
20#ifdef __i386__
21/* this struct defines the way the registers are stored on the
22 stack during a system call. */
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h
index 306fc80800e1..5bd508260ffb 100644
--- a/include/asm-x86/thread_info_32.h
+++ b/include/asm-x86/thread_info_32.h
@@ -140,6 +140,8 @@ static inline struct thread_info *current_thread_info(void)
140#define TIF_NOTSC 20 /* TSC is not accessible in userland */
141#define TIF_FORCED_TF 21 /* true if TF in eflags artificially */
142#define TIF_DEBUGCTLMSR 22 /* uses thread_struct.debugctlmsr */
143#define TIF_DS_AREA_MSR 23 /* uses thread_struct.ds_area_msr */
144#define TIF_BTS_TRACE_TS 24 /* record scheduling event timestamps */
145
146#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
147#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -157,6 +159,8 @@ static inline struct thread_info *current_thread_info(void)
159#define _TIF_NOTSC (1<<TIF_NOTSC)
160#define _TIF_FORCED_TF (1<<TIF_FORCED_TF)
161#define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR)
162#define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR)
163#define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS)
164
165/* work to do on interrupt/exception return */
166#define _TIF_WORK_MASK \
@@ -166,8 +170,12 @@ static inline struct thread_info *current_thread_info(void)
170#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
171
172/* flags to check in __switch_to() */
-169#define _TIF_WORK_CTXSW_NEXT (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUG | _TIF_DEBUGCTLMSR)
-170#define _TIF_WORK_CTXSW_PREV (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR)
+173#define _TIF_WORK_CTXSW \
+174 (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR | \
175 _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)
176#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
177#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW | _TIF_DEBUG)
178
179
180/*
181 * Thread-synchronous status.
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h
index ee35fd12b541..c2911a99cc32 100644
--- a/include/asm-x86/thread_info_64.h
+++ b/include/asm-x86/thread_info_64.h
@@ -123,6 +123,8 @@ static inline struct thread_info *stack_thread_info(void)
123#define TIF_FREEZE 23 /* is freezing for suspend */
124#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
125#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
126#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
127#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */
128
129#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
130#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
@@ -142,6 +144,8 @@ static inline struct thread_info *stack_thread_info(void)
144#define _TIF_FREEZE (1<<TIF_FREEZE)
145#define _TIF_FORCED_TF (1<<TIF_FORCED_TF)
146#define _TIF_DEBUGCTLMSR (1<<TIF_DEBUGCTLMSR)
147#define _TIF_DS_AREA_MSR (1<<TIF_DS_AREA_MSR)
148#define _TIF_BTS_TRACE_TS (1<<TIF_BTS_TRACE_TS)
149
150/* work to do on interrupt/exception return */
151#define _TIF_WORK_MASK \
@@ -153,7 +157,10 @@ static inline struct thread_info *stack_thread_info(void)
157 (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED)
158
159/* flags to check in __switch_to() */
-156#define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR)
+160#define _TIF_WORK_CTXSW \
161 (_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_BTS_TRACE_TS)
162#define _TIF_WORK_CTXSW_PREV _TIF_WORK_CTXSW
163#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
164
165#define PREEMPT_ACTIVE 0x10000000
166