path: root/include/linux
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-03-25 09:51:50 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-03-26 06:33:55 -0400
commit	faa4602e47690fb11221e00f9b9697c8dc0d4b19 (patch)
tree	af667d1cdff7dc63b6893ee3f27a1f2503229ed1 /include/linux
parent	7c5ecaf7666617889f337296c610815b519abfa9 (diff)
x86, perf, bts, mm: Delete the never used BTS-ptrace code
Support for the PMU's BTS features has been upstreamed in
v2.6.32, but we still have the old and disabled ptrace-BTS,
as Linus noticed it not so long ago.

It's buggy: TIF_DEBUGCTLMSR is trampling all over that MSR without
regard for other uses (perf) and doesn't provide the flexibility
needed for perf either.

Its users are ptrace-block-step and ptrace-bts, since ptrace-bts was
never used and ptrace-block-step can be implemented using a much
simpler approach.

So axe all 3000 lines of it. That includes the *locked_memory*()
APIs in mm/mlock.c as well.

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Roland McGrath <roland@redhat.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Markus Metzger <markus.t.metzger@intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
LKML-Reference: <20100325135413.938004390@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
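For context, the "ptrace-block-step" mentioned above is the PTRACE_SINGLEBLOCK request, which resumes a tracee until the next taken branch rather than the next instruction. A minimal user-space sketch follows; it is an illustration only (not part of this commit), assumes an x86 tracee that is already attached and stopped, and the helper name block_step_once is hypothetical.

/*
 * Illustration only -- not part of this commit.
 * Resume an attached, stopped tracee until its next taken branch
 * using PTRACE_SINGLEBLOCK (the "ptrace-block-step" interface).
 */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_SINGLEBLOCK
#define PTRACE_SINGLEBLOCK 33	/* x86 request number; some libcs omit it */
#endif

/* Return 0 if the tracee stopped again after the next branch, -1 on error. */
static int block_step_once(pid_t pid)
{
	int status;

	/* Resume the stopped tracee; it traps again at the next branch. */
	if (ptrace(PTRACE_SINGLEBLOCK, pid, 0, 0) == -1) {
		perror("PTRACE_SINGLEBLOCK");
		return -1;
	}
	if (waitpid(pid, &status, 0) == -1) {
		perror("waitpid");
		return -1;
	}
	return WIFSTOPPED(status) ? 0 : -1;
}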
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/ftrace.h	12
-rw-r--r--	include/linux/mm.h	4
-rw-r--r--	include/linux/ptrace.h	12
-rw-r--r--	include/linux/sched.h	9
4 files changed, 0 insertions, 37 deletions
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 01e6adea07ec..cc12b3c556b3 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -504,18 +504,6 @@ extern int ftrace_dump_on_oops;
 #define INIT_TRACE_RECURSION
 #endif
 
-#ifdef CONFIG_HW_BRANCH_TRACER
-
-void trace_hw_branch(u64 from, u64 to);
-void trace_hw_branch_oops(void);
-
-#else /* CONFIG_HW_BRANCH_TRACER */
-
-static inline void trace_hw_branch(u64 from, u64 to) {}
-static inline void trace_hw_branch_oops(void) {}
-
-#endif /* CONFIG_HW_BRANCH_TRACER */
-
 #ifdef CONFIG_FTRACE_SYSCALLS
 
 unsigned long arch_syscall_addr(int nr);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e70f21beb4b4..c8442b655111 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -19,7 +19,6 @@ struct anon_vma;
 struct file_ra_state;
 struct user_struct;
 struct writeback_control;
-struct rlimit;
 
 #ifndef CONFIG_DISCONTIGMEM	/* Don't use mapnrs, do it properly */
 extern unsigned long max_mapnr;
@@ -1449,9 +1448,6 @@ int vmemmap_populate_basepages(struct page *start_page,
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
 void vmemmap_populate_print_last(void);
 
-extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
-				 size_t size);
-extern void refund_locked_memory(struct mm_struct *mm, size_t size);
 
 enum mf_flags {
 	MF_COUNT_INCREASED = 1 << 0,
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index e1fb60729979..4272521e29e9 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -345,18 +345,6 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
 #define arch_ptrace_stop(code, info)		do { } while (0)
 #endif
 
-#ifndef arch_ptrace_untrace
-/*
- * Do machine-specific work before untracing child.
- *
- * This is called for a normal detach as well as from ptrace_exit()
- * when the tracing task dies.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-#define arch_ptrace_untrace(task)		do { } while (0)
-#endif
-
 extern int task_current_syscall(struct task_struct *target, long *callno,
 				unsigned long args[6], unsigned int maxargs,
 				unsigned long *sp, unsigned long *pc);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dad7f668ebf7..e0447c64af6a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -99,7 +99,6 @@ struct futex_pi_state;
 struct robust_list_head;
 struct bio_list;
 struct fs_struct;
-struct bts_context;
 struct perf_event_context;
 
 /*
@@ -1272,12 +1271,6 @@ struct task_struct {
 	struct list_head ptraced;
 	struct list_head ptrace_entry;
 
-	/*
-	 * This is the tracer handle for the ptrace BTS extension.
-	 * This field actually belongs to the ptracer task.
-	 */
-	struct bts_context *bts;
-
 	/* PID/PID hash table linkage. */
 	struct pid_link pids[PIDTYPE_MAX];
 	struct list_head thread_group;
@@ -2123,10 +2116,8 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_context_switch(struct task_struct *p);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
-static inline void wait_task_context_switch(struct task_struct *p) {}
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {