author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-03-25 09:51:50 -0400
committer  Ingo Molnar <mingo@elte.hu>                2010-03-26 06:33:55 -0400
commit     faa4602e47690fb11221e00f9b9697c8dc0d4b19 (patch)
tree       af667d1cdff7dc63b6893ee3f27a1f2503229ed1 /kernel/sched.c
parent     7c5ecaf7666617889f337296c610815b519abfa9 (diff)
x86, perf, bts, mm: Delete the never used BTS-ptrace code
Support for the PMU's BTS features has been upstreamed in
v2.6.32, but we still have the old and disabled ptrace-BTS,
as Linus noticed not so long ago.
It's buggy: TIF_DEBUGCTLMSR is trampling all over that MSR without
regard for other uses (perf) and doesn't provide the flexibility
needed for perf either.
Its users are ptrace-block-step and ptrace-bts; ptrace-bts
was never used, and ptrace-block-step can be implemented
using a much simpler approach.
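
As a rough, hypothetical sketch of that simpler approach (not code
from this series): block-step only needs a per-task flag plus the
DEBUGCTLMSR_BTF bit, which makes the normal EFLAGS.TF single-step
trap on branches rather than on every instruction. DEBUGCTLMSR_BTF,
get_debugctlmsr() and update_debugctlmsr() are existing x86 names;
TIF_BLOCKSTEP and the helper below are assumptions for illustration.

#include <linux/sched.h>
#include <asm/msr-index.h>
#include <asm/processor.h>

/* Illustrative helper: enable/disable branch-granular stepping for @task. */
static void set_task_blockstep(struct task_struct *task, bool on)
{
	unsigned long debugctl;

	if (on)
		set_tsk_thread_flag(task, TIF_BLOCKSTEP);	/* assumed per-task flag */
	else
		clear_tsk_thread_flag(task, TIF_BLOCKSTEP);

	/* The MSR is per CPU; only touch it when @task is running here. */
	if (task != current)
		return;

	debugctl = get_debugctlmsr();
	if (on)
		debugctl |= DEBUGCTLMSR_BTF;	/* TF now traps on branches, not every insn */
	else
		debugctl &= ~DEBUGCTLMSR_BTF;
	update_debugctlmsr(debugctl);
}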
So axe all 3000 lines of it. That includes the *locked_memory*()
APIs in mm/mlock.c as well.
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Roland McGrath <roland@redhat.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Markus Metzger <markus.t.metzger@intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
LKML-Reference: <20100325135413.938004390@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  43
1 files changed, 0 insertions, 43 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 9ab3cd7858d3..117b7cad31b3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2077,49 +2077,6 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 }
 
 /*
- * wait_task_context_switch - wait for a thread to complete at least one
- * context switch.
- *
- * @p must not be current.
- */
-void wait_task_context_switch(struct task_struct *p)
-{
-	unsigned long nvcsw, nivcsw, flags;
-	int running;
-	struct rq *rq;
-
-	nvcsw = p->nvcsw;
-	nivcsw = p->nivcsw;
-	for (;;) {
-		/*
-		 * The runqueue is assigned before the actual context
-		 * switch. We need to take the runqueue lock.
-		 *
-		 * We could check initially without the lock but it is
-		 * very likely that we need to take the lock in every
-		 * iteration.
-		 */
-		rq = task_rq_lock(p, &flags);
-		running = task_running(rq, p);
-		task_rq_unlock(rq, &flags);
-
-		if (likely(!running))
-			break;
-		/*
-		 * The switch count is incremented before the actual
-		 * context switch. We thus wait for two switches to be
-		 * sure at least one completed.
-		 */
-		if ((p->nvcsw - nvcsw) > 1)
-			break;
-		if ((p->nivcsw - nivcsw) > 1)
-			break;
-
-		cpu_relax();
-	}
-}
-
-/*
  * wait_task_inactive - wait for a thread to unschedule.
  *
  * If @match_state is nonzero, it's the @p->state value just checked and