author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-03-25 09:51:50 -0400
committer Ingo Molnar <mingo@elte.hu>              2010-03-26 06:33:55 -0400
commit    faa4602e47690fb11221e00f9b9697c8dc0d4b19 (patch)
tree      af667d1cdff7dc63b6893ee3f27a1f2503229ed1 /arch/x86/kernel/ptrace.c
parent    7c5ecaf7666617889f337296c610815b519abfa9 (diff)
x86, perf, bts, mm: Delete the never used BTS-ptrace code
Support for the PMU's BTS features was upstreamed in v2.6.32, but we still carry the old, disabled ptrace-BTS code, as Linus noticed not so long ago.

It is buggy: TIF_DEBUGCTLMSR tramples all over that MSR without regard for other users (perf), and it does not provide the flexibility perf needs either.

Its only users are ptrace-block-step and ptrace-bts. Since ptrace-bts was never used and ptrace-block-step can be implemented with a much simpler approach, axe all 3000 lines of it. That includes the *locked_memory*() APIs in mm/mlock.c as well.

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Roland McGrath <roland@redhat.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Markus Metzger <markus.t.metzger@intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
LKML-Reference: <20100325135413.938004390@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
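For context on the "much simpler approach" to ptrace-block-step mentioned above: block stepping needs only two architectural bits, EFLAGS.TF plus the BTF bit of the IA32_DEBUGCTL MSR, toggled for the traced task, rather than the whole BTS buffer machinery removed by this patch. The sketch below is illustrative only and is not part of this patch; read_msr(), write_msr() and enable_block_step() are hypothetical stand-ins, not kernel APIs.

#include <stdint.h>

/* Architectural constants (Intel SDM): IA32_DEBUGCTL is MSR 0x1d9; its BTF
 * bit (bit 1) turns the EFLAGS.TF single-step trap into a trap that fires
 * only when control flow branches. */
#define MSR_IA32_DEBUGCTL 0x1d9u
#define DEBUGCTL_BTF      (1u << 1)
#define X86_EFLAGS_TF     (1u << 8)

/* Hypothetical stand-ins for privileged MSR accessors provided elsewhere. */
extern uint64_t read_msr(uint32_t msr);
extern void write_msr(uint32_t msr, uint64_t val);

/*
 * Enable "block step" for a task whose saved EFLAGS is *eflags: with both TF
 * and BTF set, the debug exception is raised once per taken branch rather
 * than once per instruction, and no trace buffer is needed at all.
 */
static void enable_block_step(uint64_t *eflags)
{
        *eflags |= X86_EFLAGS_TF;
        write_msr(MSR_IA32_DEBUGCTL, read_msr(MSR_IA32_DEBUGCTL) | DEBUGCTL_BTF);
}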
Diffstat (limited to 'arch/x86/kernel/ptrace.c')
-rw-r--r--  arch/x86/kernel/ptrace.c  382
 1 file changed, 0 insertions(+), 382 deletions(-)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index a503b1fd04e5..f2fd3b80e565 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -2,9 +2,6 @@
 /*
  * Pentium III FXSR, SSE support
  * Gareth Hughes <gareth@valinux.com>, May 2000
- *
- * BTS tracing
- * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
  */
 
 #include <linux/kernel.h>
@@ -21,7 +18,6 @@
 #include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/signal.h>
-#include <linux/workqueue.h>
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 
@@ -35,7 +31,6 @@
 #include <asm/desc.h>
 #include <asm/prctl.h>
 #include <asm/proto.h>
-#include <asm/ds.h>
 #include <asm/hw_breakpoint.h>
 
 #include "tls.h"
@@ -788,342 +783,6 @@ static int ioperm_get(struct task_struct *target,
                                    0, IO_BITMAP_BYTES);
 }
 
-#ifdef CONFIG_X86_PTRACE_BTS
-/*
- * A branch trace store context.
- *
- * Contexts may only be installed by ptrace_bts_config() and only for
- * ptraced tasks.
- *
- * Contexts are destroyed when the tracee is detached from the tracer.
- * The actual destruction work requires interrupts enabled, so the
- * work is deferred and will be scheduled during __ptrace_unlink().
- *
- * Contexts hold an additional task_struct reference on the traced
- * task, as well as a reference on the tracer's mm.
- *
- * Ptrace already holds a task_struct for the duration of ptrace operations,
- * but since destruction is deferred, it may be executed after both
- * tracer and tracee exited.
- */
-struct bts_context {
-        /* The branch trace handle. */
-        struct bts_tracer *tracer;
-
-        /* The buffer used to store the branch trace and its size. */
-        void *buffer;
-        unsigned int size;
-
-        /* The mm that paid for the above buffer. */
-        struct mm_struct *mm;
-
-        /* The task this context belongs to. */
-        struct task_struct *task;
-
-        /* The signal to send on a bts buffer overflow. */
-        unsigned int bts_ovfl_signal;
-
-        /* The work struct to destroy a context. */
-        struct work_struct work;
-};
-
-static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
-{
-        void *buffer = NULL;
-        int err = -ENOMEM;
-
-        err = account_locked_memory(current->mm, current->signal->rlim, size);
-        if (err < 0)
-                return err;
-
-        buffer = kzalloc(size, GFP_KERNEL);
-        if (!buffer)
-                goto out_refund;
-
-        context->buffer = buffer;
-        context->size = size;
-        context->mm = get_task_mm(current);
-
-        return 0;
-
- out_refund:
-        refund_locked_memory(current->mm, size);
-        return err;
-}
-
-static inline void free_bts_buffer(struct bts_context *context)
-{
-        if (!context->buffer)
-                return;
-
-        kfree(context->buffer);
-        context->buffer = NULL;
-
-        refund_locked_memory(context->mm, context->size);
-        context->size = 0;
-
-        mmput(context->mm);
-        context->mm = NULL;
-}
-
-static void free_bts_context_work(struct work_struct *w)
-{
-        struct bts_context *context;
-
-        context = container_of(w, struct bts_context, work);
-
-        ds_release_bts(context->tracer);
-        put_task_struct(context->task);
-        free_bts_buffer(context);
-        kfree(context);
-}
-
-static inline void free_bts_context(struct bts_context *context)
-{
-        INIT_WORK(&context->work, free_bts_context_work);
-        schedule_work(&context->work);
-}
-
-static inline struct bts_context *alloc_bts_context(struct task_struct *task)
-{
-        struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
-        if (context) {
-                context->task = task;
-                task->bts = context;
-
-                get_task_struct(task);
-        }
-
-        return context;
-}
-
-static int ptrace_bts_read_record(struct task_struct *child, size_t index,
-                                  struct bts_struct __user *out)
-{
-        struct bts_context *context;
-        const struct bts_trace *trace;
-        struct bts_struct bts;
-        const unsigned char *at;
-        int error;
-
-        context = child->bts;
-        if (!context)
-                return -ESRCH;
-
-        trace = ds_read_bts(context->tracer);
-        if (!trace)
-                return -ESRCH;
-
-        at = trace->ds.top - ((index + 1) * trace->ds.size);
-        if ((void *)at < trace->ds.begin)
-                at += (trace->ds.n * trace->ds.size);
-
-        if (!trace->read)
-                return -EOPNOTSUPP;
-
-        error = trace->read(context->tracer, at, &bts);
-        if (error < 0)
-                return error;
-
-        if (copy_to_user(out, &bts, sizeof(bts)))
-                return -EFAULT;
-
-        return sizeof(bts);
-}
-
-static int ptrace_bts_drain(struct task_struct *child,
-                            long size,
-                            struct bts_struct __user *out)
-{
-        struct bts_context *context;
-        const struct bts_trace *trace;
-        const unsigned char *at;
-        int error, drained = 0;
-
-        context = child->bts;
-        if (!context)
-                return -ESRCH;
-
-        trace = ds_read_bts(context->tracer);
-        if (!trace)
-                return -ESRCH;
-
-        if (!trace->read)
-                return -EOPNOTSUPP;
-
-        if (size < (trace->ds.top - trace->ds.begin))
-                return -EIO;
-
-        for (at = trace->ds.begin; (void *)at < trace->ds.top;
-             out++, drained++, at += trace->ds.size) {
-                struct bts_struct bts;
-
-                error = trace->read(context->tracer, at, &bts);
-                if (error < 0)
-                        return error;
-
-                if (copy_to_user(out, &bts, sizeof(bts)))
-                        return -EFAULT;
-        }
-
-        memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
-
-        error = ds_reset_bts(context->tracer);
-        if (error < 0)
-                return error;
-
-        return drained;
-}
-
-static int ptrace_bts_config(struct task_struct *child,
-                             long cfg_size,
-                             const struct ptrace_bts_config __user *ucfg)
-{
-        struct bts_context *context;
-        struct ptrace_bts_config cfg;
-        unsigned int flags = 0;
-
-        if (cfg_size < sizeof(cfg))
-                return -EIO;
-
-        if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
-                return -EFAULT;
-
-        context = child->bts;
-        if (!context)
-                context = alloc_bts_context(child);
-        if (!context)
-                return -ENOMEM;
-
-        if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
-                if (!cfg.signal)
-                        return -EINVAL;
-
-                return -EOPNOTSUPP;
-                context->bts_ovfl_signal = cfg.signal;
-        }
-
-        ds_release_bts(context->tracer);
-        context->tracer = NULL;
-
-        if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
-                int err;
-
-                free_bts_buffer(context);
-                if (!cfg.size)
-                        return 0;
-
-                err = alloc_bts_buffer(context, cfg.size);
-                if (err < 0)
-                        return err;
-        }
-
-        if (cfg.flags & PTRACE_BTS_O_TRACE)
-                flags |= BTS_USER;
-
-        if (cfg.flags & PTRACE_BTS_O_SCHED)
-                flags |= BTS_TIMESTAMPS;
-
-        context->tracer =
-                ds_request_bts_task(child, context->buffer, context->size,
-                                    NULL, (size_t)-1, flags);
-        if (unlikely(IS_ERR(context->tracer))) {
-                int error = PTR_ERR(context->tracer);
-
-                free_bts_buffer(context);
-                context->tracer = NULL;
-                return error;
-        }
-
-        return sizeof(cfg);
-}
-
-static int ptrace_bts_status(struct task_struct *child,
-                             long cfg_size,
-                             struct ptrace_bts_config __user *ucfg)
-{
-        struct bts_context *context;
-        const struct bts_trace *trace;
-        struct ptrace_bts_config cfg;
-
-        context = child->bts;
-        if (!context)
-                return -ESRCH;
-
-        if (cfg_size < sizeof(cfg))
-                return -EIO;
-
-        trace = ds_read_bts(context->tracer);
-        if (!trace)
-                return -ESRCH;
-
-        memset(&cfg, 0, sizeof(cfg));
-        cfg.size = trace->ds.end - trace->ds.begin;
-        cfg.signal = context->bts_ovfl_signal;
-        cfg.bts_size = sizeof(struct bts_struct);
-
-        if (cfg.signal)
-                cfg.flags |= PTRACE_BTS_O_SIGNAL;
-
-        if (trace->ds.flags & BTS_USER)
-                cfg.flags |= PTRACE_BTS_O_TRACE;
-
-        if (trace->ds.flags & BTS_TIMESTAMPS)
-                cfg.flags |= PTRACE_BTS_O_SCHED;
-
-        if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
-                return -EFAULT;
-
-        return sizeof(cfg);
-}
-
-static int ptrace_bts_clear(struct task_struct *child)
-{
-        struct bts_context *context;
-        const struct bts_trace *trace;
-
-        context = child->bts;
-        if (!context)
-                return -ESRCH;
-
-        trace = ds_read_bts(context->tracer);
-        if (!trace)
-                return -ESRCH;
-
-        memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
-
-        return ds_reset_bts(context->tracer);
-}
-
-static int ptrace_bts_size(struct task_struct *child)
-{
-        struct bts_context *context;
-        const struct bts_trace *trace;
-
-        context = child->bts;
-        if (!context)
-                return -ESRCH;
-
-        trace = ds_read_bts(context->tracer);
-        if (!trace)
-                return -ESRCH;
-
-        return (trace->ds.top - trace->ds.begin) / trace->ds.size;
-}
-
-/*
- * Called from __ptrace_unlink() after the child has been moved back
- * to its original parent.
- */
-void ptrace_bts_untrace(struct task_struct *child)
-{
-        if (unlikely(child->bts)) {
-                free_bts_context(child->bts);
-                child->bts = NULL;
-        }
-}
-#endif /* CONFIG_X86_PTRACE_BTS */
-
 /*
  * Called by kernel/ptrace.c when detaching..
  *
@@ -1251,39 +910,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                 break;
 #endif
 
-        /*
-         * These bits need more cooking - not enabled yet:
-         */
-#ifdef CONFIG_X86_PTRACE_BTS
-        case PTRACE_BTS_CONFIG:
-                ret = ptrace_bts_config
-                        (child, data, (struct ptrace_bts_config __user *)addr);
-                break;
-
-        case PTRACE_BTS_STATUS:
-                ret = ptrace_bts_status
-                        (child, data, (struct ptrace_bts_config __user *)addr);
-                break;
-
-        case PTRACE_BTS_SIZE:
-                ret = ptrace_bts_size(child);
-                break;
-
-        case PTRACE_BTS_GET:
-                ret = ptrace_bts_read_record
-                        (child, data, (struct bts_struct __user *) addr);
-                break;
-
-        case PTRACE_BTS_CLEAR:
-                ret = ptrace_bts_clear(child);
-                break;
-
-        case PTRACE_BTS_DRAIN:
-                ret = ptrace_bts_drain
-                        (child, data, (struct bts_struct __user *) addr);
-                break;
-#endif /* CONFIG_X86_PTRACE_BTS */
-
         default:
                 ret = ptrace_request(child, request, addr, data);
                 break;
@@ -1543,14 +1169,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
         case PTRACE_GET_THREAD_AREA:
         case PTRACE_SET_THREAD_AREA:
-#ifdef CONFIG_X86_PTRACE_BTS
-        case PTRACE_BTS_CONFIG:
-        case PTRACE_BTS_STATUS:
-        case PTRACE_BTS_SIZE:
-        case PTRACE_BTS_GET:
-        case PTRACE_BTS_CLEAR:
-        case PTRACE_BTS_DRAIN:
-#endif /* CONFIG_X86_PTRACE_BTS */
                 return arch_ptrace(child, request, addr, data);
 
         default: