author	Anton Blanchard <anton@samba.org>	2009-07-16 09:15:52 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-07-18 05:21:29 -0400
commit	413ee3b48ab582ffea33e7e140c7a2c5ea657e9a (patch)
tree	3ecf161d9db15c95ba0a4d02aa18e243b796a04b
parent	23cdb5d5171d591ec911aada682e09d53c14a810 (diff)
perf_counter: Make sure we don't leak kernel memory to userspace
There are a few places we are leaking tiny amounts of kernel memory to
userspace. This happens when writing out strings because we always align
the end to 64 bits.

To avoid this we should always use an appropriately sized temporary
buffer and ensure it is zeroed.

Since d_path assembles the string from the end of the buffer backwards,
we need to add 64 bits after the buffer to allow for alignment.

We also need to copy arch_vma_name to the temporary buffer, because if
we use it directly we may end up copying to userspace a number of bytes
after the end of the string constant.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20090716104817.273972048@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
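The core of the fix is easiest to see outside the kernel. Below is a minimal
userspace sketch of the pattern: rounding the copy length up to a u64
boundary while reading straight from the source string copies whatever bytes
happen to follow the terminator, whereas staging the string in a zeroed,
fixed-size temporary makes the padding all zero. The function names and the
ALIGN8 macro are illustrative only; they are not taken from perf_counter.c.

/* Userspace sketch only: emit_string_leaky/emit_string_safe and ALIGN8
 * are illustrative names, not kernel code. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ALIGN8(x) (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))

/* Leaky pattern: copy ALIGN8(strlen+1) bytes straight from the source,
 * so up to 7 bytes that happen to live after the terminating NUL end up
 * in the output record. */
size_t emit_string_leaky(const char *s, char *out)
{
	size_t size = ALIGN8(strlen(s) + 1);

	memcpy(out, s, size);		/* may read past the string */
	return size;
}

/* Fixed pattern: stage the string in a zeroed temporary first, so the
 * alignment padding is guaranteed to be zero bytes. */
size_t emit_string_safe(const char *s, char *out)
{
	char tmp[16];
	size_t size;

	memset(tmp, 0, sizeof(tmp));
	strncpy(tmp, s, sizeof(tmp) - 1);
	size = ALIGN8(strlen(tmp) + 1);

	memcpy(out, tmp, size);		/* padding is all zeroes */
	return size;
}

int main(void)
{
	char out[32];
	size_t n = emit_string_safe("comm", out);

	printf("copied %zu bytes, last padding byte = %d\n", n, out[n - 1]);
	return 0;
}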
-rw-r--r--	kernel/perf_counter.c	19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index c6c38fb7766a..f7a8ab9576e4 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2968,8 +2968,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
 	struct perf_cpu_context *cpuctx;
 	struct perf_counter_context *ctx;
 	unsigned int size;
-	char *comm = comm_event->task->comm;
+	char comm[TASK_COMM_LEN];
 
+	memset(comm, 0, sizeof(comm));
+	strncpy(comm, comm_event->task->comm, sizeof(comm));
 	size = ALIGN(strlen(comm)+1, sizeof(u64));
 
 	comm_event->comm = comm;
@@ -3088,8 +3090,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 	char *buf = NULL;
 	const char *name;
 
+	memset(tmp, 0, sizeof(tmp));
+
 	if (file) {
-		buf = kzalloc(PATH_MAX, GFP_KERNEL);
+		/*
+		 * d_path works from the end of the buffer backwards, so we
+		 * need to add enough zero bytes after the string to handle
+		 * the 64bit alignment we do later.
+		 */
+		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
 		if (!buf) {
 			name = strncpy(tmp, "//enomem", sizeof(tmp));
 			goto got_name;
@@ -3100,9 +3109,11 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 			goto got_name;
 		}
 	} else {
-		name = arch_vma_name(mmap_event->vma);
-		if (name)
+		if (arch_vma_name(mmap_event->vma)) {
+			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
+				       sizeof(tmp));
 			goto got_name;
+		}
 
 		if (!vma->vm_mm) {
 			name = strncpy(tmp, "[vdso]", sizeof(tmp));
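Why the mmap path needs the extra sizeof(u64) of slack is also easier to see
in a standalone sketch: d_path assembles the name at the end of the buffer
and returns a pointer into it, so when the caller later rounds strlen+1 up to
a u64 boundary the copy can extend a few bytes past the buffer unless zeroed
padding follows it. In the sketch below, fake_d_path only mimics d_path's
return convention; it is not the kernel function, and the 64-byte buffer
stands in for PATH_MAX.

/* Userspace sketch: fake_d_path mimics d_path()'s convention of building
 * the string at the end of the buffer; it is not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define ALIGN8(x) (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))

/* Build "name" right-aligned in buf[0..buflen) and return a pointer to
 * its first character, like d_path() does. */
char *fake_d_path(const char *name, char *buf, size_t buflen)
{
	size_t len = strlen(name) + 1;
	char *p = buf + buflen - len;

	memcpy(p, name, len);
	return p;
}

int main(void)
{
	size_t buflen = 64;	/* stands in for PATH_MAX */
	/* Allocate sizeof(u64) extra zeroed bytes, as the patch does. */
	char *buf = calloc(1, buflen + sizeof(uint64_t));
	char *name = fake_d_path("/lib/libc.so.6", buf, buflen);

	/* The caller copies ALIGN8(strlen+1) bytes starting at "name";
	 * that span can reach up to 7 bytes past buf + buflen, so the
	 * extra zeroed slack keeps the copy in bounds and all-zero. */
	size_t size = ALIGN8(strlen(name) + 1);

	printf("string occupies [%zu, %zu), aligned copy covers [%zu, %zu)\n",
	       (size_t)(name - buf), (size_t)(name - buf) + strlen(name) + 1,
	       (size_t)(name - buf), (size_t)(name - buf) + size);
	free(buf);
	return 0;
}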