Diffstat (limited to 'kernel')
 kernel/cgroup.c            |  8 +++-----
 kernel/fork.c              |  2 ++
 kernel/posix-timers.c      |  6 ++++++
 kernel/ptrace.c            | 12 ++++++++++++
 kernel/trace/ring_buffer.c | 19 +++++++++++++++++--
 5 files changed, 40 insertions(+), 7 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8185a0f09594..2606d0fb4e54 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1024,7 +1024,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 	if (ret == -EBUSY) {
 		mutex_unlock(&cgroup_mutex);
 		mutex_unlock(&inode->i_mutex);
-		goto drop_new_super;
+		goto free_cg_links;
 	}
 
 	/* EBUSY should be the only error here */
@@ -1073,10 +1073,11 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 
 	return simple_set_mnt(mnt, sb);
 
+ free_cg_links:
+	free_cg_links(&tmp_cg_links);
  drop_new_super:
 	up_write(&sb->s_umount);
 	deactivate_super(sb);
-	free_cg_links(&tmp_cg_links);
 	return ret;
 }
 
@@ -2934,9 +2935,6 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
  again:
 	root = subsys->root;
 	if (root == &rootnode) {
-		printk(KERN_INFO
-		       "Not cloning cgroup for unused subsystem %s\n",
-		       subsys->name);
 		mutex_unlock(&cgroup_mutex);
 		return 0;
 	}
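Note on the cgroup_get_sb() change: the error path gains a second label so that free_cg_links(&tmp_cg_links) runs before the new superblock is torn down, and the -EBUSY case jumps to that new label. As an illustration only, here is a minimal, self-contained sketch of the stacked-label unwind idiom in plain C; the names (do_job, links, scratch) are hypothetical stand-ins, not the cgroup code itself.

#include <stdio.h>
#include <stdlib.h>

/*
 * Each label releases one resource and falls through to the next, so
 * cleanup always runs in reverse order of acquisition and every exit
 * path shares the same unwind code.
 */
static int do_job(void)
{
	void *links;
	FILE *scratch;
	int ret = -1;

	links = malloc(64);		/* stands in for allocating tmp_cg_links */
	if (!links)
		goto out;

	scratch = tmpfile();		/* stands in for setting up the superblock */
	if (!scratch)
		goto free_links;

	if (fputs("work\n", scratch) < 0)	/* a step that can fail (the -EBUSY case) */
		goto drop_scratch;

	ret = 0;			/* success and failure share the unwind below */

 drop_scratch:
	fclose(scratch);
 free_links:
	free(links);
 out:
	return ret;
}

int main(void)
{
	return do_job() ? 1 : 0;
}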
diff --git a/kernel/fork.c b/kernel/fork.c
index 7b93da72d4a2..65ce60adc8e8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1096,6 +1096,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
+	if (unlikely(ptrace_reparented(current)))
+		ptrace_fork(p, clone_flags);
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
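Note on the copy_process() change: the new ptrace_fork() call is guarded by ptrace_reparented(current) wrapped in unlikely(), since a forking task is rarely under a debugger. As a side illustration, the kernel's unlikely() is built on __builtin_expect() (see <linux/compiler.h>); the sketch below uses a stand-in macro named my_unlikely to show the same branch-hint pattern in user space.

#include <stdio.h>

/*
 * Stand-in for the kernel's unlikely(): the __builtin_expect() hint tells
 * GCC/Clang to lay out code assuming the condition is almost always false,
 * keeping the rare branch off the hot path.
 */
#define my_unlikely(x)	__builtin_expect(!!(x), 0)

static void rare_path(void)
{
	puts("tracer present: let the ptrace layer copy debug state");
}

static void maybe_notify_tracer(int traced)
{
	if (my_unlikely(traced))
		rare_path();
}

int main(void)
{
	maybe_notify_tracer(0);	/* common case: nothing happens */
	maybe_notify_tracer(1);	/* rare case: the hinted branch is taken */
	return 0;
}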
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5e79c662294b..a140e44eebba 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -197,6 +197,11 @@ static int common_timer_create(struct k_itimer *new_timer)
 	return 0;
 }
 
+static int no_timer_create(struct k_itimer *new_timer)
+{
+	return -EOPNOTSUPP;
+}
+
 /*
  * Return nonzero if we know a priori this clockid_t value is bogus.
  */
@@ -248,6 +253,7 @@ static __init int init_posix_timers(void)
 		.clock_getres = hrtimer_get_res,
 		.clock_get = posix_get_monotonic_raw,
 		.clock_set = do_posix_clock_nosettime,
+		.timer_create = no_timer_create,
 	};
 
 	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
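Note on the posix-timers change: CLOCK_MONOTONIC_RAW now has an explicit timer_create handler that returns -EOPNOTSUPP, so attempts to arm a timer on that clock fail cleanly instead of reaching the generic timer path. A small user-space probe, assuming a kernel with this change applied (link with -lrt on older glibc):

#define _GNU_SOURCE
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
	timer_t tid;

	/*
	 * With this change applied, timer_create() on CLOCK_MONOTONIC_RAW
	 * is expected to fail with EOPNOTSUPP; on kernels without it the
	 * call may appear to succeed.
	 */
	if (timer_create(CLOCK_MONOTONIC_RAW, NULL, &tid) == -1) {
		printf("timer_create: %s\n", strerror(errno));
		return 0;
	}

	puts("timer created (kernel without this change?)");
	timer_delete(tid);
	return 0;
}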
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 4c8bcd7dd8e0..100a71cfdaba 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -25,6 +25,17 @@
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
 
+
+/*
+ * Initialize a new task whose father had been ptraced.
+ *
+ * Called from copy_process().
+ */
+void ptrace_fork(struct task_struct *child, unsigned long clone_flags)
+{
+	arch_ptrace_fork(child, clone_flags);
+}
+
 /*
  * ptrace a task: make the debugger its new parent and
  * move it to the ptrace list.
@@ -72,6 +83,7 @@ void __ptrace_unlink(struct task_struct *child)
 	child->parent = child->real_parent;
 	list_del_init(&child->ptrace_entry);
 
+	arch_ptrace_untrace(child);
 	if (task_is_traced(child))
 		ptrace_untrace(child);
 }
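Note on the ptrace change: ptrace_fork() and the new arch_ptrace_untrace() call are thin wrappers around architecture hooks; architectures with nothing to do are expected to get no-op defaults. The sketch below shows the common override-or-no-op macro convention with purely illustrative names (ARCH_HAS_DEBUG_REGS, arch_hook_fork, foo_copy_debug_regs), not the kernel's actual headers.

/* stand-in for task_struct */
struct task;

/* would live in an arch header, only for architectures that need it */
#ifdef ARCH_HAS_DEBUG_REGS
void foo_copy_debug_regs(struct task *child, unsigned long clone_flags);
#define arch_hook_fork(child, flags)	foo_copy_debug_regs(child, flags)
#endif

/* generic fallback in a common header: expands to an empty statement */
#ifndef arch_hook_fork
#define arch_hook_fork(child, flags)	do { } while (0)
#endif

/* generic code calls the hook unconditionally */
void generic_fork_hook(struct task *child, unsigned long clone_flags)
{
	arch_hook_fork(child, clone_flags);
}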
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bb6922a931b1..76f34c0ef29c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -838,6 +838,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 	 * back to us). This allows us to do a simple loop to
 	 * assign the commit to the tail.
 	 */
+ again:
 	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
 		cpu_buffer->commit_page->page->commit =
 			cpu_buffer->commit_page->write;
@@ -853,6 +854,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
 			cpu_buffer->commit_page->write;
 		barrier();
 	}
+
+	/* again, keep gcc from optimizing */
+	barrier();
+
+	/*
+	 * If an interrupt came in just after the first while loop
+	 * and pushed the tail page forward, we will be left with
+	 * a dangling commit that will never go forward.
+	 */
+	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+		goto again;
 }
 
 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
@@ -950,12 +962,15 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *head_page, *reader_page;
+	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
 	unsigned long tail, write;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
+	commit_page = cpu_buffer->commit_page;
+	/* we just need to protect against interrupts */
+	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 	tail = write - length;
@@ -981,7 +996,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	 * it all the way around the buffer, bail, and warn
 	 * about it.
 	 */
-	if (unlikely(next_page == cpu_buffer->commit_page)) {
+	if (unlikely(next_page == commit_page)) {
 		WARN_ON_ONCE(1);
 		goto out_unlock;
 	}
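Note on the ring buffer change: __rb_reserve_next() now reads cpu_buffer->commit_page once into a local, followed by barrier() so the compiler cannot reload it, and the full-buffer check compares against that snapshot; rb_set_commit_to_write() additionally loops back via the new again: label if an interrupt advanced the tail after the first pass. A minimal sketch of the snapshot-then-compare idiom with a compiler barrier (names are illustrative; GCC/Clang inline asm assumed):

/*
 * Read a pointer that an interrupt on this CPU may advance exactly once
 * into a local, add a compiler barrier so the read cannot be repeated or
 * reordered, and make every later decision against the stable snapshot.
 */
#define compiler_barrier()	__asm__ __volatile__("" ::: "memory")

struct page_desc {
	struct page_desc *next;
};

struct ring {
	struct page_desc *commit_page;	/* may be moved behind our back */
	struct page_desc *tail_page;
};

int would_overwrite_commit(struct ring *r, struct page_desc *next_page)
{
	struct page_desc *commit_snapshot;

	commit_snapshot = r->commit_page;	/* one read of the shared pointer */
	compiler_barrier();			/* compiler must not reload it   */

	/* even if r->commit_page moves after this point, the check is stable */
	return next_page == commit_snapshot;
}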