Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                     | 151
-rw-r--r--  kernel/fork.c                       |  27
-rw-r--r--  kernel/kexec.c                      |   2
-rw-r--r--  kernel/kprobes.c                    |   2
-rw-r--r--  kernel/perf_counter.c               | 103
-rw-r--r--  kernel/posix-timers.c               |   7
-rw-r--r--  kernel/sched_cpupri.c               |  15
-rw-r--r--  kernel/sched_fair.c                 |  32
-rw-r--r--  kernel/signal.c                     |  25
-rw-r--r--  kernel/smp.c                        |   2
-rw-r--r--  kernel/trace/trace_event_profile.c  |   2
-rw-r--r--  kernel/trace/trace_events.c         |   2
12 files changed, 254 insertions(+), 116 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3737a682cdf5..b6eadfe30e7b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -47,6 +47,7 @@
 #include <linux/hash.h>
 #include <linux/namei.h>
 #include <linux/smp_lock.h>
+#include <linux/pid_namespace.h>
 
 #include <asm/atomic.h>
 
@@ -734,16 +735,28 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
  * reference to css->refcnt. In general, this refcnt is expected to go down
  * to zero, soon.
  *
- * CGRP_WAIT_ON_RMDIR flag is modified under cgroup's inode->i_mutex;
+ * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
  */
 DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
 
-static void cgroup_wakeup_rmdir_waiters(const struct cgroup *cgrp)
+static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
 {
-	if (unlikely(test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
+	if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
 		wake_up_all(&cgroup_rmdir_waitq);
 }
 
+void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
+{
+	css_get(css);
+}
+
+void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
+{
+	cgroup_wakeup_rmdir_waiter(css->cgroup);
+	css_put(css);
+}
+
+
 static int rebind_subsystems(struct cgroupfs_root *root,
 			     unsigned long final_bits)
 {
@@ -960,6 +973,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
 	INIT_LIST_HEAD(&cgrp->children);
 	INIT_LIST_HEAD(&cgrp->css_sets);
 	INIT_LIST_HEAD(&cgrp->release_list);
+	INIT_LIST_HEAD(&cgrp->pids_list);
 	init_rwsem(&cgrp->pids_mutex);
 }
 static void init_cgroup_root(struct cgroupfs_root *root)
@@ -1357,7 +1371,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 	 * wake up rmdir() waiter. the rmdir should fail since the cgroup
 	 * is no longer empty.
 	 */
-	cgroup_wakeup_rmdir_waiters(cgrp);
+	cgroup_wakeup_rmdir_waiter(cgrp);
 	return 0;
 }
 
@@ -2201,12 +2215,30 @@ err:
 	return ret;
 }
 
+/*
+ * Cache pids for all threads in the same pid namespace that are
+ * opening the same "tasks" file.
+ */
+struct cgroup_pids {
+	/* The node in cgrp->pids_list */
+	struct list_head list;
+	/* The cgroup those pids belong to */
+	struct cgroup *cgrp;
+	/* The namespace those pids belong to */
+	struct pid_namespace *ns;
+	/* Array of process ids in the cgroup */
+	pid_t *tasks_pids;
+	/* How many files are using this tasks_pids array */
+	int use_count;
+	/* Length of the current tasks_pids array */
+	int length;
+};
+
 static int cmppid(const void *a, const void *b)
 {
 	return *(pid_t *)a - *(pid_t *)b;
 }
 
-
 /*
  * seq_file methods for the "tasks" file. The seq_file position is the
  * next pid to display; the seq_file iterator is a pointer to the pid
@@ -2221,45 +2253,47 @@ static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos)
 	 * after a seek to the start). Use a binary-search to find the
 	 * next pid to display, if any
 	 */
-	struct cgroup *cgrp = s->private;
+	struct cgroup_pids *cp = s->private;
+	struct cgroup *cgrp = cp->cgrp;
 	int index = 0, pid = *pos;
 	int *iter;
 
 	down_read(&cgrp->pids_mutex);
 	if (pid) {
-		int end = cgrp->pids_length;
+		int end = cp->length;
 
 		while (index < end) {
 			int mid = (index + end) / 2;
-			if (cgrp->tasks_pids[mid] == pid) {
+			if (cp->tasks_pids[mid] == pid) {
 				index = mid;
 				break;
-			} else if (cgrp->tasks_pids[mid] <= pid)
+			} else if (cp->tasks_pids[mid] <= pid)
 				index = mid + 1;
 			else
 				end = mid;
 		}
 	}
 	/* If we're off the end of the array, we're done */
-	if (index >= cgrp->pids_length)
+	if (index >= cp->length)
 		return NULL;
 	/* Update the abstract position to be the actual pid that we found */
-	iter = cgrp->tasks_pids + index;
+	iter = cp->tasks_pids + index;
 	*pos = *iter;
 	return iter;
 }
 
 static void cgroup_tasks_stop(struct seq_file *s, void *v)
 {
-	struct cgroup *cgrp = s->private;
+	struct cgroup_pids *cp = s->private;
+	struct cgroup *cgrp = cp->cgrp;
 	up_read(&cgrp->pids_mutex);
 }
 
 static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos)
 {
-	struct cgroup *cgrp = s->private;
+	struct cgroup_pids *cp = s->private;
 	int *p = v;
-	int *end = cgrp->tasks_pids + cgrp->pids_length;
+	int *end = cp->tasks_pids + cp->length;
 
 	/*
 	 * Advance to the next pid in the array. If this goes off the
@@ -2286,26 +2320,33 @@ static struct seq_operations cgroup_tasks_seq_operations = {
 	.show = cgroup_tasks_show,
 };
 
-static void release_cgroup_pid_array(struct cgroup *cgrp)
+static void release_cgroup_pid_array(struct cgroup_pids *cp)
 {
+	struct cgroup *cgrp = cp->cgrp;
+
 	down_write(&cgrp->pids_mutex);
-	BUG_ON(!cgrp->pids_use_count);
-	if (!--cgrp->pids_use_count) {
-		kfree(cgrp->tasks_pids);
-		cgrp->tasks_pids = NULL;
-		cgrp->pids_length = 0;
+	BUG_ON(!cp->use_count);
+	if (!--cp->use_count) {
+		list_del(&cp->list);
+		put_pid_ns(cp->ns);
+		kfree(cp->tasks_pids);
+		kfree(cp);
 	}
 	up_write(&cgrp->pids_mutex);
 }
 
 static int cgroup_tasks_release(struct inode *inode, struct file *file)
 {
-	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+	struct seq_file *seq;
+	struct cgroup_pids *cp;
 
 	if (!(file->f_mode & FMODE_READ))
 		return 0;
 
-	release_cgroup_pid_array(cgrp);
+	seq = file->private_data;
+	cp = seq->private;
+
+	release_cgroup_pid_array(cp);
 	return seq_release(inode, file);
 }
 
@@ -2324,6 +2365,8 @@ static struct file_operations cgroup_tasks_operations = {
 static int cgroup_tasks_open(struct inode *unused, struct file *file)
 {
 	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+	struct pid_namespace *ns = current->nsproxy->pid_ns;
+	struct cgroup_pids *cp;
 	pid_t *pidarray;
 	int npids;
 	int retval;
@@ -2350,20 +2393,37 @@ static int cgroup_tasks_open(struct inode *unused, struct file *file)
 	 * array if necessary
 	 */
 	down_write(&cgrp->pids_mutex);
-	kfree(cgrp->tasks_pids);
-	cgrp->tasks_pids = pidarray;
-	cgrp->pids_length = npids;
-	cgrp->pids_use_count++;
+
+	list_for_each_entry(cp, &cgrp->pids_list, list) {
+		if (ns == cp->ns)
+			goto found;
+	}
+
+	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+	if (!cp) {
+		up_write(&cgrp->pids_mutex);
+		kfree(pidarray);
+		return -ENOMEM;
+	}
+	cp->cgrp = cgrp;
+	cp->ns = ns;
+	get_pid_ns(ns);
+	list_add(&cp->list, &cgrp->pids_list);
+found:
+	kfree(cp->tasks_pids);
+	cp->tasks_pids = pidarray;
+	cp->length = npids;
+	cp->use_count++;
 	up_write(&cgrp->pids_mutex);
 
 	file->f_op = &cgroup_tasks_operations;
 
 	retval = seq_open(file, &cgroup_tasks_seq_operations);
 	if (retval) {
-		release_cgroup_pid_array(cgrp);
+		release_cgroup_pid_array(cp);
 		return retval;
 	}
-	((struct seq_file *)file->private_data)->private = cgrp;
+	((struct seq_file *)file->private_data)->private = cp;
 	return 0;
 }
 
@@ -2696,33 +2756,42 @@ again:
 	mutex_unlock(&cgroup_mutex);
 
 	/*
+	 * In general, subsystem has no css->refcnt after pre_destroy(). But
+	 * in racy cases, subsystem may have to get css->refcnt after
+	 * pre_destroy() and it makes rmdir return with -EBUSY. This sometimes
+	 * makes rmdir return -EBUSY too often. To avoid that, we use waitqueue
+	 * for cgroup's rmdir. CGRP_WAIT_ON_RMDIR is for synchronizing rmdir
+	 * and subsystem's reference count handling. Please see css_get/put
+	 * and css_tryget() and cgroup_wakeup_rmdir_waiter() implementation.
+	 */
+	set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
+
+	/*
 	 * Call pre_destroy handlers of subsys. Notify subsystems
 	 * that rmdir() request comes.
 	 */
 	ret = cgroup_call_pre_destroy(cgrp);
-	if (ret)
+	if (ret) {
+		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
 		return ret;
+	}
 
 	mutex_lock(&cgroup_mutex);
 	parent = cgrp->parent;
 	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
+		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
 		mutex_unlock(&cgroup_mutex);
 		return -EBUSY;
 	}
-	/*
-	 * css_put/get is provided for subsys to grab refcnt to css. In typical
-	 * case, subsystem has no reference after pre_destroy(). But, under
-	 * hierarchy management, some *temporal* refcnt can be hold.
-	 * To avoid returning -EBUSY to a user, waitqueue is used. If subsys
-	 * is really busy, it should return -EBUSY at pre_destroy(). wake_up
-	 * is called when css_put() is called and refcnt goes down to 0.
-	 */
-	set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
 	prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);
-
 	if (!cgroup_clear_css_refs(cgrp)) {
 		mutex_unlock(&cgroup_mutex);
-		schedule();
+		/*
+		 * Because someone may call cgroup_wakeup_rmdir_waiter() before
+		 * prepare_to_wait(), we need to check this flag.
+		 */
+		if (test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))
+			schedule();
 		finish_wait(&cgroup_rmdir_waitq, &wait);
 		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
 		if (signal_pending(current))
@@ -3294,7 +3363,7 @@ void __css_put(struct cgroup_subsys_state *css)
 			set_bit(CGRP_RELEASABLE, &cgrp->flags);
 			check_for_release(cgrp);
 		}
-		cgroup_wakeup_rmdir_waiters(cgrp);
+		cgroup_wakeup_rmdir_waiter(cgrp);
 	}
 	rcu_read_unlock();
 }
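
Note: the seq_file seek in cgroup_tasks_start() above is a plain binary search for the first pid greater than or equal to the saved position. A minimal userspace sketch of that loop (toy names; pid_t stood in by int; not the kernel API):

#include <stdio.h>

/*
 * Standalone model of the cgroup_tasks_start() seek: given a sorted
 * pid array, return the index of the first entry >= pos. The
 * index/end bounds mirror the kernel loop above.
 */
static int seek_pid(const int *pids, int length, int pos)
{
	int index = 0, end = length;

	while (index < end) {
		int mid = (index + end) / 2;
		if (pids[mid] == pos)
			return mid;
		else if (pids[mid] <= pos)
			index = mid + 1;
		else
			end = mid;
	}
	return index;	/* may equal length: caller treats that as "done" */
}

int main(void)
{
	int pids[] = { 3, 8, 8, 15, 42 };

	/* Seeking to 9 lands on 15, the next pid to display. */
	printf("%d\n", pids[seek_pid(pids, 5, 9)]);	/* prints 15 */
	return 0;
}

The kernel caller treats an index equal to the array length as end-of-iteration and returns NULL from cgroup_tasks_start().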
diff --git a/kernel/fork.c b/kernel/fork.c
index 9b42695f0d14..021e1138556e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -426,6 +426,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 	init_rwsem(&mm->mmap_sem);
 	INIT_LIST_HEAD(&mm->mmlist);
 	mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
+	mm->oom_adj = (current->mm) ? current->mm->oom_adj : 0;
 	mm->core_state = NULL;
 	mm->nr_ptes = 0;
 	set_mm_counter(mm, file_rss, 0);
@@ -567,18 +568,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 	 * the value intact in a core dump, and to save the unnecessary
 	 * trouble otherwise. Userland only wants this done for a sys_exit.
 	 */
-	if (tsk->clear_child_tid
-	    && !(tsk->flags & PF_SIGNALED)
-	    && atomic_read(&mm->mm_users) > 1) {
-		u32 __user * tidptr = tsk->clear_child_tid;
+	if (tsk->clear_child_tid) {
+		if (!(tsk->flags & PF_SIGNALED) &&
+		    atomic_read(&mm->mm_users) > 1) {
+			/*
+			 * We don't check the error code - if userspace has
+			 * not set up a proper pointer then tough luck.
+			 */
+			put_user(0, tsk->clear_child_tid);
+			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
+					1, NULL, NULL, 0);
+		}
 		tsk->clear_child_tid = NULL;
-
-		/*
-		 * We don't check the error code - if userspace has
-		 * not set up a proper pointer then tough luck.
-		 */
-		put_user(0, tidptr);
-		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
 	}
 }
 
@@ -1268,6 +1269,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	write_unlock_irq(&tasklist_lock);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
+	perf_counter_fork(p);
 	return p;
 
 bad_fork_free_pid:
@@ -1409,9 +1411,6 @@ long do_fork(unsigned long clone_flags,
 		init_completion(&vfork);
 	}
 
-	if (!(clone_flags & CLONE_THREAD))
-		perf_counter_fork(p);
-
 	audit_finish_fork(p);
 	tracehook_report_clone(regs, clone_flags, nr, p);
 
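
Note: the mm_release() restructure above changes when clear_child_tid is reset: previously the pointer survived the signal-death path, now it is dropped whenever it was set, while the futex wake still only fires for a normal exit with other mm users. A toy model under stand-in types (toy_task for task_struct, "signaled"/"users" for PF_SIGNALED/mm_users, the put_user()/sys_futex() pair collapsed to one store):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_task {
	int *clear_child_tid;
	bool signaled;
	int users;
};

static void toy_mm_release(struct toy_task *tsk)
{
	if (tsk->clear_child_tid) {
		if (!tsk->signaled && tsk->users > 1)
			*tsk->clear_child_tid = 0;	/* models put_user + futex wake */
		tsk->clear_child_tid = NULL;		/* now cleared on every path */
	}
}

int main(void)
{
	int tid = 1234;
	struct toy_task t = { &tid, true /* signaled */, 2 };

	toy_mm_release(&t);
	assert(tid == 1234);			/* no wake: task was signaled */
	assert(t.clear_child_tid == NULL);	/* but the pointer is still dropped */
	return 0;
}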
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ae1c35201cc8..f336e2107f98 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1228,7 +1228,7 @@ static int __init parse_crashkernel_mem(char *cmdline,
 	} while (*cur++ == ',');
 
 	if (*crash_size > 0) {
-		while (*cur != ' ' && *cur != '@')
+		while (*cur && *cur != ' ' && *cur != '@')
 			cur++;
 		if (*cur == '@') {
 			cur++;
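
Note: the one-character fix above bounds a delimiter scan at the string's terminating NUL. A minimal model of the repaired loop (toy function name, not the kernel parser):

#include <assert.h>

/*
 * Model of the parse_crashkernel_mem() fix: scan forward to a ' ' or
 * '@' delimiter, but stop at the NUL so a string like "128M" (no
 * offset suffix) cannot run past its end.
 */
static const char *skip_to_delim(const char *cur)
{
	while (*cur && *cur != ' ' && *cur != '@')
		cur++;
	return cur;
}

int main(void)
{
	/* With the '*cur &&' guard the scan halts on the NUL... */
	assert(*skip_to_delim("128M") == '\0');
	/* ...and still finds an explicit offset when one is given. */
	assert(*skip_to_delim("128M@16M") == '@');
	return 0;
}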
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 16b5739c516a..0540948e29ab 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -694,7 +694,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 	p->addr = addr;
 
 	preempt_disable();
-	if (!__kernel_text_address((unsigned long) p->addr) ||
+	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr)) {
 		preempt_enable();
 		return -EINVAL;
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 950931041954..673c1aaf7332 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -42,6 +42,7 @@ static int perf_overcommit __read_mostly = 1;
 static atomic_t nr_counters __read_mostly;
 static atomic_t nr_mmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
+static atomic_t nr_task_counters __read_mostly;
 
 /*
  * perf counter paranoia level:
@@ -1103,7 +1104,7 @@ static void perf_counter_sync_stat(struct perf_counter_context *ctx,
 		__perf_counter_sync_stat(counter, next_counter);
 
 		counter = list_next_entry(counter, event_entry);
-		next_counter = list_next_entry(counter, event_entry);
+		next_counter = list_next_entry(next_counter, event_entry);
 	}
 }
 
@@ -1654,6 +1655,8 @@ static void free_counter(struct perf_counter *counter)
 			atomic_dec(&nr_mmap_counters);
 		if (counter->attr.comm)
 			atomic_dec(&nr_comm_counters);
+		if (counter->attr.task)
+			atomic_dec(&nr_task_counters);
 	}
 
 	if (counter->destroy)
@@ -1688,6 +1691,18 @@ static int perf_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static u64 perf_counter_read_tree(struct perf_counter *counter)
+{
+	struct perf_counter *child;
+	u64 total = 0;
+
+	total += perf_counter_read(counter);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		total += perf_counter_read(child);
+
+	return total;
+}
+
 /*
  * Read the performance counter - simple non blocking version for now
  */
@@ -1707,7 +1722,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 
 	WARN_ON_ONCE(counter->ctx->parent_ctx);
 	mutex_lock(&counter->child_mutex);
-	values[0] = perf_counter_read(counter);
+	values[0] = perf_counter_read_tree(counter);
 	n = 1;
 	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 		values[n++] = counter->total_time_enabled +
@@ -2819,10 +2834,12 @@ perf_counter_read_event(struct perf_counter *counter,
 }
 
 /*
- * fork tracking
+ * task tracking -- fork/exit
+ *
+ * enabled by: attr.comm | attr.mmap | attr.task
  */
 
-struct perf_fork_event {
+struct perf_task_event {
 	struct task_struct *task;
 
 	struct {
@@ -2830,37 +2847,42 @@ struct perf_fork_event {
 
 		u32 pid;
 		u32 ppid;
+		u32 tid;
+		u32 ptid;
 	} event;
 };
 
-static void perf_counter_fork_output(struct perf_counter *counter,
-				     struct perf_fork_event *fork_event)
+static void perf_counter_task_output(struct perf_counter *counter,
+				     struct perf_task_event *task_event)
 {
 	struct perf_output_handle handle;
-	int size = fork_event->event.header.size;
-	struct task_struct *task = fork_event->task;
+	int size = task_event->event.header.size;
+	struct task_struct *task = task_event->task;
 	int ret = perf_output_begin(&handle, counter, size, 0, 0);
 
 	if (ret)
 		return;
 
-	fork_event->event.pid = perf_counter_pid(counter, task);
-	fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+	task_event->event.pid = perf_counter_pid(counter, task);
+	task_event->event.ppid = perf_counter_pid(counter, task->real_parent);
 
-	perf_output_put(&handle, fork_event->event);
+	task_event->event.tid = perf_counter_tid(counter, task);
+	task_event->event.ptid = perf_counter_tid(counter, task->real_parent);
+
+	perf_output_put(&handle, task_event->event);
 	perf_output_end(&handle);
 }
 
-static int perf_counter_fork_match(struct perf_counter *counter)
+static int perf_counter_task_match(struct perf_counter *counter)
 {
-	if (counter->attr.comm || counter->attr.mmap)
+	if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
 		return 1;
 
 	return 0;
 }
 
-static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
-				  struct perf_fork_event *fork_event)
+static void perf_counter_task_ctx(struct perf_counter_context *ctx,
+				  struct perf_task_event *task_event)
 {
 	struct perf_counter *counter;
 
@@ -2869,19 +2891,19 @@ static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_counter_fork_match(counter))
-			perf_counter_fork_output(counter, fork_event);
+		if (perf_counter_task_match(counter))
+			perf_counter_task_output(counter, task_event);
 	}
 	rcu_read_unlock();
 }
 
-static void perf_counter_fork_event(struct perf_fork_event *fork_event)
+static void perf_counter_task_event(struct perf_task_event *task_event)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_counter_context *ctx;
 
 	cpuctx = &get_cpu_var(perf_cpu_context);
-	perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
+	perf_counter_task_ctx(&cpuctx->ctx, task_event);
 	put_cpu_var(perf_cpu_context);
 
 	rcu_read_lock();
@@ -2891,32 +2913,40 @@ static void perf_counter_fork_event(struct perf_fork_event *fork_event)
 	 */
 	ctx = rcu_dereference(current->perf_counter_ctxp);
 	if (ctx)
-		perf_counter_fork_ctx(ctx, fork_event);
+		perf_counter_task_ctx(ctx, task_event);
 	rcu_read_unlock();
 }
 
-void perf_counter_fork(struct task_struct *task)
+static void perf_counter_task(struct task_struct *task, int new)
 {
-	struct perf_fork_event fork_event;
+	struct perf_task_event task_event;
 
 	if (!atomic_read(&nr_comm_counters) &&
-	    !atomic_read(&nr_mmap_counters))
+	    !atomic_read(&nr_mmap_counters) &&
+	    !atomic_read(&nr_task_counters))
 		return;
 
-	fork_event = (struct perf_fork_event){
+	task_event = (struct perf_task_event){
 		.task = task,
 		.event = {
 			.header = {
-				.type = PERF_EVENT_FORK,
+				.type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
 				.misc = 0,
-				.size = sizeof(fork_event.event),
+				.size = sizeof(task_event.event),
 			},
 			/* .pid  */
 			/* .ppid */
+			/* .tid  */
+			/* .ptid */
 		},
 	};
 
-	perf_counter_fork_event(&fork_event);
+	perf_counter_task_event(&task_event);
+}
+
+void perf_counter_fork(struct task_struct *task)
+{
+	perf_counter_task(task, 1);
 }
 
 /*
@@ -3875,6 +3905,8 @@ done:
 			atomic_inc(&nr_mmap_counters);
 		if (counter->attr.comm)
 			atomic_inc(&nr_comm_counters);
+		if (counter->attr.task)
+			atomic_inc(&nr_task_counters);
 	}
 
 	return counter;
@@ -4236,8 +4268,10 @@ void perf_counter_exit_task(struct task_struct *child)
 	struct perf_counter_context *child_ctx;
 	unsigned long flags;
 
-	if (likely(!child->perf_counter_ctxp))
+	if (likely(!child->perf_counter_ctxp)) {
+		perf_counter_task(child, 0);
 		return;
+	}
 
 	local_irq_save(flags);
 	/*
@@ -4255,15 +4289,22 @@ void perf_counter_exit_task(struct task_struct *child)
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
 	spin_lock(&child_ctx->lock);
-	child->perf_counter_ctxp = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
 	 * swapped to another process while we're removing all
 	 * the counters from it.
 	 */
 	unclone_ctx(child_ctx);
-	spin_unlock(&child_ctx->lock);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+	/*
+	 * Report the task dead after unscheduling the counters so that we
+	 * won't get any samples after PERF_EVENT_EXIT. We can however still
+	 * get a few PERF_EVENT_READ events.
+	 */
+	perf_counter_task(child, 0);
+
+	child->perf_counter_ctxp = NULL;
 
 	/*
 	 * We can recurse on the same lock type through:
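
Note: perf_counter_read_tree() above folds the counts of inherited child counters into the parent's value at read time. A userspace model of that aggregation (toy_counter and a hand-rolled singly linked list stand in for perf_counter and list_head; not the kernel API):

#include <stdint.h>
#include <stdio.h>

struct toy_counter {
	uint64_t count;
	struct toy_counter *next_child;	/* sibling on the parent's child list */
	struct toy_counter *children;	/* head of this counter's child list */
};

/* A counter's value is its own count plus every child's count. */
static uint64_t toy_read_tree(const struct toy_counter *counter)
{
	const struct toy_counter *child;
	uint64_t total = counter->count;

	for (child = counter->children; child; child = child->next_child)
		total += child->count;
	return total;
}

int main(void)
{
	struct toy_counter c2 = { 7, NULL, NULL };
	struct toy_counter c1 = { 5, &c2, NULL };
	struct toy_counter parent = { 100, NULL, &c1 };

	printf("%llu\n", (unsigned long long)toy_read_tree(&parent)); /* 112 */
	return 0;
}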
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 052ec4d195c7..d089d052c4a9 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -202,6 +202,12 @@ static int no_timer_create(struct k_itimer *new_timer)
 	return -EOPNOTSUPP;
 }
 
+static int no_nsleep(const clockid_t which_clock, int flags,
+		     struct timespec *tsave, struct timespec __user *rmtp)
+{
+	return -EOPNOTSUPP;
+}
+
 /*
  * Return nonzero if we know a priori this clockid_t value is bogus.
  */
@@ -254,6 +260,7 @@ static __init int init_posix_timers(void)
 		.clock_get = posix_get_monotonic_raw,
 		.clock_set = do_posix_clock_nosettime,
 		.timer_create = no_timer_create,
+		.nsleep = no_nsleep,
 	};
 
 	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index e6c251790dde..d014efbf947a 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -81,8 +81,21 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
-		if (lowest_mask)
+		if (lowest_mask) {
 			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+
+			/*
+			 * We have to ensure that we have at least one bit
+			 * still set in the array, since the map could have
+			 * been concurrently emptied between the first and
+			 * second reads of vec->mask. If we hit this
+			 * condition, simply act as though we never hit this
+			 * priority level and continue on.
+			 */
+			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
+				continue;
+		}
+
 		return 1;
 	}
 
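
Note: the cpupri fix above is a classic re-check after a second racy read. A minimal model with the two reads made explicit as separate snapshots (plain 64-bit masks stand in for cpumask_t; toy function name):

#include <assert.h>
#include <stdint.h>

/*
 * Model of cpupri_find(): vec_first and vec_second are two reads of
 * the same vector mask, possibly taken either side of a concurrent
 * update that emptied it. The second intersection must be re-checked
 * for emptiness before the priority level can be reported as a hit.
 */
static int find_lowest(uint64_t allowed, uint64_t vec_first,
		       uint64_t vec_second, uint64_t *lowest)
{
	if ((allowed & vec_first) == 0)
		return 0;			/* first read: no overlap */

	*lowest = allowed & vec_second;		/* second, possibly later read */
	if (*lowest == 0)
		return 0;	/* emptied between reads: skip this level */
	return 1;
}

int main(void)
{
	uint64_t lowest;

	/* Stable mask: the level matches. */
	assert(find_lowest(0x0f, 0x06, 0x06, &lowest) == 1 && lowest == 0x06);
	/* Mask emptied between the reads: act as though it never matched. */
	assert(find_lowest(0x0f, 0x06, 0x00, &lowest) == 0);
	return 0;
}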
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9ffb2b2ceba4..652e8bdef9aa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -611,9 +611,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHEDSTATS
+	struct task_struct *tsk = NULL;
+
+	if (entity_is_task(se))
+		tsk = task_of(se);
+
 	if (se->sleep_start) {
 		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
-		struct task_struct *tsk = task_of(se);
 
 		if ((s64)delta < 0)
 			delta = 0;
@@ -624,11 +628,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		se->sleep_start = 0;
 		se->sum_sleep_runtime += delta;
 
-		account_scheduler_latency(tsk, delta >> 10, 1);
+		if (tsk)
+			account_scheduler_latency(tsk, delta >> 10, 1);
 	}
 	if (se->block_start) {
 		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
-		struct task_struct *tsk = task_of(se);
 
 		if ((s64)delta < 0)
 			delta = 0;
@@ -639,17 +643,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		se->block_start = 0;
 		se->sum_sleep_runtime += delta;
 
-		/*
-		 * Blocking time is in units of nanosecs, so shift by 20 to
-		 * get a milliseconds-range estimation of the amount of
-		 * time that the task spent sleeping:
-		 */
-		if (unlikely(prof_on == SLEEP_PROFILING)) {
-
-			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
-				     delta >> 20);
+		if (tsk) {
+			/*
+			 * Blocking time is in units of nanosecs, so shift by
+			 * 20 to get a milliseconds-range estimation of the
+			 * amount of time that the task spent sleeping:
+			 */
+			if (unlikely(prof_on == SLEEP_PROFILING)) {
+				profile_hits(SLEEP_PROFILING,
+						(void *)get_wchan(tsk),
+						delta >> 20);
+			}
+			account_scheduler_latency(tsk, delta >> 10, 0);
 		}
-		account_scheduler_latency(tsk, delta >> 10, 0);
 	}
 #endif
 }
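
Note: with group scheduling, a sched_entity may represent a task group rather than a task, so task_of() is only valid behind an entity_is_task() check; the statistics calls are therefore guarded on tsk. A toy model of that guard (toy_entity is a stand-in; the boolean tag plays the role of entity_is_task()):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_entity {
	bool is_task;
	const char *comm;	/* only meaningful when is_task is set */
};

static void toy_enqueue_sleeper(const struct toy_entity *se, long delta)
{
	const char *comm = NULL;

	if (se->is_task)
		comm = se->comm;

	if (comm)	/* account latency only for real tasks */
		printf("%s slept %ld ns\n", comm, delta);
}

int main(void)
{
	struct toy_entity task = { true, "worker" };
	struct toy_entity group = { false, NULL };

	toy_enqueue_sleeper(&task, 1000);	/* accounted */
	toy_enqueue_sleeper(&group, 1000);	/* silently skipped */
	return 0;
}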
diff --git a/kernel/signal.c b/kernel/signal.c
index ccf1ceedaebe..64c5deeaca5d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2454,11 +2454,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
 	stack_t oss;
 	int error;
 
-	if (uoss) {
-		oss.ss_sp = (void __user *) current->sas_ss_sp;
-		oss.ss_size = current->sas_ss_size;
-		oss.ss_flags = sas_ss_flags(sp);
-	}
+	oss.ss_sp = (void __user *) current->sas_ss_sp;
+	oss.ss_size = current->sas_ss_size;
+	oss.ss_flags = sas_ss_flags(sp);
 
 	if (uss) {
 		void __user *ss_sp;
@@ -2466,10 +2464,12 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
 		int ss_flags;
 
 		error = -EFAULT;
-		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
-		    || __get_user(ss_sp, &uss->ss_sp)
-		    || __get_user(ss_flags, &uss->ss_flags)
-		    || __get_user(ss_size, &uss->ss_size))
+		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
+			goto out;
+		error = __get_user(ss_sp, &uss->ss_sp) |
+			__get_user(ss_flags, &uss->ss_flags) |
+			__get_user(ss_size, &uss->ss_size);
+		if (error)
 			goto out;
 
 		error = -EPERM;
@@ -2501,13 +2501,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
 		current->sas_ss_size = ss_size;
 	}
 
+	error = 0;
 	if (uoss) {
 		error = -EFAULT;
-		if (copy_to_user(uoss, &oss, sizeof(oss)))
+		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
 			goto out;
+		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
+			__put_user(oss.ss_size, &uoss->ss_size) |
+			__put_user(oss.ss_flags, &uoss->ss_flags);
 	}
 
-	error = 0;
 out:
 	return error;
 }
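
Note: the reordering above snapshots the old stack state into oss before the new one from uss is installed. One observable consequence worth illustrating: a caller may pass the same buffer as both uss and uoss and still get the previous values back. A userspace sketch under stand-in types (toy_stack for stack_t; no access_ok()/__put_user() modeling):

#include <assert.h>

struct toy_stack { void *sp; int size; };

static void toy_sigaltstack(const struct toy_stack *uss,
			    struct toy_stack *uoss, struct toy_stack *cur)
{
	struct toy_stack oss = *cur;	/* snapshot first, unconditionally */

	if (uss)
		*cur = *uss;		/* install the new stack */
	if (uoss)
		*uoss = oss;		/* safe even if uoss == uss */
}

int main(void)
{
	char old_area[64], new_area[64];
	struct toy_stack cur = { old_area, sizeof(old_area) };
	struct toy_stack both = { new_area, sizeof(new_area) };

	toy_sigaltstack(&both, &both, &cur);	/* aliased in/out argument */
	assert(cur.sp == new_area);		/* new stack installed */
	assert(both.sp == old_area);		/* old stack reported back */
	return 0;
}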
diff --git a/kernel/smp.c b/kernel/smp.c
index ad63d8501207..94188b8ecc33 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -57,7 +57,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 			return NOTIFY_BAD;
 		break;
 
-#ifdef CONFIG_CPU_HOTPLUG
+#ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 5b5895afecfe..11ba5bb4ed0a 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -14,7 +14,7 @@ int ftrace_profile_enable(int event_id)
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(event, &ftrace_events, list) {
-		if (event->id == event_id) {
+		if (event->id == event_id && event->profile_enable) {
 			ret = event->profile_enable(event);
 			break;
 		}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 23d2972b22d6..e75276a49cf5 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -940,7 +940,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 	entry = trace_create_file("enable", 0644, call->dir, call,
 				  enable);
 
-	if (call->id)
+	if (call->id && call->profile_enable)
 		entry = trace_create_file("id", 0444, call->dir, call,
 					  id);
 
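
Note: both tracing fixes above are the same pattern: events without profiling support leave the profile_enable hook NULL, so the function pointer must be checked before it is called or exposed. A minimal model of that guard (toy_event stands in for ftrace_event_call; toy names throughout):

#include <errno.h>
#include <stdio.h>

struct toy_event {
	int id;
	int (*profile_enable)(struct toy_event *ev);
};

static int enable_ok(struct toy_event *ev)
{
	(void)ev;
	return 0;
}

static int toy_profile_enable(struct toy_event *ev, int event_id)
{
	/* Only call the hook when the event matches AND the hook exists. */
	if (ev->id == event_id && ev->profile_enable)
		return ev->profile_enable(ev);
	return -ENOENT;
}

int main(void)
{
	struct toy_event with = { 1, enable_ok };
	struct toy_event without = { 2, NULL };	/* no profile support */

	printf("%d %d\n", toy_profile_enable(&with, 1),
	       toy_profile_enable(&without, 2));	/* 0 and -ENOENT */
	return 0;
}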