Diffstat (limited to 'kernel')
30 files changed, 504 insertions, 271 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3737a682cdf5..b6eadfe30e7b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
| @@ -47,6 +47,7 @@ | |||
| 47 | #include <linux/hash.h> | 47 | #include <linux/hash.h> |
| 48 | #include <linux/namei.h> | 48 | #include <linux/namei.h> |
| 49 | #include <linux/smp_lock.h> | 49 | #include <linux/smp_lock.h> |
| 50 | #include <linux/pid_namespace.h> | ||
| 50 | 51 | ||
| 51 | #include <asm/atomic.h> | 52 | #include <asm/atomic.h> |
| 52 | 53 | ||
| @@ -734,16 +735,28 @@ static void cgroup_d_remove_dir(struct dentry *dentry) | |||
| 734 | * reference to css->refcnt. In general, this refcnt is expected to go down | 735 | * reference to css->refcnt. In general, this refcnt is expected to go down |
| 735 | * to zero, soon. | 736 | * to zero, soon. |
| 736 | * | 737 | * |
| 737 | * CGRP_WAIT_ON_RMDIR flag is modified under cgroup's inode->i_mutex; | 738 | * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex; |
| 738 | */ | 739 | */ |
| 739 | DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq); | 740 | DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq); |
| 740 | 741 | ||
| 741 | static void cgroup_wakeup_rmdir_waiters(const struct cgroup *cgrp) | 742 | static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp) |
| 742 | { | 743 | { |
| 743 | if (unlikely(test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))) | 744 | if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))) |
| 744 | wake_up_all(&cgroup_rmdir_waitq); | 745 | wake_up_all(&cgroup_rmdir_waitq); |
| 745 | } | 746 | } |
| 746 | 747 | ||
| 748 | void cgroup_exclude_rmdir(struct cgroup_subsys_state *css) | ||
| 749 | { | ||
| 750 | css_get(css); | ||
| 751 | } | ||
| 752 | |||
| 753 | void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css) | ||
| 754 | { | ||
| 755 | cgroup_wakeup_rmdir_waiter(css->cgroup); | ||
| 756 | css_put(css); | ||
| 757 | } | ||
| 758 | |||
| 759 | |||
| 747 | static int rebind_subsystems(struct cgroupfs_root *root, | 760 | static int rebind_subsystems(struct cgroupfs_root *root, |
| 748 | unsigned long final_bits) | 761 | unsigned long final_bits) |
| 749 | { | 762 | { |
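The pair of helpers added above is the whole synchronization surface a subsystem needs: pin the css so a concurrent rmdir() blocks, then drop the pin and wake the waiter. A kernel-side sketch of the intended pairing, not compilable on its own; only the two helper names come from this patch, the callback name and body are hypothetical:

/*
 * Hypothetical subsystem callback; only cgroup_exclude_rmdir() and
 * cgroup_release_and_wakeup_rmdir() are from this patch.
 */
static void example_subsys_operation(struct cgroup_subsys_state *css)
{
	cgroup_exclude_rmdir(css);		/* css_get(): rmdir() will wait */

	/* ... work that must not race with cgroup removal ... */

	cgroup_release_and_wakeup_rmdir(css);	/* css_put() + wake rmdir() waiter */
}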
| @@ -960,6 +973,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) | |||
| 960 | INIT_LIST_HEAD(&cgrp->children); | 973 | INIT_LIST_HEAD(&cgrp->children); |
| 961 | INIT_LIST_HEAD(&cgrp->css_sets); | 974 | INIT_LIST_HEAD(&cgrp->css_sets); |
| 962 | INIT_LIST_HEAD(&cgrp->release_list); | 975 | INIT_LIST_HEAD(&cgrp->release_list); |
| 976 | INIT_LIST_HEAD(&cgrp->pids_list); | ||
| 963 | init_rwsem(&cgrp->pids_mutex); | 977 | init_rwsem(&cgrp->pids_mutex); |
| 964 | } | 978 | } |
| 965 | static void init_cgroup_root(struct cgroupfs_root *root) | 979 | static void init_cgroup_root(struct cgroupfs_root *root) |
| @@ -1357,7 +1371,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) | |||
| 1357 | * wake up rmdir() waiter. The rmdir should fail since the cgroup | 1371 | * wake up rmdir() waiter. The rmdir should fail since the cgroup |
| 1358 | * is no longer empty. | 1372 | * is no longer empty. |
| 1359 | */ | 1373 | */ |
| 1360 | cgroup_wakeup_rmdir_waiters(cgrp); | 1374 | cgroup_wakeup_rmdir_waiter(cgrp); |
| 1361 | return 0; | 1375 | return 0; |
| 1362 | } | 1376 | } |
| 1363 | 1377 | ||
| @@ -2201,12 +2215,30 @@ err: | |||
| 2201 | return ret; | 2215 | return ret; |
| 2202 | } | 2216 | } |
| 2203 | 2217 | ||
| 2218 | /* | ||
| 2219 | * Cache pids for all threads in the same pid namespace that are | ||
| 2220 | * opening the same "tasks" file. | ||
| 2221 | */ | ||
| 2222 | struct cgroup_pids { | ||
| 2223 | /* The node in cgrp->pids_list */ | ||
| 2224 | struct list_head list; | ||
| 2225 | /* The cgroup those pids belong to */ | ||
| 2226 | struct cgroup *cgrp; | ||
| 2227 | /* The namespace those pids belong to */ | ||
| 2228 | struct pid_namespace *ns; | ||
| 2229 | /* Array of process ids in the cgroup */ | ||
| 2230 | pid_t *tasks_pids; | ||
| 2231 | /* How many files are using this tasks_pids array */ | ||
| 2232 | int use_count; | ||
| 2233 | /* Length of the current tasks_pids array */ | ||
| 2234 | int length; | ||
| 2235 | }; | ||
| 2236 | |||
| 2204 | static int cmppid(const void *a, const void *b) | 2237 | static int cmppid(const void *a, const void *b) |
| 2205 | { | 2238 | { |
| 2206 | return *(pid_t *)a - *(pid_t *)b; | 2239 | return *(pid_t *)a - *(pid_t *)b; |
| 2207 | } | 2240 | } |
| 2208 | 2241 | ||
| 2209 | |||
| 2210 | /* | 2242 | /* |
| 2211 | * seq_file methods for the "tasks" file. The seq_file position is the | 2243 | * seq_file methods for the "tasks" file. The seq_file position is the |
| 2212 | * next pid to display; the seq_file iterator is a pointer to the pid | 2244 | * next pid to display; the seq_file iterator is a pointer to the pid |
| @@ -2221,45 +2253,47 @@ static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos) | |||
| 2221 | * after a seek to the start). Use a binary-search to find the | 2253 | * after a seek to the start). Use a binary-search to find the |
| 2222 | * next pid to display, if any | 2254 | * next pid to display, if any |
| 2223 | */ | 2255 | */ |
| 2224 | struct cgroup *cgrp = s->private; | 2256 | struct cgroup_pids *cp = s->private; |
| 2257 | struct cgroup *cgrp = cp->cgrp; | ||
| 2225 | int index = 0, pid = *pos; | 2258 | int index = 0, pid = *pos; |
| 2226 | int *iter; | 2259 | int *iter; |
| 2227 | 2260 | ||
| 2228 | down_read(&cgrp->pids_mutex); | 2261 | down_read(&cgrp->pids_mutex); |
| 2229 | if (pid) { | 2262 | if (pid) { |
| 2230 | int end = cgrp->pids_length; | 2263 | int end = cp->length; |
| 2231 | 2264 | ||
| 2232 | while (index < end) { | 2265 | while (index < end) { |
| 2233 | int mid = (index + end) / 2; | 2266 | int mid = (index + end) / 2; |
| 2234 | if (cgrp->tasks_pids[mid] == pid) { | 2267 | if (cp->tasks_pids[mid] == pid) { |
| 2235 | index = mid; | 2268 | index = mid; |
| 2236 | break; | 2269 | break; |
| 2237 | } else if (cgrp->tasks_pids[mid] <= pid) | 2270 | } else if (cp->tasks_pids[mid] <= pid) |
| 2238 | index = mid + 1; | 2271 | index = mid + 1; |
| 2239 | else | 2272 | else |
| 2240 | end = mid; | 2273 | end = mid; |
| 2241 | } | 2274 | } |
| 2242 | } | 2275 | } |
| 2243 | /* If we're off the end of the array, we're done */ | 2276 | /* If we're off the end of the array, we're done */ |
| 2244 | if (index >= cgrp->pids_length) | 2277 | if (index >= cp->length) |
| 2245 | return NULL; | 2278 | return NULL; |
| 2246 | /* Update the abstract position to be the actual pid that we found */ | 2279 | /* Update the abstract position to be the actual pid that we found */ |
| 2247 | iter = cgrp->tasks_pids + index; | 2280 | iter = cp->tasks_pids + index; |
| 2248 | *pos = *iter; | 2281 | *pos = *iter; |
| 2249 | return iter; | 2282 | return iter; |
| 2250 | } | 2283 | } |
| 2251 | 2284 | ||
| 2252 | static void cgroup_tasks_stop(struct seq_file *s, void *v) | 2285 | static void cgroup_tasks_stop(struct seq_file *s, void *v) |
| 2253 | { | 2286 | { |
| 2254 | struct cgroup *cgrp = s->private; | 2287 | struct cgroup_pids *cp = s->private; |
| 2288 | struct cgroup *cgrp = cp->cgrp; | ||
| 2255 | up_read(&cgrp->pids_mutex); | 2289 | up_read(&cgrp->pids_mutex); |
| 2256 | } | 2290 | } |
| 2257 | 2291 | ||
| 2258 | static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos) | 2292 | static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos) |
| 2259 | { | 2293 | { |
| 2260 | struct cgroup *cgrp = s->private; | 2294 | struct cgroup_pids *cp = s->private; |
| 2261 | int *p = v; | 2295 | int *p = v; |
| 2262 | int *end = cgrp->tasks_pids + cgrp->pids_length; | 2296 | int *end = cp->tasks_pids + cp->length; |
| 2263 | 2297 | ||
| 2264 | /* | 2298 | /* |
| 2265 | * Advance to the next pid in the array. If this goes off the | 2299 | * Advance to the next pid in the array. If this goes off the |
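The binary search in cgroup_tasks_start() above is a plain lower-bound search over the sorted pid snapshot: it resumes at the saved pid if it still exists, otherwise at the next larger one. A standalone rendition of the same logic; the names here are illustrative, not from the kernel:

#include <stdio.h>

static int find_pid_index(const int *pids, int length, int pid)
{
	int index = 0, end = length;

	while (index < end) {
		int mid = (index + end) / 2;

		if (pids[mid] == pid)
			return mid;		/* exact hit, resume here */
		else if (pids[mid] < pid)	/* kernel's <= branch; == handled above */
			index = mid + 1;
		else
			end = mid;
	}
	return index;	/* first entry >= pid, or length if off the end */
}

int main(void)
{
	int pids[] = { 3, 7, 12, 40 };

	printf("%d\n", find_pid_index(pids, 4, 8));	/* 2: resume at pid 12 */
	return 0;
}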
| @@ -2286,26 +2320,33 @@ static struct seq_operations cgroup_tasks_seq_operations = { | |||
| 2286 | .show = cgroup_tasks_show, | 2320 | .show = cgroup_tasks_show, |
| 2287 | }; | 2321 | }; |
| 2288 | 2322 | ||
| 2289 | static void release_cgroup_pid_array(struct cgroup *cgrp) | 2323 | static void release_cgroup_pid_array(struct cgroup_pids *cp) |
| 2290 | { | 2324 | { |
| 2325 | struct cgroup *cgrp = cp->cgrp; | ||
| 2326 | |||
| 2291 | down_write(&cgrp->pids_mutex); | 2327 | down_write(&cgrp->pids_mutex); |
| 2292 | BUG_ON(!cgrp->pids_use_count); | 2328 | BUG_ON(!cp->use_count); |
| 2293 | if (!--cgrp->pids_use_count) { | 2329 | if (!--cp->use_count) { |
| 2294 | kfree(cgrp->tasks_pids); | 2330 | list_del(&cp->list); |
| 2295 | cgrp->tasks_pids = NULL; | 2331 | put_pid_ns(cp->ns); |
| 2296 | cgrp->pids_length = 0; | 2332 | kfree(cp->tasks_pids); |
| 2333 | kfree(cp); | ||
| 2297 | } | 2334 | } |
| 2298 | up_write(&cgrp->pids_mutex); | 2335 | up_write(&cgrp->pids_mutex); |
| 2299 | } | 2336 | } |
| 2300 | 2337 | ||
| 2301 | static int cgroup_tasks_release(struct inode *inode, struct file *file) | 2338 | static int cgroup_tasks_release(struct inode *inode, struct file *file) |
| 2302 | { | 2339 | { |
| 2303 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); | 2340 | struct seq_file *seq; |
| 2341 | struct cgroup_pids *cp; | ||
| 2304 | 2342 | ||
| 2305 | if (!(file->f_mode & FMODE_READ)) | 2343 | if (!(file->f_mode & FMODE_READ)) |
| 2306 | return 0; | 2344 | return 0; |
| 2307 | 2345 | ||
| 2308 | release_cgroup_pid_array(cgrp); | 2346 | seq = file->private_data; |
| 2347 | cp = seq->private; | ||
| 2348 | |||
| 2349 | release_cgroup_pid_array(cp); | ||
| 2309 | return seq_release(inode, file); | 2350 | return seq_release(inode, file); |
| 2310 | } | 2351 | } |
| 2311 | 2352 | ||
| @@ -2324,6 +2365,8 @@ static struct file_operations cgroup_tasks_operations = { | |||
| 2324 | static int cgroup_tasks_open(struct inode *unused, struct file *file) | 2365 | static int cgroup_tasks_open(struct inode *unused, struct file *file) |
| 2325 | { | 2366 | { |
| 2326 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); | 2367 | struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); |
| 2368 | struct pid_namespace *ns = current->nsproxy->pid_ns; | ||
| 2369 | struct cgroup_pids *cp; | ||
| 2327 | pid_t *pidarray; | 2370 | pid_t *pidarray; |
| 2328 | int npids; | 2371 | int npids; |
| 2329 | int retval; | 2372 | int retval; |
| @@ -2350,20 +2393,37 @@ static int cgroup_tasks_open(struct inode *unused, struct file *file) | |||
| 2350 | * array if necessary | 2393 | * array if necessary |
| 2351 | */ | 2394 | */ |
| 2352 | down_write(&cgrp->pids_mutex); | 2395 | down_write(&cgrp->pids_mutex); |
| 2353 | kfree(cgrp->tasks_pids); | 2396 | |
| 2354 | cgrp->tasks_pids = pidarray; | 2397 | list_for_each_entry(cp, &cgrp->pids_list, list) { |
| 2355 | cgrp->pids_length = npids; | 2398 | if (ns == cp->ns) |
| 2356 | cgrp->pids_use_count++; | 2399 | goto found; |
| 2400 | } | ||
| 2401 | |||
| 2402 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | ||
| 2403 | if (!cp) { | ||
| 2404 | up_write(&cgrp->pids_mutex); | ||
| 2405 | kfree(pidarray); | ||
| 2406 | return -ENOMEM; | ||
| 2407 | } | ||
| 2408 | cp->cgrp = cgrp; | ||
| 2409 | cp->ns = ns; | ||
| 2410 | get_pid_ns(ns); | ||
| 2411 | list_add(&cp->list, &cgrp->pids_list); | ||
| 2412 | found: | ||
| 2413 | kfree(cp->tasks_pids); | ||
| 2414 | cp->tasks_pids = pidarray; | ||
| 2415 | cp->length = npids; | ||
| 2416 | cp->use_count++; | ||
| 2357 | up_write(&cgrp->pids_mutex); | 2417 | up_write(&cgrp->pids_mutex); |
| 2358 | 2418 | ||
| 2359 | file->f_op = &cgroup_tasks_operations; | 2419 | file->f_op = &cgroup_tasks_operations; |
| 2360 | 2420 | ||
| 2361 | retval = seq_open(file, &cgroup_tasks_seq_operations); | 2421 | retval = seq_open(file, &cgroup_tasks_seq_operations); |
| 2362 | if (retval) { | 2422 | if (retval) { |
| 2363 | release_cgroup_pid_array(cgrp); | 2423 | release_cgroup_pid_array(cp); |
| 2364 | return retval; | 2424 | return retval; |
| 2365 | } | 2425 | } |
| 2366 | ((struct seq_file *)file->private_data)->private = cgrp; | 2426 | ((struct seq_file *)file->private_data)->private = cp; |
| 2367 | return 0; | 2427 | return 0; |
| 2368 | } | 2428 | } |
| 2369 | 2429 | ||
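cgroup_tasks_open() now implements a lookup-or-create cache keyed by pid namespace, with use_count incremented per open and dropped again in cgroup_tasks_release(). A minimal userspace analog of that shape, all names illustrative:

#include <stdlib.h>

struct pid_cache {
	struct pid_cache *next;
	const void *key;	/* stands in for the pid namespace */
	int *pids;		/* latest snapshot; replaced on each open */
	int length;
	int use_count;		/* dropped again in the release path */
};

static struct pid_cache *cache;

static struct pid_cache *cache_open(const void *key, int *pids, int length)
{
	struct pid_cache *cp;

	for (cp = cache; cp; cp = cp->next)
		if (cp->key == key)
			goto found;		/* reuse the namespace's entry */
	cp = calloc(1, sizeof(*cp));
	if (!cp)
		return NULL;
	cp->key = key;
	cp->next = cache;
	cache = cp;
found:
	free(cp->pids);		/* newest snapshot wins, as in the patch */
	cp->pids = pids;
	cp->length = length;
	cp->use_count++;
	return cp;
}

int main(void)
{
	int *snap = calloc(3, sizeof(int));

	return cache_open("ns0", snap, 3) ? 0 : 1;
}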
| @@ -2696,33 +2756,42 @@ again: | |||
| 2696 | mutex_unlock(&cgroup_mutex); | 2756 | mutex_unlock(&cgroup_mutex); |
| 2697 | 2757 | ||
| 2698 | /* | 2758 | /* |
| 2759 | * In general, a subsystem has no css->refcnt after pre_destroy(). But | ||
| 2760 | * in racy cases, a subsystem may have to take css->refcnt after | ||
| 2761 | * pre_destroy(), and that makes rmdir return -EBUSY. This sometimes | ||
| 2762 | * makes rmdir fail too often. To avoid that, we use a waitqueue | ||
| 2763 | * for cgroup's rmdir. CGRP_WAIT_ON_RMDIR is for synchronizing rmdir | ||
| 2764 | * and the subsystem's reference count handling. Please see css_get/put, | ||
| 2765 | * css_tryget() and the cgroup_wakeup_rmdir_waiter() implementation. | ||
| 2766 | */ | ||
| 2767 | set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | ||
| 2768 | |||
| 2769 | /* | ||
| 2699 | * Call pre_destroy handlers of subsys. Notify subsystems | 2770 | * Call pre_destroy handlers of subsys. Notify subsystems |
| 2700 | * that rmdir() request comes. | 2771 | * that rmdir() request comes. |
| 2701 | */ | 2772 | */ |
| 2702 | ret = cgroup_call_pre_destroy(cgrp); | 2773 | ret = cgroup_call_pre_destroy(cgrp); |
| 2703 | if (ret) | 2774 | if (ret) { |
| 2775 | clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | ||
| 2704 | return ret; | 2776 | return ret; |
| 2777 | } | ||
| 2705 | 2778 | ||
| 2706 | mutex_lock(&cgroup_mutex); | 2779 | mutex_lock(&cgroup_mutex); |
| 2707 | parent = cgrp->parent; | 2780 | parent = cgrp->parent; |
| 2708 | if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) { | 2781 | if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) { |
| 2782 | clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | ||
| 2709 | mutex_unlock(&cgroup_mutex); | 2783 | mutex_unlock(&cgroup_mutex); |
| 2710 | return -EBUSY; | 2784 | return -EBUSY; |
| 2711 | } | 2785 | } |
| 2712 | /* | ||
| 2713 | * css_put/get is provided for subsys to grab refcnt to css. In typical | ||
| 2714 | * case, subsystem has no reference after pre_destroy(). But, under | ||
| 2715 | * hierarchy management, some *temporal* refcnt can be hold. | ||
| 2716 | * To avoid returning -EBUSY to a user, waitqueue is used. If subsys | ||
| 2717 | * is really busy, it should return -EBUSY at pre_destroy(). wake_up | ||
| 2718 | * is called when css_put() is called and refcnt goes down to 0. | ||
| 2719 | */ | ||
| 2720 | set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | ||
| 2721 | prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE); | 2786 | prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE); |
| 2722 | |||
| 2723 | if (!cgroup_clear_css_refs(cgrp)) { | 2787 | if (!cgroup_clear_css_refs(cgrp)) { |
| 2724 | mutex_unlock(&cgroup_mutex); | 2788 | mutex_unlock(&cgroup_mutex); |
| 2725 | schedule(); | 2789 | /* |
| 2790 | * Because someone may call cgroup_wakeup_rmdir_waiter() before | ||
| 2791 | * prepare_to_wait(), we need to check this flag. | ||
| 2792 | */ | ||
| 2793 | if (test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)) | ||
| 2794 | schedule(); | ||
| 2726 | finish_wait(&cgroup_rmdir_waitq, &wait); | 2795 | finish_wait(&cgroup_rmdir_waitq, &wait); |
| 2727 | clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | 2796 | clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); |
| 2728 | if (signal_pending(current)) | 2797 | if (signal_pending(current)) |
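The reordering above closes a lost-wakeup window: the flag is now set before pre_destroy() can trigger a wakeup, and re-checked after prepare_to_wait() so an "early" wakeup that already cleared the flag does not leave rmdir() sleeping forever. A runnable userspace analog of the same discipline, using a mutex/condvar in place of the waitqueue; all names are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool wait_on_rmdir;

/* mirrors cgroup_wakeup_rmdir_waiter(): clear the flag, then wake */
static void wakeup_rmdir_waiter(void)
{
	pthread_mutex_lock(&lock);
	wait_on_rmdir = false;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_mutex_lock(&lock);
	wait_on_rmdir = true;	/* arm the flag before dropping references */
	pthread_mutex_unlock(&lock);

	wakeup_rmdir_waiter();	/* wakeup arrives "early", as from pre_destroy() */

	pthread_mutex_lock(&lock);
	if (wait_on_rmdir)	/* re-check, exactly like the test_bit() above */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	puts("no lost wakeup");
	return 0;
}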
| @@ -3294,7 +3363,7 @@ void __css_put(struct cgroup_subsys_state *css) | |||
| 3294 | set_bit(CGRP_RELEASABLE, &cgrp->flags); | 3363 | set_bit(CGRP_RELEASABLE, &cgrp->flags); |
| 3295 | check_for_release(cgrp); | 3364 | check_for_release(cgrp); |
| 3296 | } | 3365 | } |
| 3297 | cgroup_wakeup_rmdir_waiters(cgrp); | 3366 | cgroup_wakeup_rmdir_waiter(cgrp); |
| 3298 | } | 3367 | } |
| 3299 | rcu_read_unlock(); | 3368 | rcu_read_unlock(); |
| 3300 | } | 3369 | } |
diff --git a/kernel/fork.c b/kernel/fork.c index 9b42695f0d14..021e1138556e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -426,6 +426,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) | |||
| 426 | init_rwsem(&mm->mmap_sem); | 426 | init_rwsem(&mm->mmap_sem); |
| 427 | INIT_LIST_HEAD(&mm->mmlist); | 427 | INIT_LIST_HEAD(&mm->mmlist); |
| 428 | mm->flags = (current->mm) ? current->mm->flags : default_dump_filter; | 428 | mm->flags = (current->mm) ? current->mm->flags : default_dump_filter; |
| 429 | mm->oom_adj = (current->mm) ? current->mm->oom_adj : 0; | ||
| 429 | mm->core_state = NULL; | 430 | mm->core_state = NULL; |
| 430 | mm->nr_ptes = 0; | 431 | mm->nr_ptes = 0; |
| 431 | set_mm_counter(mm, file_rss, 0); | 432 | set_mm_counter(mm, file_rss, 0); |
| @@ -567,18 +568,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm) | |||
| 567 | * the value intact in a core dump, and to save the unnecessary | 568 | * the value intact in a core dump, and to save the unnecessary |
| 568 | * trouble otherwise. Userland only wants this done for a sys_exit. | 569 | * trouble otherwise. Userland only wants this done for a sys_exit. |
| 569 | */ | 570 | */ |
| 570 | if (tsk->clear_child_tid | 571 | if (tsk->clear_child_tid) { |
| 571 | && !(tsk->flags & PF_SIGNALED) | 572 | if (!(tsk->flags & PF_SIGNALED) && |
| 572 | && atomic_read(&mm->mm_users) > 1) { | 573 | atomic_read(&mm->mm_users) > 1) { |
| 573 | u32 __user * tidptr = tsk->clear_child_tid; | 574 | /* |
| 575 | * We don't check the error code - if userspace has | ||
| 576 | * not set up a proper pointer then tough luck. | ||
| 577 | */ | ||
| 578 | put_user(0, tsk->clear_child_tid); | ||
| 579 | sys_futex(tsk->clear_child_tid, FUTEX_WAKE, | ||
| 580 | 1, NULL, NULL, 0); | ||
| 581 | } | ||
| 574 | tsk->clear_child_tid = NULL; | 582 | tsk->clear_child_tid = NULL; |
| 575 | |||
| 576 | /* | ||
| 577 | * We don't check the error code - if userspace has | ||
| 578 | * not set up a proper pointer then tough luck. | ||
| 579 | */ | ||
| 580 | put_user(0, tidptr); | ||
| 581 | sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0); | ||
| 582 | } | 583 | } |
| 583 | } | 584 | } |
| 584 | 585 | ||
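mm_release()'s put_user(0)/sys_futex(FUTEX_WAKE) pair is the kernel half of CLONE_CHILD_CLEARTID; the user half, which is essentially what glibc's pthread_join() boils down to, waits on that word. A minimal, Linux-specific runnable sketch; the CLONE_VM flag keeps the tid word shared so the parent sees the kernel's store:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/futex.h>
#include <unistd.h>
#include <stdio.h>

static pid_t child_tid = 1;		/* nonzero until the child exits */
static char child_stack[64 * 1024];

static int child_fn(void *arg)
{
	return 0;	/* on exit, mm_release() zeroes child_tid and wakes it */
}

int main(void)
{
	if (clone(child_fn, child_stack + sizeof(child_stack),
		  CLONE_VM | CLONE_CHILD_CLEARTID | SIGCHLD,
		  NULL, NULL, NULL, &child_tid) == -1) {
		perror("clone");
		return 1;
	}
	/* what pthread_join() essentially does: wait for the tid word */
	while (__atomic_load_n(&child_tid, __ATOMIC_ACQUIRE) != 0)
		syscall(SYS_futex, &child_tid, FUTEX_WAIT, 1, NULL, NULL, 0);
	puts("child gone, futex woken");
	return 0;
}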
| @@ -1268,6 +1269,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1268 | write_unlock_irq(&tasklist_lock); | 1269 | write_unlock_irq(&tasklist_lock); |
| 1269 | proc_fork_connector(p); | 1270 | proc_fork_connector(p); |
| 1270 | cgroup_post_fork(p); | 1271 | cgroup_post_fork(p); |
| 1272 | perf_counter_fork(p); | ||
| 1271 | return p; | 1273 | return p; |
| 1272 | 1274 | ||
| 1273 | bad_fork_free_pid: | 1275 | bad_fork_free_pid: |
| @@ -1409,9 +1411,6 @@ long do_fork(unsigned long clone_flags, | |||
| 1409 | init_completion(&vfork); | 1411 | init_completion(&vfork); |
| 1410 | } | 1412 | } |
| 1411 | 1413 | ||
| 1412 | if (!(clone_flags & CLONE_THREAD)) | ||
| 1413 | perf_counter_fork(p); | ||
| 1414 | |||
| 1415 | audit_finish_fork(p); | 1414 | audit_finish_fork(p); |
| 1416 | tracehook_report_clone(regs, clone_flags, nr, p); | 1415 | tracehook_report_clone(regs, clone_flags, nr, p); |
| 1417 | 1416 | ||
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index 2f69bee57bf2..3fd30197da2e 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c | |||
| @@ -107,8 +107,8 @@ out_unlock: | |||
| 107 | 107 | ||
| 108 | struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | 108 | struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) |
| 109 | { | 109 | { |
| 110 | /* those all static, do move them */ | 110 | /* these are static or the target node is -1; do not move them */ |
| 111 | if (desc->irq < NR_IRQS_LEGACY) | 111 | if (desc->irq < NR_IRQS_LEGACY || node == -1) |
| 112 | return desc; | 112 | return desc; |
| 113 | 113 | ||
| 114 | if (desc->node != node) | 114 | if (desc->node != node) |
diff --git a/kernel/kexec.c b/kernel/kexec.c index ae1c35201cc8..f336e2107f98 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
| @@ -1228,7 +1228,7 @@ static int __init parse_crashkernel_mem(char *cmdline, | |||
| 1228 | } while (*cur++ == ','); | 1228 | } while (*cur++ == ','); |
| 1229 | 1229 | ||
| 1230 | if (*crash_size > 0) { | 1230 | if (*crash_size > 0) { |
| 1231 | while (*cur != ' ' && *cur != '@') | 1231 | while (*cur && *cur != ' ' && *cur != '@') |
| 1232 | cur++; | 1232 | cur++; |
| 1233 | if (*cur == '@') { | 1233 | if (*cur == '@') { |
| 1234 | cur++; | 1234 | cur++; |
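The one-character fix above matters because the scan previously assumed a ' ' or '@' always follows the size list; a crashkernel= string ending right after the sizes made the loop run past the terminating NUL. A standalone illustration with a hypothetical command-line value:

#include <stdio.h>

int main(void)
{
	const char *s = "512M-2G:64M,2G-:128M";	/* no '@offset' suffix */
	const char *cur = s;

	/* before the fix, this loop had no *cur test and walked past the NUL */
	while (*cur && *cur != ' ' && *cur != '@')
		cur++;
	printf("stopped safely at offset %td\n", cur - s);
	return 0;
}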
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 16b5739c516a..0540948e29ab 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
| @@ -694,7 +694,7 @@ int __kprobes register_kprobe(struct kprobe *p) | |||
| 694 | p->addr = addr; | 694 | p->addr = addr; |
| 695 | 695 | ||
| 696 | preempt_disable(); | 696 | preempt_disable(); |
| 697 | if (!__kernel_text_address((unsigned long) p->addr) || | 697 | if (!kernel_text_address((unsigned long) p->addr) || |
| 698 | in_kprobes_functions((unsigned long) p->addr)) { | 698 | in_kprobes_functions((unsigned long) p->addr)) { |
| 699 | preempt_enable(); | 699 | preempt_enable(); |
| 700 | return -EINVAL; | 700 | return -EINVAL; |
diff --git a/kernel/kthread.c b/kernel/kthread.c index 9b1a7de26979..eb8751aa0418 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
| @@ -180,10 +180,12 @@ EXPORT_SYMBOL(kthread_bind); | |||
| 180 | * @k: thread created by kthread_create(). | 180 | * @k: thread created by kthread_create(). |
| 181 | * | 181 | * |
| 182 | * Sets kthread_should_stop() for @k to return true, wakes it, and | 182 | * Sets kthread_should_stop() for @k to return true, wakes it, and |
| 183 | * waits for it to exit. Your threadfn() must not call do_exit() | 183 | * waits for it to exit. This can also be called after kthread_create() |
| 184 | * itself if you use this function! This can also be called after | 184 | * instead of calling wake_up_process(): the thread will exit without |
| 185 | * kthread_create() instead of calling wake_up_process(): the thread | 185 | * calling threadfn(). |
| 186 | * will exit without calling threadfn(). | 186 | * |
| 187 | * If threadfn() may call do_exit() itself, the caller must ensure | ||
| 188 | * task_struct can't go away. | ||
| 187 | * | 189 | * |
| 188 | * Returns the result of threadfn(), or %-EINTR if wake_up_process() | 190 | * Returns the result of threadfn(), or %-EINTR if wake_up_process() |
| 189 | * was never called. | 191 | * was never called. |
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index d7135aa2d2c4..e94caa666dba 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c | |||
| @@ -758,7 +758,8 @@ static int __init lockdep_proc_init(void) | |||
| 758 | &proc_lockdep_stats_operations); | 758 | &proc_lockdep_stats_operations); |
| 759 | 759 | ||
| 760 | #ifdef CONFIG_LOCK_STAT | 760 | #ifdef CONFIG_LOCK_STAT |
| 761 | proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations); | 761 | proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL, |
| 762 | &proc_lock_stat_operations); | ||
| 762 | #endif | 763 | #endif |
| 763 | 764 | ||
| 764 | return 0; | 765 | return 0; |
diff --git a/kernel/module.c b/kernel/module.c index 0a049837008e..fd1411403558 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -1068,7 +1068,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs, | |||
| 1068 | { | 1068 | { |
| 1069 | const unsigned long *crc; | 1069 | const unsigned long *crc; |
| 1070 | 1070 | ||
| 1071 | if (!find_symbol("module_layout", NULL, &crc, true, false)) | 1071 | if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL, |
| 1072 | &crc, true, false)) | ||
| 1072 | BUG(); | 1073 | BUG(); |
| 1073 | return check_version(sechdrs, versindex, "module_layout", mod, crc); | 1074 | return check_version(sechdrs, versindex, "module_layout", mod, crc); |
| 1074 | } | 1075 | } |
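The lookup has to carry MODULE_SYMBOL_PREFIX because on some architectures the toolchain prepends a character (typically an underscore) to every C symbol, so the name recorded in the symbol table is not plain "module_layout". The mechanism is simple compile-time string-literal pasting; a runnable sketch with an assumed prefix value:

#include <stdio.h>

#define MODULE_SYMBOL_PREFIX "_"	/* "" on most arches; "_" assumed here */

int main(void)
{
	/* adjacent literals paste at compile time, as in find_symbol() above */
	printf("%s\n", MODULE_SYMBOL_PREFIX "module_layout");	/* _module_layout */
	return 0;
}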
diff --git a/kernel/panic.c b/kernel/panic.c index 984b3ecbd72c..512ab73b0ca3 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
| @@ -301,6 +301,7 @@ int oops_may_print(void) | |||
| 301 | */ | 301 | */ |
| 302 | void oops_enter(void) | 302 | void oops_enter(void) |
| 303 | { | 303 | { |
| 304 | tracing_off(); | ||
| 304 | /* can't trust the integrity of the kernel anymore: */ | 305 | /* can't trust the integrity of the kernel anymore: */ |
| 305 | debug_locks_off(); | 306 | debug_locks_off(); |
| 306 | do_oops_enter_exit(); | 307 | do_oops_enter_exit(); |
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 950931041954..b0b20a07f394 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
| @@ -42,6 +42,7 @@ static int perf_overcommit __read_mostly = 1; | |||
| 42 | static atomic_t nr_counters __read_mostly; | 42 | static atomic_t nr_counters __read_mostly; |
| 43 | static atomic_t nr_mmap_counters __read_mostly; | 43 | static atomic_t nr_mmap_counters __read_mostly; |
| 44 | static atomic_t nr_comm_counters __read_mostly; | 44 | static atomic_t nr_comm_counters __read_mostly; |
| 45 | static atomic_t nr_task_counters __read_mostly; | ||
| 45 | 46 | ||
| 46 | /* | 47 | /* |
| 47 | * perf counter paranoia level: | 48 | * perf counter paranoia level: |
| @@ -1103,7 +1104,7 @@ static void perf_counter_sync_stat(struct perf_counter_context *ctx, | |||
| 1103 | __perf_counter_sync_stat(counter, next_counter); | 1104 | __perf_counter_sync_stat(counter, next_counter); |
| 1104 | 1105 | ||
| 1105 | counter = list_next_entry(counter, event_entry); | 1106 | counter = list_next_entry(counter, event_entry); |
| 1106 | next_counter = list_next_entry(counter, event_entry); | 1107 | next_counter = list_next_entry(next_counter, event_entry); |
| 1107 | } | 1108 | } |
| 1108 | } | 1109 | } |
| 1109 | 1110 | ||
| @@ -1654,6 +1655,8 @@ static void free_counter(struct perf_counter *counter) | |||
| 1654 | atomic_dec(&nr_mmap_counters); | 1655 | atomic_dec(&nr_mmap_counters); |
| 1655 | if (counter->attr.comm) | 1656 | if (counter->attr.comm) |
| 1656 | atomic_dec(&nr_comm_counters); | 1657 | atomic_dec(&nr_comm_counters); |
| 1658 | if (counter->attr.task) | ||
| 1659 | atomic_dec(&nr_task_counters); | ||
| 1657 | } | 1660 | } |
| 1658 | 1661 | ||
| 1659 | if (counter->destroy) | 1662 | if (counter->destroy) |
| @@ -1688,6 +1691,18 @@ static int perf_release(struct inode *inode, struct file *file) | |||
| 1688 | return 0; | 1691 | return 0; |
| 1689 | } | 1692 | } |
| 1690 | 1693 | ||
| 1694 | static u64 perf_counter_read_tree(struct perf_counter *counter) | ||
| 1695 | { | ||
| 1696 | struct perf_counter *child; | ||
| 1697 | u64 total = 0; | ||
| 1698 | |||
| 1699 | total += perf_counter_read(counter); | ||
| 1700 | list_for_each_entry(child, &counter->child_list, child_list) | ||
| 1701 | total += perf_counter_read(child); | ||
| 1702 | |||
| 1703 | return total; | ||
| 1704 | } | ||
| 1705 | |||
| 1691 | /* | 1706 | /* |
| 1692 | * Read the performance counter - simple non blocking version for now | 1707 | * Read the performance counter - simple non blocking version for now |
| 1693 | */ | 1708 | */ |
| @@ -1707,7 +1722,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) | |||
| 1707 | 1722 | ||
| 1708 | WARN_ON_ONCE(counter->ctx->parent_ctx); | 1723 | WARN_ON_ONCE(counter->ctx->parent_ctx); |
| 1709 | mutex_lock(&counter->child_mutex); | 1724 | mutex_lock(&counter->child_mutex); |
| 1710 | values[0] = perf_counter_read(counter); | 1725 | values[0] = perf_counter_read_tree(counter); |
| 1711 | n = 1; | 1726 | n = 1; |
| 1712 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | 1727 | if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) |
| 1713 | values[n++] = counter->total_time_enabled + | 1728 | values[n++] = counter->total_time_enabled + |
| @@ -2699,6 +2714,18 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
| 2699 | header.size += sizeof(u64); | 2714 | header.size += sizeof(u64); |
| 2700 | } | 2715 | } |
| 2701 | 2716 | ||
| 2717 | if (sample_type & PERF_SAMPLE_RAW) { | ||
| 2718 | int size = sizeof(u32); | ||
| 2719 | |||
| 2720 | if (data->raw) | ||
| 2721 | size += data->raw->size; | ||
| 2722 | else | ||
| 2723 | size += sizeof(u32); | ||
| 2724 | |||
| 2725 | WARN_ON_ONCE(size & (sizeof(u64)-1)); | ||
| 2726 | header.size += size; | ||
| 2727 | } | ||
| 2728 | |||
| 2702 | ret = perf_output_begin(&handle, counter, header.size, nmi, 1); | 2729 | ret = perf_output_begin(&handle, counter, header.size, nmi, 1); |
| 2703 | if (ret) | 2730 | if (ret) |
| 2704 | return; | 2731 | return; |
| @@ -2762,6 +2789,22 @@ static void perf_counter_output(struct perf_counter *counter, int nmi, | |||
| 2762 | } | 2789 | } |
| 2763 | } | 2790 | } |
| 2764 | 2791 | ||
| 2792 | if (sample_type & PERF_SAMPLE_RAW) { | ||
| 2793 | if (data->raw) { | ||
| 2794 | perf_output_put(&handle, data->raw->size); | ||
| 2795 | perf_output_copy(&handle, data->raw->data, data->raw->size); | ||
| 2796 | } else { | ||
| 2797 | struct { | ||
| 2798 | u32 size; | ||
| 2799 | u32 data; | ||
| 2800 | } raw = { | ||
| 2801 | .size = sizeof(u32), | ||
| 2802 | .data = 0, | ||
| 2803 | }; | ||
| 2804 | perf_output_put(&handle, raw); | ||
| 2805 | } | ||
| 2806 | } | ||
| 2807 | |||
| 2765 | perf_output_end(&handle); | 2808 | perf_output_end(&handle); |
| 2766 | } | 2809 | } |
| 2767 | 2810 | ||
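Together the two hunks define the on-the-wire layout of PERF_SAMPLE_RAW: a u32 byte count, then the payload (or a single zeroed u32 when no raw data is attached), with the producer expected to pad so the whole field stays u64-aligned, which the WARN_ON_ONCE checks. A hypothetical reader-side sketch, assuming a little-endian buffer:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static const uint8_t *parse_raw(const uint8_t *p)
{
	uint32_t size;

	memcpy(&size, p, sizeof(size));	/* the u32 size written first */
	p += sizeof(size);
	printf("raw payload: %u bytes\n", size);
	return p + size;	/* producer already padded size for u64 alignment */
}

int main(void)
{
	/* the "no raw data" case above: size = 4, payload = one zeroed u32 */
	const uint8_t buf[8] = { 4, 0, 0, 0, 0, 0, 0, 0 };

	parse_raw(buf);
	return 0;
}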
| @@ -2819,48 +2862,56 @@ perf_counter_read_event(struct perf_counter *counter, | |||
| 2819 | } | 2862 | } |
| 2820 | 2863 | ||
| 2821 | /* | 2864 | /* |
| 2822 | * fork tracking | 2865 | * task tracking -- fork/exit |
| 2866 | * | ||
| 2867 | * enabled by: attr.comm | attr.mmap | attr.task | ||
| 2823 | */ | 2868 | */ |
| 2824 | 2869 | ||
| 2825 | struct perf_fork_event { | 2870 | struct perf_task_event { |
| 2826 | struct task_struct *task; | 2871 | struct task_struct *task; |
| 2872 | struct perf_counter_context *task_ctx; | ||
| 2827 | 2873 | ||
| 2828 | struct { | 2874 | struct { |
| 2829 | struct perf_event_header header; | 2875 | struct perf_event_header header; |
| 2830 | 2876 | ||
| 2831 | u32 pid; | 2877 | u32 pid; |
| 2832 | u32 ppid; | 2878 | u32 ppid; |
| 2879 | u32 tid; | ||
| 2880 | u32 ptid; | ||
| 2833 | } event; | 2881 | } event; |
| 2834 | }; | 2882 | }; |
| 2835 | 2883 | ||
| 2836 | static void perf_counter_fork_output(struct perf_counter *counter, | 2884 | static void perf_counter_task_output(struct perf_counter *counter, |
| 2837 | struct perf_fork_event *fork_event) | 2885 | struct perf_task_event *task_event) |
| 2838 | { | 2886 | { |
| 2839 | struct perf_output_handle handle; | 2887 | struct perf_output_handle handle; |
| 2840 | int size = fork_event->event.header.size; | 2888 | int size = task_event->event.header.size; |
| 2841 | struct task_struct *task = fork_event->task; | 2889 | struct task_struct *task = task_event->task; |
| 2842 | int ret = perf_output_begin(&handle, counter, size, 0, 0); | 2890 | int ret = perf_output_begin(&handle, counter, size, 0, 0); |
| 2843 | 2891 | ||
| 2844 | if (ret) | 2892 | if (ret) |
| 2845 | return; | 2893 | return; |
| 2846 | 2894 | ||
| 2847 | fork_event->event.pid = perf_counter_pid(counter, task); | 2895 | task_event->event.pid = perf_counter_pid(counter, task); |
| 2848 | fork_event->event.ppid = perf_counter_pid(counter, task->real_parent); | 2896 | task_event->event.ppid = perf_counter_pid(counter, task->real_parent); |
| 2849 | 2897 | ||
| 2850 | perf_output_put(&handle, fork_event->event); | 2898 | task_event->event.tid = perf_counter_tid(counter, task); |
| 2899 | task_event->event.ptid = perf_counter_tid(counter, task->real_parent); | ||
| 2900 | |||
| 2901 | perf_output_put(&handle, task_event->event); | ||
| 2851 | perf_output_end(&handle); | 2902 | perf_output_end(&handle); |
| 2852 | } | 2903 | } |
| 2853 | 2904 | ||
| 2854 | static int perf_counter_fork_match(struct perf_counter *counter) | 2905 | static int perf_counter_task_match(struct perf_counter *counter) |
| 2855 | { | 2906 | { |
| 2856 | if (counter->attr.comm || counter->attr.mmap) | 2907 | if (counter->attr.comm || counter->attr.mmap || counter->attr.task) |
| 2857 | return 1; | 2908 | return 1; |
| 2858 | 2909 | ||
| 2859 | return 0; | 2910 | return 0; |
| 2860 | } | 2911 | } |
| 2861 | 2912 | ||
| 2862 | static void perf_counter_fork_ctx(struct perf_counter_context *ctx, | 2913 | static void perf_counter_task_ctx(struct perf_counter_context *ctx, |
| 2863 | struct perf_fork_event *fork_event) | 2914 | struct perf_task_event *task_event) |
| 2864 | { | 2915 | { |
| 2865 | struct perf_counter *counter; | 2916 | struct perf_counter *counter; |
| 2866 | 2917 | ||
| @@ -2869,54 +2920,62 @@ static void perf_counter_fork_ctx(struct perf_counter_context *ctx, | |||
| 2869 | 2920 | ||
| 2870 | rcu_read_lock(); | 2921 | rcu_read_lock(); |
| 2871 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | 2922 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { |
| 2872 | if (perf_counter_fork_match(counter)) | 2923 | if (perf_counter_task_match(counter)) |
| 2873 | perf_counter_fork_output(counter, fork_event); | 2924 | perf_counter_task_output(counter, task_event); |
| 2874 | } | 2925 | } |
| 2875 | rcu_read_unlock(); | 2926 | rcu_read_unlock(); |
| 2876 | } | 2927 | } |
| 2877 | 2928 | ||
| 2878 | static void perf_counter_fork_event(struct perf_fork_event *fork_event) | 2929 | static void perf_counter_task_event(struct perf_task_event *task_event) |
| 2879 | { | 2930 | { |
| 2880 | struct perf_cpu_context *cpuctx; | 2931 | struct perf_cpu_context *cpuctx; |
| 2881 | struct perf_counter_context *ctx; | 2932 | struct perf_counter_context *ctx = task_event->task_ctx; |
| 2882 | 2933 | ||
| 2883 | cpuctx = &get_cpu_var(perf_cpu_context); | 2934 | cpuctx = &get_cpu_var(perf_cpu_context); |
| 2884 | perf_counter_fork_ctx(&cpuctx->ctx, fork_event); | 2935 | perf_counter_task_ctx(&cpuctx->ctx, task_event); |
| 2885 | put_cpu_var(perf_cpu_context); | 2936 | put_cpu_var(perf_cpu_context); |
| 2886 | 2937 | ||
| 2887 | rcu_read_lock(); | 2938 | rcu_read_lock(); |
| 2888 | /* | 2939 | if (!ctx) |
| 2889 | * doesn't really matter which of the child contexts the | 2940 | ctx = rcu_dereference(task_event->task->perf_counter_ctxp); |
| 2890 | * events ends up in. | ||
| 2891 | */ | ||
| 2892 | ctx = rcu_dereference(current->perf_counter_ctxp); | ||
| 2893 | if (ctx) | 2941 | if (ctx) |
| 2894 | perf_counter_fork_ctx(ctx, fork_event); | 2942 | perf_counter_task_ctx(ctx, task_event); |
| 2895 | rcu_read_unlock(); | 2943 | rcu_read_unlock(); |
| 2896 | } | 2944 | } |
| 2897 | 2945 | ||
| 2898 | void perf_counter_fork(struct task_struct *task) | 2946 | static void perf_counter_task(struct task_struct *task, |
| 2947 | struct perf_counter_context *task_ctx, | ||
| 2948 | int new) | ||
| 2899 | { | 2949 | { |
| 2900 | struct perf_fork_event fork_event; | 2950 | struct perf_task_event task_event; |
| 2901 | 2951 | ||
| 2902 | if (!atomic_read(&nr_comm_counters) && | 2952 | if (!atomic_read(&nr_comm_counters) && |
| 2903 | !atomic_read(&nr_mmap_counters)) | 2953 | !atomic_read(&nr_mmap_counters) && |
| 2954 | !atomic_read(&nr_task_counters)) | ||
| 2904 | return; | 2955 | return; |
| 2905 | 2956 | ||
| 2906 | fork_event = (struct perf_fork_event){ | 2957 | task_event = (struct perf_task_event){ |
| 2907 | .task = task, | 2958 | .task = task, |
| 2908 | .event = { | 2959 | .task_ctx = task_ctx, |
| 2960 | .event = { | ||
| 2909 | .header = { | 2961 | .header = { |
| 2910 | .type = PERF_EVENT_FORK, | 2962 | .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT, |
| 2911 | .misc = 0, | 2963 | .misc = 0, |
| 2912 | .size = sizeof(fork_event.event), | 2964 | .size = sizeof(task_event.event), |
| 2913 | }, | 2965 | }, |
| 2914 | /* .pid */ | 2966 | /* .pid */ |
| 2915 | /* .ppid */ | 2967 | /* .ppid */ |
| 2968 | /* .tid */ | ||
| 2969 | /* .ptid */ | ||
| 2916 | }, | 2970 | }, |
| 2917 | }; | 2971 | }; |
| 2918 | 2972 | ||
| 2919 | perf_counter_fork_event(&fork_event); | 2973 | perf_counter_task_event(&task_event); |
| 2974 | } | ||
| 2975 | |||
| 2976 | void perf_counter_fork(struct task_struct *task) | ||
| 2977 | { | ||
| 2978 | perf_counter_task(task, NULL, 1); | ||
| 2920 | } | 2979 | } |
| 2921 | 2980 | ||
| 2922 | /* | 2981 | /* |
| @@ -3305,87 +3364,81 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi, | |||
| 3305 | * Generic software counter infrastructure | 3364 | * Generic software counter infrastructure |
| 3306 | */ | 3365 | */ |
| 3307 | 3366 | ||
| 3308 | static void perf_swcounter_update(struct perf_counter *counter) | 3367 | /* |
| 3368 | * We directly increment counter->count and keep a second value in | ||
| 3369 | * counter->hw.period_left to count intervals. This period counter | ||
| 3370 | * is kept in the range [-sample_period, 0] so that we can use the | ||
| 3371 | * sign as the trigger. | ||
| 3372 | */ | ||
| 3373 | |||
| 3374 | static u64 perf_swcounter_set_period(struct perf_counter *counter) | ||
| 3309 | { | 3375 | { |
| 3310 | struct hw_perf_counter *hwc = &counter->hw; | 3376 | struct hw_perf_counter *hwc = &counter->hw; |
| 3311 | u64 prev, now; | 3377 | u64 period = hwc->last_period; |
| 3312 | s64 delta; | 3378 | u64 nr, offset; |
| 3379 | s64 old, val; | ||
| 3380 | |||
| 3381 | hwc->last_period = hwc->sample_period; | ||
| 3313 | 3382 | ||
| 3314 | again: | 3383 | again: |
| 3315 | prev = atomic64_read(&hwc->prev_count); | 3384 | old = val = atomic64_read(&hwc->period_left); |
| 3316 | now = atomic64_read(&hwc->count); | 3385 | if (val < 0) |
| 3317 | if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev) | 3386 | return 0; |
| 3318 | goto again; | ||
| 3319 | 3387 | ||
| 3320 | delta = now - prev; | 3388 | nr = div64_u64(period + val, period); |
| 3389 | offset = nr * period; | ||
| 3390 | val -= offset; | ||
| 3391 | if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) | ||
| 3392 | goto again; | ||
| 3321 | 3393 | ||
| 3322 | atomic64_add(delta, &counter->count); | 3394 | return nr; |
| 3323 | atomic64_sub(delta, &hwc->period_left); | ||
| 3324 | } | 3395 | } |
| 3325 | 3396 | ||
| 3326 | static void perf_swcounter_set_period(struct perf_counter *counter) | 3397 | static void perf_swcounter_overflow(struct perf_counter *counter, |
| 3398 | int nmi, struct perf_sample_data *data) | ||
| 3327 | { | 3399 | { |
| 3328 | struct hw_perf_counter *hwc = &counter->hw; | 3400 | struct hw_perf_counter *hwc = &counter->hw; |
| 3329 | s64 left = atomic64_read(&hwc->period_left); | 3401 | u64 overflow; |
| 3330 | s64 period = hwc->sample_period; | ||
| 3331 | 3402 | ||
| 3332 | if (unlikely(left <= -period)) { | 3403 | data->period = counter->hw.last_period; |
| 3333 | left = period; | 3404 | overflow = perf_swcounter_set_period(counter); |
| 3334 | atomic64_set(&hwc->period_left, left); | ||
| 3335 | hwc->last_period = period; | ||
| 3336 | } | ||
| 3337 | 3405 | ||
| 3338 | if (unlikely(left <= 0)) { | 3406 | if (hwc->interrupts == MAX_INTERRUPTS) |
| 3339 | left += period; | 3407 | return; |
| 3340 | atomic64_add(period, &hwc->period_left); | ||
| 3341 | hwc->last_period = period; | ||
| 3342 | } | ||
| 3343 | 3408 | ||
| 3344 | atomic64_set(&hwc->prev_count, -left); | 3409 | for (; overflow; overflow--) { |
| 3345 | atomic64_set(&hwc->count, -left); | 3410 | if (perf_counter_overflow(counter, nmi, data)) { |
| 3411 | /* | ||
| 3412 | * We inhibit the overflow from happening when | ||
| 3413 | * hwc->interrupts == MAX_INTERRUPTS. | ||
| 3414 | */ | ||
| 3415 | break; | ||
| 3416 | } | ||
| 3417 | } | ||
| 3346 | } | 3418 | } |
| 3347 | 3419 | ||
| 3348 | static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) | 3420 | static void perf_swcounter_unthrottle(struct perf_counter *counter) |
| 3349 | { | 3421 | { |
| 3350 | enum hrtimer_restart ret = HRTIMER_RESTART; | ||
| 3351 | struct perf_sample_data data; | ||
| 3352 | struct perf_counter *counter; | ||
| 3353 | u64 period; | ||
| 3354 | |||
| 3355 | counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); | ||
| 3356 | counter->pmu->read(counter); | ||
| 3357 | |||
| 3358 | data.addr = 0; | ||
| 3359 | data.regs = get_irq_regs(); | ||
| 3360 | /* | 3422 | /* |
| 3361 | * In case we exclude kernel IPs or are somehow not in interrupt | 3423 | * Nothing to do, we already reset hwc->interrupts. |
| 3362 | * context, provide the next best thing, the user IP. | ||
| 3363 | */ | 3424 | */ |
| 3364 | if ((counter->attr.exclude_kernel || !data.regs) && | 3425 | } |
| 3365 | !counter->attr.exclude_user) | ||
| 3366 | data.regs = task_pt_regs(current); | ||
| 3367 | 3426 | ||
| 3368 | if (data.regs) { | 3427 | static void perf_swcounter_add(struct perf_counter *counter, u64 nr, |
| 3369 | if (perf_counter_overflow(counter, 0, &data)) | 3428 | int nmi, struct perf_sample_data *data) |
| 3370 | ret = HRTIMER_NORESTART; | 3429 | { |
| 3371 | } | 3430 | struct hw_perf_counter *hwc = &counter->hw; |
| 3372 | 3431 | ||
| 3373 | period = max_t(u64, 10000, counter->hw.sample_period); | 3432 | atomic64_add(nr, &counter->count); |
| 3374 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); | ||
| 3375 | 3433 | ||
| 3376 | return ret; | 3434 | if (!hwc->sample_period) |
| 3377 | } | 3435 | return; |
| 3378 | 3436 | ||
| 3379 | static void perf_swcounter_overflow(struct perf_counter *counter, | 3437 | if (!data->regs) |
| 3380 | int nmi, struct perf_sample_data *data) | 3438 | return; |
| 3381 | { | ||
| 3382 | data->period = counter->hw.last_period; | ||
| 3383 | 3439 | ||
| 3384 | perf_swcounter_update(counter); | 3440 | if (!atomic64_add_negative(nr, &hwc->period_left)) |
| 3385 | perf_swcounter_set_period(counter); | 3441 | perf_swcounter_overflow(counter, nmi, data); |
| 3386 | if (perf_counter_overflow(counter, nmi, data)) | ||
| 3387 | /* soft-disable the counter */ | ||
| 3388 | ; | ||
| 3389 | } | 3442 | } |
| 3390 | 3443 | ||
| 3391 | static int perf_swcounter_is_counting(struct perf_counter *counter) | 3444 | static int perf_swcounter_is_counting(struct perf_counter *counter) |
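The comment block above is worth working through once with numbers: period_left starts at -sample_period, each event pushes it toward zero, and a non-negative value means at least one period elapsed; perf_swcounter_set_period() then computes how many and re-arms the counter. A standalone single-threaded rendition of the arithmetic (the kernel performs the same steps with an atomic64 cmpxchg loop):

#include <stdio.h>
#include <stdint.h>

static int64_t period_left;

static uint64_t set_period(uint64_t period)
{
	int64_t val = period_left;
	uint64_t nr;

	if (val < 0)
		return 0;	/* still inside the current period */

	nr = (period + (uint64_t)val) / period;		/* whole periods elapsed */
	period_left = val - (int64_t)(nr * period);	/* back into [-period, 0) */
	return nr;
}

int main(void)
{
	unsigned long long nr;

	period_left = -100 + 250;	/* 250 events against a period of 100 */
	nr = set_period(100);
	printf("overflows=%llu left=%lld\n", nr,
	       (long long)period_left);	/* overflows=2 left=-50 */
	return 0;
}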
| @@ -3449,15 +3502,6 @@ static int perf_swcounter_match(struct perf_counter *counter, | |||
| 3449 | return 1; | 3502 | return 1; |
| 3450 | } | 3503 | } |
| 3451 | 3504 | ||
| 3452 | static void perf_swcounter_add(struct perf_counter *counter, u64 nr, | ||
| 3453 | int nmi, struct perf_sample_data *data) | ||
| 3454 | { | ||
| 3455 | int neg = atomic64_add_negative(nr, &counter->hw.count); | ||
| 3456 | |||
| 3457 | if (counter->hw.sample_period && !neg && data->regs) | ||
| 3458 | perf_swcounter_overflow(counter, nmi, data); | ||
| 3459 | } | ||
| 3460 | |||
| 3461 | static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, | 3505 | static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, |
| 3462 | enum perf_type_id type, | 3506 | enum perf_type_id type, |
| 3463 | u32 event, u64 nr, int nmi, | 3507 | u32 event, u64 nr, int nmi, |
| @@ -3536,27 +3580,66 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi, | |||
| 3536 | 3580 | ||
| 3537 | static void perf_swcounter_read(struct perf_counter *counter) | 3581 | static void perf_swcounter_read(struct perf_counter *counter) |
| 3538 | { | 3582 | { |
| 3539 | perf_swcounter_update(counter); | ||
| 3540 | } | 3583 | } |
| 3541 | 3584 | ||
| 3542 | static int perf_swcounter_enable(struct perf_counter *counter) | 3585 | static int perf_swcounter_enable(struct perf_counter *counter) |
| 3543 | { | 3586 | { |
| 3544 | perf_swcounter_set_period(counter); | 3587 | struct hw_perf_counter *hwc = &counter->hw; |
| 3588 | |||
| 3589 | if (hwc->sample_period) { | ||
| 3590 | hwc->last_period = hwc->sample_period; | ||
| 3591 | perf_swcounter_set_period(counter); | ||
| 3592 | } | ||
| 3545 | return 0; | 3593 | return 0; |
| 3546 | } | 3594 | } |
| 3547 | 3595 | ||
| 3548 | static void perf_swcounter_disable(struct perf_counter *counter) | 3596 | static void perf_swcounter_disable(struct perf_counter *counter) |
| 3549 | { | 3597 | { |
| 3550 | perf_swcounter_update(counter); | ||
| 3551 | } | 3598 | } |
| 3552 | 3599 | ||
| 3553 | static const struct pmu perf_ops_generic = { | 3600 | static const struct pmu perf_ops_generic = { |
| 3554 | .enable = perf_swcounter_enable, | 3601 | .enable = perf_swcounter_enable, |
| 3555 | .disable = perf_swcounter_disable, | 3602 | .disable = perf_swcounter_disable, |
| 3556 | .read = perf_swcounter_read, | 3603 | .read = perf_swcounter_read, |
| 3604 | .unthrottle = perf_swcounter_unthrottle, | ||
| 3557 | }; | 3605 | }; |
| 3558 | 3606 | ||
| 3559 | /* | 3607 | /* |
| 3608 | * hrtimer based swcounter callback | ||
| 3609 | */ | ||
| 3610 | |||
| 3611 | static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) | ||
| 3612 | { | ||
| 3613 | enum hrtimer_restart ret = HRTIMER_RESTART; | ||
| 3614 | struct perf_sample_data data; | ||
| 3615 | struct perf_counter *counter; | ||
| 3616 | u64 period; | ||
| 3617 | |||
| 3618 | counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); | ||
| 3619 | counter->pmu->read(counter); | ||
| 3620 | |||
| 3621 | data.addr = 0; | ||
| 3622 | data.regs = get_irq_regs(); | ||
| 3623 | /* | ||
| 3624 | * In case we exclude kernel IPs or are somehow not in interrupt | ||
| 3625 | * context, provide the next best thing, the user IP. | ||
| 3626 | */ | ||
| 3627 | if ((counter->attr.exclude_kernel || !data.regs) && | ||
| 3628 | !counter->attr.exclude_user) | ||
| 3629 | data.regs = task_pt_regs(current); | ||
| 3630 | |||
| 3631 | if (data.regs) { | ||
| 3632 | if (perf_counter_overflow(counter, 0, &data)) | ||
| 3633 | ret = HRTIMER_NORESTART; | ||
| 3634 | } | ||
| 3635 | |||
| 3636 | period = max_t(u64, 10000, counter->hw.sample_period); | ||
| 3637 | hrtimer_forward_now(hrtimer, ns_to_ktime(period)); | ||
| 3638 | |||
| 3639 | return ret; | ||
| 3640 | } | ||
| 3641 | |||
| 3642 | /* | ||
| 3560 | * Software counter: cpu wall time clock | 3643 | * Software counter: cpu wall time clock |
| 3561 | */ | 3644 | */ |
| 3562 | 3645 | ||
| @@ -3673,17 +3756,24 @@ static const struct pmu perf_ops_task_clock = { | |||
| 3673 | }; | 3756 | }; |
| 3674 | 3757 | ||
| 3675 | #ifdef CONFIG_EVENT_PROFILE | 3758 | #ifdef CONFIG_EVENT_PROFILE |
| 3676 | void perf_tpcounter_event(int event_id) | 3759 | void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, |
| 3760 | int entry_size) | ||
| 3677 | { | 3761 | { |
| 3762 | struct perf_raw_record raw = { | ||
| 3763 | .size = entry_size, | ||
| 3764 | .data = record, | ||
| 3765 | }; | ||
| 3766 | |||
| 3678 | struct perf_sample_data data = { | 3767 | struct perf_sample_data data = { |
| 3679 | .regs = get_irq_regs(), | 3768 | .regs = get_irq_regs(), |
| 3680 | .addr = 0, | 3769 | .addr = addr, |
| 3770 | .raw = &raw, | ||
| 3681 | }; | 3771 | }; |
| 3682 | 3772 | ||
| 3683 | if (!data.regs) | 3773 | if (!data.regs) |
| 3684 | data.regs = task_pt_regs(current); | 3774 | data.regs = task_pt_regs(current); |
| 3685 | 3775 | ||
| 3686 | do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data); | 3776 | do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data); |
| 3687 | } | 3777 | } |
| 3688 | EXPORT_SYMBOL_GPL(perf_tpcounter_event); | 3778 | EXPORT_SYMBOL_GPL(perf_tpcounter_event); |
| 3689 | 3779 | ||
| @@ -3697,6 +3787,14 @@ static void tp_perf_counter_destroy(struct perf_counter *counter) | |||
| 3697 | 3787 | ||
| 3698 | static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) | 3788 | static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) |
| 3699 | { | 3789 | { |
| 3790 | /* | ||
| 3791 | * Raw tracepoint data is a severe data leak; only allow root to | ||
| 3792 | * have these. | ||
| 3793 | */ | ||
| 3794 | if ((counter->attr.sample_type & PERF_SAMPLE_RAW) && | ||
| 3795 | !capable(CAP_SYS_ADMIN)) | ||
| 3796 | return ERR_PTR(-EPERM); | ||
| 3797 | |||
| 3700 | if (ftrace_profile_enable(counter->attr.config)) | 3798 | if (ftrace_profile_enable(counter->attr.config)) |
| 3701 | return NULL; | 3799 | return NULL; |
| 3702 | 3800 | ||
| @@ -3875,6 +3973,8 @@ done: | |||
| 3875 | atomic_inc(&nr_mmap_counters); | 3973 | atomic_inc(&nr_mmap_counters); |
| 3876 | if (counter->attr.comm) | 3974 | if (counter->attr.comm) |
| 3877 | atomic_inc(&nr_comm_counters); | 3975 | atomic_inc(&nr_comm_counters); |
| 3976 | if (counter->attr.task) | ||
| 3977 | atomic_inc(&nr_task_counters); | ||
| 3878 | } | 3978 | } |
| 3879 | 3979 | ||
| 3880 | return counter; | 3980 | return counter; |
| @@ -4236,8 +4336,10 @@ void perf_counter_exit_task(struct task_struct *child) | |||
| 4236 | struct perf_counter_context *child_ctx; | 4336 | struct perf_counter_context *child_ctx; |
| 4237 | unsigned long flags; | 4337 | unsigned long flags; |
| 4238 | 4338 | ||
| 4239 | if (likely(!child->perf_counter_ctxp)) | 4339 | if (likely(!child->perf_counter_ctxp)) { |
| 4340 | perf_counter_task(child, NULL, 0); | ||
| 4240 | return; | 4341 | return; |
| 4342 | } | ||
| 4241 | 4343 | ||
| 4242 | local_irq_save(flags); | 4344 | local_irq_save(flags); |
| 4243 | /* | 4345 | /* |
| @@ -4262,8 +4364,14 @@ void perf_counter_exit_task(struct task_struct *child) | |||
| 4262 | * the counters from it. | 4364 | * the counters from it. |
| 4263 | */ | 4365 | */ |
| 4264 | unclone_ctx(child_ctx); | 4366 | unclone_ctx(child_ctx); |
| 4265 | spin_unlock(&child_ctx->lock); | 4367 | spin_unlock_irqrestore(&child_ctx->lock, flags); |
| 4266 | local_irq_restore(flags); | 4368 | |
| 4369 | /* | ||
| 4370 | * Report the task dead after unscheduling the counters so that we | ||
| 4371 | * won't get any samples after PERF_EVENT_EXIT. We can however still | ||
| 4372 | * get a few PERF_EVENT_READ events. | ||
| 4373 | */ | ||
| 4374 | perf_counter_task(child, child_ctx, 0); | ||
| 4267 | 4375 | ||
| 4268 | /* | 4376 | /* |
| 4269 | * We can recurse on the same lock type through: | 4377 | * We can recurse on the same lock type through: |
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index bece7c0b67b2..e33a21cb9407 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
| @@ -521,11 +521,12 @@ void posix_cpu_timers_exit(struct task_struct *tsk) | |||
| 521 | } | 521 | } |
| 522 | void posix_cpu_timers_exit_group(struct task_struct *tsk) | 522 | void posix_cpu_timers_exit_group(struct task_struct *tsk) |
| 523 | { | 523 | { |
| 524 | struct task_cputime cputime; | 524 | struct signal_struct *const sig = tsk->signal; |
| 525 | 525 | ||
| 526 | thread_group_cputimer(tsk, &cputime); | ||
| 527 | cleanup_timers(tsk->signal->cpu_timers, | 526 | cleanup_timers(tsk->signal->cpu_timers, |
| 528 | cputime.utime, cputime.stime, cputime.sum_exec_runtime); | 527 | cputime_add(tsk->utime, sig->utime), |
| 528 | cputime_add(tsk->stime, sig->stime), | ||
| 529 | tsk->se.sum_exec_runtime + sig->sum_sched_runtime); | ||
| 529 | } | 530 | } |
| 530 | 531 | ||
| 531 | static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) | 532 | static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) |
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 052ec4d195c7..d089d052c4a9 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c | |||
| @@ -202,6 +202,12 @@ static int no_timer_create(struct k_itimer *new_timer) | |||
| 202 | return -EOPNOTSUPP; | 202 | return -EOPNOTSUPP; |
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | static int no_nsleep(const clockid_t which_clock, int flags, | ||
| 206 | struct timespec *tsave, struct timespec __user *rmtp) | ||
| 207 | { | ||
| 208 | return -EOPNOTSUPP; | ||
| 209 | } | ||
| 210 | |||
| 205 | /* | 211 | /* |
| 206 | * Return nonzero if we know a priori this clockid_t value is bogus. | 212 | * Return nonzero if we know a priori this clockid_t value is bogus. |
| 207 | */ | 213 | */ |
| @@ -254,6 +260,7 @@ static __init int init_posix_timers(void) | |||
| 254 | .clock_get = posix_get_monotonic_raw, | 260 | .clock_get = posix_get_monotonic_raw, |
| 255 | .clock_set = do_posix_clock_nosettime, | 261 | .clock_set = do_posix_clock_nosettime, |
| 256 | .timer_create = no_timer_create, | 262 | .timer_create = no_timer_create, |
| 263 | .nsleep = no_nsleep, | ||
| 257 | }; | 264 | }; |
| 258 | 265 | ||
| 259 | register_posix_clock(CLOCK_REALTIME, &clock_realtime); | 266 | register_posix_clock(CLOCK_REALTIME, &clock_realtime); |
diff --git a/kernel/profile.c b/kernel/profile.c index 69911b5745eb..419250ebec4d 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
| @@ -117,11 +117,12 @@ int __ref profile_init(void) | |||
| 117 | 117 | ||
| 118 | cpumask_copy(prof_cpu_mask, cpu_possible_mask); | 118 | cpumask_copy(prof_cpu_mask, cpu_possible_mask); |
| 119 | 119 | ||
| 120 | prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); | 120 | prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN); |
| 121 | if (prof_buffer) | 121 | if (prof_buffer) |
| 122 | return 0; | 122 | return 0; |
| 123 | 123 | ||
| 124 | prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO); | 124 | prof_buffer = alloc_pages_exact(buffer_bytes, |
| 125 | GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN); | ||
| 125 | if (prof_buffer) | 126 | if (prof_buffer) |
| 126 | return 0; | 127 | return 0; |
| 127 | 128 | ||
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index fcd107a78c5a..29bd4baf9e75 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c | |||
| @@ -1039,16 +1039,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, | |||
| 1039 | if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) { | 1039 | if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) { |
| 1040 | /* We got the lock for task. */ | 1040 | /* We got the lock for task. */ |
| 1041 | debug_rt_mutex_lock(lock); | 1041 | debug_rt_mutex_lock(lock); |
| 1042 | |||
| 1043 | rt_mutex_set_owner(lock, task, 0); | 1042 | rt_mutex_set_owner(lock, task, 0); |
| 1044 | 1043 | spin_unlock(&lock->wait_lock); | |
| 1045 | rt_mutex_deadlock_account_lock(lock, task); | 1044 | rt_mutex_deadlock_account_lock(lock, task); |
| 1046 | return 1; | 1045 | return 1; |
| 1047 | } | 1046 | } |
| 1048 | 1047 | ||
| 1049 | ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); | 1048 | ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); |
| 1050 | 1049 | ||
| 1051 | |||
| 1052 | if (ret && !waiter->task) { | 1050 | if (ret && !waiter->task) { |
| 1053 | /* | 1051 | /* |
| 1054 | * Reset the return value. We might have | 1052 | * Reset the return value. We might have |
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index e6c251790dde..d014efbf947a 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c | |||
| @@ -81,8 +81,21 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, | |||
| 81 | if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) | 81 | if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) |
| 82 | continue; | 82 | continue; |
| 83 | 83 | ||
| 84 | if (lowest_mask) | 84 | if (lowest_mask) { |
| 85 | cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); | 85 | cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); |
| 86 | |||
| 87 | /* | ||
| 88 | * We have to ensure that we have at least one bit | ||
| 89 | * still set in the array, since the map could have | ||
| 90 | * been concurrently emptied between the first and | ||
| 91 | * second reads of vec->mask. If we hit this | ||
| 92 | * condition, simply act as though we never hit this | ||
| 93 | * priority level and continue on. | ||
| 94 | */ | ||
| 95 | if (cpumask_any(lowest_mask) >= nr_cpu_ids) | ||
| 96 | continue; | ||
| 97 | } | ||
| 98 | |||
| 86 | return 1; | 99 | return 1; |
| 87 | } | 100 | } |
| 88 | 101 | ||
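The recheck added above handles a benign time-of-check/time-of-use race: vec->mask can be emptied by a concurrent update between the cpumask_any_and() probe and the cpumask_and() copy, so the copied result must be validated before being reported. The guard in miniature, as a runnable sketch with plain bitmasks standing in for cpumasks:

#include <stdio.h>

int main(void)
{
	unsigned long allowed  = 0x5;	/* p->cpus_allowed                 */
	unsigned long vec_mask = 0x0;	/* emptied between the two reads   */
	unsigned long lowest   = allowed & vec_mask;

	if (lowest == 0) {	/* nothing left: treat level as never hit */
		puts("level emptied concurrently; continue");
		return 0;
	}
	printf("lowest_mask=%#lx\n", lowest);
	return 0;
}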
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 9ffb2b2ceba4..652e8bdef9aa 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
| @@ -611,9 +611,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
| 611 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | 611 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) |
| 612 | { | 612 | { |
| 613 | #ifdef CONFIG_SCHEDSTATS | 613 | #ifdef CONFIG_SCHEDSTATS |
| 614 | struct task_struct *tsk = NULL; | ||
| 615 | |||
| 616 | if (entity_is_task(se)) | ||
| 617 | tsk = task_of(se); | ||
| 618 | |||
| 614 | if (se->sleep_start) { | 619 | if (se->sleep_start) { |
| 615 | u64 delta = rq_of(cfs_rq)->clock - se->sleep_start; | 620 | u64 delta = rq_of(cfs_rq)->clock - se->sleep_start; |
| 616 | struct task_struct *tsk = task_of(se); | ||
| 617 | 621 | ||
| 618 | if ((s64)delta < 0) | 622 | if ((s64)delta < 0) |
| 619 | delta = 0; | 623 | delta = 0; |
| @@ -624,11 +628,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
| 624 | se->sleep_start = 0; | 628 | se->sleep_start = 0; |
| 625 | se->sum_sleep_runtime += delta; | 629 | se->sum_sleep_runtime += delta; |
| 626 | 630 | ||
| 627 | account_scheduler_latency(tsk, delta >> 10, 1); | 631 | if (tsk) |
| 632 | account_scheduler_latency(tsk, delta >> 10, 1); | ||
| 628 | } | 633 | } |
| 629 | if (se->block_start) { | 634 | if (se->block_start) { |
| 630 | u64 delta = rq_of(cfs_rq)->clock - se->block_start; | 635 | u64 delta = rq_of(cfs_rq)->clock - se->block_start; |
| 631 | struct task_struct *tsk = task_of(se); | ||
| 632 | 636 | ||
| 633 | if ((s64)delta < 0) | 637 | if ((s64)delta < 0) |
| 634 | delta = 0; | 638 | delta = 0; |
| @@ -639,17 +643,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
| 639 | se->block_start = 0; | 643 | se->block_start = 0; |
| 640 | se->sum_sleep_runtime += delta; | 644 | se->sum_sleep_runtime += delta; |
| 641 | 645 | ||
| 642 | /* | 646 | if (tsk) { |
| 643 | * Blocking time is in units of nanosecs, so shift by 20 to | 647 | /* |
| 644 | * get a milliseconds-range estimation of the amount of | 648 | * Blocking time is in units of nanosecs, so shift by |
| 645 | * time that the task spent sleeping: | 649 | * 20 to get a milliseconds-range estimation of the |
| 646 | */ | 650 | * amount of time that the task spent sleeping: |
| 647 | if (unlikely(prof_on == SLEEP_PROFILING)) { | 651 | */ |
| 648 | 652 | if (unlikely(prof_on == SLEEP_PROFILING)) { | |
| 649 | profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk), | 653 | profile_hits(SLEEP_PROFILING, |
| 650 | delta >> 20); | 654 | (void *)get_wchan(tsk), |
| 655 | delta >> 20); | ||
| 656 | } | ||
| 657 | account_scheduler_latency(tsk, delta >> 10, 0); | ||
| 651 | } | 658 | } |
| 652 | account_scheduler_latency(tsk, delta >> 10, 0); | ||
| 653 | } | 659 | } |
| 654 | #endif | 660 | #endif |
| 655 | } | 661 | } |
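
With group scheduling, a sched_entity may represent a task group rather than a task, and task_of() is only meaningful in the latter case; the hunk therefore resolves tsk once via entity_is_task() and guards the task-only statistics. A simplified sketch of the guard pattern, with stand-in types:

    #include <stddef.h>

    struct task { unsigned long long latency; };

    /* Simplified: an entity is either a task or a task group. */
    struct entity {
        int is_task;                 /* entity_is_task(se) */
        struct task *task;           /* valid only when is_task */
        unsigned long long sleep_start;
    };

    static void account_latency(struct task *t, unsigned long long d)
    {
        t->latency += d;
    }

    static void enqueue_sleeper(struct entity *se, unsigned long long now)
    {
        /* Resolve the task once, up front; NULL for group entities. */
        struct task *tsk = se->is_task ? se->task : NULL;

        if (se->sleep_start) {
            unsigned long long delta = now - se->sleep_start;

            se->sleep_start = 0;
            if (tsk)                 /* task-only stats are guarded */
                account_latency(tsk, delta >> 10);
        }
    }

    int main(void)
    {
        struct task t = { 0 };
        struct entity group = { 0, NULL, 100 };   /* no task stats */
        struct entity tse   = { 1, &t,   100 };

        enqueue_sleeper(&group, 2048);  /* safe: tsk stays NULL */
        enqueue_sleeper(&tse, 2048);    /* accounts latency for t */
        return 0;
    }
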
diff --git a/kernel/signal.c b/kernel/signal.c index ccf1ceedaebe..64c5deeaca5d 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -2454,11 +2454,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s | |||
| 2454 | stack_t oss; | 2454 | stack_t oss; |
| 2455 | int error; | 2455 | int error; |
| 2456 | 2456 | ||
| 2457 | if (uoss) { | 2457 | oss.ss_sp = (void __user *) current->sas_ss_sp; |
| 2458 | oss.ss_sp = (void __user *) current->sas_ss_sp; | 2458 | oss.ss_size = current->sas_ss_size; |
| 2459 | oss.ss_size = current->sas_ss_size; | 2459 | oss.ss_flags = sas_ss_flags(sp); |
| 2460 | oss.ss_flags = sas_ss_flags(sp); | ||
| 2461 | } | ||
| 2462 | 2460 | ||
| 2463 | if (uss) { | 2461 | if (uss) { |
| 2464 | void __user *ss_sp; | 2462 | void __user *ss_sp; |
| @@ -2466,10 +2464,12 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s | |||
| 2466 | int ss_flags; | 2464 | int ss_flags; |
| 2467 | 2465 | ||
| 2468 | error = -EFAULT; | 2466 | error = -EFAULT; |
| 2469 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss)) | 2467 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) |
| 2470 | || __get_user(ss_sp, &uss->ss_sp) | 2468 | goto out; |
| 2471 | || __get_user(ss_flags, &uss->ss_flags) | 2469 | error = __get_user(ss_sp, &uss->ss_sp) | |
| 2472 | || __get_user(ss_size, &uss->ss_size)) | 2470 | __get_user(ss_flags, &uss->ss_flags) | |
| 2471 | __get_user(ss_size, &uss->ss_size); | ||
| 2472 | if (error) | ||
| 2473 | goto out; | 2473 | goto out; |
| 2474 | 2474 | ||
| 2475 | error = -EPERM; | 2475 | error = -EPERM; |
| @@ -2501,13 +2501,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s | |||
| 2501 | current->sas_ss_size = ss_size; | 2501 | current->sas_ss_size = ss_size; |
| 2502 | } | 2502 | } |
| 2503 | 2503 | ||
| 2504 | error = 0; | ||
| 2504 | if (uoss) { | 2505 | if (uoss) { |
| 2505 | error = -EFAULT; | 2506 | error = -EFAULT; |
| 2506 | if (copy_to_user(uoss, &oss, sizeof(oss))) | 2507 | if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) |
| 2507 | goto out; | 2508 | goto out; |
| 2509 | error = __put_user(oss.ss_sp, &uoss->ss_sp) | | ||
| 2510 | __put_user(oss.ss_size, &uoss->ss_size) | | ||
| 2511 | __put_user(oss.ss_flags, &uoss->ss_flags); | ||
| 2508 | } | 2512 | } |
| 2509 | 2513 | ||
| 2510 | error = 0; | ||
| 2511 | out: | 2514 | out: |
| 2512 | return error; | 2515 | return error; |
| 2513 | } | 2516 | } |
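
Two changes here: the old settings are snapshotted into oss unconditionally before any new settings from uss are applied, and the user-space copies switch to access_ok() plus __get_user()/__put_user(). The first is what makes a single call behave as an atomic swap of alternate signal stacks; a small user-space illustration (error handling trimmed to the essentials):

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        stack_t new_ss, old_ss;

        new_ss.ss_sp = malloc(SIGSTKSZ);
        new_ss.ss_size = SIGSTKSZ;
        new_ss.ss_flags = 0;

        /* One call installs the new stack and reports the old one,
         * because the old state is captured before the new applies. */
        if (sigaltstack(&new_ss, &old_ss) == -1) {
            perror("sigaltstack");
            return 1;
        }
        printf("previous stack was %s\n",
               (old_ss.ss_flags & SS_DISABLE) ? "disabled" : "active");
        return 0;
    }
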
diff --git a/kernel/smp.c b/kernel/smp.c index ad63d8501207..94188b8ecc33 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
| @@ -57,7 +57,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
| 57 | return NOTIFY_BAD; | 57 | return NOTIFY_BAD; |
| 58 | break; | 58 | break; |
| 59 | 59 | ||
| 60 | #ifdef CONFIG_CPU_HOTPLUG | 60 | #ifdef CONFIG_HOTPLUG_CPU |
| 61 | case CPU_UP_CANCELED: | 61 | case CPU_UP_CANCELED: |
| 62 | case CPU_UP_CANCELED_FROZEN: | 62 | case CPU_UP_CANCELED_FROZEN: |
| 63 | 63 | ||
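
A one-word fix with real consequences: CONFIG_CPU_HOTPLUG is not a symbol the build ever defines, so the CPU_UP_CANCELED cleanup cases were silently compiled out. Misspelled preprocessor guards fail in exactly this quiet way, as a toy example shows:

    #include <stdio.h>

    #define CONFIG_HOTPLUG_CPU 1     /* the real option */

    int main(void)
    {
    #ifdef CONFIG_CPU_HOTPLUG        /* misspelled: never defined */
        puts("cleanup cases compiled in (never printed)");
    #endif
    #ifdef CONFIG_HOTPLUG_CPU        /* correct spelling: kept */
        puts("cleanup cases compiled in");
    #endif
        return 0;
    }
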
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 1090b0aed9ba..7a34cb563fec 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
| @@ -267,8 +267,8 @@ static void blk_trace_free(struct blk_trace *bt) | |||
| 267 | { | 267 | { |
| 268 | debugfs_remove(bt->msg_file); | 268 | debugfs_remove(bt->msg_file); |
| 269 | debugfs_remove(bt->dropped_file); | 269 | debugfs_remove(bt->dropped_file); |
| 270 | debugfs_remove(bt->dir); | ||
| 271 | relay_close(bt->rchan); | 270 | relay_close(bt->rchan); |
| 271 | debugfs_remove(bt->dir); | ||
| 272 | free_percpu(bt->sequence); | 272 | free_percpu(bt->sequence); |
| 273 | free_percpu(bt->msg_data); | 273 | free_percpu(bt->msg_data); |
| 274 | kfree(bt); | 274 | kfree(bt); |
| @@ -378,18 +378,8 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, | |||
| 378 | 378 | ||
| 379 | static int blk_remove_buf_file_callback(struct dentry *dentry) | 379 | static int blk_remove_buf_file_callback(struct dentry *dentry) |
| 380 | { | 380 | { |
| 381 | struct dentry *parent = dentry->d_parent; | ||
| 382 | debugfs_remove(dentry); | 381 | debugfs_remove(dentry); |
| 383 | 382 | ||
| 384 | /* | ||
| 385 | * this will fail for all but the last file, but that is ok. what we | ||
| 386 | * care about is the top level buts->name directory going away, when | ||
| 387 | * the last trace file is gone. Then we don't have to rmdir() that | ||
| 388 | * manually on trace stop, so it nicely solves the issue with | ||
| 389 | * force killing of running traces. | ||
| 390 | */ | ||
| 391 | |||
| 392 | debugfs_remove(parent); | ||
| 393 | return 0; | 383 | return 0; |
| 394 | } | 384 | } |
| 395 | 385 | ||
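
The reorder matters because relay_close() is what removes the per-cpu buffer files living inside bt->dir; removing the directory while those files still exist cannot succeed, and once the ordering is right the old "remove the parent from the buf-file callback" workaround can go too. A user-space analogue of the children-before-parent rule:

    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        FILE *f;

        mkdir("trace_dir", 0755);
        f = fopen("trace_dir/buf0", "w");   /* a child of the dir */

        if (rmdir("trace_dir") == -1)       /* parent first: fails */
            perror("rmdir with children present");

        if (f)
            fclose(f);
        unlink("trace_dir/buf0");           /* children first... */
        if (rmdir("trace_dir") == 0)        /* ...then the parent */
            puts("directory removed cleanly");
        return 0;
    }
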
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 4521c77d1a1a..1e1d23c26308 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -1662,7 +1662,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
| 1662 | 1662 | ||
| 1663 | mutex_lock(&ftrace_regex_lock); | 1663 | mutex_lock(&ftrace_regex_lock); |
| 1664 | if ((file->f_mode & FMODE_WRITE) && | 1664 | if ((file->f_mode & FMODE_WRITE) && |
| 1665 | !(file->f_flags & O_APPEND)) | 1665 | (file->f_flags & O_TRUNC)) |
| 1666 | ftrace_filter_reset(enable); | 1666 | ftrace_filter_reset(enable); |
| 1667 | 1667 | ||
| 1668 | if (file->f_mode & FMODE_READ) { | 1668 | if (file->f_mode & FMODE_READ) { |
| @@ -2577,7 +2577,7 @@ ftrace_graph_open(struct inode *inode, struct file *file) | |||
| 2577 | 2577 | ||
| 2578 | mutex_lock(&graph_lock); | 2578 | mutex_lock(&graph_lock); |
| 2579 | if ((file->f_mode & FMODE_WRITE) && | 2579 | if ((file->f_mode & FMODE_WRITE) && |
| 2580 | !(file->f_flags & O_APPEND)) { | 2580 | (file->f_flags & O_TRUNC)) { |
| 2581 | ftrace_graph_count = 0; | 2581 | ftrace_graph_count = 0; |
| 2582 | memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); | 2582 | memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); |
| 2583 | } | 2583 | } |
| @@ -2596,6 +2596,14 @@ ftrace_graph_open(struct inode *inode, struct file *file) | |||
| 2596 | } | 2596 | } |
| 2597 | 2597 | ||
| 2598 | static int | 2598 | static int |
| 2599 | ftrace_graph_release(struct inode *inode, struct file *file) | ||
| 2600 | { | ||
| 2601 | if (file->f_mode & FMODE_READ) | ||
| 2602 | seq_release(inode, file); | ||
| 2603 | return 0; | ||
| 2604 | } | ||
| 2605 | |||
| 2606 | static int | ||
| 2599 | ftrace_set_func(unsigned long *array, int *idx, char *buffer) | 2607 | ftrace_set_func(unsigned long *array, int *idx, char *buffer) |
| 2600 | { | 2608 | { |
| 2601 | struct dyn_ftrace *rec; | 2609 | struct dyn_ftrace *rec; |
| @@ -2724,9 +2732,10 @@ ftrace_graph_write(struct file *file, const char __user *ubuf, | |||
| 2724 | } | 2732 | } |
| 2725 | 2733 | ||
| 2726 | static const struct file_operations ftrace_graph_fops = { | 2734 | static const struct file_operations ftrace_graph_fops = { |
| 2727 | .open = ftrace_graph_open, | 2735 | .open = ftrace_graph_open, |
| 2728 | .read = seq_read, | 2736 | .read = seq_read, |
| 2729 | .write = ftrace_graph_write, | 2737 | .write = ftrace_graph_write, |
| 2738 | .release = ftrace_graph_release, | ||
| 2730 | }; | 2739 | }; |
| 2731 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 2740 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 2732 | 2741 | ||
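
Keying the reset off O_TRUNC instead of the absence of O_APPEND gives these control files ordinary shell semantics: '>' truncates (clears the filter) while '>>' appends, and a plain open for writing no longer wipes state; the new ftrace_graph_release() additionally frees the seq_file that readers allocate at open. A sketch of the two open modes as a shell would issue them (the path is relative here for illustration; the real file lives under the tracing debugfs directory):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd;

        /* echo func_a > set_ftrace_filter: O_TRUNC, state resets */
        fd = open("set_ftrace_filter", O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (fd >= 0) {
            write(fd, "func_a\n", 7);
            close(fd);
        }

        /* echo func_b >> set_ftrace_filter: O_APPEND, entries kept */
        fd = open("set_ftrace_filter", O_WRONLY | O_CREAT | O_APPEND, 0644);
        if (fd >= 0) {
            write(fd, "func_b\n", 7);
            close(fd);
        }
        return 0;
    }
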
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index bf27bb7a63e2..a330513d96ce 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -735,6 +735,7 @@ ring_buffer_free(struct ring_buffer *buffer) | |||
| 735 | 735 | ||
| 736 | put_online_cpus(); | 736 | put_online_cpus(); |
| 737 | 737 | ||
| 738 | kfree(buffer->buffers); | ||
| 738 | free_cpumask_var(buffer->cpumask); | 739 | free_cpumask_var(buffer->cpumask); |
| 739 | 740 | ||
| 740 | kfree(buffer); | 741 | kfree(buffer); |
| @@ -1785,7 +1786,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer, | |||
| 1785 | */ | 1786 | */ |
| 1786 | RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); | 1787 | RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); |
| 1787 | 1788 | ||
| 1788 | if (!rb_try_to_discard(cpu_buffer, event)) | 1789 | if (rb_try_to_discard(cpu_buffer, event)) |
| 1789 | goto out; | 1790 | goto out; |
| 1790 | 1791 | ||
| 1791 | /* | 1792 | /* |
| @@ -2383,7 +2384,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
| 2383 | * the box. Return the padding, and we will release | 2384 | * the box. Return the padding, and we will release |
| 2384 | * the current locks, and try again. | 2385 | * the current locks, and try again. |
| 2385 | */ | 2386 | */ |
| 2386 | rb_advance_reader(cpu_buffer); | ||
| 2387 | return event; | 2387 | return event; |
| 2388 | 2388 | ||
| 2389 | case RINGBUF_TYPE_TIME_EXTEND: | 2389 | case RINGBUF_TYPE_TIME_EXTEND: |
| @@ -2486,7 +2486,7 @@ static inline int rb_ok_to_lock(void) | |||
| 2486 | * buffer too. A one time deal is all you get from reading | 2486 | * buffer too. A one time deal is all you get from reading |
| 2487 | * the ring buffer from an NMI. | 2487 | * the ring buffer from an NMI. |
| 2488 | */ | 2488 | */ |
| 2489 | if (likely(!in_nmi() && !oops_in_progress)) | 2489 | if (likely(!in_nmi())) |
| 2490 | return 1; | 2490 | return 1; |
| 2491 | 2491 | ||
| 2492 | tracing_off_permanent(); | 2492 | tracing_off_permanent(); |
| @@ -2519,6 +2519,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
| 2519 | if (dolock) | 2519 | if (dolock) |
| 2520 | spin_lock(&cpu_buffer->reader_lock); | 2520 | spin_lock(&cpu_buffer->reader_lock); |
| 2521 | event = rb_buffer_peek(buffer, cpu, ts); | 2521 | event = rb_buffer_peek(buffer, cpu, ts); |
| 2522 | if (event && event->type_len == RINGBUF_TYPE_PADDING) | ||
| 2523 | rb_advance_reader(cpu_buffer); | ||
| 2522 | if (dolock) | 2524 | if (dolock) |
| 2523 | spin_unlock(&cpu_buffer->reader_lock); | 2525 | spin_unlock(&cpu_buffer->reader_lock); |
| 2524 | local_irq_restore(flags); | 2526 | local_irq_restore(flags); |
| @@ -2590,12 +2592,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
| 2590 | spin_lock(&cpu_buffer->reader_lock); | 2592 | spin_lock(&cpu_buffer->reader_lock); |
| 2591 | 2593 | ||
| 2592 | event = rb_buffer_peek(buffer, cpu, ts); | 2594 | event = rb_buffer_peek(buffer, cpu, ts); |
| 2593 | if (!event) | 2595 | if (event) |
| 2594 | goto out_unlock; | 2596 | rb_advance_reader(cpu_buffer); |
| 2595 | |||
| 2596 | rb_advance_reader(cpu_buffer); | ||
| 2597 | 2597 | ||
| 2598 | out_unlock: | ||
| 2599 | if (dolock) | 2598 | if (dolock) |
| 2600 | spin_unlock(&cpu_buffer->reader_lock); | 2599 | spin_unlock(&cpu_buffer->reader_lock); |
| 2601 | local_irq_restore(flags); | 2600 | local_irq_restore(flags); |
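
Beyond un-inverting the rb_try_to_discard() test, these hunks make rb_buffer_peek() free of side effects: it no longer advances the reader itself, so ring_buffer_peek() steps only past padding events and ring_buffer_consume() advances only after a successful peek. A toy single-threaded reader showing that peek/advance split (a hypothetical structure, not the kernel's ring buffer):

    #include <stdio.h>

    /* A toy reader: peek() has no side effects, the caller advances. */
    struct ring {
        int buf[8];
        int head, tail;
    };

    static int *peek(struct ring *r)
    {
        return r->head == r->tail ? NULL : &r->buf[r->head & 7];
    }

    static void advance(struct ring *r) { r->head++; }

    static int consume(struct ring *r, int *out)
    {
        int *ev = peek(r);

        if (!ev)
            return 0;
        *out = *ev;
        advance(r);        /* advance only once the event is taken */
        return 1;
    }

    int main(void)
    {
        struct ring r = { .buf = { 1, 2, 3 }, .head = 0, .tail = 3 };
        int v;

        while (consume(&r, &v))
            printf("%d\n", v);
        return 0;
    }
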
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8bc8d8afea6a..c22b40f8f576 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -848,6 +848,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | |||
| 848 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | | 848 | ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | |
| 849 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); | 849 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); |
| 850 | } | 850 | } |
| 851 | EXPORT_SYMBOL_GPL(tracing_generic_entry_update); | ||
| 851 | 852 | ||
| 852 | struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, | 853 | struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, |
| 853 | int type, | 854 | int type, |
| @@ -2031,7 +2032,7 @@ static int tracing_open(struct inode *inode, struct file *file) | |||
| 2031 | 2032 | ||
| 2032 | /* If this file was open for write, then erase contents */ | 2033 | /* If this file was open for write, then erase contents */ |
| 2033 | if ((file->f_mode & FMODE_WRITE) && | 2034 | if ((file->f_mode & FMODE_WRITE) && |
| 2034 | !(file->f_flags & O_APPEND)) { | 2035 | (file->f_flags & O_TRUNC)) { |
| 2035 | long cpu = (long) inode->i_private; | 2036 | long cpu = (long) inode->i_private; |
| 2036 | 2037 | ||
| 2037 | if (cpu == TRACE_PIPE_ALL_CPU) | 2038 | if (cpu == TRACE_PIPE_ALL_CPU) |
| @@ -3085,7 +3086,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) | |||
| 3085 | break; | 3086 | break; |
| 3086 | } | 3087 | } |
| 3087 | 3088 | ||
| 3088 | trace_consume(iter); | 3089 | if (ret != TRACE_TYPE_NO_CONSUME) |
| 3090 | trace_consume(iter); | ||
| 3089 | rem -= count; | 3091 | rem -= count; |
| 3090 | if (!find_next_entry_inc(iter)) { | 3092 | if (!find_next_entry_inc(iter)) { |
| 3091 | rem = 0; | 3093 | rem = 0; |
| @@ -4233,8 +4235,11 @@ static void __ftrace_dump(bool disable_tracing) | |||
| 4233 | iter.pos = -1; | 4235 | iter.pos = -1; |
| 4234 | 4236 | ||
| 4235 | if (find_next_entry_inc(&iter) != NULL) { | 4237 | if (find_next_entry_inc(&iter) != NULL) { |
| 4236 | print_trace_line(&iter); | 4238 | int ret; |
| 4237 | trace_consume(&iter); | 4239 | |
| 4240 | ret = print_trace_line(&iter); | ||
| 4241 | if (ret != TRACE_TYPE_NO_CONSUME) | ||
| 4242 | trace_consume(&iter); | ||
| 4238 | } | 4243 | } |
| 4239 | 4244 | ||
| 4240 | trace_printk_seq(&iter.seq); | 4245 | trace_printk_seq(&iter.seq); |
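
Both call sites now honor TRACE_TYPE_NO_CONSUME: when print_trace_line() reports that it already dealt with the current entry, the caller must not consume on its own behalf, or an event gets dropped. The pattern reduced to a sketch with hypothetical types:

    #include <stdio.h>

    enum print_ret { PRINT_OK, PRINT_NO_CONSUME };

    struct iter { int pos; };

    static void consume(struct iter *it) { it->pos++; }

    /* Hypothetical printer: it may consume the event itself. */
    static enum print_ret print_line(struct iter *it)
    {
        printf("event %d\n", it->pos);
        if (it->pos % 2) {           /* pretend odd events are merged */
            consume(it);             /* printer consumed it... */
            return PRINT_NO_CONSUME; /* ...so the caller must not */
        }
        return PRINT_OK;
    }

    /* The shape both hunks adopt: consume only if the printer did not. */
    static void emit_one(struct iter *it)
    {
        if (print_line(it) != PRINT_NO_CONSUME)
            consume(it);
    }

    int main(void)
    {
        struct iter it = { 0 };

        emit_one(&it);   /* event 0: printed, consumed by the caller */
        emit_one(&it);   /* event 1: printed and consumed by printer */
        return 0;
    }
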
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 3548ae5cc780..8b9f4f6e9559 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -438,10 +438,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, | |||
| 438 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | 438 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
| 439 | int *ent_cpu, u64 *ent_ts); | 439 | int *ent_cpu, u64 *ent_ts); |
| 440 | 440 | ||
| 441 | void tracing_generic_entry_update(struct trace_entry *entry, | ||
| 442 | unsigned long flags, | ||
| 443 | int pc); | ||
| 444 | |||
| 445 | void default_wait_pipe(struct trace_iterator *iter); | 441 | void default_wait_pipe(struct trace_iterator *iter); |
| 446 | void poll_wait_pipe(struct trace_iterator *iter); | 442 | void poll_wait_pipe(struct trace_iterator *iter); |
| 447 | 443 | ||
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index 5b5895afecfe..11ba5bb4ed0a 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c | |||
| @@ -14,7 +14,7 @@ int ftrace_profile_enable(int event_id) | |||
| 14 | 14 | ||
| 15 | mutex_lock(&event_mutex); | 15 | mutex_lock(&event_mutex); |
| 16 | list_for_each_entry(event, &ftrace_events, list) { | 16 | list_for_each_entry(event, &ftrace_events, list) { |
| 17 | if (event->id == event_id) { | 17 | if (event->id == event_id && event->profile_enable) { |
| 18 | ret = event->profile_enable(event); | 18 | ret = event->profile_enable(event); |
| 19 | break; | 19 | break; |
| 20 | } | 20 | } |
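
profile_enable is an optional per-event hook, so matching on the id alone risks calling through a NULL pointer for events without profiling support; the related trace_events.c hunk below stops creating the "id" file for such events at all. The guard in isolation, with stand-in types:

    #include <stddef.h>

    struct event {
        int id;
        int (*profile_enable)(struct event *);  /* optional hook */
    };

    /* Guard: an optional callback may be NULL, so test the pointer
     * as well as the id before calling through it. */
    static int do_profile_enable(struct event *ev, int id)
    {
        if (ev->id == id && ev->profile_enable)
            return ev->profile_enable(ev);
        return -1;
    }

    int main(void)
    {
        struct event ev = { 7, NULL };   /* no profiling support */

        return do_profile_enable(&ev, 7) == -1 ? 0 : 1;  /* no crash */
    }
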
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 53c8fd376a88..e75276a49cf5 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
| @@ -376,7 +376,7 @@ ftrace_event_seq_open(struct inode *inode, struct file *file) | |||
| 376 | const struct seq_operations *seq_ops; | 376 | const struct seq_operations *seq_ops; |
| 377 | 377 | ||
| 378 | if ((file->f_mode & FMODE_WRITE) && | 378 | if ((file->f_mode & FMODE_WRITE) && |
| 379 | !(file->f_flags & O_APPEND)) | 379 | (file->f_flags & O_TRUNC)) |
| 380 | ftrace_clear_events(); | 380 | ftrace_clear_events(); |
| 381 | 381 | ||
| 382 | seq_ops = inode->i_private; | 382 | seq_ops = inode->i_private; |
| @@ -940,7 +940,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
| 940 | entry = trace_create_file("enable", 0644, call->dir, call, | 940 | entry = trace_create_file("enable", 0644, call->dir, call, |
| 941 | enable); | 941 | enable); |
| 942 | 942 | ||
| 943 | if (call->id) | 943 | if (call->id && call->profile_enable) |
| 944 | entry = trace_create_file("id", 0444, call->dir, call, | 944 | entry = trace_create_file("id", 0444, call->dir, call, |
| 945 | id); | 945 | id); |
| 946 | 946 | ||
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 936c621bbf46..f32dc9d1ea7b 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
| @@ -624,9 +624,6 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps, | |||
| 624 | return -ENOSPC; | 624 | return -ENOSPC; |
| 625 | } | 625 | } |
| 626 | 626 | ||
| 627 | filter->preds[filter->n_preds] = pred; | ||
| 628 | filter->n_preds++; | ||
| 629 | |||
| 630 | list_for_each_entry(call, &ftrace_events, list) { | 627 | list_for_each_entry(call, &ftrace_events, list) { |
| 631 | 628 | ||
| 632 | if (!call->define_fields) | 629 | if (!call->define_fields) |
| @@ -643,6 +640,9 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps, | |||
| 643 | } | 640 | } |
| 644 | replace_filter_string(call->filter, filter_string); | 641 | replace_filter_string(call->filter, filter_string); |
| 645 | } | 642 | } |
| 643 | |||
| 644 | filter->preds[filter->n_preds] = pred; | ||
| 645 | filter->n_preds++; | ||
| 646 | out: | 646 | out: |
| 647 | return err; | 647 | return err; |
| 648 | } | 648 | } |
| @@ -1029,12 +1029,17 @@ static int replace_preds(struct event_subsystem *system, | |||
| 1029 | 1029 | ||
| 1030 | if (elt->op == OP_AND || elt->op == OP_OR) { | 1030 | if (elt->op == OP_AND || elt->op == OP_OR) { |
| 1031 | pred = create_logical_pred(elt->op); | 1031 | pred = create_logical_pred(elt->op); |
| 1032 | if (!pred) | ||
| 1033 | return -ENOMEM; | ||
| 1032 | if (call) { | 1034 | if (call) { |
| 1033 | err = filter_add_pred(ps, call, pred); | 1035 | err = filter_add_pred(ps, call, pred); |
| 1034 | filter_free_pred(pred); | 1036 | filter_free_pred(pred); |
| 1035 | } else | 1037 | } else { |
| 1036 | err = filter_add_subsystem_pred(ps, system, | 1038 | err = filter_add_subsystem_pred(ps, system, |
| 1037 | pred, filter_string); | 1039 | pred, filter_string); |
| 1040 | if (err) | ||
| 1041 | filter_free_pred(pred); | ||
| 1042 | } | ||
| 1038 | if (err) | 1043 | if (err) |
| 1039 | return err; | 1044 | return err; |
| 1040 | 1045 | ||
| @@ -1048,12 +1053,17 @@ static int replace_preds(struct event_subsystem *system, | |||
| 1048 | } | 1053 | } |
| 1049 | 1054 | ||
| 1050 | pred = create_pred(elt->op, operand1, operand2); | 1055 | pred = create_pred(elt->op, operand1, operand2); |
| 1056 | if (!pred) | ||
| 1057 | return -ENOMEM; | ||
| 1051 | if (call) { | 1058 | if (call) { |
| 1052 | err = filter_add_pred(ps, call, pred); | 1059 | err = filter_add_pred(ps, call, pred); |
| 1053 | filter_free_pred(pred); | 1060 | filter_free_pred(pred); |
| 1054 | } else | 1061 | } else { |
| 1055 | err = filter_add_subsystem_pred(ps, system, pred, | 1062 | err = filter_add_subsystem_pred(ps, system, pred, |
| 1056 | filter_string); | 1063 | filter_string); |
| 1064 | if (err) | ||
| 1065 | filter_free_pred(pred); | ||
| 1066 | } | ||
| 1057 | if (err) | 1067 | if (err) |
| 1058 | return err; | 1068 | return err; |
| 1059 | 1069 | ||
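
Three robustness fixes travel together here: create_pred()/create_logical_pred() can return NULL and are now checked, a pred that fails to apply on the subsystem path is freed rather than leaked, and the pred is appended to filter->preds only after it has applied cleanly everywhere, so a half-installed pred never becomes visible. A compact sketch of that allocate, apply, publish-on-success ownership pattern (hypothetical helpers):

    #include <errno.h>
    #include <stdlib.h>

    struct pred { int op; };

    /* Hypothetical applier standing in for filter_add_pred() et al. */
    static int apply_everywhere(struct pred *p) { (void)p; return 0; }

    static int add_pred(struct pred **slot, int op)
    {
        struct pred *pred = malloc(sizeof(*pred));
        int err;

        if (!pred)               /* allocation can fail: report it */
            return -ENOMEM;
        pred->op = op;

        err = apply_everywhere(pred);
        if (err) {
            free(pred);          /* failed: nothing else owns it */
            return err;
        }
        *slot = pred;            /* publish only after full success */
        return 0;
    }

    int main(void)
    {
        struct pred *p = NULL;
        int err = add_pred(&p, 1);

        free(p);                 /* NULL on failure, so always safe */
        return err ? 1 : 0;
    }
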
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index d2249abafb53..420ec3487579 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -843,9 +843,16 @@ print_graph_function(struct trace_iterator *iter) | |||
| 843 | 843 | ||
| 844 | switch (entry->type) { | 844 | switch (entry->type) { |
| 845 | case TRACE_GRAPH_ENT: { | 845 | case TRACE_GRAPH_ENT: { |
| 846 | struct ftrace_graph_ent_entry *field; | 846 | /* |
| 847 | * print_graph_entry() may consume the current event, | ||
| 848 | * thus @field may become invalid, so we need to save it. | ||
| 849 | * sizeof(struct ftrace_graph_ent_entry) is very small, | ||
| 850 | * it can be safely saved at the stack. | ||
| 851 | */ | ||
| 852 | struct ftrace_graph_ent_entry *field, saved; | ||
| 847 | trace_assign_type(field, entry); | 853 | trace_assign_type(field, entry); |
| 848 | return print_graph_entry(field, s, iter); | 854 | saved = *field; |
| 855 | return print_graph_entry(&saved, s, iter); | ||
| 849 | } | 856 | } |
| 850 | case TRACE_GRAPH_RET: { | 857 | case TRACE_GRAPH_RET: { |
| 851 | struct ftrace_graph_ret_entry *field; | 858 | struct ftrace_graph_ret_entry *field; |
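
The new comment states the rule: once the current event may be consumed, a pointer into the ring buffer can go stale, so the small entry is copied onto the stack before print_graph_entry() runs. The defensive copy in isolation (hypothetical types):

    #include <stdio.h>

    struct ent { long func; int depth; };

    /* Hypothetical printer; by the time it runs, the original slot
     * in the ring buffer may have been consumed and reused. */
    static void print_entry(const struct ent *e)
    {
        printf("%ld at depth %d\n", e->func, e->depth);
    }

    static void handle(struct ent *field /* points into a live buffer */)
    {
        struct ent saved = *field;   /* small struct: cheap to copy */

        /* ...anything here may consume the event and recycle *field... */
        print_entry(&saved);         /* safe: we own the copy */
    }

    int main(void)
    {
        struct ent e = { 42, 1 };

        handle(&e);
        return 0;
    }
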
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 7b6278110827..687699d365ae 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
| @@ -176,7 +176,7 @@ static int t_show(struct seq_file *m, void *v) | |||
| 176 | const char *str = *fmt; | 176 | const char *str = *fmt; |
| 177 | int i; | 177 | int i; |
| 178 | 178 | ||
| 179 | seq_printf(m, "0x%lx : \"", (unsigned long)fmt); | 179 | seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt); |
| 180 | 180 | ||
| 181 | /* | 181 | /* |
| 182 | * Tabs and new lines need to be converted. | 182 | * Tabs and new lines need to be converted. |
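
This one-liner untangles a level of indirection: t_show() walks a table of format-string pointers, so fmt is effectively a char **, and (unsigned long)fmt printed the address of the table slot rather than of the string itself. In miniature:

    #include <stdio.h>

    static const char *fmts[] = { "hello %d\n", "bye %s\n" };

    int main(void)
    {
        const char **fmt = fmts;   /* iterator over a pointer table */

        /* Wrong: the address of the table slot itself. */
        printf("slot:   0x%lx\n", (unsigned long)fmt);
        /* Right: the slot's contents, the string's address. */
        printf("string: 0x%lx\n", *(unsigned long *)fmt);
        return 0;
    }
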
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index e644af910124..6a2a9d484cd6 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -301,17 +301,14 @@ static const struct seq_operations stack_trace_seq_ops = { | |||
| 301 | 301 | ||
| 302 | static int stack_trace_open(struct inode *inode, struct file *file) | 302 | static int stack_trace_open(struct inode *inode, struct file *file) |
| 303 | { | 303 | { |
| 304 | int ret; | 304 | return seq_open(file, &stack_trace_seq_ops); |
| 305 | |||
| 306 | ret = seq_open(file, &stack_trace_seq_ops); | ||
| 307 | |||
| 308 | return ret; | ||
| 309 | } | 305 | } |
| 310 | 306 | ||
| 311 | static const struct file_operations stack_trace_fops = { | 307 | static const struct file_operations stack_trace_fops = { |
| 312 | .open = stack_trace_open, | 308 | .open = stack_trace_open, |
| 313 | .read = seq_read, | 309 | .read = seq_read, |
| 314 | .llseek = seq_lseek, | 310 | .llseek = seq_lseek, |
| 311 | .release = seq_release, | ||
| 315 | }; | 312 | }; |
| 316 | 313 | ||
| 317 | int | 314 | int |
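
Collapsing stack_trace_open() is cosmetic, but the added .release is a leak fix: seq_open() allocates a struct seq_file on every open, and without a matching seq_release() it was never freed. A user-space analogue of the paired open/release discipline:

    #include <stdlib.h>

    /* User-space analogue: an open that allocates must be paired
     * with a release that frees, or every open leaks. */
    struct ops {
        void *(*open)(void);
        void  (*release)(void *);
    };

    static void *seq_open_like(void) { return malloc(64); }
    static void seq_release_like(void *p) { free(p); }

    static const struct ops example_ops = {
        .open    = seq_open_like,
        .release = seq_release_like,   /* the field this hunk adds */
    };

    int main(void)
    {
        void *h = example_ops.open();

        example_ops.release(h);   /* without this: one leak per open */
        return 0;
    }
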
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index e66f5e493342..aea321c82fa0 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c | |||
| @@ -73,7 +73,7 @@ static struct rb_node *release_next(struct rb_node *node) | |||
| 73 | } | 73 | } |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | static void reset_stat_session(struct stat_session *session) | 76 | static void __reset_stat_session(struct stat_session *session) |
| 77 | { | 77 | { |
| 78 | struct rb_node *node = session->stat_root.rb_node; | 78 | struct rb_node *node = session->stat_root.rb_node; |
| 79 | 79 | ||
| @@ -83,10 +83,17 @@ static void reset_stat_session(struct stat_session *session) | |||
| 83 | session->stat_root = RB_ROOT; | 83 | session->stat_root = RB_ROOT; |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | static void reset_stat_session(struct stat_session *session) | ||
| 87 | { | ||
| 88 | mutex_lock(&session->stat_mutex); | ||
| 89 | __reset_stat_session(session); | ||
| 90 | mutex_unlock(&session->stat_mutex); | ||
| 91 | } | ||
| 92 | |||
| 86 | static void destroy_session(struct stat_session *session) | 93 | static void destroy_session(struct stat_session *session) |
| 87 | { | 94 | { |
| 88 | debugfs_remove(session->file); | 95 | debugfs_remove(session->file); |
| 89 | reset_stat_session(session); | 96 | __reset_stat_session(session); |
| 90 | mutex_destroy(&session->stat_mutex); | 97 | mutex_destroy(&session->stat_mutex); |
| 91 | kfree(session); | 98 | kfree(session); |
| 92 | } | 99 | } |
| @@ -150,7 +157,7 @@ static int stat_seq_init(struct stat_session *session) | |||
| 150 | int i; | 157 | int i; |
| 151 | 158 | ||
| 152 | mutex_lock(&session->stat_mutex); | 159 | mutex_lock(&session->stat_mutex); |
| 153 | reset_stat_session(session); | 160 | __reset_stat_session(session); |
| 154 | 161 | ||
| 155 | if (!ts->stat_cmp) | 162 | if (!ts->stat_cmp) |
| 156 | ts->stat_cmp = dummy_cmp; | 163 | ts->stat_cmp = dummy_cmp; |
| @@ -183,7 +190,7 @@ exit: | |||
| 183 | return ret; | 190 | return ret; |
| 184 | 191 | ||
| 185 | exit_free_rbtree: | 192 | exit_free_rbtree: |
| 186 | reset_stat_session(session); | 193 | __reset_stat_session(session); |
| 187 | mutex_unlock(&session->stat_mutex); | 194 | mutex_unlock(&session->stat_mutex); |
| 188 | return ret; | 195 | return ret; |
| 189 | } | 196 | } |
| @@ -250,16 +257,21 @@ static const struct seq_operations trace_stat_seq_ops = { | |||
| 250 | static int tracing_stat_open(struct inode *inode, struct file *file) | 257 | static int tracing_stat_open(struct inode *inode, struct file *file) |
| 251 | { | 258 | { |
| 252 | int ret; | 259 | int ret; |
| 253 | 260 | struct seq_file *m; | |
| 254 | struct stat_session *session = inode->i_private; | 261 | struct stat_session *session = inode->i_private; |
| 255 | 262 | ||
| 263 | ret = stat_seq_init(session); | ||
| 264 | if (ret) | ||
| 265 | return ret; | ||
| 266 | |||
| 256 | ret = seq_open(file, &trace_stat_seq_ops); | 267 | ret = seq_open(file, &trace_stat_seq_ops); |
| 257 | if (!ret) { | 268 | if (ret) { |
| 258 | struct seq_file *m = file->private_data; | 269 | reset_stat_session(session); |
| 259 | m->private = session; | 270 | return ret; |
| 260 | ret = stat_seq_init(session); | ||
| 261 | } | 271 | } |
| 262 | 272 | ||
| 273 | m = file->private_data; | ||
| 274 | m->private = session; | ||
| 263 | return ret; | 275 | return ret; |
| 264 | } | 276 | } |
| 265 | 277 | ||
| @@ -270,11 +282,9 @@ static int tracing_stat_release(struct inode *i, struct file *f) | |||
| 270 | { | 282 | { |
| 271 | struct stat_session *session = i->i_private; | 283 | struct stat_session *session = i->i_private; |
| 272 | 284 | ||
| 273 | mutex_lock(&session->stat_mutex); | ||
| 274 | reset_stat_session(session); | 285 | reset_stat_session(session); |
| 275 | mutex_unlock(&session->stat_mutex); | ||
| 276 | 286 | ||
| 277 | return 0; | 287 | return seq_release(i, f); |
| 278 | } | 288 | } |
| 279 | 289 | ||
| 280 | static const struct file_operations tracing_stat_fops = { | 290 | static const struct file_operations tracing_stat_fops = { |
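
The split follows the kernel's double-underscore locking convention: __reset_stat_session() expects stat_mutex to be held by the caller (or the session to be unreachable, as in destroy_session()), while reset_stat_session() takes the mutex itself for callers such as tracing_stat_release(); tracing_stat_open() is also reordered so a seq_open() failure unwinds the freshly built stats. A minimal pthread illustration of the __locked/unlocked helper pairing:

    #include <pthread.h>

    static pthread_mutex_t stat_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int stat_data;

    /* Convention mirrored from the kernel: the __ variant assumes
     * the caller holds the lock; the plain variant takes it. */
    static void __reset_stat(void)
    {
        stat_data = 0;            /* stat_mutex held by the caller */
    }

    static void reset_stat(void)
    {
        pthread_mutex_lock(&stat_mutex);
        __reset_stat();
        pthread_mutex_unlock(&stat_mutex);
    }

    static void init_stat(void)
    {
        pthread_mutex_lock(&stat_mutex);
        __reset_stat();           /* already locked: no self-deadlock */
        /* ...rebuild the data under the same critical section... */
        stat_data = 1;
        pthread_mutex_unlock(&stat_mutex);
    }

    int main(void)
    {
        init_stat();
        reset_stat();
        return 0;
    }
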
